1 | /* |
2 | * Disk Array driver for HP Smart Array SAS controllers |
3 | * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. |
4 | * |
5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; version 2 of the License. |
8 | * |
9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
12 | * NON INFRINGEMENT. See the GNU General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | * |
18 | * Questions/Comments/Bugfixes to iss_storagedev@hp.com |
19 | * |
20 | */ |
21 | |
22 | #include <linux/module.h> |
23 | #include <linux/interrupt.h> |
24 | #include <linux/types.h> |
25 | #include <linux/pci.h> |
26 | #include <linux/kernel.h> |
27 | #include <linux/slab.h> |
28 | #include <linux/delay.h> |
29 | #include <linux/fs.h> |
30 | #include <linux/timer.h> |
31 | #include <linux/seq_file.h> |
32 | #include <linux/init.h> |
33 | #include <linux/spinlock.h> |
34 | #include <linux/smp_lock.h> |
35 | #include <linux/compat.h> |
36 | #include <linux/blktrace_api.h> |
37 | #include <linux/uaccess.h> |
38 | #include <linux/io.h> |
39 | #include <linux/dma-mapping.h> |
40 | #include <linux/completion.h> |
41 | #include <linux/moduleparam.h> |
42 | #include <scsi/scsi.h> |
43 | #include <scsi/scsi_cmnd.h> |
44 | #include <scsi/scsi_device.h> |
45 | #include <scsi/scsi_host.h> |
46 | #include <scsi/scsi_tcq.h> |
47 | #include <linux/cciss_ioctl.h> |
48 | #include <linux/string.h> |
49 | #include <linux/bitmap.h> |
50 | #include <asm/atomic.h> |
51 | #include <linux/kthread.h> |
52 | #include "hpsa_cmd.h" |
53 | #include "hpsa.h" |
54 | |
55 | /* HPSA_DRIVER_VERSION must be three byte-sized values (0-255) separated by '.' */ |
56 | #define HPSA_DRIVER_VERSION "2.0.2-1" |
57 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
58 | |
59 | /* How long to wait (in milliseconds) for board to go into simple mode */ |
60 | #define MAX_CONFIG_WAIT 30000 |
61 | #define MAX_IOCTL_CONFIG_WAIT 1000 |
62 | |
63 | /* Define how many times we will retry a command because of bus resets */ |
64 | #define MAX_CMD_RETRIES 3 |
65 | |
66 | /* Embedded module documentation macros - see modules.h */ |
67 | MODULE_AUTHOR("Hewlett-Packard Company"); |
68 | MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ |
69 | HPSA_DRIVER_VERSION); |
70 | MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); |
71 | MODULE_VERSION(HPSA_DRIVER_VERSION); |
72 | MODULE_LICENSE("GPL"); |
73 | |
74 | static int hpsa_allow_any; |
75 | module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); |
76 | MODULE_PARM_DESC(hpsa_allow_any, |
77 | "Allow hpsa driver to access unknown HP Smart Array hardware"); |
78 | |
79 | /* define the PCI info for the cards we can control */ |
80 | static const struct pci_device_id hpsa_pci_device_id[] = { |
81 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, |
82 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, |
83 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
84 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, |
85 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
86 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, |
87 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, |
88 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, |
89 | #define PCI_DEVICE_ID_HP_CISSF 0x333f |
90 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, |
91 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
92 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
93 | {0,} |
94 | }; |
95 | |
96 | MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); |
97 | |
98 | /* board_id = Subsystem Device ID & Vendor ID |
99 | * product = Marketing Name for the board |
100 | * access = Address of the struct of function pointers |
101 | */ |
102 | static struct board_type products[] = { |
103 | {0x3241103C, "Smart Array P212", &SA5_access}, |
104 | {0x3243103C, "Smart Array P410", &SA5_access}, |
105 | {0x3245103C, "Smart Array P410i", &SA5_access}, |
106 | {0x3247103C, "Smart Array P411", &SA5_access}, |
107 | {0x3249103C, "Smart Array P812", &SA5_access}, |
108 | {0x324a103C, "Smart Array P712m", &SA5_access}, |
109 | {0x324b103C, "Smart Array P711m", &SA5_access}, |
110 | {0x3233103C, "StorageWorks P1210m", &SA5_access}, |
111 | {0x333F103C, "StorageWorks P1210m", &SA5_access}, |
112 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, |
113 | }; |
114 | |
115 | static int number_of_controllers; |
116 | |
117 | static irqreturn_t do_hpsa_intr(int irq, void *dev_id); |
118 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); |
119 | static void start_io(struct ctlr_info *h); |
120 | |
121 | #ifdef CONFIG_COMPAT |
122 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); |
123 | #endif |
124 | |
125 | static void cmd_free(struct ctlr_info *h, struct CommandList *c); |
126 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); |
127 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
128 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); |
129 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
130 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
131 | int cmd_type); |
132 | |
133 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, |
134 | void (*done)(struct scsi_cmnd *)); |
135 | static void hpsa_scan_start(struct Scsi_Host *); |
136 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
137 | unsigned long elapsed_time); |
138 | static int hpsa_change_queue_depth(struct scsi_device *sdev, |
139 | int qdepth, int reason); |
140 | |
141 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
142 | static int hpsa_slave_alloc(struct scsi_device *sdev); |
143 | static void hpsa_slave_destroy(struct scsi_device *sdev); |
144 | |
145 | static ssize_t raid_level_show(struct device *dev, |
146 | struct device_attribute *attr, char *buf); |
147 | static ssize_t lunid_show(struct device *dev, |
148 | struct device_attribute *attr, char *buf); |
149 | static ssize_t unique_id_show(struct device *dev, |
150 | struct device_attribute *attr, char *buf); |
151 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); |
152 | static ssize_t host_store_rescan(struct device *dev, |
153 | struct device_attribute *attr, const char *buf, size_t count); |
154 | static int check_for_unit_attention(struct ctlr_info *h, |
155 | struct CommandList *c); |
156 | static void check_ioctl_unit_attention(struct ctlr_info *h, |
157 | struct CommandList *c); |
158 | /* performant mode helper functions */ |
159 | static void calc_bucket_map(int *bucket, int num_buckets, |
160 | int nsgs, int *bucket_map); |
161 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); |
162 | static inline u32 next_command(struct ctlr_info *h); |
163 | |
164 | static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); |
165 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); |
166 | static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); |
167 | static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); |
168 | |
169 | static struct device_attribute *hpsa_sdev_attrs[] = { |
170 | &dev_attr_raid_level, |
171 | &dev_attr_lunid, |
172 | &dev_attr_unique_id, |
173 | NULL, |
174 | }; |
175 | |
176 | static struct device_attribute *hpsa_shost_attrs[] = { |
177 | &dev_attr_rescan, |
178 | NULL, |
179 | }; |
180 | |
181 | static struct scsi_host_template hpsa_driver_template = { |
182 | .module = THIS_MODULE, |
183 | .name = "hpsa", |
184 | .proc_name = "hpsa", |
185 | .queuecommand = hpsa_scsi_queue_command, |
186 | .scan_start = hpsa_scan_start, |
187 | .scan_finished = hpsa_scan_finished, |
188 | .change_queue_depth = hpsa_change_queue_depth, |
189 | .this_id = -1, |
190 | .use_clustering = ENABLE_CLUSTERING, |
191 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, |
192 | .ioctl = hpsa_ioctl, |
193 | .slave_alloc = hpsa_slave_alloc, |
194 | .slave_destroy = hpsa_slave_destroy, |
195 | #ifdef CONFIG_COMPAT |
196 | .compat_ioctl = hpsa_compat_ioctl, |
197 | #endif |
198 | .sdev_attrs = hpsa_sdev_attrs, |
199 | .shost_attrs = hpsa_shost_attrs, |
200 | }; |
201 | |
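/* The ctlr_info pointer is stashed in the Scsi_Host's private area
 * (hostdata[0]) by hpsa_scsi_detect(); these helpers fetch it back
 * from a scsi_device or Scsi_Host.
 */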
202 | static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) |
203 | { |
204 | unsigned long *priv = shost_priv(sdev->host); |
205 | return (struct ctlr_info *) *priv; |
206 | } |
207 | |
208 | static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) |
209 | { |
210 | unsigned long *priv = shost_priv(sh); |
211 | return (struct ctlr_info *) *priv; |
212 | } |
213 | |
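/* Check a completed command for a unit attention condition; log the
 * specific additional sense code and return 1 if one is present,
 * 0 otherwise.
 */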
214 | static int check_for_unit_attention(struct ctlr_info *h, |
215 | struct CommandList *c) |
216 | { |
217 | if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) |
218 | return 0; |
219 | |
220 | switch (c->err_info->SenseInfo[12]) { |
221 | case STATE_CHANGED: |
222 | dev_warn(&h->pdev->dev, "hpsa%d: a state change " |
223 | "detected, command retried\n", h->ctlr); |
224 | break; |
225 | case LUN_FAILED: |
226 | dev_warn(&h->pdev->dev, "hpsa%d: LUN failure " |
227 | "detected, action required\n", h->ctlr); |
228 | break; |
229 | case REPORT_LUNS_CHANGED: |
230 | dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " |
231 | "changed, action required\n", h->ctlr); |
232 | /* |
233 | * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. |
234 | */ |
235 | break; |
236 | case POWER_OR_RESET: |
237 | dev_warn(&h->pdev->dev, "hpsa%d: a power on " |
238 | "or device reset detected\n", h->ctlr); |
239 | break; |
240 | case UNIT_ATTENTION_CLEARED: |
241 | dev_warn(&h->pdev->dev, "hpsa%d: unit attention " |
242 | "cleared by another initiator\n", h->ctlr); |
243 | break; |
244 | default: |
245 | dev_warn(&h->pdev->dev, "hpsa%d: unknown " |
246 | "unit attention detected\n", h->ctlr); |
247 | break; |
248 | } |
249 | return 1; |
250 | } |
251 | |
252 | static ssize_t host_store_rescan(struct device *dev, |
253 | struct device_attribute *attr, |
254 | const char *buf, size_t count) |
255 | { |
256 | struct ctlr_info *h; |
257 | struct Scsi_Host *shost = class_to_shost(dev); |
258 | h = shost_to_hba(shost); |
259 | hpsa_scan_start(h->scsi_host); |
260 | return count; |
261 | } |
262 | |
263 | /* Enqueuing and dequeuing functions for cmdlists. */ |
264 | static inline void addQ(struct hlist_head *list, struct CommandList *c) |
265 | { |
266 | hlist_add_head(&c->list, list); |
267 | } |
268 | |
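/* In performant mode, completions are read from a reply ring.  The low
 * bit of each entry toggles on every pass through the ring, and
 * h->reply_pool_wraparound tracks which polarity means "new entry";
 * an entry whose low bit doesn't match means the FIFO is empty.
 */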
269 | static inline u32 next_command(struct ctlr_info *h) |
270 | { |
271 | u32 a; |
272 | |
273 | if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) |
274 | return h->access.command_completed(h); |
275 | |
276 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { |
277 | a = *(h->reply_pool_head); /* Next cmd in ring buffer */ |
278 | (h->reply_pool_head)++; |
279 | h->commands_outstanding--; |
280 | } else { |
281 | a = FIFO_EMPTY; |
282 | } |
283 | /* Check for wraparound */ |
284 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { |
285 | h->reply_pool_head = h->reply_pool; |
286 | h->reply_pool_wraparound ^= 1; |
287 | } |
288 | return a; |
289 | } |
290 | |
291 | /* set_performant_mode: Modify the tag for cciss performant |
292 | * set bit 0 for pull model, bits 3-1 for block fetch |
293 | * register number |
294 | */ |
295 | static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) |
296 | { |
297 | if (likely(h->transMethod == CFGTBL_Trans_Performant)) |
298 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); |
299 | } |
300 | |
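/* Tag the command for performant mode, add it to h->reqQ under
 * h->lock, and submit any pending requests to the controller.
 */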
301 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, |
302 | struct CommandList *c) |
303 | { |
304 | unsigned long flags; |
305 | |
306 | set_performant_mode(h, c); |
307 | spin_lock_irqsave(&h->lock, flags); |
308 | addQ(&h->reqQ, c); |
309 | h->Qdepth++; |
310 | start_io(h); |
311 | spin_unlock_irqrestore(&h->lock, flags); |
312 | } |
313 | |
314 | static inline void removeQ(struct CommandList *c) |
315 | { |
316 | if (WARN_ON(hlist_unhashed(&c->list))) |
317 | return; |
318 | hlist_del_init(&c->list); |
319 | } |
320 | |
321 | static inline int is_hba_lunid(unsigned char scsi3addr[]) |
322 | { |
323 | return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; |
324 | } |
325 | |
326 | static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) |
327 | { |
328 | return (scsi3addr[3] & 0xC0) == 0x40; |
329 | } |
330 | |
331 | static inline int is_scsi_rev_5(struct ctlr_info *h) |
332 | { |
333 | if (!h->hba_inquiry_data) |
334 | return 0; |
335 | if ((h->hba_inquiry_data[2] & 0x07) == 5) |
336 | return 1; |
337 | return 0; |
338 | } |
339 | |
340 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
341 | "UNKNOWN" |
342 | }; |
343 | #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) |
344 | |
345 | static ssize_t raid_level_show(struct device *dev, |
346 | struct device_attribute *attr, char *buf) |
347 | { |
348 | ssize_t l = 0; |
349 | unsigned char rlevel; |
350 | struct ctlr_info *h; |
351 | struct scsi_device *sdev; |
352 | struct hpsa_scsi_dev_t *hdev; |
353 | unsigned long flags; |
354 | |
355 | sdev = to_scsi_device(dev); |
356 | h = sdev_to_hba(sdev); |
357 | spin_lock_irqsave(&h->lock, flags); |
358 | hdev = sdev->hostdata; |
359 | if (!hdev) { |
360 | spin_unlock_irqrestore(&h->lock, flags); |
361 | return -ENODEV; |
362 | } |
363 | |
364 | /* Is this even a logical drive? */ |
365 | if (!is_logical_dev_addr_mode(hdev->scsi3addr)) { |
366 | spin_unlock_irqrestore(&h->lock, flags); |
367 | l = snprintf(buf, PAGE_SIZE, "N/A\n"); |
368 | return l; |
369 | } |
370 | |
371 | rlevel = hdev->raid_level; |
372 | spin_unlock_irqrestore(&h->lock, flags); |
373 | if (rlevel > RAID_UNKNOWN) |
374 | rlevel = RAID_UNKNOWN; |
375 | l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); |
376 | return l; |
377 | } |
378 | |
379 | static ssize_t lunid_show(struct device *dev, |
380 | struct device_attribute *attr, char *buf) |
381 | { |
382 | struct ctlr_info *h; |
383 | struct scsi_device *sdev; |
384 | struct hpsa_scsi_dev_t *hdev; |
385 | unsigned long flags; |
386 | unsigned char lunid[8]; |
387 | |
388 | sdev = to_scsi_device(dev); |
389 | h = sdev_to_hba(sdev); |
390 | spin_lock_irqsave(&h->lock, flags); |
391 | hdev = sdev->hostdata; |
392 | if (!hdev) { |
393 | spin_unlock_irqrestore(&h->lock, flags); |
394 | return -ENODEV; |
395 | } |
396 | memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); |
397 | spin_unlock_irqrestore(&h->lock, flags); |
398 | return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", |
399 | lunid[0], lunid[1], lunid[2], lunid[3], |
400 | lunid[4], lunid[5], lunid[6], lunid[7]); |
401 | } |
402 | |
403 | static ssize_t unique_id_show(struct device *dev, |
404 | struct device_attribute *attr, char *buf) |
405 | { |
406 | struct ctlr_info *h; |
407 | struct scsi_device *sdev; |
408 | struct hpsa_scsi_dev_t *hdev; |
409 | unsigned long flags; |
410 | unsigned char sn[16]; |
411 | |
412 | sdev = to_scsi_device(dev); |
413 | h = sdev_to_hba(sdev); |
414 | spin_lock_irqsave(&h->lock, flags); |
415 | hdev = sdev->hostdata; |
416 | if (!hdev) { |
417 | spin_unlock_irqrestore(&h->lock, flags); |
418 | return -ENODEV; |
419 | } |
420 | memcpy(sn, hdev->device_id, sizeof(sn)); |
421 | spin_unlock_irqrestore(&h->lock, flags); |
422 | return snprintf(buf, 16 * 2 + 2, |
423 | "%02X%02X%02X%02X%02X%02X%02X%02X" |
424 | "%02X%02X%02X%02X%02X%02X%02X%02X\n", |
425 | sn[0], sn[1], sn[2], sn[3], |
426 | sn[4], sn[5], sn[6], sn[7], |
427 | sn[8], sn[9], sn[10], sn[11], |
428 | sn[12], sn[13], sn[14], sn[15]); |
429 | } |
430 | |
431 | static int hpsa_find_target_lun(struct ctlr_info *h, |
432 | unsigned char scsi3addr[], int bus, int *target, int *lun) |
433 | { |
434 | /* finds an unused bus, target, lun for a new physical device |
435 | * assumes h->devlock is held |
436 | */ |
437 | int i, found = 0; |
438 | DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA); |
439 | |
440 | memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3); |
441 | |
442 | for (i = 0; i < h->ndevices; i++) { |
443 | if (h->dev[i]->bus == bus && h->dev[i]->target != -1) |
444 | set_bit(h->dev[i]->target, lun_taken); |
445 | } |
446 | |
447 | for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) { |
448 | if (!test_bit(i, lun_taken)) { |
449 | /* *bus = 1; */ |
450 | *target = i; |
451 | *lun = 0; |
452 | found = 1; |
453 | break; |
454 | } |
455 | } |
456 | return !found; |
457 | } |
458 | |
459 | /* Add an entry into h->dev[] array. */ |
460 | static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, |
461 | struct hpsa_scsi_dev_t *device, |
462 | struct hpsa_scsi_dev_t *added[], int *nadded) |
463 | { |
464 | /* assumes h->devlock is held */ |
465 | int n = h->ndevices; |
466 | int i; |
467 | unsigned char addr1[8], addr2[8]; |
468 | struct hpsa_scsi_dev_t *sd; |
469 | |
470 | if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) { |
471 | dev_err(&h->pdev->dev, "too many devices, some will be " |
472 | "inaccessible.\n"); |
473 | return -1; |
474 | } |
475 | |
476 | /* physical devices do not have lun or target assigned until now. */ |
477 | if (device->lun != -1) |
478 | /* Logical device, lun is already assigned. */ |
479 | goto lun_assigned; |
480 | |
481 | /* If this device is a non-zero lun of a multi-lun device, |
482 |  * byte 4 of the 8-byte LUN addr will contain the logical |
483 |  * unit number, zero otherwise. |
484 | */ |
485 | if (device->scsi3addr[4] == 0) { |
486 | /* This is not a non-zero lun of a multi-lun device */ |
487 | if (hpsa_find_target_lun(h, device->scsi3addr, |
488 | device->bus, &device->target, &device->lun) != 0) |
489 | return -1; |
490 | goto lun_assigned; |
491 | } |
492 | |
493 | /* This is a non-zero lun of a multi-lun device. |
494 | * Search through our list and find the device which |
495 | * has the same 8 byte LUN address, excepting byte 4. |
496 | * Assign the same bus and target for this new LUN. |
497 | * Use the logical unit number from the firmware. |
498 | */ |
499 | memcpy(addr1, device->scsi3addr, 8); |
500 | addr1[4] = 0; |
501 | for (i = 0; i < n; i++) { |
502 | sd = h->dev[i]; |
503 | memcpy(addr2, sd->scsi3addr, 8); |
504 | addr2[4] = 0; |
505 | /* differ only in byte 4? */ |
506 | if (memcmp(addr1, addr2, 8) == 0) { |
507 | device->bus = sd->bus; |
508 | device->target = sd->target; |
509 | device->lun = device->scsi3addr[4]; |
510 | break; |
511 | } |
512 | } |
513 | if (device->lun == -1) { |
514 | dev_warn(&h->pdev->dev, "physical device with no LUN=0," |
515 | " suspect firmware bug or unsupported hardware " |
516 | "configuration.\n"); |
517 | return -1; |
518 | } |
519 | |
520 | lun_assigned: |
521 | |
522 | h->dev[n] = device; |
523 | h->ndevices++; |
524 | added[*nadded] = device; |
525 | (*nadded)++; |
526 | |
527 | /* initially, (before registering with scsi layer) we don't |
528 | * know our hostno and we don't want to print anything first |
529 | * time anyway (the scsi layer's inquiries will show that info) |
530 | */ |
531 | /* if (hostno != -1) */ |
532 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", |
533 | scsi_device_type(device->devtype), hostno, |
534 | device->bus, device->target, device->lun); |
535 | return 0; |
536 | } |
537 | |
538 | /* Replace an entry from h->dev[] array. */ |
539 | static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, |
540 | int entry, struct hpsa_scsi_dev_t *new_entry, |
541 | struct hpsa_scsi_dev_t *added[], int *nadded, |
542 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
543 | { |
544 | /* assumes h->devlock is held */ |
545 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); |
546 | removed[*nremoved] = h->dev[entry]; |
547 | (*nremoved)++; |
548 | h->dev[entry] = new_entry; |
549 | added[*nadded] = new_entry; |
550 | (*nadded)++; |
551 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", |
552 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, |
553 | new_entry->target, new_entry->lun); |
554 | } |
555 | |
556 | /* Remove an entry from h->dev[] array. */ |
557 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, |
558 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
559 | { |
560 | /* assumes h->devlock is held */ |
561 | int i; |
562 | struct hpsa_scsi_dev_t *sd; |
563 | |
564 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); |
565 | |
566 | sd = h->dev[entry]; |
567 | removed[*nremoved] = h->dev[entry]; |
568 | (*nremoved)++; |
569 | |
570 | for (i = entry; i < h->ndevices-1; i++) |
571 | h->dev[i] = h->dev[i+1]; |
572 | h->ndevices--; |
573 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", |
574 | scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, |
575 | sd->lun); |
576 | } |
577 | |
578 | #define SCSI3ADDR_EQ(a, b) ( \ |
579 | (a)[7] == (b)[7] && \ |
580 | (a)[6] == (b)[6] && \ |
581 | (a)[5] == (b)[5] && \ |
582 | (a)[4] == (b)[4] && \ |
583 | (a)[3] == (b)[3] && \ |
584 | (a)[2] == (b)[2] && \ |
585 | (a)[1] == (b)[1] && \ |
586 | (a)[0] == (b)[0]) |
587 | |
588 | static void fixup_botched_add(struct ctlr_info *h, |
589 | struct hpsa_scsi_dev_t *added) |
590 | { |
591 | /* called when scsi_add_device fails in order to re-adjust |
592 | * h->dev[] to match the mid layer's view. |
593 | */ |
594 | unsigned long flags; |
595 | int i, j; |
596 | |
597 | spin_lock_irqsave(&h->lock, flags); |
598 | for (i = 0; i < h->ndevices; i++) { |
599 | if (h->dev[i] == added) { |
600 | for (j = i; j < h->ndevices-1; j++) |
601 | h->dev[j] = h->dev[j+1]; |
602 | h->ndevices--; |
603 | break; |
604 | } |
605 | } |
606 | spin_unlock_irqrestore(&h->lock, flags); |
607 | kfree(added); |
608 | } |
609 | |
610 | static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, |
611 | struct hpsa_scsi_dev_t *dev2) |
612 | { |
613 | if ((is_logical_dev_addr_mode(dev1->scsi3addr) || |
614 | (dev1->lun != -1 && dev2->lun != -1)) && |
615 | dev1->devtype != 0x0C) |
616 | return (memcmp(dev1, dev2, sizeof(*dev1)) == 0); |
617 | |
618 | /* we compare everything except lun and target as these |
619 | * are not yet assigned. Compare parts likely |
620 | * to differ first |
621 | */ |
622 | if (memcmp(dev1->scsi3addr, dev2->scsi3addr, |
623 | sizeof(dev1->scsi3addr)) != 0) |
624 | return 0; |
625 | if (memcmp(dev1->device_id, dev2->device_id, |
626 | sizeof(dev1->device_id)) != 0) |
627 | return 0; |
628 | if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) |
629 | return 0; |
630 | if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) |
631 | return 0; |
632 | if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0) |
633 | return 0; |
634 | if (dev1->devtype != dev2->devtype) |
635 | return 0; |
636 | if (dev1->raid_level != dev2->raid_level) |
637 | return 0; |
638 | if (dev1->bus != dev2->bus) |
639 | return 0; |
640 | return 1; |
641 | } |
642 | |
643 | /* Find needle in haystack. If exact match found, return DEVICE_SAME, |
644 | * and return needle location in *index. If scsi3addr matches, but not |
645 | * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle |
646 | * location in *index. If needle not found, return DEVICE_NOT_FOUND. |
647 | */ |
648 | static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, |
649 | struct hpsa_scsi_dev_t *haystack[], int haystack_size, |
650 | int *index) |
651 | { |
652 | int i; |
653 | #define DEVICE_NOT_FOUND 0 |
654 | #define DEVICE_CHANGED 1 |
655 | #define DEVICE_SAME 2 |
656 | for (i = 0; i < haystack_size; i++) { |
657 | if (haystack[i] == NULL) /* previously removed. */ |
658 | continue; |
659 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { |
660 | *index = i; |
661 | if (device_is_the_same(needle, haystack[i])) |
662 | return DEVICE_SAME; |
663 | else |
664 | return DEVICE_CHANGED; |
665 | } |
666 | } |
667 | *index = -1; |
668 | return DEVICE_NOT_FOUND; |
669 | } |
670 | |
671 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, |
672 | struct hpsa_scsi_dev_t *sd[], int nsds) |
673 | { |
674 | /* sd contains scsi3 addresses and devtypes, and inquiry |
675 | * data. This function takes what's in sd to be the current |
676 | * reality and updates h->dev[] to reflect that reality. |
677 | */ |
678 | int i, entry, device_change, changes = 0; |
679 | struct hpsa_scsi_dev_t *csd; |
680 | unsigned long flags; |
681 | struct hpsa_scsi_dev_t **added, **removed; |
682 | int nadded, nremoved; |
683 | struct Scsi_Host *sh = NULL; |
684 | |
685 | added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA, |
686 | GFP_KERNEL); |
687 | removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA, |
688 | GFP_KERNEL); |
689 | |
690 | if (!added || !removed) { |
691 | dev_warn(&h->pdev->dev, "out of memory in " |
692 | "adjust_hpsa_scsi_table\n"); |
693 | goto free_and_out; |
694 | } |
695 | |
696 | spin_lock_irqsave(&h->devlock, flags); |
697 | |
698 | /* find any devices in h->dev[] that are not in |
699 | * sd[] and remove them from h->dev[], and for any |
700 | * devices which have changed, remove the old device |
701 | * info and add the new device info. |
702 | */ |
703 | i = 0; |
704 | nremoved = 0; |
705 | nadded = 0; |
706 | while (i < h->ndevices) { |
707 | csd = h->dev[i]; |
708 | device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); |
709 | if (device_change == DEVICE_NOT_FOUND) { |
710 | changes++; |
711 | hpsa_scsi_remove_entry(h, hostno, i, |
712 | removed, &nremoved); |
713 | continue; /* remove ^^^, hence i not incremented */ |
714 | } else if (device_change == DEVICE_CHANGED) { |
715 | changes++; |
716 | hpsa_scsi_replace_entry(h, hostno, i, sd[entry], |
717 | added, &nadded, removed, &nremoved); |
718 | /* Set it to NULL to prevent it from being freed |
719 | * at the bottom of hpsa_update_scsi_devices() |
720 | */ |
721 | sd[entry] = NULL; |
722 | } |
723 | i++; |
724 | } |
725 | |
726 | /* Now, make sure every device listed in sd[] is also |
727 | * listed in h->dev[], adding them if they aren't found |
728 | */ |
729 | |
730 | for (i = 0; i < nsds; i++) { |
731 | if (!sd[i]) /* if already added above. */ |
732 | continue; |
733 | device_change = hpsa_scsi_find_entry(sd[i], h->dev, |
734 | h->ndevices, &entry); |
735 | if (device_change == DEVICE_NOT_FOUND) { |
736 | changes++; |
737 | if (hpsa_scsi_add_entry(h, hostno, sd[i], |
738 | added, &nadded) != 0) |
739 | break; |
740 | sd[i] = NULL; /* prevent from being freed later. */ |
741 | } else if (device_change == DEVICE_CHANGED) { |
742 | /* should never happen... */ |
743 | changes++; |
744 | dev_warn(&h->pdev->dev, |
745 | "device unexpectedly changed.\n"); |
746 | /* but if it does happen, we just ignore that device */ |
747 | } |
748 | } |
749 | spin_unlock_irqrestore(&h->devlock, flags); |
750 | |
751 | /* Don't notify the scsi mid layer of any changes the first time through |
752 |  * (or if there are no changes); scsi_scan_host will do it later the |
753 |  * first time through. |
754 | */ |
755 | if (hostno == -1 || !changes) |
756 | goto free_and_out; |
757 | |
758 | sh = h->scsi_host; |
759 | /* Notify scsi mid layer of any removed devices */ |
760 | for (i = 0; i < nremoved; i++) { |
761 | struct scsi_device *sdev = |
762 | scsi_device_lookup(sh, removed[i]->bus, |
763 | removed[i]->target, removed[i]->lun); |
764 | if (sdev != NULL) { |
765 | scsi_remove_device(sdev); |
766 | scsi_device_put(sdev); |
767 | } else { |
768 | /* We don't expect to get here. |
769 |  * Future cmds to this device will get a selection |
770 |  * timeout as if the device were gone. |
771 | */ |
772 | dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " |
773 | " for removal.", hostno, removed[i]->bus, |
774 | removed[i]->target, removed[i]->lun); |
775 | } |
776 | kfree(removed[i]); |
777 | removed[i] = NULL; |
778 | } |
779 | |
780 | /* Notify scsi mid layer of any added devices */ |
781 | for (i = 0; i < nadded; i++) { |
782 | if (scsi_add_device(sh, added[i]->bus, |
783 | added[i]->target, added[i]->lun) == 0) |
784 | continue; |
785 | dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " |
786 | "device not added.\n", hostno, added[i]->bus, |
787 | added[i]->target, added[i]->lun); |
788 | /* now we have to remove it from h->dev, |
789 | * since it didn't get added to scsi mid layer |
790 | */ |
791 | fixup_botched_add(h, added[i]); |
792 | } |
793 | |
794 | free_and_out: |
795 | kfree(added); |
796 | kfree(removed); |
797 | } |
798 | |
799 | /* |
800 |  * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t * |
801 |  * Assumes h->devlock is held. |
802 | */ |
803 | static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, |
804 | int bus, int target, int lun) |
805 | { |
806 | int i; |
807 | struct hpsa_scsi_dev_t *sd; |
808 | |
809 | for (i = 0; i < h->ndevices; i++) { |
810 | sd = h->dev[i]; |
811 | if (sd->bus == bus && sd->target == target && sd->lun == lun) |
812 | return sd; |
813 | } |
814 | return NULL; |
815 | } |
816 | |
817 | /* link sdev->hostdata to our per-device structure. */ |
818 | static int hpsa_slave_alloc(struct scsi_device *sdev) |
819 | { |
820 | struct hpsa_scsi_dev_t *sd; |
821 | unsigned long flags; |
822 | struct ctlr_info *h; |
823 | |
824 | h = sdev_to_hba(sdev); |
825 | spin_lock_irqsave(&h->devlock, flags); |
826 | sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), |
827 | sdev_id(sdev), sdev->lun); |
828 | if (sd != NULL) |
829 | sdev->hostdata = sd; |
830 | spin_unlock_irqrestore(&h->devlock, flags); |
831 | return 0; |
832 | } |
833 | |
834 | static void hpsa_slave_destroy(struct scsi_device *sdev) |
835 | { |
836 | /* nothing to do. */ |
837 | } |
838 | |
839 | static void hpsa_scsi_setup(struct ctlr_info *h) |
840 | { |
841 | h->ndevices = 0; |
842 | h->scsi_host = NULL; |
843 | spin_lock_init(&h->devlock); |
844 | } |
845 | |
846 | static void hpsa_free_sg_chain_blocks(struct ctlr_info *h) |
847 | { |
848 | int i; |
849 | |
850 | if (!h->cmd_sg_list) |
851 | return; |
852 | for (i = 0; i < h->nr_cmds; i++) { |
853 | kfree(h->cmd_sg_list[i]); |
854 | h->cmd_sg_list[i] = NULL; |
855 | } |
856 | kfree(h->cmd_sg_list); |
857 | h->cmd_sg_list = NULL; |
858 | } |
859 | |
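/* Allocate a chain block of h->chainsize SG descriptors for every
 * command slot, used when a request needs more SG entries than fit
 * in the CommandList itself.
 */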
860 | static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h) |
861 | { |
862 | int i; |
863 | |
864 | if (h->chainsize <= 0) |
865 | return 0; |
866 | |
867 | h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds, |
868 | GFP_KERNEL); |
869 | if (!h->cmd_sg_list) |
870 | return -ENOMEM; |
871 | for (i = 0; i < h->nr_cmds; i++) { |
872 | h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) * |
873 | h->chainsize, GFP_KERNEL); |
874 | if (!h->cmd_sg_list[i]) |
875 | goto clean; |
876 | } |
877 | return 0; |
878 | |
879 | clean: |
880 | hpsa_free_sg_chain_blocks(h); |
881 | return -ENOMEM; |
882 | } |
883 | |
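/* When a request needs more scatter-gather entries than fit in the
 * CommandList, the last embedded SG slot is turned into a chain
 * descriptor pointing at a separately allocated block holding the
 * remaining entries (DMA-mapped here, unmapped below).
 */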
884 | static void hpsa_map_sg_chain_block(struct ctlr_info *h, |
885 | struct CommandList *c) |
886 | { |
887 | struct SGDescriptor *chain_sg, *chain_block; |
888 | u64 temp64; |
889 | |
890 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
891 | chain_block = h->cmd_sg_list[c->cmdindex]; |
892 | chain_sg->Ext = HPSA_SG_CHAIN; |
893 | chain_sg->Len = sizeof(*chain_sg) * |
894 | (c->Header.SGTotal - h->max_cmd_sg_entries); |
895 | temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len, |
896 | PCI_DMA_TODEVICE); |
897 | chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL); |
898 | chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL); |
899 | } |
900 | |
901 | static void hpsa_unmap_sg_chain_block(struct ctlr_info *h, |
902 | struct CommandList *c) |
903 | { |
904 | struct SGDescriptor *chain_sg; |
905 | union u64bit temp64; |
906 | |
907 | if (c->Header.SGTotal <= h->max_cmd_sg_entries) |
908 | return; |
909 | |
910 | chain_sg = &c->SG[h->max_cmd_sg_entries - 1]; |
911 | temp64.val32.lower = chain_sg->Addr.lower; |
912 | temp64.val32.upper = chain_sg->Addr.upper; |
913 | pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); |
914 | } |
915 | |
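/* Translate the controller's ErrorInfo for a completed command into
 * SCSI midlayer result codes, copy back any sense data, and hand the
 * command to scsi_done.
 */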
916 | static void complete_scsi_command(struct CommandList *cp, |
917 | int timeout, u32 tag) |
918 | { |
919 | struct scsi_cmnd *cmd; |
920 | struct ctlr_info *h; |
921 | struct ErrorInfo *ei; |
922 | |
923 | unsigned char sense_key; |
924 | unsigned char asc; /* additional sense code */ |
925 | unsigned char ascq; /* additional sense code qualifier */ |
926 | |
927 | ei = cp->err_info; |
928 | cmd = (struct scsi_cmnd *) cp->scsi_cmd; |
929 | h = cp->h; |
930 | |
931 | scsi_dma_unmap(cmd); /* undo the DMA mappings */ |
932 | if (cp->Header.SGTotal > h->max_cmd_sg_entries) |
933 | hpsa_unmap_sg_chain_block(h, cp); |
934 | |
935 | cmd->result = (DID_OK << 16); /* host byte */ |
936 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
937 | cmd->result |= ei->ScsiStatus; |
938 | |
939 | /* copy the sense data whether we need to or not. */ |
940 | memcpy(cmd->sense_buffer, ei->SenseInfo, |
941 | ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? |
942 | SCSI_SENSE_BUFFERSIZE : |
943 | ei->SenseLen); |
944 | scsi_set_resid(cmd, ei->ResidualCnt); |
945 | |
946 | if (ei->CommandStatus == 0) { |
947 | cmd->scsi_done(cmd); |
948 | cmd_free(h, cp); |
949 | return; |
950 | } |
951 | |
952 | /* an error has occurred */ |
953 | switch (ei->CommandStatus) { |
954 | |
955 | case CMD_TARGET_STATUS: |
956 | if (ei->ScsiStatus) { |
957 | /* Get sense key */ |
958 | sense_key = 0xf & ei->SenseInfo[2]; |
959 | /* Get additional sense code */ |
960 | asc = ei->SenseInfo[12]; |
961 | /* Get additional sense code qualifier */ |
962 | ascq = ei->SenseInfo[13]; |
963 | } |
964 | |
965 | if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { |
966 | if (check_for_unit_attention(h, cp)) { |
967 | cmd->result = DID_SOFT_ERROR << 16; |
968 | break; |
969 | } |
970 | if (sense_key == ILLEGAL_REQUEST) { |
971 | /* |
972 | * SCSI REPORT_LUNS is commonly unsupported on |
973 | * Smart Array. Suppress noisy complaint. |
974 | */ |
975 | if (cp->Request.CDB[0] == REPORT_LUNS) |
976 | break; |
977 | |
978 | /* If ASC/ASCQ indicate a Logical Unit |
979 |  * Not Supported condition, report it. |
980 | */ |
981 | if ((asc == 0x25) && (ascq == 0x0)) { |
982 | dev_warn(&h->pdev->dev, "cp %p " |
983 | "has check condition\n", cp); |
984 | break; |
985 | } |
986 | } |
987 | |
988 | if (sense_key == NOT_READY) { |
989 | /* If Sense is Not Ready, Logical Unit |
990 | * Not ready, Manual Intervention |
991 | * required |
992 | */ |
993 | if ((asc == 0x04) && (ascq == 0x03)) { |
994 | dev_warn(&h->pdev->dev, "cp %p " |
995 | "has check condition: unit " |
996 | "not ready, manual " |
997 | "intervention required\n", cp); |
998 | break; |
999 | } |
1000 | } |
1001 | if (sense_key == ABORTED_COMMAND) { |
1002 | /* Aborted command is retryable */ |
1003 | dev_warn(&h->pdev->dev, "cp %p " |
1004 | "has check condition: aborted command: " |
1005 | "ASC: 0x%x, ASCQ: 0x%x\n", |
1006 | cp, asc, ascq); |
1007 | cmd->result = DID_SOFT_ERROR << 16; |
1008 | break; |
1009 | } |
1010 | /* Must be some other type of check condition */ |
1011 | dev_warn(&h->pdev->dev, "cp %p has check condition: " |
1012 | "unknown type: " |
1013 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " |
1014 | "Returning result: 0x%x, " |
1015 | "cmd=[%02x %02x %02x %02x %02x " |
1016 | "%02x %02x %02x %02x %02x %02x " |
1017 | "%02x %02x %02x %02x %02x]\n", |
1018 | cp, sense_key, asc, ascq, |
1019 | cmd->result, |
1020 | cmd->cmnd[0], cmd->cmnd[1], |
1021 | cmd->cmnd[2], cmd->cmnd[3], |
1022 | cmd->cmnd[4], cmd->cmnd[5], |
1023 | cmd->cmnd[6], cmd->cmnd[7], |
1024 | cmd->cmnd[8], cmd->cmnd[9], |
1025 | cmd->cmnd[10], cmd->cmnd[11], |
1026 | cmd->cmnd[12], cmd->cmnd[13], |
1027 | cmd->cmnd[14], cmd->cmnd[15]); |
1028 | break; |
1029 | } |
1030 | |
1031 | |
1032 | /* Problem was not a check condition |
1033 | * Pass it up to the upper layers... |
1034 | */ |
1035 | if (ei->ScsiStatus) { |
1036 | dev_warn(&h->pdev->dev, "cp %p has status 0x%x " |
1037 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " |
1038 | "Returning result: 0x%x\n", |
1039 | cp, ei->ScsiStatus, |
1040 | sense_key, asc, ascq, |
1041 | cmd->result); |
1042 | } else { /* scsi status is zero??? How??? */ |
1043 | dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " |
1044 | "Returning no connection.\n", cp), |
1045 | |
1046 | /* Ordinarily, this case should never happen, |
1047 | * but there is a bug in some released firmware |
1048 | * revisions that allows it to happen if, for |
1049 | * example, a 4100 backplane loses power and |
1050 | * the tape drive is in it. We assume that |
1051 | * it's a fatal error of some kind because we |
1052 | * can't show that it wasn't. We will make it |
1053 | * look like selection timeout since that is |
1054 | * the most common reason for this to occur, |
1055 | * and it's severe enough. |
1056 | */ |
1057 | |
1058 | cmd->result = DID_NO_CONNECT << 16; |
1059 | } |
1060 | break; |
1061 | |
1062 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
1063 | break; |
1064 | case CMD_DATA_OVERRUN: |
1065 | dev_warn(&h->pdev->dev, "cp %p has" |
1066 | " completed with data overrun " |
1067 | "reported\n", cp); |
1068 | break; |
1069 | case CMD_INVALID: { |
1070 | /* print_bytes(cp, sizeof(*cp), 1, 0); |
1071 | print_cmd(cp); */ |
1072 | /* We get CMD_INVALID if you address a non-existent device |
1073 | * instead of a selection timeout (no response). You will |
1074 | * see this if you yank out a drive, then try to access it. |
1075 | * This is kind of a shame because it means that any other |
1076 | * CMD_INVALID (e.g. driver bug) will get interpreted as a |
1077 | * missing target. */ |
1078 | cmd->result = DID_NO_CONNECT << 16; |
1079 | } |
1080 | break; |
1081 | case CMD_PROTOCOL_ERR: |
1082 | dev_warn(&h->pdev->dev, "cp %p has " |
1083 | "protocol error \n", cp); |
1084 | break; |
1085 | case CMD_HARDWARE_ERR: |
1086 | cmd->result = DID_ERROR << 16; |
1087 | dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); |
1088 | break; |
1089 | case CMD_CONNECTION_LOST: |
1090 | cmd->result = DID_ERROR << 16; |
1091 | dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); |
1092 | break; |
1093 | case CMD_ABORTED: |
1094 | cmd->result = DID_ABORT << 16; |
1095 | dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", |
1096 | cp, ei->ScsiStatus); |
1097 | break; |
1098 | case CMD_ABORT_FAILED: |
1099 | cmd->result = DID_ERROR << 16; |
1100 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); |
1101 | break; |
1102 | case CMD_UNSOLICITED_ABORT: |
1103 | cmd->result = DID_RESET << 16; |
1104 | dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " |
1105 | "abort\n", cp); |
1106 | break; |
1107 | case CMD_TIMEOUT: |
1108 | cmd->result = DID_TIME_OUT << 16; |
1109 | dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); |
1110 | break; |
1111 | default: |
1112 | cmd->result = DID_ERROR << 16; |
1113 | dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", |
1114 | cp, ei->CommandStatus); |
1115 | } |
1116 | cmd->scsi_done(cmd); |
1117 | cmd_free(h, cp); |
1118 | } |
1119 | |
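/* Allocate and register the SCSI host for this controller and kick
 * off the initial device scan.
 */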
1120 | static int hpsa_scsi_detect(struct ctlr_info *h) |
1121 | { |
1122 | struct Scsi_Host *sh; |
1123 | int error; |
1124 | |
1125 | sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); |
1126 | if (sh == NULL) |
1127 | goto fail; |
1128 | |
1129 | sh->io_port = 0; |
1130 | sh->n_io_port = 0; |
1131 | sh->this_id = -1; |
1132 | sh->max_channel = 3; |
1133 | sh->max_cmd_len = MAX_COMMAND_SIZE; |
1134 | sh->max_lun = HPSA_MAX_LUN; |
1135 | sh->max_id = HPSA_MAX_LUN; |
1136 | sh->can_queue = h->nr_cmds; |
1137 | sh->cmd_per_lun = h->nr_cmds; |
1138 | sh->sg_tablesize = h->maxsgentries; |
1139 | h->scsi_host = sh; |
1140 | sh->hostdata[0] = (unsigned long) h; |
1141 | sh->irq = h->intr[PERF_MODE_INT]; |
1142 | sh->unique_id = sh->irq; |
1143 | error = scsi_add_host(sh, &h->pdev->dev); |
1144 | if (error) |
1145 | goto fail_host_put; |
1146 | scsi_scan_host(sh); |
1147 | return 0; |
1148 | |
1149 | fail_host_put: |
1150 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" |
1151 | " failed for controller %d\n", h->ctlr); |
1152 | scsi_host_put(sh); |
1153 | return error; |
1154 | fail: |
1155 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" |
1156 | " failed for controller %d\n", h->ctlr); |
1157 | return -ENOMEM; |
1158 | } |
1159 | |
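/* Undo the DMA mappings set up for the first sg_used SG descriptors
 * of a command.
 */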
1160 | static void hpsa_pci_unmap(struct pci_dev *pdev, |
1161 | struct CommandList *c, int sg_used, int data_direction) |
1162 | { |
1163 | int i; |
1164 | union u64bit addr64; |
1165 | |
1166 | for (i = 0; i < sg_used; i++) { |
1167 | addr64.val32.lower = c->SG[i].Addr.lower; |
1168 | addr64.val32.upper = c->SG[i].Addr.upper; |
1169 | pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, |
1170 | data_direction); |
1171 | } |
1172 | } |
1173 | |
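/* DMA-map a single contiguous buffer and describe it in SG entry 0
 * of the command (or mark the command as having no data transfer).
 */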
1174 | static void hpsa_map_one(struct pci_dev *pdev, |
1175 | struct CommandList *cp, |
1176 | unsigned char *buf, |
1177 | size_t buflen, |
1178 | int data_direction) |
1179 | { |
1180 | u64 addr64; |
1181 | |
1182 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { |
1183 | cp->Header.SGList = 0; |
1184 | cp->Header.SGTotal = 0; |
1185 | return; |
1186 | } |
1187 | |
1188 | addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); |
1189 | cp->SG[0].Addr.lower = |
1190 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); |
1191 | cp->SG[0].Addr.upper = |
1192 | (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); |
1193 | cp->SG[0].Len = buflen; |
1194 | cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ |
1195 | cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ |
1196 | } |
1197 | |
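/* Send a command and sleep until the interrupt handler signals the
 * on-stack completion.
 */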
1198 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, |
1199 | struct CommandList *c) |
1200 | { |
1201 | DECLARE_COMPLETION_ONSTACK(wait); |
1202 | |
1203 | c->waiting = &wait; |
1204 | enqueue_cmd_and_start_io(h, c); |
1205 | wait_for_completion(&wait); |
1206 | } |
1207 | |
1208 | static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, |
1209 | struct CommandList *c, int data_direction) |
1210 | { |
1211 | int retry_count = 0; |
1212 | |
1213 | do { |
1214 | memset(c->err_info, 0, sizeof(*c->err_info)); |
1215 | hpsa_scsi_do_simple_cmd_core(h, c); |
1216 | retry_count++; |
1217 | } while (check_for_unit_attention(h, c) && retry_count <= 3); |
1218 | hpsa_pci_unmap(h->pdev, c, 1, data_direction); |
1219 | } |
1220 | |
1221 | static void hpsa_scsi_interpret_error(struct CommandList *cp) |
1222 | { |
1223 | struct ErrorInfo *ei; |
1224 | struct device *d = &cp->h->pdev->dev; |
1225 | |
1226 | ei = cp->err_info; |
1227 | switch (ei->CommandStatus) { |
1228 | case CMD_TARGET_STATUS: |
1229 | dev_warn(d, "cmd %p has completed with errors\n", cp); |
1230 | dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, |
1231 | ei->ScsiStatus); |
1232 | if (ei->ScsiStatus == 0) |
1233 | dev_warn(d, "SCSI status is abnormally zero. " |
1234 | "(probably indicates selection timeout " |
1235 | "reported incorrectly due to a known " |
1236 | "firmware bug, circa July, 2001.)\n"); |
1237 | break; |
1238 | case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ |
1239 | dev_info(d, "UNDERRUN\n"); |
1240 | break; |
1241 | case CMD_DATA_OVERRUN: |
1242 | dev_warn(d, "cp %p has completed with data overrun\n", cp); |
1243 | break; |
1244 | case CMD_INVALID: { |
1245 | /* controller unfortunately reports SCSI passthru's |
1246 | * to non-existent targets as invalid commands. |
1247 | */ |
1248 | dev_warn(d, "cp %p is reported invalid (probably means " |
1249 | "target device no longer present)\n", cp); |
1250 | /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); |
1251 | print_cmd(cp); */ |
1252 | } |
1253 | break; |
1254 | case CMD_PROTOCOL_ERR: |
1255 | dev_warn(d, "cp %p has protocol error \n", cp); |
1256 | break; |
1257 | case CMD_HARDWARE_ERR: |
1258 | /* cmd->result = DID_ERROR << 16; */ |
1259 | dev_warn(d, "cp %p had hardware error\n", cp); |
1260 | break; |
1261 | case CMD_CONNECTION_LOST: |
1262 | dev_warn(d, "cp %p had connection lost\n", cp); |
1263 | break; |
1264 | case CMD_ABORTED: |
1265 | dev_warn(d, "cp %p was aborted\n", cp); |
1266 | break; |
1267 | case CMD_ABORT_FAILED: |
1268 | dev_warn(d, "cp %p reports abort failed\n", cp); |
1269 | break; |
1270 | case CMD_UNSOLICITED_ABORT: |
1271 | dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); |
1272 | break; |
1273 | case CMD_TIMEOUT: |
1274 | dev_warn(d, "cp %p timed out\n", cp); |
1275 | break; |
1276 | default: |
1277 | dev_warn(d, "cp %p returned unknown status %x\n", cp, |
1278 | ei->CommandStatus); |
1279 | } |
1280 | } |
1281 | |
1282 | static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, |
1283 | unsigned char page, unsigned char *buf, |
1284 | unsigned char bufsize) |
1285 | { |
1286 | int rc = IO_OK; |
1287 | struct CommandList *c; |
1288 | struct ErrorInfo *ei; |
1289 | |
1290 | c = cmd_special_alloc(h); |
1291 | |
1292 | if (c == NULL) { /* trouble... */ |
1293 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1294 | return -ENOMEM; |
1295 | } |
1296 | |
1297 | fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); |
1298 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); |
1299 | ei = c->err_info; |
1300 | if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { |
1301 | hpsa_scsi_interpret_error(c); |
1302 | rc = -1; |
1303 | } |
1304 | cmd_special_free(h, c); |
1305 | return rc; |
1306 | } |
1307 | |
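/* Send a device-reset message to the target addressed by scsi3addr;
 * returns IO_OK on success, -1 otherwise.
 */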
1308 | static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) |
1309 | { |
1310 | int rc = IO_OK; |
1311 | struct CommandList *c; |
1312 | struct ErrorInfo *ei; |
1313 | |
1314 | c = cmd_special_alloc(h); |
1315 | |
1316 | if (c == NULL) { /* trouble... */ |
1317 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1318 | return -ENOMEM; |
1319 | } |
1320 | |
1321 | fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); |
1322 | hpsa_scsi_do_simple_cmd_core(h, c); |
1323 | /* no unmap needed here because no data xfer. */ |
1324 | |
1325 | ei = c->err_info; |
1326 | if (ei->CommandStatus != 0) { |
1327 | hpsa_scsi_interpret_error(c); |
1328 | rc = -1; |
1329 | } |
1330 | cmd_special_free(h, c); |
1331 | return rc; |
1332 | } |
1333 | |
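/* Fetch the RAID level via vendor-specific inquiry page 0xC1;
 * *raid_level is set to RAID_UNKNOWN on any failure.
 */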
1334 | static void hpsa_get_raid_level(struct ctlr_info *h, |
1335 | unsigned char *scsi3addr, unsigned char *raid_level) |
1336 | { |
1337 | int rc; |
1338 | unsigned char *buf; |
1339 | |
1340 | *raid_level = RAID_UNKNOWN; |
1341 | buf = kzalloc(64, GFP_KERNEL); |
1342 | if (!buf) |
1343 | return; |
1344 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); |
1345 | if (rc == 0) |
1346 | *raid_level = buf[8]; |
1347 | if (*raid_level > RAID_UNKNOWN) |
1348 | *raid_level = RAID_UNKNOWN; |
1349 | kfree(buf); |
1350 | return; |
1351 | } |
1352 | |
1353 | /* Get the device id from inquiry page 0x83 */ |
1354 | static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, |
1355 | unsigned char *device_id, int buflen) |
1356 | { |
1357 | int rc; |
1358 | unsigned char *buf; |
1359 | |
1360 | if (buflen > 16) |
1361 | buflen = 16; |
1362 | buf = kzalloc(64, GFP_KERNEL); |
1363 | if (!buf) |
1364 | return -1; |
1365 | rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); |
1366 | if (rc == 0) |
1367 | memcpy(device_id, &buf[8], buflen); |
1368 | kfree(buf); |
1369 | return rc != 0; |
1370 | } |
1371 | |
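/* Issue CISS REPORT PHYSICAL or REPORT LOGICAL LUNS to the controller
 * (addressed with the all-zero LUN) and leave the result in buf.
 */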
1372 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, |
1373 | struct ReportLUNdata *buf, int bufsize, |
1374 | int extended_response) |
1375 | { |
1376 | int rc = IO_OK; |
1377 | struct CommandList *c; |
1378 | unsigned char scsi3addr[8]; |
1379 | struct ErrorInfo *ei; |
1380 | |
1381 | c = cmd_special_alloc(h); |
1382 | if (c == NULL) { /* trouble... */ |
1383 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1384 | return -1; |
1385 | } |
1386 | /* address the controller */ |
1387 | memset(scsi3addr, 0, sizeof(scsi3addr)); |
1388 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, |
1389 | buf, bufsize, 0, scsi3addr, TYPE_CMD); |
1390 | if (extended_response) |
1391 | c->Request.CDB[1] = extended_response; |
1392 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); |
1393 | ei = c->err_info; |
1394 | if (ei->CommandStatus != 0 && |
1395 | ei->CommandStatus != CMD_DATA_UNDERRUN) { |
1396 | hpsa_scsi_interpret_error(c); |
1397 | rc = -1; |
1398 | } |
1399 | cmd_special_free(h, c); |
1400 | return rc; |
1401 | } |
1402 | |
1403 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, |
1404 | struct ReportLUNdata *buf, |
1405 | int bufsize, int extended_response) |
1406 | { |
1407 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); |
1408 | } |
1409 | |
1410 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, |
1411 | struct ReportLUNdata *buf, int bufsize) |
1412 | { |
1413 | return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); |
1414 | } |
1415 | |
1416 | static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, |
1417 | int bus, int target, int lun) |
1418 | { |
1419 | device->bus = bus; |
1420 | device->target = target; |
1421 | device->lun = lun; |
1422 | } |
1423 | |
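/* Issue a standard INQUIRY to the device at scsi3addr and fill in
 * this_device (devtype, vendor/model/revision, device id, RAID level).
 * Returns 0 on success, 1 on failure.
 */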
1424 | static int hpsa_update_device_info(struct ctlr_info *h, |
1425 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) |
1426 | { |
1427 | #define OBDR_TAPE_INQ_SIZE 49 |
1428 | unsigned char *inq_buff; |
1429 | |
1430 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
1431 | if (!inq_buff) |
1432 | goto bail_out; |
1433 | |
1434 | /* Do an inquiry to the device to see what it is. */ |
1435 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, |
1436 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { |
1437 | /* Inquiry failed (msg printed already) */ |
1438 | dev_err(&h->pdev->dev, |
1439 | "hpsa_update_device_info: inquiry failed\n"); |
1440 | goto bail_out; |
1441 | } |
1442 | |
1443 | /* As a side effect, record the firmware version number |
1444 | * if we happen to be talking to the RAID controller. |
1445 | */ |
1446 | if (is_hba_lunid(scsi3addr)) |
1447 | memcpy(h->firm_ver, &inq_buff[32], 4); |
1448 | |
1449 | this_device->devtype = (inq_buff[0] & 0x1f); |
1450 | memcpy(this_device->scsi3addr, scsi3addr, 8); |
1451 | memcpy(this_device->vendor, &inq_buff[8], |
1452 | sizeof(this_device->vendor)); |
1453 | memcpy(this_device->model, &inq_buff[16], |
1454 | sizeof(this_device->model)); |
1455 | memcpy(this_device->revision, &inq_buff[32], |
1456 | sizeof(this_device->revision)); |
1457 | memset(this_device->device_id, 0, |
1458 | sizeof(this_device->device_id)); |
1459 | hpsa_get_device_id(h, scsi3addr, this_device->device_id, |
1460 | sizeof(this_device->device_id)); |
1461 | |
1462 | if (this_device->devtype == TYPE_DISK && |
1463 | is_logical_dev_addr_mode(scsi3addr)) |
1464 | hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); |
1465 | else |
1466 | this_device->raid_level = RAID_UNKNOWN; |
1467 | |
1468 | kfree(inq_buff); |
1469 | return 0; |
1470 | |
1471 | bail_out: |
1472 | kfree(inq_buff); |
1473 | return 1; |
1474 | } |
1475 | |
1476 | static unsigned char *msa2xxx_model[] = { |
1477 | "MSA2012", |
1478 | "MSA2024", |
1479 | "MSA2312", |
1480 | "MSA2324", |
1481 | NULL, |
1482 | }; |
1483 | |
1484 | static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) |
1485 | { |
1486 | int i; |
1487 | |
1488 | for (i = 0; msa2xxx_model[i]; i++) |
1489 | if (strncmp(device->model, msa2xxx_model[i], |
1490 | strlen(msa2xxx_model[i])) == 0) |
1491 | return 1; |
1492 | return 0; |
1493 | } |
1494 | |
1495 | /* Helper function to assign bus, target, lun mapping of devices. |
1496 | * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical |
1497 |  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3. |
1498 | * Logical drive target and lun are assigned at this time, but |
1499 | * physical device lun and target assignment are deferred (assigned |
1500 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) |
1501 | */ |
1502 | static void figure_bus_target_lun(struct ctlr_info *h, |
1503 | u8 *lunaddrbytes, int *bus, int *target, int *lun, |
1504 | struct hpsa_scsi_dev_t *device) |
1505 | { |
1506 | u32 lunid; |
1507 | |
1508 | if (is_logical_dev_addr_mode(lunaddrbytes)) { |
1509 | /* logical device */ |
1510 | if (unlikely(is_scsi_rev_5(h))) { |
1511 | /* p1210m, logical drives lun assignments |
1512 | * match SCSI REPORT LUNS data. |
1513 | */ |
1514 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); |
1515 | *bus = 0; |
1516 | *target = 0; |
1517 | *lun = (lunid & 0x3fff) + 1; |
1518 | } else { |
1519 | /* not p1210m... */ |
1520 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); |
1521 | if (is_msa2xxx(h, device)) { |
1522 | /* msa2xxx way, put logicals on bus 1 |
1523 | * and match target/lun numbers box |
1524 | * reports. |
1525 | */ |
1526 | *bus = 1; |
1527 | *target = (lunid >> 16) & 0x3fff; |
1528 | *lun = lunid & 0x00ff; |
1529 | } else { |
1530 | /* Traditional smart array way. */ |
1531 | *bus = 0; |
1532 | *lun = 0; |
1533 | *target = lunid & 0x3fff; |
1534 | } |
1535 | } |
1536 | } else { |
1537 | /* physical device */ |
1538 | if (is_hba_lunid(lunaddrbytes)) |
1539 | if (unlikely(is_scsi_rev_5(h))) { |
1540 | *bus = 0; /* put p1210m ctlr at 0,0,0 */ |
1541 | *target = 0; |
1542 | *lun = 0; |
1543 | return; |
1544 | } else |
1545 | *bus = 3; /* traditional smartarray */ |
1546 | else |
1547 | *bus = 2; /* physical disk */ |
1548 | *target = -1; |
1549 | *lun = -1; /* we will fill these in later. */ |
1550 | } |
1551 | } |
1552 | |
1553 | /* |
1554 | * If there is no lun 0 on a target, linux won't find any devices. |
1555 | * For the MSA2xxx boxes, we have to manually detect the enclosure |
1556 | * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report |
1557 | * it for some reason. *tmpdevice is the target we're adding, |
1558 | * this_device is a pointer into the current element of currentsd[] |
1559 | * that we're building up in update_scsi_devices(), below. |
1560 | * lunzerobits is a bitmap that tracks which targets already have a |
1561 | * lun 0 assigned. |
1562 | * Returns 1 if an enclosure was added, 0 if not. |
1563 | */ |
1564 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, |
1565 | struct hpsa_scsi_dev_t *tmpdevice, |
1566 | struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, |
1567 | int bus, int target, int lun, unsigned long lunzerobits[], |
1568 | int *nmsa2xxx_enclosures) |
1569 | { |
1570 | unsigned char scsi3addr[8]; |
1571 | |
1572 | if (test_bit(target, lunzerobits)) |
1573 | return 0; /* There is already a lun 0 on this target. */ |
1574 | |
1575 | if (!is_logical_dev_addr_mode(lunaddrbytes)) |
1576 | return 0; /* It's the logical targets that may lack lun 0. */ |
1577 | |
1578 | if (!is_msa2xxx(h, tmpdevice)) |
1579 | return 0; /* It's only the MSA2xxx that have this problem. */ |
1580 | |
1581 | if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ |
1582 | return 0; |
1583 | |
1584 | if (is_hba_lunid(scsi3addr)) |
1585 | return 0; /* Don't add the RAID controller here. */ |
1586 | |
1587 | if (is_scsi_rev_5(h)) |
1588 | return 0; /* p1210m doesn't need to do this. */ |
1589 | |
1590 | #define MAX_MSA2XXX_ENCLOSURES 32 |
1591 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { |
1592 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " |
1593 | "enclosures exceeded. Check your hardware " |
1594 | "configuration."); |
1595 | return 0; |
1596 | } |
1597 | |
1598 | memset(scsi3addr, 0, 8); |
1599 | scsi3addr[3] = target; |
1600 | if (hpsa_update_device_info(h, scsi3addr, this_device)) |
1601 | return 0; |
1602 | (*nmsa2xxx_enclosures)++; |
1603 | hpsa_set_bus_target_lun(this_device, bus, target, 0); |
1604 | set_bit(target, lunzerobits); |
1605 | return 1; |
1606 | } |
1607 | |
1608 | /* |
1609 | * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, |
1610 | * logdev. The number of luns in physdev and logdev are returned in |
1611 | * *nphysicals and *nlogicals, respectively. |
1612 | * Returns 0 on success, -1 otherwise. |
1613 | */ |
1614 | static int hpsa_gather_lun_info(struct ctlr_info *h, |
1615 | int reportlunsize, |
1616 | struct ReportLUNdata *physdev, u32 *nphysicals, |
1617 | struct ReportLUNdata *logdev, u32 *nlogicals) |
1618 | { |
1619 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { |
1620 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); |
1621 | return -1; |
1622 | } |
1623 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; |
1624 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
1625 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." |
1626 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, |
1627 | *nphysicals - HPSA_MAX_PHYS_LUN); |
1628 | *nphysicals = HPSA_MAX_PHYS_LUN; |
1629 | } |
1630 | if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { |
1631 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); |
1632 | return -1; |
1633 | } |
1634 | *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; |
1635 | /* Reject Logicals in excess of our max capability. */ |
1636 | if (*nlogicals > HPSA_MAX_LUN) { |
1637 | dev_warn(&h->pdev->dev, |
1638 | "maximum logical LUNs (%d) exceeded. " |
1639 | "%d LUNs ignored.\n", HPSA_MAX_LUN, |
1640 | *nlogicals - HPSA_MAX_LUN); |
1641 | *nlogicals = HPSA_MAX_LUN; |
1642 | } |
1643 | if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { |
1644 | dev_warn(&h->pdev->dev, |
1645 | "maximum logical + physical LUNs (%d) exceeded. " |
1646 | "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, |
1647 | *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); |
1648 | *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; |
1649 | } |
1650 | return 0; |
1651 | } |
1652 | |
1653 | u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, |
1654 | int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, |
1655 | struct ReportLUNdata *logdev_list) |
1656 | { |
	/* Helper function: figure out where the LUN ID info for index i
	 * comes from, given the physical and logical device lists and where
	 * in the list the RAID controller is supposed to appear (first or
	 * last).
	 */
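	/*
	 * For example, with raid_ctlr_position == 0 (as on SCSI rev 5
	 * controllers) index 0 is the controller itself, indexes
	 * 1..nphysicals are the physical devices, and the logical devices
	 * follow; otherwise the physical devices come first and the
	 * controller is the last entry.
	 */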
1661 | |
1662 | int logicals_start = nphysicals + (raid_ctlr_position == 0); |
1663 | int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); |
1664 | |
1665 | if (i == raid_ctlr_position) |
1666 | return RAID_CTLR_LUNID; |
1667 | |
1668 | if (i < logicals_start) |
1669 | return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; |
1670 | |
1671 | if (i < last_device) |
1672 | return &logdev_list->LUN[i - nphysicals - |
1673 | (raid_ctlr_position == 0)][0]; |
1674 | BUG(); |
1675 | return NULL; |
1676 | } |
1677 | |
1678 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
1679 | { |
	/* The idea here is that we could get notified that some
	 * devices have changed, so we issue a report physical LUNs
	 * and a report logical LUNs command, and adjust our list
	 * of devices accordingly.
1684 | * |
1685 | * The scsi3addr's of devices won't change so long as the |
1686 | * adapter is not reset. That means we can rescan and |
1687 | * tell which devices we already know about, vs. new |
1688 | * devices, vs. disappearing devices. |
1689 | */ |
1690 | struct ReportLUNdata *physdev_list = NULL; |
1691 | struct ReportLUNdata *logdev_list = NULL; |
1692 | unsigned char *inq_buff = NULL; |
1693 | u32 nphysicals = 0; |
1694 | u32 nlogicals = 0; |
1695 | u32 ndev_allocated = 0; |
1696 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
1697 | int ncurrent = 0; |
1698 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; |
1699 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; |
1700 | int bus, target, lun; |
1701 | int raid_ctlr_position; |
1702 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); |
1703 | |
1704 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, |
1705 | GFP_KERNEL); |
1706 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); |
1707 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); |
1708 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
1709 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); |
1710 | |
1711 | if (!currentsd || !physdev_list || !logdev_list || |
1712 | !inq_buff || !tmpdevice) { |
1713 | dev_err(&h->pdev->dev, "out of memory\n"); |
1714 | goto out; |
1715 | } |
1716 | memset(lunzerobits, 0, sizeof(lunzerobits)); |
1717 | |
1718 | if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, |
1719 | logdev_list, &nlogicals)) |
1720 | goto out; |
1721 | |
	/* We might see up to 32 MSA2xxx enclosures: in practice at most 8,
	 * but each one visible 4 times through different paths.  The plus 1
1724 | * is for the RAID controller. |
1725 | */ |
1726 | ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; |
1727 | |
1728 | /* Allocate the per device structures */ |
1729 | for (i = 0; i < ndevs_to_allocate; i++) { |
1730 | currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); |
1731 | if (!currentsd[i]) { |
1732 | dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", |
1733 | __FILE__, __LINE__); |
1734 | goto out; |
1735 | } |
1736 | ndev_allocated++; |
1737 | } |
1738 | |
1739 | if (unlikely(is_scsi_rev_5(h))) |
1740 | raid_ctlr_position = 0; |
1741 | else |
1742 | raid_ctlr_position = nphysicals + nlogicals; |
1743 | |
1744 | /* adjust our table of devices */ |
1745 | nmsa2xxx_enclosures = 0; |
1746 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { |
1747 | u8 *lunaddrbytes; |
1748 | |
1749 | /* Figure out where the LUN ID info is coming from */ |
1750 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, |
1751 | i, nphysicals, nlogicals, physdev_list, logdev_list); |
1752 | /* skip masked physical devices. */ |
		if ((lunaddrbytes[3] & 0xC0) &&
1754 | i < nphysicals + (raid_ctlr_position == 0)) |
1755 | continue; |
1756 | |
1757 | /* Get device type, vendor, model, device id */ |
1758 | if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) |
1759 | continue; /* skip it if we can't talk to it. */ |
1760 | figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, |
1761 | tmpdevice); |
1762 | this_device = currentsd[ncurrent]; |
1763 | |
1764 | /* |
		 * For the MSA2xxx boxes, we have to insert a LUN 0 that
		 * doesn't show up in the CCISS_REPORT_PHYSICAL data even
		 * though there is an enclosure device there.  We have to
		 * present it, otherwise Linux won't find anything if
		 * there is no lun 0.
1770 | */ |
1771 | if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, |
1772 | lunaddrbytes, bus, target, lun, lunzerobits, |
1773 | &nmsa2xxx_enclosures)) { |
1774 | ncurrent++; |
1775 | this_device = currentsd[ncurrent]; |
1776 | } |
1777 | |
1778 | *this_device = *tmpdevice; |
1779 | hpsa_set_bus_target_lun(this_device, bus, target, lun); |
1780 | |
1781 | switch (this_device->devtype) { |
1782 | case TYPE_ROM: { |
1783 | /* We don't *really* support actual CD-ROM devices, |
1784 | * just "One Button Disaster Recovery" tape drive |
1785 | * which temporarily pretends to be a CD-ROM drive. |
1786 | * So we check that the device is really an OBDR tape |
1787 | * device by checking for "$DR-10" in bytes 43-48 of |
1788 | * the inquiry data. |
1789 | */ |
1790 | char obdr_sig[7]; |
1791 | #define OBDR_TAPE_SIG "$DR-10" |
1792 | strncpy(obdr_sig, &inq_buff[43], 6); |
1793 | obdr_sig[6] = '\0'; |
1794 | if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) |
1795 | /* Not OBDR device, ignore it. */ |
1796 | break; |
1797 | } |
1798 | ncurrent++; |
1799 | break; |
1800 | case TYPE_DISK: |
1801 | if (i < nphysicals) |
1802 | break; |
1803 | ncurrent++; |
1804 | break; |
1805 | case TYPE_TAPE: |
1806 | case TYPE_MEDIUM_CHANGER: |
1807 | ncurrent++; |
1808 | break; |
1809 | case TYPE_RAID: |
1810 | /* Only present the Smartarray HBA as a RAID controller. |
1811 | * If it's a RAID controller other than the HBA itself |
1812 | * (an external RAID controller, MSA500 or similar) |
1813 | * don't present it. |
1814 | */ |
1815 | if (!is_hba_lunid(lunaddrbytes)) |
1816 | break; |
1817 | ncurrent++; |
1818 | break; |
1819 | default: |
1820 | break; |
1821 | } |
1822 | if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) |
1823 | break; |
1824 | } |
1825 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); |
1826 | out: |
1827 | kfree(tmpdevice); |
1828 | for (i = 0; i < ndev_allocated; i++) |
1829 | kfree(currentsd[i]); |
1830 | kfree(currentsd); |
1831 | kfree(inq_buff); |
1832 | kfree(physdev_list); |
1833 | kfree(logdev_list); |
1834 | } |
1835 | |
/* hpsa_scatter_gather takes a struct scsi_cmnd (cmd), performs the PCI
 * DMA mapping, and fills in the scatter-gather entries of the
 * hpsa command, cp.
1839 | */ |
1840 | static int hpsa_scatter_gather(struct ctlr_info *h, |
1841 | struct CommandList *cp, |
1842 | struct scsi_cmnd *cmd) |
1843 | { |
1844 | unsigned int len; |
1845 | struct scatterlist *sg; |
1846 | u64 addr64; |
1847 | int use_sg, i, sg_index, chained; |
1848 | struct SGDescriptor *curr_sg; |
1849 | |
1850 | BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); |
1851 | |
1852 | use_sg = scsi_dma_map(cmd); |
1853 | if (use_sg < 0) |
1854 | return use_sg; |
1855 | |
1856 | if (!use_sg) |
1857 | goto sglist_finished; |
1858 | |
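	/*
	 * Fill the command's embedded SG array.  If the request needs more
	 * entries than fit in the command (h->max_cmd_sg_entries), the
	 * remaining entries go into a separate, pre-allocated chain block
	 * (h->cmd_sg_list[cp->cmdindex]) that is linked in by
	 * hpsa_map_sg_chain_block() below.
	 */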
1859 | curr_sg = cp->SG; |
1860 | chained = 0; |
1861 | sg_index = 0; |
1862 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
1863 | if (i == h->max_cmd_sg_entries - 1 && |
1864 | use_sg > h->max_cmd_sg_entries) { |
1865 | chained = 1; |
1866 | curr_sg = h->cmd_sg_list[cp->cmdindex]; |
1867 | sg_index = 0; |
1868 | } |
1869 | addr64 = (u64) sg_dma_address(sg); |
1870 | len = sg_dma_len(sg); |
1871 | curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); |
1872 | curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); |
1873 | curr_sg->Len = len; |
1874 | curr_sg->Ext = 0; /* we are not chaining */ |
1875 | curr_sg++; |
1876 | } |
1877 | |
1878 | if (use_sg + chained > h->maxSG) |
1879 | h->maxSG = use_sg + chained; |
1880 | |
1881 | if (chained) { |
1882 | cp->Header.SGList = h->max_cmd_sg_entries; |
1883 | cp->Header.SGTotal = (u16) (use_sg + 1); |
1884 | hpsa_map_sg_chain_block(h, cp); |
1885 | return 0; |
1886 | } |
1887 | |
1888 | sglist_finished: |
1889 | |
1890 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
1891 | cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ |
1892 | return 0; |
1893 | } |
1894 | |
1895 | |
1896 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, |
1897 | void (*done)(struct scsi_cmnd *)) |
1898 | { |
1899 | struct ctlr_info *h; |
1900 | struct hpsa_scsi_dev_t *dev; |
1901 | unsigned char scsi3addr[8]; |
1902 | struct CommandList *c; |
1903 | unsigned long flags; |
1904 | |
1905 | /* Get the ptr to our adapter structure out of cmd->host. */ |
1906 | h = sdev_to_hba(cmd->device); |
1907 | dev = cmd->device->hostdata; |
1908 | if (!dev) { |
1909 | cmd->result = DID_NO_CONNECT << 16; |
1910 | done(cmd); |
1911 | return 0; |
1912 | } |
1913 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); |
1914 | |
1915 | /* Need a lock as this is being allocated from the pool */ |
1916 | spin_lock_irqsave(&h->lock, flags); |
1917 | c = cmd_alloc(h); |
1918 | spin_unlock_irqrestore(&h->lock, flags); |
1919 | if (c == NULL) { /* trouble... */ |
1920 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); |
1921 | return SCSI_MLQUEUE_HOST_BUSY; |
1922 | } |
1923 | |
1924 | /* Fill in the command list header */ |
1925 | |
1926 | cmd->scsi_done = done; /* save this for use by completion code */ |
1927 | |
1928 | /* save c in case we have to abort it */ |
1929 | cmd->host_scribble = (unsigned char *) c; |
1930 | |
1931 | c->cmd_type = CMD_SCSI; |
1932 | c->scsi_cmd = cmd; |
1933 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
1934 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
1935 | c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); |
1936 | c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; |
1937 | |
1938 | /* Fill in the request block... */ |
1939 | |
1940 | c->Request.Timeout = 0; |
1941 | memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); |
1942 | BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); |
1943 | c->Request.CDBLen = cmd->cmd_len; |
1944 | memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); |
1945 | c->Request.Type.Type = TYPE_CMD; |
1946 | c->Request.Type.Attribute = ATTR_SIMPLE; |
1947 | switch (cmd->sc_data_direction) { |
1948 | case DMA_TO_DEVICE: |
1949 | c->Request.Type.Direction = XFER_WRITE; |
1950 | break; |
1951 | case DMA_FROM_DEVICE: |
1952 | c->Request.Type.Direction = XFER_READ; |
1953 | break; |
1954 | case DMA_NONE: |
1955 | c->Request.Type.Direction = XFER_NONE; |
1956 | break; |
1957 | case DMA_BIDIRECTIONAL: |
1958 | /* This can happen if a buggy application does a scsi passthru |
		 * and sets both inlen and outlen to non-zero.  (See
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
1961 | */ |
1962 | |
1963 | c->Request.Type.Direction = XFER_RSVD; |
1964 | /* This is technically wrong, and hpsa controllers should |
1965 | * reject it with CMD_INVALID, which is the most correct |
1966 | * response, but non-fibre backends appear to let it |
1967 | * slide by, and give the same results as if this field |
1968 | * were set correctly. Either way is acceptable for |
1969 | * our purposes here. |
1970 | */ |
1971 | |
1972 | break; |
1973 | |
1974 | default: |
1975 | dev_err(&h->pdev->dev, "unknown data direction: %d\n", |
1976 | cmd->sc_data_direction); |
1977 | BUG(); |
1978 | break; |
1979 | } |
1980 | |
1981 | if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ |
1982 | cmd_free(h, c); |
1983 | return SCSI_MLQUEUE_HOST_BUSY; |
1984 | } |
1985 | enqueue_cmd_and_start_io(h, c); |
1986 | /* the cmd'll come back via intr handler in complete_scsi_command() */ |
1987 | return 0; |
1988 | } |
1989 | |
1990 | static void hpsa_scan_start(struct Scsi_Host *sh) |
1991 | { |
1992 | struct ctlr_info *h = shost_to_hba(sh); |
1993 | unsigned long flags; |
1994 | |
1995 | /* wait until any scan already in progress is finished. */ |
1996 | while (1) { |
1997 | spin_lock_irqsave(&h->scan_lock, flags); |
1998 | if (h->scan_finished) |
1999 | break; |
2000 | spin_unlock_irqrestore(&h->scan_lock, flags); |
2001 | wait_event(h->scan_wait_queue, h->scan_finished); |
2002 | /* Note: We don't need to worry about a race between this |
2003 | * thread and driver unload because the midlayer will |
2004 | * have incremented the reference count, so unload won't |
2005 | * happen if we're in here. |
2006 | */ |
2007 | } |
2008 | h->scan_finished = 0; /* mark scan as in progress */ |
2009 | spin_unlock_irqrestore(&h->scan_lock, flags); |
2010 | |
2011 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); |
2012 | |
2013 | spin_lock_irqsave(&h->scan_lock, flags); |
2014 | h->scan_finished = 1; /* mark scan as finished. */ |
2015 | wake_up_all(&h->scan_wait_queue); |
2016 | spin_unlock_irqrestore(&h->scan_lock, flags); |
2017 | } |
2018 | |
2019 | static int hpsa_scan_finished(struct Scsi_Host *sh, |
2020 | unsigned long elapsed_time) |
2021 | { |
2022 | struct ctlr_info *h = shost_to_hba(sh); |
2023 | unsigned long flags; |
2024 | int finished; |
2025 | |
2026 | spin_lock_irqsave(&h->scan_lock, flags); |
2027 | finished = h->scan_finished; |
2028 | spin_unlock_irqrestore(&h->scan_lock, flags); |
2029 | return finished; |
2030 | } |
2031 | |
2032 | static int hpsa_change_queue_depth(struct scsi_device *sdev, |
2033 | int qdepth, int reason) |
2034 | { |
2035 | struct ctlr_info *h = sdev_to_hba(sdev); |
2036 | |
2037 | if (reason != SCSI_QDEPTH_DEFAULT) |
2038 | return -ENOTSUPP; |
2039 | |
2040 | if (qdepth < 1) |
2041 | qdepth = 1; |
2042 | else |
2043 | if (qdepth > h->nr_cmds) |
2044 | qdepth = h->nr_cmds; |
2045 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); |
2046 | return sdev->queue_depth; |
2047 | } |
2048 | |
2049 | static void hpsa_unregister_scsi(struct ctlr_info *h) |
2050 | { |
2051 | /* we are being forcibly unloaded, and may not refuse. */ |
2052 | scsi_remove_host(h->scsi_host); |
2053 | scsi_host_put(h->scsi_host); |
2054 | h->scsi_host = NULL; |
2055 | } |
2056 | |
2057 | static int hpsa_register_scsi(struct ctlr_info *h) |
2058 | { |
2059 | int rc; |
2060 | |
2061 | rc = hpsa_scsi_detect(h); |
2062 | if (rc != 0) |
2063 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" |
2064 | " hpsa_scsi_detect(), rc is %d\n", rc); |
2065 | return rc; |
2066 | } |
2067 | |
2068 | static int wait_for_device_to_become_ready(struct ctlr_info *h, |
2069 | unsigned char lunaddr[]) |
2070 | { |
2071 | int rc = 0; |
2072 | int count = 0; |
2073 | int waittime = 1; /* seconds */ |
2074 | struct CommandList *c; |
2075 | |
2076 | c = cmd_special_alloc(h); |
2077 | if (!c) { |
2078 | dev_warn(&h->pdev->dev, "out of memory in " |
2079 | "wait_for_device_to_become_ready.\n"); |
2080 | return IO_ERROR; |
2081 | } |
2082 | |
2083 | /* Send test unit ready until device ready, or give up. */ |
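	/* Back off between retries: 1s, 2s, 4s, ..., capped at
	 * HPSA_MAX_WAIT_INTERVAL_SECS, for at most HPSA_TUR_RETRY_LIMIT
	 * attempts.
	 */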
2084 | while (count < HPSA_TUR_RETRY_LIMIT) { |
2085 | |
		/* Wait for a bit.  Do this first, because if we send
2087 | * the TUR right away, the reset will just abort it. |
2088 | */ |
2089 | msleep(1000 * waittime); |
2090 | count++; |
2091 | |
2092 | /* Increase wait time with each try, up to a point. */ |
2093 | if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) |
2094 | waittime = waittime * 2; |
2095 | |
2096 | /* Send the Test Unit Ready */ |
2097 | fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); |
2098 | hpsa_scsi_do_simple_cmd_core(h, c); |
2099 | /* no unmap needed here because no data xfer. */ |
2100 | |
2101 | if (c->err_info->CommandStatus == CMD_SUCCESS) |
2102 | break; |
2103 | |
2104 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && |
2105 | c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && |
2106 | (c->err_info->SenseInfo[2] == NO_SENSE || |
2107 | c->err_info->SenseInfo[2] == UNIT_ATTENTION)) |
2108 | break; |
2109 | |
2110 | dev_warn(&h->pdev->dev, "waiting %d secs " |
2111 | "for device to become ready.\n", waittime); |
2112 | rc = 1; /* device not ready. */ |
2113 | } |
2114 | |
2115 | if (rc) |
2116 | dev_warn(&h->pdev->dev, "giving up on device.\n"); |
2117 | else |
2118 | dev_warn(&h->pdev->dev, "device is ready.\n"); |
2119 | |
2120 | cmd_special_free(h, c); |
2121 | return rc; |
2122 | } |
2123 | |
2124 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from |
2125 | * complaining. Doing a host- or bus-reset can't do anything good here. |
2126 | */ |
2127 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) |
2128 | { |
2129 | int rc; |
2130 | struct ctlr_info *h; |
2131 | struct hpsa_scsi_dev_t *dev; |
2132 | |
2133 | /* find the controller to which the command to be aborted was sent */ |
2134 | h = sdev_to_hba(scsicmd->device); |
2135 | if (h == NULL) /* paranoia */ |
2136 | return FAILED; |
2137 | dev = scsicmd->device->hostdata; |
2138 | if (!dev) { |
2139 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " |
2140 | "device lookup failed.\n"); |
2141 | return FAILED; |
2142 | } |
2143 | dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", |
2144 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); |
2145 | /* send a reset to the SCSI LUN which the command was sent to */ |
2146 | rc = hpsa_send_reset(h, dev->scsi3addr); |
2147 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) |
2148 | return SUCCESS; |
2149 | |
2150 | dev_warn(&h->pdev->dev, "resetting device failed.\n"); |
2151 | return FAILED; |
2152 | } |
2153 | |
2154 | /* |
2155 | * For operations that cannot sleep, a command block is allocated at init, |
2156 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track |
2157 | * which ones are free or in use. Lock must be held when calling this. |
2158 | * cmd_free() is the complement. |
2159 | */ |
2160 | static struct CommandList *cmd_alloc(struct ctlr_info *h) |
2161 | { |
2162 | struct CommandList *c; |
2163 | int i; |
2164 | union u64bit temp64; |
2165 | dma_addr_t cmd_dma_handle, err_dma_handle; |
2166 | |
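	/*
	 * Claim a free slot in the command pool: find a zero bit and set it
	 * atomically, retrying if the bit was already taken.
	 */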
2167 | do { |
2168 | i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); |
2169 | if (i == h->nr_cmds) |
2170 | return NULL; |
2171 | } while (test_and_set_bit |
2172 | (i & (BITS_PER_LONG - 1), |
2173 | h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); |
2174 | c = h->cmd_pool + i; |
2175 | memset(c, 0, sizeof(*c)); |
2176 | cmd_dma_handle = h->cmd_pool_dhandle |
2177 | + i * sizeof(*c); |
2178 | c->err_info = h->errinfo_pool + i; |
2179 | memset(c->err_info, 0, sizeof(*c->err_info)); |
2180 | err_dma_handle = h->errinfo_pool_dhandle |
2181 | + i * sizeof(*c->err_info); |
2182 | h->nr_allocs++; |
2183 | |
2184 | c->cmdindex = i; |
2185 | |
2186 | INIT_HLIST_NODE(&c->list); |
2187 | c->busaddr = (u32) cmd_dma_handle; |
2188 | temp64.val = (u64) err_dma_handle; |
2189 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2190 | c->ErrDesc.Addr.upper = temp64.val32.upper; |
2191 | c->ErrDesc.Len = sizeof(*c->err_info); |
2192 | |
2193 | c->h = h; |
2194 | return c; |
2195 | } |
2196 | |
/* For operations that may sleep (the allocation can block),
 * this routine can be called.  The lock need not be held to call
 * cmd_special_alloc().  cmd_special_free() is the complement.
2200 | */ |
2201 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h) |
2202 | { |
2203 | struct CommandList *c; |
2204 | union u64bit temp64; |
2205 | dma_addr_t cmd_dma_handle, err_dma_handle; |
2206 | |
2207 | c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); |
2208 | if (c == NULL) |
2209 | return NULL; |
2210 | memset(c, 0, sizeof(*c)); |
2211 | |
2212 | c->cmdindex = -1; |
2213 | |
2214 | c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), |
2215 | &err_dma_handle); |
2216 | |
2217 | if (c->err_info == NULL) { |
2218 | pci_free_consistent(h->pdev, |
2219 | sizeof(*c), c, cmd_dma_handle); |
2220 | return NULL; |
2221 | } |
2222 | memset(c->err_info, 0, sizeof(*c->err_info)); |
2223 | |
2224 | INIT_HLIST_NODE(&c->list); |
2225 | c->busaddr = (u32) cmd_dma_handle; |
2226 | temp64.val = (u64) err_dma_handle; |
2227 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2228 | c->ErrDesc.Addr.upper = temp64.val32.upper; |
2229 | c->ErrDesc.Len = sizeof(*c->err_info); |
2230 | |
2231 | c->h = h; |
2232 | return c; |
2233 | } |
2234 | |
2235 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) |
2236 | { |
2237 | int i; |
2238 | |
2239 | i = c - h->cmd_pool; |
2240 | clear_bit(i & (BITS_PER_LONG - 1), |
2241 | h->cmd_pool_bits + (i / BITS_PER_LONG)); |
2242 | h->nr_frees++; |
2243 | } |
2244 | |
2245 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) |
2246 | { |
2247 | union u64bit temp64; |
2248 | |
2249 | temp64.val32.lower = c->ErrDesc.Addr.lower; |
2250 | temp64.val32.upper = c->ErrDesc.Addr.upper; |
2251 | pci_free_consistent(h->pdev, sizeof(*c->err_info), |
2252 | c->err_info, (dma_addr_t) temp64.val); |
2253 | pci_free_consistent(h->pdev, sizeof(*c), |
2254 | c, (dma_addr_t) c->busaddr); |
2255 | } |
2256 | |
2257 | #ifdef CONFIG_COMPAT |
2258 | |
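/*
 * Translate the 32-bit passthru ioctls into the native 64-bit layout:
 * copy the fixed fields, convert the 32-bit buffer pointer with
 * compat_ptr(), call the regular ioctl handler, then copy the error
 * information back out to the 32-bit structure.
 */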
2259 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) |
2260 | { |
2261 | IOCTL32_Command_struct __user *arg32 = |
2262 | (IOCTL32_Command_struct __user *) arg; |
2263 | IOCTL_Command_struct arg64; |
2264 | IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); |
2265 | int err; |
2266 | u32 cp; |
2267 | |
2268 | err = 0; |
2269 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, |
2270 | sizeof(arg64.LUN_info)); |
2271 | err |= copy_from_user(&arg64.Request, &arg32->Request, |
2272 | sizeof(arg64.Request)); |
2273 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, |
2274 | sizeof(arg64.error_info)); |
2275 | err |= get_user(arg64.buf_size, &arg32->buf_size); |
2276 | err |= get_user(cp, &arg32->buf); |
2277 | arg64.buf = compat_ptr(cp); |
2278 | err |= copy_to_user(p, &arg64, sizeof(arg64)); |
2279 | |
2280 | if (err) |
2281 | return -EFAULT; |
2282 | |
2283 | err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); |
2284 | if (err) |
2285 | return err; |
2286 | err |= copy_in_user(&arg32->error_info, &p->error_info, |
2287 | sizeof(arg32->error_info)); |
2288 | if (err) |
2289 | return -EFAULT; |
2290 | return err; |
2291 | } |
2292 | |
2293 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, |
2294 | int cmd, void *arg) |
2295 | { |
2296 | BIG_IOCTL32_Command_struct __user *arg32 = |
2297 | (BIG_IOCTL32_Command_struct __user *) arg; |
2298 | BIG_IOCTL_Command_struct arg64; |
2299 | BIG_IOCTL_Command_struct __user *p = |
2300 | compat_alloc_user_space(sizeof(arg64)); |
2301 | int err; |
2302 | u32 cp; |
2303 | |
2304 | err = 0; |
2305 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, |
2306 | sizeof(arg64.LUN_info)); |
2307 | err |= copy_from_user(&arg64.Request, &arg32->Request, |
2308 | sizeof(arg64.Request)); |
2309 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, |
2310 | sizeof(arg64.error_info)); |
2311 | err |= get_user(arg64.buf_size, &arg32->buf_size); |
2312 | err |= get_user(arg64.malloc_size, &arg32->malloc_size); |
2313 | err |= get_user(cp, &arg32->buf); |
2314 | arg64.buf = compat_ptr(cp); |
2315 | err |= copy_to_user(p, &arg64, sizeof(arg64)); |
2316 | |
2317 | if (err) |
2318 | return -EFAULT; |
2319 | |
2320 | err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); |
2321 | if (err) |
2322 | return err; |
2323 | err |= copy_in_user(&arg32->error_info, &p->error_info, |
2324 | sizeof(arg32->error_info)); |
2325 | if (err) |
2326 | return -EFAULT; |
2327 | return err; |
2328 | } |
2329 | |
2330 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) |
2331 | { |
2332 | switch (cmd) { |
2333 | case CCISS_GETPCIINFO: |
2334 | case CCISS_GETINTINFO: |
2335 | case CCISS_SETINTINFO: |
2336 | case CCISS_GETNODENAME: |
2337 | case CCISS_SETNODENAME: |
2338 | case CCISS_GETHEARTBEAT: |
2339 | case CCISS_GETBUSTYPES: |
2340 | case CCISS_GETFIRMVER: |
2341 | case CCISS_GETDRIVVER: |
2342 | case CCISS_REVALIDVOLS: |
2343 | case CCISS_DEREGDISK: |
2344 | case CCISS_REGNEWDISK: |
2345 | case CCISS_REGNEWD: |
2346 | case CCISS_RESCANDISK: |
2347 | case CCISS_GETLUNINFO: |
2348 | return hpsa_ioctl(dev, cmd, arg); |
2349 | |
2350 | case CCISS_PASSTHRU32: |
2351 | return hpsa_ioctl32_passthru(dev, cmd, arg); |
2352 | case CCISS_BIG_PASSTHRU32: |
2353 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); |
2354 | |
2355 | default: |
2356 | return -ENOIOCTLCMD; |
2357 | } |
2358 | } |
2359 | #endif |
2360 | |
2361 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) |
2362 | { |
2363 | struct hpsa_pci_info pciinfo; |
2364 | |
2365 | if (!argp) |
2366 | return -EINVAL; |
2367 | pciinfo.domain = pci_domain_nr(h->pdev->bus); |
2368 | pciinfo.bus = h->pdev->bus->number; |
2369 | pciinfo.dev_fn = h->pdev->devfn; |
2370 | pciinfo.board_id = h->board_id; |
2371 | if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) |
2372 | return -EFAULT; |
2373 | return 0; |
2374 | } |
2375 | |
2376 | static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) |
2377 | { |
2378 | DriverVer_type DriverVer; |
2379 | unsigned char vmaj, vmin, vsubmin; |
2380 | int rc; |
2381 | |
2382 | rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", |
2383 | &vmaj, &vmin, &vsubmin); |
2384 | if (rc != 3) { |
2385 | dev_info(&h->pdev->dev, "driver version string '%s' " |
2386 | "unrecognized.", HPSA_DRIVER_VERSION); |
			"unrecognized.\n", HPSA_DRIVER_VERSION);
2388 | vmin = 0; |
2389 | vsubmin = 0; |
2390 | } |
2391 | DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; |
2392 | if (!argp) |
2393 | return -EINVAL; |
2394 | if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) |
2395 | return -EFAULT; |
2396 | return 0; |
2397 | } |
2398 | |
2399 | static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) |
2400 | { |
2401 | IOCTL_Command_struct iocommand; |
2402 | struct CommandList *c; |
2403 | char *buff = NULL; |
2404 | union u64bit temp64; |
2405 | |
2406 | if (!argp) |
2407 | return -EINVAL; |
2408 | if (!capable(CAP_SYS_RAWIO)) |
2409 | return -EPERM; |
2410 | if (copy_from_user(&iocommand, argp, sizeof(iocommand))) |
2411 | return -EFAULT; |
2412 | if ((iocommand.buf_size < 1) && |
2413 | (iocommand.Request.Type.Direction != XFER_NONE)) { |
2414 | return -EINVAL; |
2415 | } |
2416 | if (iocommand.buf_size > 0) { |
2417 | buff = kmalloc(iocommand.buf_size, GFP_KERNEL); |
2418 | if (buff == NULL) |
			return -ENOMEM;
2420 | } |
2421 | if (iocommand.Request.Type.Direction == XFER_WRITE) { |
2422 | /* Copy the data into the buffer we created */ |
2423 | if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { |
2424 | kfree(buff); |
2425 | return -EFAULT; |
2426 | } |
2427 | } else |
2428 | memset(buff, 0, iocommand.buf_size); |
2429 | c = cmd_special_alloc(h); |
2430 | if (c == NULL) { |
2431 | kfree(buff); |
2432 | return -ENOMEM; |
2433 | } |
2434 | /* Fill in the command type */ |
2435 | c->cmd_type = CMD_IOCTL_PEND; |
2436 | /* Fill in Command Header */ |
2437 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
2438 | if (iocommand.buf_size > 0) { /* buffer to fill */ |
2439 | c->Header.SGList = 1; |
2440 | c->Header.SGTotal = 1; |
2441 | } else { /* no buffers to fill */ |
2442 | c->Header.SGList = 0; |
2443 | c->Header.SGTotal = 0; |
2444 | } |
2445 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); |
	/* use the kernel address of the cmd block for tag */
2447 | c->Header.Tag.lower = c->busaddr; |
2448 | |
2449 | /* Fill in Request block */ |
2450 | memcpy(&c->Request, &iocommand.Request, |
2451 | sizeof(c->Request)); |
2452 | |
2453 | /* Fill in the scatter gather information */ |
2454 | if (iocommand.buf_size > 0) { |
2455 | temp64.val = pci_map_single(h->pdev, buff, |
2456 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); |
2457 | c->SG[0].Addr.lower = temp64.val32.lower; |
2458 | c->SG[0].Addr.upper = temp64.val32.upper; |
2459 | c->SG[0].Len = iocommand.buf_size; |
2460 | c->SG[0].Ext = 0; /* we are not chaining*/ |
2461 | } |
2462 | hpsa_scsi_do_simple_cmd_core(h, c); |
2463 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); |
2464 | check_ioctl_unit_attention(h, c); |
2465 | |
2466 | /* Copy the error information out */ |
2467 | memcpy(&iocommand.error_info, c->err_info, |
2468 | sizeof(iocommand.error_info)); |
2469 | if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { |
2470 | kfree(buff); |
2471 | cmd_special_free(h, c); |
2472 | return -EFAULT; |
2473 | } |
2474 | |
2475 | if (iocommand.Request.Type.Direction == XFER_READ) { |
2476 | /* Copy the data out of the buffer we created */ |
2477 | if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { |
2478 | kfree(buff); |
2479 | cmd_special_free(h, c); |
2480 | return -EFAULT; |
2481 | } |
2482 | } |
2483 | kfree(buff); |
2484 | cmd_special_free(h, c); |
2485 | return 0; |
2486 | } |
2487 | |
2488 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) |
2489 | { |
2490 | BIG_IOCTL_Command_struct *ioc; |
2491 | struct CommandList *c; |
2492 | unsigned char **buff = NULL; |
2493 | int *buff_size = NULL; |
2494 | union u64bit temp64; |
2495 | BYTE sg_used = 0; |
2496 | int status = 0; |
2497 | int i; |
2498 | u32 left; |
2499 | u32 sz; |
2500 | BYTE __user *data_ptr; |
2501 | |
2502 | if (!argp) |
2503 | return -EINVAL; |
2504 | if (!capable(CAP_SYS_RAWIO)) |
2505 | return -EPERM; |
	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
2508 | if (!ioc) { |
2509 | status = -ENOMEM; |
2510 | goto cleanup1; |
2511 | } |
2512 | if (copy_from_user(ioc, argp, sizeof(*ioc))) { |
2513 | status = -EFAULT; |
2514 | goto cleanup1; |
2515 | } |
2516 | if ((ioc->buf_size < 1) && |
2517 | (ioc->Request.Type.Direction != XFER_NONE)) { |
2518 | status = -EINVAL; |
2519 | goto cleanup1; |
2520 | } |
2521 | /* Check kmalloc limits using all SGs */ |
2522 | if (ioc->malloc_size > MAX_KMALLOC_SIZE) { |
2523 | status = -EINVAL; |
2524 | goto cleanup1; |
2525 | } |
2526 | if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { |
2527 | status = -EINVAL; |
2528 | goto cleanup1; |
2529 | } |
2530 | buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); |
2531 | if (!buff) { |
2532 | status = -ENOMEM; |
2533 | goto cleanup1; |
2534 | } |
2535 | buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); |
2536 | if (!buff_size) { |
2537 | status = -ENOMEM; |
2538 | goto cleanup1; |
2539 | } |
2540 | left = ioc->buf_size; |
2541 | data_ptr = ioc->buf; |
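	/* Split the user buffer into chunks of at most ioc->malloc_size
	 * bytes, one kernel buffer per scatter-gather entry.
	 */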
2542 | while (left) { |
2543 | sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; |
2544 | buff_size[sg_used] = sz; |
2545 | buff[sg_used] = kmalloc(sz, GFP_KERNEL); |
2546 | if (buff[sg_used] == NULL) { |
2547 | status = -ENOMEM; |
2548 | goto cleanup1; |
2549 | } |
2550 | if (ioc->Request.Type.Direction == XFER_WRITE) { |
2551 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { |
				status = -EFAULT;
2553 | goto cleanup1; |
2554 | } |
2555 | } else |
2556 | memset(buff[sg_used], 0, sz); |
2557 | left -= sz; |
2558 | data_ptr += sz; |
2559 | sg_used++; |
2560 | } |
2561 | c = cmd_special_alloc(h); |
2562 | if (c == NULL) { |
2563 | status = -ENOMEM; |
2564 | goto cleanup1; |
2565 | } |
2566 | c->cmd_type = CMD_IOCTL_PEND; |
2567 | c->Header.ReplyQueue = 0; |
2568 | |
2569 | if (ioc->buf_size > 0) { |
2570 | c->Header.SGList = sg_used; |
2571 | c->Header.SGTotal = sg_used; |
2572 | } else { |
2573 | c->Header.SGList = 0; |
2574 | c->Header.SGTotal = 0; |
2575 | } |
2576 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); |
2577 | c->Header.Tag.lower = c->busaddr; |
2578 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); |
2579 | if (ioc->buf_size > 0) { |
2580 | int i; |
2581 | for (i = 0; i < sg_used; i++) { |
2582 | temp64.val = pci_map_single(h->pdev, buff[i], |
2583 | buff_size[i], PCI_DMA_BIDIRECTIONAL); |
2584 | c->SG[i].Addr.lower = temp64.val32.lower; |
2585 | c->SG[i].Addr.upper = temp64.val32.upper; |
2586 | c->SG[i].Len = buff_size[i]; |
2587 | /* we are not chaining */ |
2588 | c->SG[i].Ext = 0; |
2589 | } |
2590 | } |
2591 | hpsa_scsi_do_simple_cmd_core(h, c); |
2592 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); |
2593 | check_ioctl_unit_attention(h, c); |
2594 | /* Copy the error information out */ |
2595 | memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); |
2596 | if (copy_to_user(argp, ioc, sizeof(*ioc))) { |
2597 | cmd_special_free(h, c); |
2598 | status = -EFAULT; |
2599 | goto cleanup1; |
2600 | } |
2601 | if (ioc->Request.Type.Direction == XFER_READ) { |
2602 | /* Copy the data out of the buffer we created */ |
2603 | BYTE __user *ptr = ioc->buf; |
2604 | for (i = 0; i < sg_used; i++) { |
2605 | if (copy_to_user(ptr, buff[i], buff_size[i])) { |
2606 | cmd_special_free(h, c); |
2607 | status = -EFAULT; |
2608 | goto cleanup1; |
2609 | } |
2610 | ptr += buff_size[i]; |
2611 | } |
2612 | } |
2613 | cmd_special_free(h, c); |
2614 | status = 0; |
2615 | cleanup1: |
2616 | if (buff) { |
2617 | for (i = 0; i < sg_used; i++) |
2618 | kfree(buff[i]); |
2619 | kfree(buff); |
2620 | } |
2621 | kfree(buff_size); |
2622 | kfree(ioc); |
2623 | return status; |
2624 | } |
2625 | |
2626 | static void check_ioctl_unit_attention(struct ctlr_info *h, |
2627 | struct CommandList *c) |
2628 | { |
2629 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && |
2630 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) |
2631 | (void) check_for_unit_attention(h, c); |
2632 | } |
2633 | /* |
2634 | * ioctl |
2635 | */ |
2636 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) |
2637 | { |
2638 | struct ctlr_info *h; |
2639 | void __user *argp = (void __user *)arg; |
2640 | |
2641 | h = sdev_to_hba(dev); |
2642 | |
2643 | switch (cmd) { |
2644 | case CCISS_DEREGDISK: |
2645 | case CCISS_REGNEWDISK: |
2646 | case CCISS_REGNEWD: |
2647 | hpsa_scan_start(h->scsi_host); |
2648 | return 0; |
2649 | case CCISS_GETPCIINFO: |
2650 | return hpsa_getpciinfo_ioctl(h, argp); |
2651 | case CCISS_GETDRIVVER: |
2652 | return hpsa_getdrivver_ioctl(h, argp); |
2653 | case CCISS_PASSTHRU: |
2654 | return hpsa_passthru_ioctl(h, argp); |
2655 | case CCISS_BIG_PASSTHRU: |
2656 | return hpsa_big_passthru_ioctl(h, argp); |
2657 | default: |
2658 | return -ENOTTY; |
2659 | } |
2660 | } |
2661 | |
2662 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
2663 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
2664 | int cmd_type) |
2665 | { |
2666 | int pci_dir = XFER_NONE; |
2667 | |
2668 | c->cmd_type = CMD_IOCTL_PEND; |
2669 | c->Header.ReplyQueue = 0; |
2670 | if (buff != NULL && size > 0) { |
2671 | c->Header.SGList = 1; |
2672 | c->Header.SGTotal = 1; |
2673 | } else { |
2674 | c->Header.SGList = 0; |
2675 | c->Header.SGTotal = 0; |
2676 | } |
2677 | c->Header.Tag.lower = c->busaddr; |
2678 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); |
2679 | |
2680 | c->Request.Type.Type = cmd_type; |
2681 | if (cmd_type == TYPE_CMD) { |
2682 | switch (cmd) { |
2683 | case HPSA_INQUIRY: |
2684 | /* are we trying to read a vital product page */ |
2685 | if (page_code != 0) { |
2686 | c->Request.CDB[1] = 0x01; |
2687 | c->Request.CDB[2] = page_code; |
2688 | } |
2689 | c->Request.CDBLen = 6; |
2690 | c->Request.Type.Attribute = ATTR_SIMPLE; |
2691 | c->Request.Type.Direction = XFER_READ; |
2692 | c->Request.Timeout = 0; |
2693 | c->Request.CDB[0] = HPSA_INQUIRY; |
2694 | c->Request.CDB[4] = size & 0xFF; |
2695 | break; |
2696 | case HPSA_REPORT_LOG: |
2697 | case HPSA_REPORT_PHYS: |
		/* Talking to the controller, so it's a physical command:
		 * mode = 00, target = 0.  Nothing to write.
		 */
2701 | c->Request.CDBLen = 12; |
2702 | c->Request.Type.Attribute = ATTR_SIMPLE; |
2703 | c->Request.Type.Direction = XFER_READ; |
2704 | c->Request.Timeout = 0; |
2705 | c->Request.CDB[0] = cmd; |
2706 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ |
2707 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
2708 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
2709 | c->Request.CDB[9] = size & 0xFF; |
2710 | break; |
2711 | |
2712 | case HPSA_READ_CAPACITY: |
2713 | c->Request.CDBLen = 10; |
2714 | c->Request.Type.Attribute = ATTR_SIMPLE; |
2715 | c->Request.Type.Direction = XFER_READ; |
2716 | c->Request.Timeout = 0; |
2717 | c->Request.CDB[0] = cmd; |
2718 | break; |
2719 | case HPSA_CACHE_FLUSH: |
2720 | c->Request.CDBLen = 12; |
2721 | c->Request.Type.Attribute = ATTR_SIMPLE; |
2722 | c->Request.Type.Direction = XFER_WRITE; |
2723 | c->Request.Timeout = 0; |
2724 | c->Request.CDB[0] = BMIC_WRITE; |
2725 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; |
2726 | break; |
2727 | case TEST_UNIT_READY: |
2728 | c->Request.CDBLen = 6; |
2729 | c->Request.Type.Attribute = ATTR_SIMPLE; |
2730 | c->Request.Type.Direction = XFER_NONE; |
2731 | c->Request.Timeout = 0; |
2732 | break; |
2733 | default: |
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
2735 | BUG(); |
2736 | return; |
2737 | } |
2738 | } else if (cmd_type == TYPE_MSG) { |
2739 | switch (cmd) { |
2740 | |
2741 | case HPSA_DEVICE_RESET_MSG: |
2742 | c->Request.CDBLen = 16; |
2743 | c->Request.Type.Type = 1; /* It is a MSG not a CMD */ |
2744 | c->Request.Type.Attribute = ATTR_SIMPLE; |
2745 | c->Request.Type.Direction = XFER_NONE; |
2746 | c->Request.Timeout = 0; /* Don't time out */ |
2747 | c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */ |
2748 | c->Request.CDB[1] = 0x03; /* Reset target above */ |
2749 | /* If bytes 4-7 are zero, it means reset the */ |
2750 | /* LunID device */ |
2751 | c->Request.CDB[4] = 0x00; |
2752 | c->Request.CDB[5] = 0x00; |
2753 | c->Request.CDB[6] = 0x00; |
2754 | c->Request.CDB[7] = 0x00; |
2755 | break; |
2756 | |
2757 | default: |
2758 | dev_warn(&h->pdev->dev, "unknown message type %d\n", |
2759 | cmd); |
2760 | BUG(); |
2761 | } |
2762 | } else { |
2763 | dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); |
2764 | BUG(); |
2765 | } |
2766 | |
2767 | switch (c->Request.Type.Direction) { |
2768 | case XFER_READ: |
2769 | pci_dir = PCI_DMA_FROMDEVICE; |
2770 | break; |
2771 | case XFER_WRITE: |
2772 | pci_dir = PCI_DMA_TODEVICE; |
2773 | break; |
2774 | case XFER_NONE: |
2775 | pci_dir = PCI_DMA_NONE; |
2776 | break; |
2777 | default: |
2778 | pci_dir = PCI_DMA_BIDIRECTIONAL; |
2779 | } |
2780 | |
2781 | hpsa_map_one(h->pdev, c, buff, size, pci_dir); |
2782 | |
2783 | return; |
2784 | } |
2785 | |
2786 | /* |
2787 | * Map (physical) PCI mem into (virtual) kernel space |
2788 | */ |
2789 | static void __iomem *remap_pci_mem(ulong base, ulong size) |
2790 | { |
2791 | ulong page_base = ((ulong) base) & PAGE_MASK; |
2792 | ulong page_offs = ((ulong) base) - page_base; |
2793 | void __iomem *page_remapped = ioremap(page_base, page_offs + size); |
2794 | |
2795 | return page_remapped ? (page_remapped + page_offs) : NULL; |
2796 | } |
2797 | |
2798 | /* Takes cmds off the submission queue and sends them to the hardware, |
2799 | * then puts them on the queue of cmds waiting for completion. |
2800 | */ |
2801 | static void start_io(struct ctlr_info *h) |
2802 | { |
2803 | struct CommandList *c; |
2804 | |
2805 | while (!hlist_empty(&h->reqQ)) { |
2806 | c = hlist_entry(h->reqQ.first, struct CommandList, list); |
2807 | /* can't do anything if fifo is full */ |
2808 | if ((h->access.fifo_full(h))) { |
2809 | dev_warn(&h->pdev->dev, "fifo full\n"); |
2810 | break; |
2811 | } |
2812 | |
2813 | /* Get the first entry from the Request Q */ |
2814 | removeQ(c); |
2815 | h->Qdepth--; |
2816 | |
2817 | /* Tell the controller execute command */ |
2818 | h->access.submit_command(h, c); |
2819 | |
2820 | /* Put job onto the completed Q */ |
2821 | addQ(&h->cmpQ, c); |
2822 | } |
2823 | } |
2824 | |
2825 | static inline unsigned long get_next_completion(struct ctlr_info *h) |
2826 | { |
2827 | return h->access.command_completed(h); |
2828 | } |
2829 | |
2830 | static inline bool interrupt_pending(struct ctlr_info *h) |
2831 | { |
2832 | return h->access.intr_pending(h); |
2833 | } |
2834 | |
2835 | static inline long interrupt_not_for_us(struct ctlr_info *h) |
2836 | { |
2837 | return !(h->msi_vector || h->msix_vector) && |
2838 | ((h->access.intr_pending(h) == 0) || |
2839 | (h->interrupts_enabled == 0)); |
2840 | } |
2841 | |
2842 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
2843 | u32 raw_tag) |
2844 | { |
2845 | if (unlikely(tag_index >= h->nr_cmds)) { |
2846 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); |
2847 | return 1; |
2848 | } |
2849 | return 0; |
2850 | } |
2851 | |
2852 | static inline void finish_cmd(struct CommandList *c, u32 raw_tag) |
2853 | { |
2854 | removeQ(c); |
2855 | if (likely(c->cmd_type == CMD_SCSI)) |
2856 | complete_scsi_command(c, 0, raw_tag); |
2857 | else if (c->cmd_type == CMD_IOCTL_PEND) |
2858 | complete(c->waiting); |
2859 | } |
2860 | |
2861 | static inline u32 hpsa_tag_contains_index(u32 tag) |
2862 | { |
2863 | #define DIRECT_LOOKUP_BIT 0x10 |
2864 | return tag & DIRECT_LOOKUP_BIT; |
2865 | } |
2866 | |
2867 | static inline u32 hpsa_tag_to_index(u32 tag) |
2868 | { |
2869 | #define DIRECT_LOOKUP_SHIFT 5 |
2870 | return tag >> DIRECT_LOOKUP_SHIFT; |
2871 | } |
2872 | |
2873 | static inline u32 hpsa_tag_discard_error_bits(u32 tag) |
2874 | { |
2875 | #define HPSA_ERROR_BITS 0x03 |
2876 | return tag & ~HPSA_ERROR_BITS; |
2877 | } |
2878 | |
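/*
 * Tag layout used by the helpers above: bits 1:0 carry controller
 * error/status flags that get masked off, bit 4 (DIRECT_LOOKUP_BIT)
 * marks a tag that encodes the command index, and the index itself
 * lives in bits 31:5.  For example, cmdindex 3 is posted as
 * (3 << 5) | 0x10 = 0x70, and hpsa_tag_to_index(0x70) recovers 3.
 */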
2879 | /* process completion of an indexed ("direct lookup") command */ |
2880 | static inline u32 process_indexed_cmd(struct ctlr_info *h, |
2881 | u32 raw_tag) |
2882 | { |
2883 | u32 tag_index; |
2884 | struct CommandList *c; |
2885 | |
2886 | tag_index = hpsa_tag_to_index(raw_tag); |
2887 | if (bad_tag(h, tag_index, raw_tag)) |
2888 | return next_command(h); |
2889 | c = h->cmd_pool + tag_index; |
2890 | finish_cmd(c, raw_tag); |
2891 | return next_command(h); |
2892 | } |
2893 | |
2894 | /* process completion of a non-indexed command */ |
2895 | static inline u32 process_nonindexed_cmd(struct ctlr_info *h, |
2896 | u32 raw_tag) |
2897 | { |
2898 | u32 tag; |
2899 | struct CommandList *c = NULL; |
2900 | struct hlist_node *tmp; |
2901 | |
2902 | tag = hpsa_tag_discard_error_bits(raw_tag); |
2903 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { |
2904 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { |
2905 | finish_cmd(c, raw_tag); |
2906 | return next_command(h); |
2907 | } |
2908 | } |
2909 | bad_tag(h, h->nr_cmds + 1, raw_tag); |
2910 | return next_command(h); |
2911 | } |
2912 | |
2913 | static irqreturn_t do_hpsa_intr(int irq, void *dev_id) |
2914 | { |
2915 | struct ctlr_info *h = dev_id; |
2916 | unsigned long flags; |
2917 | u32 raw_tag; |
2918 | |
2919 | if (interrupt_not_for_us(h)) |
2920 | return IRQ_NONE; |
2921 | spin_lock_irqsave(&h->lock, flags); |
2922 | raw_tag = get_next_completion(h); |
2923 | while (raw_tag != FIFO_EMPTY) { |
2924 | if (hpsa_tag_contains_index(raw_tag)) |
2925 | raw_tag = process_indexed_cmd(h, raw_tag); |
2926 | else |
2927 | raw_tag = process_nonindexed_cmd(h, raw_tag); |
2928 | } |
2929 | spin_unlock_irqrestore(&h->lock, flags); |
2930 | return IRQ_HANDLED; |
2931 | } |
2932 | |
2933 | /* Send a message CDB to the firmware. */ |
2934 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
2935 | unsigned char type) |
2936 | { |
2937 | struct Command { |
2938 | struct CommandListHeader CommandHeader; |
2939 | struct RequestBlock Request; |
2940 | struct ErrDescriptor ErrorDescriptor; |
2941 | }; |
2942 | struct Command *cmd; |
2943 | static const size_t cmd_sz = sizeof(*cmd) + |
2944 | sizeof(cmd->ErrorDescriptor); |
2945 | dma_addr_t paddr64; |
2946 | uint32_t paddr32, tag; |
2947 | void __iomem *vaddr; |
2948 | int i, err; |
2949 | |
2950 | vaddr = pci_ioremap_bar(pdev, 0); |
2951 | if (vaddr == NULL) |
2952 | return -ENOMEM; |
2953 | |
2954 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the |
2955 | * CCISS commands, so they must be allocated from the lower 4GiB of |
2956 | * memory. |
2957 | */ |
2958 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); |
2959 | if (err) { |
2960 | iounmap(vaddr); |
2961 | return -ENOMEM; |
2962 | } |
2963 | |
2964 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); |
2965 | if (cmd == NULL) { |
2966 | iounmap(vaddr); |
2967 | return -ENOMEM; |
2968 | } |
2969 | |
2970 | /* This must fit, because of the 32-bit consistent DMA mask. Also, |
2971 | * although there's no guarantee, we assume that the address is at |
2972 | * least 4-byte aligned (most likely, it's page-aligned). |
2973 | */ |
2974 | paddr32 = paddr64; |
2975 | |
2976 | cmd->CommandHeader.ReplyQueue = 0; |
2977 | cmd->CommandHeader.SGList = 0; |
2978 | cmd->CommandHeader.SGTotal = 0; |
2979 | cmd->CommandHeader.Tag.lower = paddr32; |
2980 | cmd->CommandHeader.Tag.upper = 0; |
2981 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); |
2982 | |
2983 | cmd->Request.CDBLen = 16; |
2984 | cmd->Request.Type.Type = TYPE_MSG; |
2985 | cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; |
2986 | cmd->Request.Type.Direction = XFER_NONE; |
2987 | cmd->Request.Timeout = 0; /* Don't time out */ |
2988 | cmd->Request.CDB[0] = opcode; |
2989 | cmd->Request.CDB[1] = type; |
2990 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ |
2991 | cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); |
2992 | cmd->ErrorDescriptor.Addr.upper = 0; |
2993 | cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); |
2994 | |
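	/* Post the command's physical address to the inbound request port,
	 * then poll the outbound reply port until our tag comes back or
	 * the retry limit is reached.
	 */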
2995 | writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); |
2996 | |
2997 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { |
2998 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); |
2999 | if (hpsa_tag_discard_error_bits(tag) == paddr32) |
3000 | break; |
3001 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); |
3002 | } |
3003 | |
3004 | iounmap(vaddr); |
3005 | |
3006 | /* we leak the DMA buffer here ... no choice since the controller could |
3007 | * still complete the command. |
3008 | */ |
3009 | if (i == HPSA_MSG_SEND_RETRY_LIMIT) { |
3010 | dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", |
3011 | opcode, type); |
3012 | return -ETIMEDOUT; |
3013 | } |
3014 | |
3015 | pci_free_consistent(pdev, cmd_sz, cmd, paddr64); |
3016 | |
3017 | if (tag & HPSA_ERROR_BIT) { |
3018 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", |
3019 | opcode, type); |
3020 | return -EIO; |
3021 | } |
3022 | |
3023 | dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", |
3024 | opcode, type); |
3025 | return 0; |
3026 | } |
3027 | |
3028 | #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) |
3029 | #define hpsa_noop(p) hpsa_message(p, 3, 0) |
3030 | |
3031 | static __devinit int hpsa_reset_msi(struct pci_dev *pdev) |
3032 | { |
3033 | /* the #defines are stolen from drivers/pci/msi.h. */ |
3034 | #define msi_control_reg(base) (base + PCI_MSI_FLAGS) |
3035 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) |
3036 | |
3037 | int pos; |
3038 | u16 control = 0; |
3039 | |
3040 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); |
3041 | if (pos) { |
3042 | pci_read_config_word(pdev, msi_control_reg(pos), &control); |
3043 | if (control & PCI_MSI_FLAGS_ENABLE) { |
3044 | dev_info(&pdev->dev, "resetting MSI\n"); |
3045 | pci_write_config_word(pdev, msi_control_reg(pos), |
3046 | control & ~PCI_MSI_FLAGS_ENABLE); |
3047 | } |
3048 | } |
3049 | |
3050 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); |
3051 | if (pos) { |
3052 | pci_read_config_word(pdev, msi_control_reg(pos), &control); |
3053 | if (control & PCI_MSIX_FLAGS_ENABLE) { |
3054 | dev_info(&pdev->dev, "resetting MSI-X\n"); |
3055 | pci_write_config_word(pdev, msi_control_reg(pos), |
3056 | control & ~PCI_MSIX_FLAGS_ENABLE); |
3057 | } |
3058 | } |
3059 | |
3060 | return 0; |
3061 | } |
3062 | |
3063 | /* This does a hard reset of the controller using PCI power management |
3064 | * states. |
3065 | */ |
3066 | static __devinit int hpsa_hard_reset_controller(struct pci_dev *pdev) |
3067 | { |
3068 | u16 pmcsr, saved_config_space[32]; |
3069 | int i, pos; |
3070 | |
3071 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); |
3072 | |
3073 | /* This is very nearly the same thing as |
3074 | * |
3075 | * pci_save_state(pci_dev); |
3076 | * pci_set_power_state(pci_dev, PCI_D3hot); |
3077 | * pci_set_power_state(pci_dev, PCI_D0); |
3078 | * pci_restore_state(pci_dev); |
3079 | * |
3080 | * but we can't use these nice canned kernel routines on |
3081 | * kexec, because they also check the MSI/MSI-X state in PCI |
3082 | * configuration space and do the wrong thing when it is |
3083 | * set/cleared. Also, the pci_save/restore_state functions |
3084 | * violate the ordering requirements for restoring the |
3085 | * configuration space from the CCISS document (see the |
3086 | * comment below). So we roll our own .... |
3087 | */ |
3088 | |
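	/* Save the first 64 bytes of PCI config space as 16-bit words
	 * (the read offset below is 2*i).
	 */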
3089 | for (i = 0; i < 32; i++) |
3090 | pci_read_config_word(pdev, 2*i, &saved_config_space[i]); |
3091 | |
3092 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); |
3093 | if (pos == 0) { |
3094 | dev_err(&pdev->dev, |
3095 | "hpsa_reset_controller: PCI PM not supported\n"); |
3096 | return -ENODEV; |
3097 | } |
3098 | |
3099 | /* Quoting from the Open CISS Specification: "The Power |
3100 | * Management Control/Status Register (CSR) controls the power |
3101 | * state of the device. The normal operating state is D0, |
3102 | * CSR=00h. The software off state is D3, CSR=03h. To reset |
3103 | * the controller, place the interface device in D3 then to |
3104 | * D0, this causes a secondary PCI reset which will reset the |
3105 | * controller." |
3106 | */ |
3107 | |
3108 | /* enter the D3hot power management state */ |
3109 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); |
3110 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
3111 | pmcsr |= PCI_D3hot; |
3112 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); |
3113 | |
3114 | msleep(500); |
3115 | |
3116 | /* enter the D0 power management state */ |
3117 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
3118 | pmcsr |= PCI_D0; |
3119 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); |
3120 | |
3121 | msleep(500); |
3122 | |
3123 | /* Restore the PCI configuration space. The Open CISS |
3124 | * Specification says, "Restore the PCI Configuration |
3125 | * Registers, offsets 00h through 60h. It is important to |
3126 | * restore the command register, 16-bits at offset 04h, |
3127 | * last. Do not restore the configuration status register, |
3128 | * 16-bits at offset 06h." Note that the offset is 2*i. |
3129 | */ |
3130 | for (i = 0; i < 32; i++) { |
3131 | if (i == 2 || i == 3) |
3132 | continue; |
3133 | pci_write_config_word(pdev, 2*i, saved_config_space[i]); |
3134 | } |
3135 | wmb(); |
3136 | pci_write_config_word(pdev, 4, saved_config_space[2]); |
3137 | |
3138 | return 0; |
3139 | } |
3140 | |
3141 | /* |
3142 | * We cannot read the structure directly, for portability we must use |
3143 | * the io functions. |
3144 | * This is for debug only. |
3145 | */ |
3146 | #ifdef HPSA_DEBUG |
3147 | static void print_cfg_table(struct device *dev, struct CfgTable *tb) |
3148 | { |
3149 | int i; |
3150 | char temp_name[17]; |
3151 | |
3152 | dev_info(dev, "Controller Configuration information\n"); |
3153 | dev_info(dev, "------------------------------------\n"); |
3154 | for (i = 0; i < 4; i++) |
3155 | temp_name[i] = readb(&(tb->Signature[i])); |
3156 | temp_name[4] = '\0'; |
3157 | dev_info(dev, " Signature = %s\n", temp_name); |
3158 | dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); |
3159 | dev_info(dev, " Transport methods supported = 0x%x\n", |
3160 | readl(&(tb->TransportSupport))); |
3161 | dev_info(dev, " Transport methods active = 0x%x\n", |
3162 | readl(&(tb->TransportActive))); |
3163 | dev_info(dev, " Requested transport Method = 0x%x\n", |
3164 | readl(&(tb->HostWrite.TransportRequest))); |
3165 | dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", |
3166 | readl(&(tb->HostWrite.CoalIntDelay))); |
3167 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", |
3168 | readl(&(tb->HostWrite.CoalIntCount))); |
	dev_info(dev, "   Max outstanding commands = %d\n",
3170 | readl(&(tb->CmdsOutMax))); |
3171 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); |
3172 | for (i = 0; i < 16; i++) |
3173 | temp_name[i] = readb(&(tb->ServerName[i])); |
3174 | temp_name[16] = '\0'; |
3175 | dev_info(dev, " Server Name = %s\n", temp_name); |
3176 | dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", |
3177 | readl(&(tb->HeartBeat))); |
3178 | } |
3179 | #endif /* HPSA_DEBUG */ |
3180 | |
3181 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) |
3182 | { |
3183 | int i, offset, mem_type, bar_type; |
3184 | |
3185 | if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ |
3186 | return 0; |
3187 | offset = 0; |
3188 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
3189 | bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; |
3190 | if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) |
3191 | offset += 4; |
3192 | else { |
3193 | mem_type = pci_resource_flags(pdev, i) & |
3194 | PCI_BASE_ADDRESS_MEM_TYPE_MASK; |
3195 | switch (mem_type) { |
3196 | case PCI_BASE_ADDRESS_MEM_TYPE_32: |
3197 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: |
3198 | offset += 4; /* 32 bit */ |
3199 | break; |
3200 | case PCI_BASE_ADDRESS_MEM_TYPE_64: |
3201 | offset += 8; |
3202 | break; |
3203 | default: /* reserved in PCI 2.2 */ |
3204 | dev_warn(&pdev->dev, |
3205 | "base address is invalid\n"); |
3206 | return -1; |
3207 | break; |
3208 | } |
3209 | } |
3210 | if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) |
3211 | return i + 1; |
3212 | } |
3213 | return -1; |
3214 | } |
3215 | |
3216 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on |
3217 | * controllers that are capable. If not, we use IO-APIC mode. |
3218 | */ |
3219 | |
3220 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, |
3221 | struct pci_dev *pdev, u32 board_id) |
3222 | { |
3223 | #ifdef CONFIG_PCI_MSI |
3224 | int err; |
3225 | struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, |
3226 | {0, 2}, {0, 3} |
3227 | }; |
3228 | |
3229 | /* Some boards advertise MSI but don't really support it */ |
3230 | if ((board_id == 0x40700E11) || |
3231 | (board_id == 0x40800E11) || |
3232 | (board_id == 0x40820E11) || (board_id == 0x40830E11)) |
3233 | goto default_int_mode; |
3234 | if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { |
3235 | dev_info(&pdev->dev, "MSIX\n"); |
3236 | err = pci_enable_msix(pdev, hpsa_msix_entries, 4); |
3237 | if (!err) { |
3238 | h->intr[0] = hpsa_msix_entries[0].vector; |
3239 | h->intr[1] = hpsa_msix_entries[1].vector; |
3240 | h->intr[2] = hpsa_msix_entries[2].vector; |
3241 | h->intr[3] = hpsa_msix_entries[3].vector; |
3242 | h->msix_vector = 1; |
3243 | return; |
3244 | } |
3245 | if (err > 0) { |
3246 | dev_warn(&pdev->dev, "only %d MSI-X vectors " |
3247 | "available\n", err); |
3248 | goto default_int_mode; |
3249 | } else { |
3250 | dev_warn(&pdev->dev, "MSI-X init failed %d\n", |
3251 | err); |
3252 | goto default_int_mode; |
3253 | } |
3254 | } |
3255 | if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { |
3256 | dev_info(&pdev->dev, "MSI\n"); |
3257 | if (!pci_enable_msi(pdev)) |
3258 | h->msi_vector = 1; |
3259 | else |
3260 | dev_warn(&pdev->dev, "MSI init failed\n"); |
3261 | } |
3262 | default_int_mode: |
3263 | #endif /* CONFIG_PCI_MSI */ |
3264 | /* if we get here we're going to use the default interrupt mode */ |
3265 | h->intr[PERF_MODE_INT] = pdev->irq; |
3266 | } |
3267 | |
3268 | static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) |
3269 | { |
3270 | ushort subsystem_vendor_id, subsystem_device_id, command; |
3271 | u32 board_id, scratchpad = 0; |
3272 | u64 cfg_offset; |
3273 | u32 cfg_base_addr; |
3274 | u64 cfg_base_addr_index; |
3275 | u32 trans_offset; |
3276 | int i, prod_index, err; |
3277 | |
3278 | subsystem_vendor_id = pdev->subsystem_vendor; |
3279 | subsystem_device_id = pdev->subsystem_device; |
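/* board_id packs the subsystem device ID into the upper 16 bits and the
 * subsystem vendor ID into the lower 16 bits; the products[] table is
 * keyed on this value.
 */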
3280 | board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) | |
3281 | subsystem_vendor_id); |
3282 | |
3283 | for (i = 0; i < ARRAY_SIZE(products); i++) |
3284 | if (board_id == products[i].board_id) |
3285 | break; |
3286 | |
3287 | prod_index = i; |
3288 | |
3289 | if (prod_index == ARRAY_SIZE(products)) { |
3290 | prod_index--; |
3291 | if (subsystem_vendor_id != PCI_VENDOR_ID_HP || |
3292 | !hpsa_allow_any) { |
3293 | dev_warn(&pdev->dev, "unrecognized board ID:" |
3294 | " 0x%08lx, ignoring.\n", |
3295 | (unsigned long) board_id); |
3296 | return -ENODEV; |
3297 | } |
3298 | } |
3299 | /* check to see if controller has been disabled |
3300 | * BEFORE trying to enable it |
3301 | */ |
3302 | (void)pci_read_config_word(pdev, PCI_COMMAND, &command); |
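/* Bit 0x02 of PCI_COMMAND is PCI_COMMAND_MEMORY; if it is clear,
 * memory-space access to the board has been disabled.
 */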
3303 | if (!(command & 0x02)) { |
3304 | dev_warn(&pdev->dev, "controller appears to be disabled\n"); |
3305 | return -ENODEV; |
3306 | } |
3307 | |
3308 | err = pci_enable_device(pdev); |
3309 | if (err) { |
3310 | dev_warn(&pdev->dev, "unable to enable PCI device\n"); |
3311 | return err; |
3312 | } |
3313 | |
3314 | err = pci_request_regions(pdev, "hpsa"); |
3315 | if (err) { |
3316 | dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); |
3317 | return err; |
3318 | } |
3319 | |
3320 | /* If the kernel supports MSI/MSI-X we will try to enable that, |
3321 | * else we use the IO-APIC interrupt assigned to us by system ROM. |
3322 | */ |
3323 | hpsa_interrupt_mode(h, pdev, board_id); |
3324 | |
3325 | /* find the memory BAR */ |
3326 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
3327 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) |
3328 | break; |
3329 | } |
3330 | if (i == DEVICE_COUNT_RESOURCE) { |
3331 | dev_warn(&pdev->dev, "no memory BAR found\n"); |
3332 | err = -ENODEV; |
3333 | goto err_out_free_res; |
3334 | } |
3335 | |
3336 | h->paddr = pci_resource_start(pdev, i); /* addressing mode bits |
3337 | * already removed |
3338 | */ |
3339 | |
3340 | h->vaddr = remap_pci_mem(h->paddr, 0x250); |
3341 | |
3342 | /* Wait for the board to become ready. */ |
3343 | for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) { |
3344 | scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); |
3345 | if (scratchpad == HPSA_FIRMWARE_READY) |
3346 | break; |
3347 | msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); |
3348 | } |
3349 | if (scratchpad != HPSA_FIRMWARE_READY) { |
3350 | dev_warn(&pdev->dev, "board not ready, timed out.\n"); |
3351 | err = -ENODEV; |
3352 | goto err_out_free_res; |
3353 | } |
3354 | |
3355 | /* get the address index number */ |
3356 | cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); |
3357 | cfg_base_addr &= (u32) 0x0000ffff; |
3358 | cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); |
3359 | if (cfg_base_addr_index == -1) { |
3360 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); |
3361 | err = -ENODEV; |
3362 | goto err_out_free_res; |
3363 | } |
3364 | |
3365 | cfg_offset = readl(h->vaddr + SA5_CTMEM_OFFSET); |
3366 | h->cfgtable = remap_pci_mem(pci_resource_start(pdev, |
3367 | cfg_base_addr_index) + cfg_offset, |
3368 | sizeof(*h->cfgtable)); |
3369 | /* Find performant mode table. */ |
3370 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); |
3371 | h->transtable = remap_pci_mem(pci_resource_start(pdev, |
3372 | cfg_base_addr_index)+cfg_offset+trans_offset, |
3373 | sizeof(*h->transtable)); |
3374 | |
3375 | h->board_id = board_id; |
3376 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); |
3377 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); |
3378 | |
3379 | /* |
3380 | * Limit in-command s/g elements to 32 to save DMA'able memory. |
3381 | * However, the spec says if the reported value is 0, use 31. |
3382 | */ |
3383 | |
3384 | h->max_cmd_sg_entries = 31; |
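/* For controllers reporting more than 512 SG elements, use SG chaining:
 * embed 32 entries in the command itself, with the last entry used as a
 * pointer to an external chain block when a request needs more.
 */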
3385 | if (h->maxsgentries > 512) { |
3386 | h->max_cmd_sg_entries = 32; |
3387 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; |
3388 | h->maxsgentries--; /* save one for chain pointer */ |
3389 | } else { |
3390 | h->maxsgentries = 31; /* default to traditional values */ |
3391 | h->chainsize = 0; |
3392 | } |
3393 | |
3394 | h->product_name = products[prod_index].product_name; |
3395 | h->access = *(products[prod_index].access); |
3396 | /* Allow room for some ioctls */ |
3397 | h->nr_cmds = h->max_commands - 4; |
3398 | |
3399 | if ((readb(&h->cfgtable->Signature[0]) != 'C') || |
3400 | (readb(&h->cfgtable->Signature[1]) != 'I') || |
3401 | (readb(&h->cfgtable->Signature[2]) != 'S') || |
3402 | (readb(&h->cfgtable->Signature[3]) != 'S')) { |
3403 | dev_warn(&pdev->dev, "not a valid CISS config table\n"); |
3404 | err = -ENODEV; |
3405 | goto err_out_free_res; |
3406 | } |
3407 | #ifdef CONFIG_X86 |
3408 | { |
3409 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
3410 | u32 prefetch; |
3411 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); |
3412 | prefetch |= 0x100; |
3413 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); |
3414 | } |
3415 | #endif |
3416 | |
3417 | /* Disabling DMA prefetch for the P600 |
3418 | * An ASIC bug may result in a prefetch beyond |
3419 | * physical memory. |
3420 | */ |
3421 | if (board_id == 0x3225103C) { |
3422 | u32 dma_prefetch; |
3423 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); |
3424 | dma_prefetch |= 0x8000; |
3425 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
3426 | } |
3427 | |
3428 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); |
3429 | /* Update the field, and then ring the doorbell */ |
3430 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); |
3431 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
3432 | |
3433 | /* under certain very rare conditions, this can take a while. |
3434 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right |
3435 | * as we enter this code.) |
3436 | */ |
3437 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { |
3438 | if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) |
3439 | break; |
3440 | /* delay and try again */ |
3441 | msleep(10); |
3442 | } |
3443 | |
3444 | #ifdef HPSA_DEBUG |
3445 | print_cfg_table(&pdev->dev, h->cfgtable); |
3446 | #endif /* HPSA_DEBUG */ |
3447 | |
3448 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { |
3449 | dev_warn(&pdev->dev, "unable to get board into simple mode\n"); |
3450 | err = -ENODEV; |
3451 | goto err_out_free_res; |
3452 | } |
3453 | return 0; |
3454 | |
3455 | err_out_free_res: |
3456 | /* |
3457 | * Deliberately omit pci_disable_device(): it does something nasty to |
3458 | * Smart Array controllers that pci_enable_device does not undo |
3459 | */ |
3460 | pci_release_regions(pdev); |
3461 | return err; |
3462 | } |
3463 | |
3464 | static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) |
3465 | { |
3466 | int rc; |
3467 | |
3468 | #define HBA_INQUIRY_BYTE_COUNT 64 |
3469 | h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); |
3470 | if (!h->hba_inquiry_data) |
3471 | return; |
3472 | rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, |
3473 | h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); |
3474 | if (rc != 0) { |
3475 | kfree(h->hba_inquiry_data); |
3476 | h->hba_inquiry_data = NULL; |
3477 | } |
3478 | } |
3479 | |
3480 | static int __devinit hpsa_init_one(struct pci_dev *pdev, |
3481 | const struct pci_device_id *ent) |
3482 | { |
3483 | int i, rc; |
3484 | int dac; |
3485 | struct ctlr_info *h; |
3486 | |
3487 | if (number_of_controllers == 0) |
3488 | printk(KERN_INFO DRIVER_NAME "\n"); |
3489 | if (reset_devices) { |
3490 | /* Reset the controller with a PCI power-cycle */ |
3491 | if (hpsa_hard_reset_controller(pdev) || hpsa_reset_msi(pdev)) |
3492 | return -ENODEV; |
3493 | |
3494 | /* Some devices (notably the HP Smart Array 5i Controller) |
3495 | need a little pause here */ |
3496 | msleep(HPSA_POST_RESET_PAUSE_MSECS); |
3497 | |
3498 | /* Now try to get the controller to respond to a no-op */ |
3499 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { |
3500 | if (hpsa_noop(pdev) == 0) |
3501 | break; |
3502 | else |
3503 | dev_warn(&pdev->dev, "no-op failed%s\n", |
3504 | (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : "")); |
3505 | } |
3506 | } |
3507 | |
3508 | /* Command structures must be aligned on a 32-byte boundary because |
3509 | * the 5 lower bits of the address are used by the hardware and by |
3510 | * the driver. See comments in hpsa.h for more info. |
3511 | */ |
3512 | #define COMMANDLIST_ALIGNMENT 32 |
3513 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); |
3514 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
3515 | if (!h) |
3516 | return -ENOMEM; |
3517 | |
3518 | h->busy_initializing = 1; |
3519 | INIT_HLIST_HEAD(&h->cmpQ); |
3520 | INIT_HLIST_HEAD(&h->reqQ); |
3521 | rc = hpsa_pci_init(h, pdev); |
3522 | if (rc != 0) |
3523 | goto clean1; |
3524 | |
3525 | sprintf(h->devname, "hpsa%d", number_of_controllers); |
3526 | h->ctlr = number_of_controllers; |
3527 | number_of_controllers++; |
3528 | h->pdev = pdev; |
3529 | |
3530 | /* configure PCI DMA stuff */ |
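/* Prefer a 64-bit DMA mask; "dac" (dual address cycle) records whether
 * 64-bit addressing was granted.
 */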
3531 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
3532 | if (rc == 0) { |
3533 | dac = 1; |
3534 | } else { |
3535 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
3536 | if (rc == 0) { |
3537 | dac = 0; |
3538 | } else { |
3539 | dev_err(&pdev->dev, "no suitable DMA available\n"); |
3540 | goto clean1; |
3541 | } |
3542 | } |
3543 | |
3544 | /* make sure the board interrupts are off */ |
3545 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
3546 | rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr, |
3547 | IRQF_DISABLED, h->devname, h); |
3548 | if (rc) { |
3549 | dev_err(&pdev->dev, "unable to get irq %d for %s\n", |
3550 | h->intr[PERF_MODE_INT], h->devname); |
3551 | goto clean2; |
3552 | } |
3553 | |
3554 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
3555 | h->devname, pdev->device, |
3556 | h->intr[PERF_MODE_INT], dac ? "" : " not"); |
3557 | |
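/* cmd_pool_bits is a bitmap with one bit per command slot, rounded up
 * to a whole number of longs.
 */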
3558 | h->cmd_pool_bits = |
3559 | kmalloc(((h->nr_cmds + BITS_PER_LONG - |
3560 | 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); |
3561 | h->cmd_pool = pci_alloc_consistent(h->pdev, |
3562 | h->nr_cmds * sizeof(*h->cmd_pool), |
3563 | &(h->cmd_pool_dhandle)); |
3564 | h->errinfo_pool = pci_alloc_consistent(h->pdev, |
3565 | h->nr_cmds * sizeof(*h->errinfo_pool), |
3566 | &(h->errinfo_pool_dhandle)); |
3567 | if ((h->cmd_pool_bits == NULL) |
3568 | || (h->cmd_pool == NULL) |
3569 | || (h->errinfo_pool == NULL)) { |
3570 | dev_err(&pdev->dev, "out of memory\n"); |
3571 | rc = -ENOMEM; |
3572 | goto clean4; |
3573 | } |
3574 | if (hpsa_allocate_sg_chain_blocks(h)) |
3575 | goto clean4; |
3576 | spin_lock_init(&h->lock); |
3577 | spin_lock_init(&h->scan_lock); |
3578 | init_waitqueue_head(&h->scan_wait_queue); |
3579 | h->scan_finished = 1; /* no scan currently in progress */ |
3580 | |
3581 | pci_set_drvdata(pdev, h); |
3582 | memset(h->cmd_pool_bits, 0, |
3583 | ((h->nr_cmds + BITS_PER_LONG - |
3584 | 1) / BITS_PER_LONG) * sizeof(unsigned long)); |
3585 | |
3586 | hpsa_scsi_setup(h); |
3587 | |
3588 | /* Turn the interrupts on so we can service requests */ |
3589 | h->access.set_intr_mask(h, HPSA_INTR_ON); |
3590 | |
3591 | hpsa_put_ctlr_into_performant_mode(h); |
3592 | hpsa_hba_inquiry(h); |
3593 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ |
3594 | h->busy_initializing = 0; |
3595 | return 0; |
3596 | |
3597 | clean4: |
3598 | hpsa_free_sg_chain_blocks(h); |
3599 | kfree(h->cmd_pool_bits); |
3600 | if (h->cmd_pool) |
3601 | pci_free_consistent(h->pdev, |
3602 | h->nr_cmds * sizeof(struct CommandList), |
3603 | h->cmd_pool, h->cmd_pool_dhandle); |
3604 | if (h->errinfo_pool) |
3605 | pci_free_consistent(h->pdev, |
3606 | h->nr_cmds * sizeof(struct ErrorInfo), |
3607 | h->errinfo_pool, |
3608 | h->errinfo_pool_dhandle); |
3609 | free_irq(h->intr[PERF_MODE_INT], h); |
3610 | clean2: |
3611 | clean1: |
3612 | h->busy_initializing = 0; |
3613 | kfree(h); |
3614 | return rc; |
3615 | } |
3616 | |
3617 | static void hpsa_flush_cache(struct ctlr_info *h) |
3618 | { |
3619 | char *flush_buf; |
3620 | struct CommandList *c; |
3621 | |
3622 | flush_buf = kzalloc(4, GFP_KERNEL); |
3623 | if (!flush_buf) |
3624 | return; |
3625 | |
3626 | c = cmd_special_alloc(h); |
3627 | if (!c) { |
3628 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
3629 | goto out_of_memory; |
3630 | } |
3631 | fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, |
3632 | RAID_CTLR_LUNID, TYPE_CMD); |
3633 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); |
3634 | if (c->err_info->CommandStatus != 0) |
3635 | dev_warn(&h->pdev->dev, |
3636 | "error flushing cache on controller\n"); |
3637 | cmd_special_free(h, c); |
3638 | out_of_memory: |
3639 | kfree(flush_buf); |
3640 | } |
3641 | |
3642 | static void hpsa_shutdown(struct pci_dev *pdev) |
3643 | { |
3644 | struct ctlr_info *h; |
3645 | |
3646 | h = pci_get_drvdata(pdev); |
3647 | /* Send the flush cache command, then turn board interrupts off: |
3648 | * flushing writes all data in the battery-backed write cache |
3649 | * out to the disks before the controller is shut down. |
3650 | */ |
3651 | hpsa_flush_cache(h); |
3652 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
3653 | free_irq(h->intr[PERF_MODE_INT], h); |
3654 | #ifdef CONFIG_PCI_MSI |
3655 | if (h->msix_vector) |
3656 | pci_disable_msix(h->pdev); |
3657 | else if (h->msi_vector) |
3658 | pci_disable_msi(h->pdev); |
3659 | #endif /* CONFIG_PCI_MSI */ |
3660 | } |
3661 | |
3662 | static void __devexit hpsa_remove_one(struct pci_dev *pdev) |
3663 | { |
3664 | struct ctlr_info *h; |
3665 | |
3666 | if (pci_get_drvdata(pdev) == NULL) { |
3667 | dev_err(&pdev->dev, "unable to remove device\n"); |
3668 | return; |
3669 | } |
3670 | h = pci_get_drvdata(pdev); |
3671 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ |
3672 | hpsa_shutdown(pdev); |
3673 | iounmap(h->vaddr); |
3674 | hpsa_free_sg_chain_blocks(h); |
3675 | pci_free_consistent(h->pdev, |
3676 | h->nr_cmds * sizeof(struct CommandList), |
3677 | h->cmd_pool, h->cmd_pool_dhandle); |
3678 | pci_free_consistent(h->pdev, |
3679 | h->nr_cmds * sizeof(struct ErrorInfo), |
3680 | h->errinfo_pool, h->errinfo_pool_dhandle); |
3681 | pci_free_consistent(h->pdev, h->reply_pool_size, |
3682 | h->reply_pool, h->reply_pool_dhandle); |
3683 | kfree(h->cmd_pool_bits); |
3684 | kfree(h->blockFetchTable); |
3685 | kfree(h->hba_inquiry_data); |
3686 | /* |
3687 | * Deliberately omit pci_disable_device(): it does something nasty to |
3688 | * Smart Array controllers that pci_enable_device does not undo |
3689 | */ |
3690 | pci_release_regions(pdev); |
3691 | pci_set_drvdata(pdev, NULL); |
3692 | kfree(h); |
3693 | } |
3694 | |
3695 | static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, |
3696 | __attribute__((unused)) pm_message_t state) |
3697 | { |
3698 | return -ENOSYS; |
3699 | } |
3700 | |
3701 | static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) |
3702 | { |
3703 | return -ENOSYS; |
3704 | } |
3705 | |
3706 | static struct pci_driver hpsa_pci_driver = { |
3707 | .name = "hpsa", |
3708 | .probe = hpsa_init_one, |
3709 | .remove = __devexit_p(hpsa_remove_one), |
3710 | .id_table = hpsa_pci_device_id, /* id_table */ |
3711 | .shutdown = hpsa_shutdown, |
3712 | .suspend = hpsa_suspend, |
3713 | .resume = hpsa_resume, |
3714 | }; |
3715 | |
3716 | /* Fill in bucket_map[], given nsgs (the max number of |
3717 | * scatter gather elements supported) and bucket[], |
3718 | * which is an array of 8 integers. The bucket[] array |
3719 | * contains 8 different DMA transfer sizes (in 16 |
3720 | * byte increments) which the controller uses to fetch |
3721 | * commands. This function fills in bucket_map[], which |
3722 | * maps a given number of scatter gather elements to one of |
3723 | * the 8 DMA transfer sizes. The point of it is to allow the |
3724 | * controller to only do as much DMA as needed to fetch the |
3725 | * command, with the DMA transfer size encoded in the lower |
3726 | * bits of the command address. |
3727 | */ |
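/* Worked example, using the bft[] table from the caller below
 * ({5, 6, 8, 10, 12, 20, 28, 35}): a command with 3 SG entries needs
 * 3 + MINIMUM_TRANSFER_BLOCKS = 7 blocks, so the first bucket of at
 * least that size is bucket 2 (size 8), and bucket_map[3] = 2.
 */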
3728 | static void calc_bucket_map(int bucket[], int num_buckets, |
3729 | int nsgs, int *bucket_map) |
3730 | { |
3731 | int i, j, b, size; |
3732 | |
3733 | /* even a command with 0 SGs requires 4 blocks */ |
3734 | #define MINIMUM_TRANSFER_BLOCKS 4 |
3735 | #define NUM_BUCKETS 8 |
3736 | /* Note, bucket_map must have nsgs+1 entries. */ |
3737 | for (i = 0; i <= nsgs; i++) { |
3738 | /* Compute size of a command with i SG entries */ |
3739 | size = i + MINIMUM_TRANSFER_BLOCKS; |
3740 | b = num_buckets; /* Assume the biggest bucket */ |
3741 | /* Find the bucket that is just big enough */ |
3742 | for (j = 0; j < 8; j++) { |
3743 | if (bucket[j] >= size) { |
3744 | b = j; |
3745 | break; |
3746 | } |
3747 | } |
3748 | /* for a command with i SG entries, use bucket b. */ |
3749 | bucket_map[i] = b; |
3750 | } |
3751 | } |
3752 | |
3753 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) |
3754 | { |
3755 | u32 trans_support; |
3756 | u64 trans_offset; |
3757 | /* 5 = 1 s/g entry or 4k |
3758 | * 6 = 2 s/g entry or 8k |
3759 | * 8 = 4 s/g entry or 16k |
3760 | * 10 = 6 s/g entry or 24k |
3761 | */ |
3762 | int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */ |
3763 | int i = 0; |
3764 | int l = 0; |
3765 | unsigned long register_value; |
3766 | |
3767 | trans_support = readl(&(h->cfgtable->TransportSupport)); |
3768 | if (!(trans_support & PERFORMANT_MODE)) |
3769 | return; |
3770 | |
3771 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); |
3772 | h->max_sg_entries = 32; |
3773 | /* Performant mode ring buffer and supporting data structures */ |
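/* One 64-bit completion entry per outstanding command. */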
3774 | h->reply_pool_size = h->max_commands * sizeof(u64); |
3775 | h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, |
3776 | &(h->reply_pool_dhandle)); |
3777 | |
3778 | /* Need a block fetch table for performant mode */ |
3779 | h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * |
3780 | sizeof(u32)), GFP_KERNEL); |
3781 | |
3782 | if ((h->reply_pool == NULL) |
3783 | || (h->blockFetchTable == NULL)) |
3784 | goto clean_up; |
3785 | |
3786 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ |
3787 | |
3788 | /* Controller spec: zero out this buffer. */ |
3789 | memset(h->reply_pool, 0, h->reply_pool_size); |
3790 | h->reply_pool_head = h->reply_pool; |
3791 | |
3792 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); |
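/* The largest bucket must cover a command carrying the maximum number
 * of SG entries (plus the 4-block minimum command size).
 */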
3793 | bft[7] = h->max_sg_entries + 4; |
3794 | calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); |
3795 | for (i = 0; i < 8; i++) |
3796 | writel(bft[i], &h->transtable->BlockFetch[i]); |
3797 | |
3798 | /* size of controller ring buffer */ |
3799 | writel(h->max_commands, &h->transtable->RepQSize); |
3800 | writel(1, &h->transtable->RepQCount); |
3801 | writel(0, &h->transtable->RepQCtrAddrLow32); |
3802 | writel(0, &h->transtable->RepQCtrAddrHigh32); |
3803 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); |
3804 | writel(0, &h->transtable->RepQAddr0High32); |
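/* Request performant mode, then ring the doorbell so the controller
 * picks up the change.
 */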
3805 | writel(CFGTBL_Trans_Performant, |
3806 | &(h->cfgtable->HostWrite.TransportRequest)); |
3807 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); |
3808 | /* under certain very rare conditions, this can take a while. |
3809 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right |
3810 | * as we enter this code.) */ |
3811 | for (l = 0; l < MAX_CONFIG_WAIT; l++) { |
3812 | register_value = readl(h->vaddr + SA5_DOORBELL); |
3813 | if (!(register_value & CFGTBL_ChangeReq)) |
3814 | break; |
3815 | /* delay and try again */ |
3816 | set_current_state(TASK_INTERRUPTIBLE); |
3817 | schedule_timeout(10); |
3818 | } |
3819 | register_value = readl(&(h->cfgtable->TransportActive)); |
3820 | if (!(register_value & CFGTBL_Trans_Performant)) { |
3821 | dev_warn(&h->pdev->dev, "unable to get board into" |
3822 | " performant mode\n"); |
3823 | return; |
3824 | } |
3825 | |
3826 | /* Change the access methods to the performant access methods */ |
3827 | h->access = SA5_performant_access; |
3828 | h->transMethod = CFGTBL_Trans_Performant; |
3829 | |
3830 | return; |
3831 | |
3832 | clean_up: |
3833 | if (h->reply_pool) |
3834 | pci_free_consistent(h->pdev, h->reply_pool_size, |
3835 | h->reply_pool, h->reply_pool_dhandle); |
3836 | kfree(h->blockFetchTable); |
3837 | } |
3838 | |
3839 | /* |
3840 | * This is it. Register the PCI driver information for the cards we control; |
3841 | * the OS will call our registered routines when it finds one of our cards. |
3842 | */ |
3843 | static int __init hpsa_init(void) |
3844 | { |
3845 | return pci_register_driver(&hpsa_pci_driver); |
3846 | } |
3847 | |
3848 | static void __exit hpsa_cleanup(void) |
3849 | { |
3850 | pci_unregister_driver(&hpsa_pci_driver); |
3851 | } |
3852 | |
3853 | module_init(hpsa_init); |
3854 | module_exit(hpsa_cleanup); |
3855 |