/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);

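/*
 * Tag pool: a LIFO stack of free command tags. tag_init() pushes every
 * tag once, tag_get_one() pops the next free tag, and tag_release_one()
 * returns a completed tag to the stack.
 */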
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;
	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}

static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	BUG_ON(st->top <= 0);
	return st->stack[--st->top];
}

static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
						unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}

static bool tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;
}

static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}

static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}

static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory, size = %d.\n",
				size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
							&res->bus_addr);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate consistent mem, "
				"size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		memset(res->virt_addr, 0, size);
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}

static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			pci_free_consistent(mhba->pdev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}

/**
 * mvumi_make_sgl - Prepares SGL
 * @mhba: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 * @sgl_p: SGL to be filled in
 * @sg_count: return the number of SG elements
 *
 * If successful, this function returns 0. Otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	if (sgnum) {
		sg = scsi_sglist(scmd);
		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
				(int) scmd->sc_data_direction);
		if (*sg_count > mhba->max_sge) {
			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
					"than max sg[0x%x].\n",
					*sg_count, mhba->max_sge);
			return -1;
		}
		for (i = 0; i < *sg_count; i++) {
			busaddr = sg_dma_address(&sg[i]);
			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
			m_sg->flags = 0;
			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
			if ((i + 1) == *sg_count)
				m_sg->flags |= 1U << mhba->eot_flag;

			sgd_inc(mhba, m_sg);
		}
	} else {
		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
			pci_map_single(mhba->pdev, scsi_sglist(scmd),
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction)
			: 0;
		busaddr = scmd->SCp.dma_handle;
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 1U << mhba->eot_flag;
		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
		*sg_count = 1;
	}

	return 0;
}

static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
	if (!virt_addr)
		return -1;

	memset(virt_addr, 0, size);

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}

static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
				unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = pci_alloc_consistent(mhba->pdev,
				mhba->ib_max_size, &cmd->frame_phys);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame, size = %d.\n", mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev, "failed to allocate memory"
						" for internal frame\n");
			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}

static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
								phy_addr);
		}
		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
						cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}

/**
 * mvumi_get_cmd - Get a command from the free pool
 * @mhba: Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_first_entry(&mhba->cmd_pool,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}

/**
 * mvumi_return_cmd - Return a cmd to free command pool
 * @mhba: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	cmd->scmd = NULL;
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}

/**
 * mvumi_free_cmds - Free all the cmds in the free cmd pool
 * @mhba: Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}

/**
 * mvumi_alloc_cmds - Allocates the command packets
 * @mhba: Adapter soft state
 *
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}

static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	} else {
		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
	}
}

static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;
	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}

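/*
 * The inbound and outbound lists are circular. The slot number lives in
 * the low bits (cl_slot_num_mask) of the current-slot word, and a toggle
 * bit (cl_pointer_toggle) is flipped on every wrap, so a full ring can be
 * told apart from an empty one when the read and write slots are equal.
 */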
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}

static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}

static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	udelay(1);
	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev, "request ID from FW:0x%x, "
				"cmd request ID:0x%x\n", request_id,
				mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}

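/*
 * On the 9143 the firmware DMAs a copy of its outbound write pointer
 * into host memory (ob_shadow, programmed via outb_copy_basel/baseh at
 * handshake end). Poll until the register and its shadow agree so we
 * never walk outbound entries the firmware has not yet published.
 */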
static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}

static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}

static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
			p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}

static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}

static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;
	mvumi_reset(mhba);

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}

static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;
	before = jiffies;

	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		msleep(500);
		rmb();
		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}

static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}

static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned char i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}

static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
{
	unsigned int ret = 0;
	pci_set_master(pdev);

	if (IS_DMA64) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	} else
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

	return ret;
}

static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	msleep(100);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}

static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
			scmd->serial_number, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}

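/*
 * Issue an internal command synchronously: fire it under the host lock,
 * then sleep on int_cmd_wait_q until the completion path clears
 * cmd_status, or until MVUMI_INTERNAL_CMD_WAIT_TIME expires, in which
 * case the tag and list state are unwound here.
 */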
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		BUG_ON(1);
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = NULL;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT: an internal command was not sent!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}

static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	kfree(mhba->regs);
	pci_release_regions(mhba->pdev);
}

static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:
		cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev, "failed to get memory"
					" for internal flush cache cmd for "
					"device %d", device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->scmd = NULL;
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}

static unsigned char
mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
				unsigned short len)
{
	unsigned char *ptr;
	unsigned char ret = 0, i;

	ptr = (unsigned char *) p_header->frame_content;
	for (i = 0; i < len; i++) {
		ret ^= *ptr;
		ptr++;
	}

	return ret;
}

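/*
 * Build one handshake page for the firmware. HS_PAGE_HOST_INFO carries
 * host/driver information, HS_PAGE_FIRM_CTL firmware control settings,
 * and HS_PAGE_CL_INFO the base addresses and geometry of the inbound
 * and outbound communication lists. Every page is sealed with an XOR
 * checksum over its contents.
 */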
static void mvumi_hs_build_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page2 *hs_page2;
	struct mvumi_hs_page4 *hs_page4;
	struct mvumi_hs_page3 *hs_page3;
	struct timeval time;
	unsigned int local_time;

	switch (hs_header->page_code) {
	case HS_PAGE_HOST_INFO:
		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page2) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page2->host_type = 3; /* 3 means Linux */
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
			hs_page2->host_cap = 0x08; /* host dynamic source mode */
		hs_page2->host_ver.ver_major = VER_MAJOR;
		hs_page2->host_ver.ver_minor = VER_MINOR;
		hs_page2->host_ver.ver_oem = VER_OEM;
		hs_page2->host_ver.ver_build = VER_BUILD;
		hs_page2->system_io_bus = 0;
		hs_page2->slot_number = 0;
		hs_page2->intr_level = 0;
		hs_page2->intr_vector = 0;
		do_gettimeofday(&time);
		local_time = (unsigned int) (time.tv_sec -
						(sys_tz.tz_minuteswest * 60));
		hs_page2->seconds_since1970 = local_time;
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_FIRM_CTL:
		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page3) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	case HS_PAGE_CL_INFO:
		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
		hs_header->frame_length = sizeof(*hs_page4) - 4;
		memset(hs_header->frame_content, 0, hs_header->frame_length);
		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);

		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
		if (mhba->hba_capability
				& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
			hs_page4->ob_depth = find_first_bit((unsigned long *)
							&mhba->list_num_io,
							BITS_PER_LONG);
			hs_page4->ib_depth = find_first_bit((unsigned long *)
							&mhba->list_num_io,
							BITS_PER_LONG);
		} else {
			hs_page4->ob_depth = (u8) mhba->list_num_io;
			hs_page4->ib_depth = (u8) mhba->list_num_io;
		}
		hs_header->checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
		break;

	default:
		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
			hs_header->page_code);
		break;
	}
}

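/*
 * mvumi_init_data() carves all structures shared with the firmware out
 * of one uncached DMA allocation, in this order and alignment:
 *
 *	ib_list   (128-byte aligned, ib_max_size * max_io)
 *	ib_shadow (8-byte aligned)
 *	ob_shadow (8-byte aligned on 9580, 4-byte on 9143)
 *	ob_list   (128-byte aligned, ob_max_size * max_io)
 *
 * The outbound data pool, tag stack, tag_cmd table and target map come
 * from ordinary cached allocations.
 */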
/**
 * mvumi_init_data - Initialize request data for FW
 * @mhba: Adapter soft state
 */
static int mvumi_init_data(struct mvumi_hba *mhba)
{
	struct mvumi_ob_data *ob_pool;
	struct mvumi_res *res_mgnt;
	unsigned int tmp_size, offset, i;
	void *virmem, *v;
	dma_addr_t p;

	if (mhba->fw_flag & MVUMI_FW_ALLOC)
		return 0;

	tmp_size = mhba->ib_max_size * mhba->max_io;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;

	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
	tmp_size += 8 + sizeof(u32)*2 + 16;

	res_mgnt = mvumi_alloc_mem_resource(mhba,
					RESOURCE_UNCACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for inbound list\n");
		goto fail_alloc_dma_buf;
	}

	p = res_mgnt->bus_addr;
	v = res_mgnt->virt_addr;
	/* ib_list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;
	mhba->ib_list = v;
	mhba->ib_list_phys = p;
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
		mhba->ib_frame = v;
		mhba->ib_frame_phys = p;
	}
	v += mhba->ib_max_size * mhba->max_io;
	p += mhba->ib_max_size * mhba->max_io;

	/* ib shadow */
	offset = round_up(p, 8) - p;
	p += offset;
	v += offset;
	mhba->ib_shadow = v;
	mhba->ib_shadow_phys = p;
	p += sizeof(u32)*2;
	v += sizeof(u32)*2;
	/* ob shadow */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
		offset = round_up(p, 8) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 8;
		v += 8;
	} else {
		offset = round_up(p, 4) - p;
		p += offset;
		v += offset;
		mhba->ob_shadow = v;
		mhba->ob_shadow_phys = p;
		p += 4;
		v += 4;
	}

	/* ob list */
	offset = round_up(p, 128) - p;
	p += offset;
	v += offset;

	mhba->ob_list = v;
	mhba->ob_list_phys = p;

	/* ob data pool */
	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
	tmp_size = round_up(tmp_size, 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for outbound data buffer\n");
		goto fail_alloc_dma_buf;
	}
	virmem = res_mgnt->virt_addr;

	for (i = mhba->max_io; i != 0; i--) {
		ob_pool = (struct mvumi_ob_data *) virmem;
		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
		virmem += mhba->ob_max_size + sizeof(*ob_pool);
	}

	tmp_size = sizeof(unsigned short) * mhba->max_io +
		sizeof(struct mvumi_cmd *) * mhba->max_io;
	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
						(sizeof(unsigned char) * 8);

	res_mgnt = mvumi_alloc_mem_resource(mhba,
				RESOURCE_CACHED_MEMORY, tmp_size);
	if (!res_mgnt) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for tag and target map\n");
		goto fail_alloc_dma_buf;
	}

	virmem = res_mgnt->virt_addr;
	mhba->tag_pool.stack = virmem;
	mhba->tag_pool.size = mhba->max_io;
	tag_init(&mhba->tag_pool, mhba->max_io);
	virmem += sizeof(unsigned short) * mhba->max_io;

	mhba->tag_cmd = virmem;
	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;

	mhba->target_map = virmem;

	mhba->fw_flag |= MVUMI_FW_ALLOC;
	return 0;

fail_alloc_dma_buf:
	mvumi_release_mem_resource(mhba);
	return -1;
}

static int mvumi_hs_process_page(struct mvumi_hba *mhba,
				struct mvumi_hs_header *hs_header)
{
	struct mvumi_hs_page1 *hs_page1;
	unsigned char page_checksum;

	page_checksum = mvumi_calculate_checksum(hs_header,
						hs_header->frame_length);
	if (page_checksum != hs_header->checksum) {
		dev_err(&mhba->pdev->dev, "checksum error\n");
		return -1;
	}

	switch (hs_header->page_code) {
	case HS_PAGE_FIRM_CAP:
		hs_page1 = (struct mvumi_hs_page1 *) hs_header;

		mhba->max_io = hs_page1->max_io_support;
		mhba->list_num_io = hs_page1->cl_inout_list_depth;
		mhba->max_transfer_size = hs_page1->max_transfer_size;
		mhba->max_target_id = hs_page1->max_devices_support;
		mhba->hba_capability = hs_page1->capability;
		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;

		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;

		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
						hs_page1->fw_ver.ver_build);

		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
			mhba->eot_flag = 22;
		else
			mhba->eot_flag = 27;
		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
		break;
	default:
		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
		return -1;
	}
	return 0;
}

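/*
 * Handshake state machine, advanced by mvumi_handshake() each time the
 * firmware raises a handshake doorbell:
 *
 *	HS_S_START -> HS_S_RESET -> HS_S_PAGE_ADDR ->
 *	(HS_S_QUERY_PAGE / HS_S_SEND_PAGE, once per page) -> HS_S_END
 *
 * HS_S_QUERY_PAGE consumes the firmware capability page and allocates
 * the communication lists; HS_S_END programs the list shadow addresses
 * and unmasks the communication-list interrupts.
 */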
/**
 * mvumi_handshake - Move the FW to READY state
 * @mhba: Adapter soft state
 *
 * During initialization, the FW can potentially be in any one of several
 * possible states. If the FW is in the operational or waiting-for-handshake
 * state, the driver must take steps to bring it to the ready state.
 * Otherwise, it has to wait for the FW to reach the ready state.
 */
static int mvumi_handshake(struct mvumi_hba *mhba)
{
	unsigned int hs_state, tmp, hs_fun;
	struct mvumi_hs_header *hs_header;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->fw_state == FW_STATE_STARTING)
		hs_state = HS_S_START;
	else {
		tmp = ioread32(regs->arm_to_pciea_msg0);
		hs_state = HS_GET_STATE(tmp);
		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
			mhba->fw_state = FW_STATE_STARTING;
			return -1;
		}
	}

	hs_fun = 0;
	switch (hs_state) {
	case HS_S_START:
		mhba->fw_state = FW_STATE_HANDSHAKING;
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_RESET);
		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_RESET:
		iowrite32(lower_32_bits(mhba->handshake_page_phys),
					regs->pciea_to_arm_msg1);
		iowrite32(upper_32_bits(mhba->handshake_page_phys),
					regs->arm_to_pciea_msg1);
		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_PAGE_ADDR:
	case HS_S_QUERY_PAGE:
	case HS_S_SEND_PAGE:
		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
			mhba->hba_total_pages =
			((struct mvumi_hs_page1 *) hs_header)->total_pages;

			if (mhba->hba_total_pages == 0)
				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if (hs_state == HS_S_QUERY_PAGE) {
			if (mvumi_hs_process_page(mhba, hs_header)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
			if (mvumi_init_data(mhba)) {
				HS_SET_STATE(hs_fun, HS_S_ABORT);
				return -1;
			}
		} else if (hs_state == HS_S_PAGE_ADDR) {
			hs_header->page_code = 0;
			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
		}

		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
			hs_header->page_code++;
			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
				mvumi_hs_build_page(mhba, hs_header);
				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
			} else
				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
		} else
			HS_SET_STATE(hs_fun, HS_S_END);

		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
		break;

	case HS_S_END:
		/* Set communication list ISR */
		tmp = ioread32(regs->enpointa_mask_reg);
		tmp |= regs->int_comaout | regs->int_comaerr;
		iowrite32(tmp, regs->enpointa_mask_reg);
		iowrite32(mhba->list_num_io, mhba->ib_shadow);
		/* Set InBound List Available count shadow */
		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_basel);
		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
					regs->inb_aval_count_baseh);

		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
			/* Set OutBound List Available count shadow */
			iowrite32((mhba->list_num_io-1) |
							regs->cl_pointer_toggle,
							mhba->ob_shadow);
			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_basel);
			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
							regs->outb_copy_baseh);
		}

		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
							regs->cl_pointer_toggle;
		mhba->fw_state = FW_STATE_STARTED;

		break;
	default:
		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
								hs_state);
		return -1;
	}
	return 0;
}

static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
{
	unsigned int isr_status;
	unsigned long before;

	before = jiffies;
	mvumi_handshake(mhba);
	do {
		isr_status = mhba->instancet->read_fw_status_reg(mhba);

		if (mhba->fw_state == FW_STATE_STARTED)
			return 0;
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"no handshake response at state 0x%x.\n",
					mhba->fw_state);
			dev_err(&mhba->pdev->dev,
				"isr : global=0x%x,status=0x%x.\n",
					mhba->global_isr, isr_status);
			return -1;
		}
		rmb();
		usleep_range(1000, 2000);
	} while (!(isr_status & DRBL_HANDSHAKE_ISR));

	return 0;
}

static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	unsigned long before;

	before = jiffies;
	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
		if (tmp != HANDSHAKE_READYSTATE)
			iowrite32(DRBL_MU_RESET,
					mhba->regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"invalid signature [0x%x].\n", tmp);
			return -1;
		}
		usleep_range(1000, 2000);
		rmb();
		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
	}

	mhba->fw_state = FW_STATE_STARTING;
	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
	do {
		if (mvumi_handshake_event(mhba)) {
			dev_err(&mhba->pdev->dev,
					"handshake failed at state 0x%x.\n",
						mhba->fw_state);
			return -1;
		}
	} while (mhba->fw_state != FW_STATE_STARTED);

	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");

	return 0;
}

static unsigned char mvumi_start(struct mvumi_hba *mhba)
{
	unsigned int tmp;
	struct mvumi_hw_regs *regs = mhba->regs;

	/* clear Door bell */
	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);

	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
	iowrite32(tmp, regs->enpointa_mask_reg);
	msleep(100);
	if (mvumi_check_handshake(mhba))
		return -1;

	return 0;
}

/**
 * mvumi_complete_cmd - Completes a command
 * @mhba: Adapter soft state
 * @cmd: Command to be completed
 * @ob_frame: Response frame returned by the FW
 */
static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	struct scsi_cmnd *scmd = cmd->scmd;

	cmd->scmd->SCp.ptr = NULL;
	scmd->result = ob_frame->req_status;

	switch (ob_frame->req_status) {
	case SAM_STAT_GOOD:
		scmd->result |= DID_OK << 16;
		break;
	case SAM_STAT_BUSY:
		scmd->result |= DID_BUS_BUSY << 16;
		break;
	case SAM_STAT_CHECK_CONDITION:
		scmd->result |= (DID_OK << 16);
		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
			scmd->result |= (DRIVER_SENSE << 24);
		}
		break;
	default:
		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
		break;
	}

	if (scsi_bufflen(scmd)) {
		if (scsi_sg_count(scmd)) {
			pci_unmap_sg(mhba->pdev,
				scsi_sglist(scmd),
				scsi_sg_count(scmd),
				(int) scmd->sc_data_direction);
		} else {
			pci_unmap_single(mhba->pdev,
				scmd->SCp.dma_handle,
				scsi_bufflen(scmd),
				(int) scmd->sc_data_direction);

			scmd->SCp.dma_handle = 0;
		}
	}
	cmd->scmd->scsi_done(scmd);
	mvumi_return_cmd(mhba, cmd);
}

static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd,
					struct mvumi_rsp_frame *ob_frame)
{
	if (atomic_read(&cmd->sync_cmd)) {
		cmd->cmd_status = ob_frame->req_status;

		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
				cmd->data_buf) {
			memcpy(cmd->data_buf, ob_frame->payload,
				sizeof(struct mvumi_sense_data));
		}
		atomic_dec(&cmd->sync_cmd);
		wake_up(&mhba->int_cmd_wait_q);
	}
}

static void mvumi_show_event(struct mvumi_hba *mhba,
			struct mvumi_driver_event *ptr)
{
	unsigned int i;

	dev_warn(&mhba->pdev->dev,
		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
	if (ptr->param_count) {
		printk(KERN_WARNING "Event param(len 0x%x): ",
						ptr->param_count);
		for (i = 0; i < ptr->param_count; i++)
			printk(KERN_WARNING "0x%x ", ptr->params[i]);

		printk(KERN_WARNING "\n");
	}

	if (ptr->sense_data_length) {
		printk(KERN_WARNING "Event sense data(len 0x%x): ",
						ptr->sense_data_length);
		for (i = 0; i < ptr->sense_data_length; i++)
			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
		printk(KERN_WARNING "\n");
	}
}

static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
{
	struct scsi_device *sdev;
	int ret = -1;

	if (status == DEVICE_OFFLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (sdev) {
			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
								sdev->id, 0);
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
			ret = 0;
		} else
			dev_err(&mhba->pdev->dev, "no disk[%d] to remove\n",
								devid);
	} else if (status == DEVICE_ONLINE) {
		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
		if (!sdev) {
			scsi_add_device(mhba->shost, 0, devid, 0);
			dev_dbg(&mhba->pdev->dev, "add disk %d-%d-%d.\n", 0,
								devid, 0);
			ret = 0;
		} else {
			dev_err(&mhba->pdev->dev, "don't add disk %d-%d-%d.\n",
								0, devid, 0);
			scsi_device_put(sdev);
		}
	}
	return ret;
}

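/*
 * Probe one target with a standard INQUIRY. On the 9580 the WWID is
 * taken from the INQUIRY response (MVUMI_INQUIRY_UUID_LEN bytes at
 * MVUMI_INQUIRY_UUID_OFF); the 9143 path simply uses id + 1 as a
 * stand-in. A returned WWID of 0 means the device did not answer.
 */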
static u64 mvumi_inquiry(struct mvumi_hba *mhba,
	unsigned int id, struct mvumi_cmd *cmd)
{
	struct mvumi_msg_frame *frame;
	u64 wwid = 0;
	int cmd_alloc = 0;
	int data_buf_len = 64;

	if (!cmd) {
		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
		if (cmd)
			cmd_alloc = 1;
		else
			return 0;
	} else {
		memset(cmd->data_buf, 0, data_buf_len);
	}
	cmd->scmd = NULL;
	cmd->cmd_status = REQ_STATUS_PENDING;
	atomic_set(&cmd->sync_cmd, 0);
	frame = cmd->frame;
	frame->device_id = (u16) id;
	frame->cmd_flag = CMD_FLAG_DATA_IN;
	frame->req_function = CL_FUN_SCSI_CMD;
	frame->cdb_length = 6;
	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
	memset(frame->cdb, 0, frame->cdb_length);
	frame->cdb[0] = INQUIRY;
	frame->cdb[4] = frame->data_transfer_length;

	mvumi_issue_blocked_cmd(mhba, cmd);

	if (cmd->cmd_status == SAM_STAT_GOOD) {
		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
			wwid = id + 1;
		else
			memcpy((void *)&wwid,
			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
			       MVUMI_INQUIRY_UUID_LEN);
		dev_dbg(&mhba->pdev->dev,
			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
	} else {
		wwid = 0;
	}
	if (cmd_alloc)
		mvumi_delete_internal_cmd(mhba, cmd);

	return wwid;
}

static void mvumi_detach_devices(struct mvumi_hba *mhba)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;
	struct scsi_device *sdev = NULL;

	mutex_lock(&mhba->device_lock);

	/* detach Hard Disk */
	list_for_each_entry_safe(mv_dev, dev_next,
		&mhba->shost_dev_list, list) {
		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}
	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
		list_del_init(&mv_dev->list);
		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
			mv_dev->id, mv_dev->wwid);
		kfree(mv_dev);
	}

	/* detach virtual device */
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);

	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}

	mutex_unlock(&mhba->device_lock);
}

static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
{
	struct scsi_device *sdev;

	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
	if (sdev) {
		scsi_rescan_device(&sdev->sdev_gendev);
		scsi_device_put(sdev);
	}
}

static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
{
	struct mvumi_device *mv_dev = NULL;

	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
		if (mv_dev->wwid == wwid) {
			if (mv_dev->id != id) {
				dev_err(&mhba->pdev->dev,
					"%s has same wwid[%llx],"
					" but different id[%d %d]\n",
					__func__, mv_dev->wwid, mv_dev->id, id);
				return -1;
			} else {
				if (mhba->pdev->device ==
						PCI_DEVICE_ID_MARVELL_MV9143)
					mvumi_rescan_devices(mhba, id);
				return 1;
			}
		}
	}
	return 0;
}

static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
{
	struct mvumi_device *mv_dev = NULL, *dev_next;

	list_for_each_entry_safe(mv_dev, dev_next,
				&mhba->shost_dev_list, list) {
		if (mv_dev->id == id) {
			dev_dbg(&mhba->pdev->dev,
				"detach device(0:%d:0) wwid(%llx) from HOST\n",
				mv_dev->id, mv_dev->wwid);
			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
			list_del_init(&mv_dev->list);
			kfree(mv_dev);
		}
	}
}

static int mvumi_probe_devices(struct mvumi_hba *mhba)
{
	int id, maxid;
	u64 wwid = 0;
	struct mvumi_device *mv_dev = NULL;
	struct mvumi_cmd *cmd = NULL;
	int found = 0;

	cmd = mvumi_create_internal_cmd(mhba, 64);
	if (!cmd)
		return -1;

	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
		maxid = mhba->max_target_id;
	else
		maxid = mhba->max_target_id - 1;

	for (id = 0; id < maxid; id++) {
		wwid = mvumi_inquiry(mhba, id, cmd);
		if (!wwid) {
			/* device did not respond, remove it */
			mvumi_remove_devices(mhba, id);
		} else {
			/* device responded, add it */
			found = mvumi_match_devices(mhba, id, wwid);
			if (!found) {
				mvumi_remove_devices(mhba, id);
				mv_dev = kzalloc(sizeof(struct mvumi_device),
								GFP_KERNEL);
				if (!mv_dev) {
					dev_err(&mhba->pdev->dev,
						"%s alloc mv_dev failed\n",
						__func__);
					continue;
				}
				mv_dev->id = id;
				mv_dev->wwid = wwid;
				mv_dev->sdev = NULL;
				INIT_LIST_HEAD(&mv_dev->list);
				list_add_tail(&mv_dev->list,
					      &mhba->mhba_dev_list);
				dev_dbg(&mhba->pdev->dev,
					"probe a new device(0:%d:0)"
					" wwid(%llx)\n", id, mv_dev->wwid);
			} else if (found == -1) {
				/* free the internal cmd before bailing out */
				mvumi_delete_internal_cmd(mhba, cmd);
				return -1;
			} else
				continue;
		}
	}

	mvumi_delete_internal_cmd(mhba, cmd);

	return 0;
}

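/*
 * Hotplug worker thread. DRBL_BUS_CHANGE interrupts bump pnp_count and
 * wake this thread (see mvumi_launch_events()); it sleeps while
 * pnp_count is zero, then re-probes every target and reconciles the
 * firmware's device view with the SCSI mid-layer.
 */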
static int mvumi_rescan_bus(void *data)
{
	int ret = 0;
	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
	struct mvumi_device *mv_dev = NULL, *dev_next;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);
		if (!atomic_read(&mhba->pnp_count))
			schedule();
		msleep(1000);
		atomic_set(&mhba->pnp_count, 0);
		__set_current_state(TASK_RUNNING);

		mutex_lock(&mhba->device_lock);
		ret = mvumi_probe_devices(mhba);
		if (!ret) {
			list_for_each_entry_safe(mv_dev, dev_next,
					&mhba->mhba_dev_list, list) {
				if (mvumi_handle_hotplug(mhba, mv_dev->id,
							 DEVICE_ONLINE)) {
					dev_err(&mhba->pdev->dev,
						"%s add device(0:%d:0) failed,"
						" wwid(%llx) already exists\n",
						__func__,
						mv_dev->id, mv_dev->wwid);
					list_del_init(&mv_dev->list);
					kfree(mv_dev);
				} else {
					list_move_tail(&mv_dev->list,
						       &mhba->shost_dev_list);
				}
			}
		}
		mutex_unlock(&mhba->device_lock);
	}
	return 0;
}

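/*
 * A host-get-event message carries two bitmaps of 'size' bits each:
 * cleared bits in the first select targets to bring online, cleared
 * bits in the second select targets to take offline.
 */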
static void mvumi_proc_msg(struct mvumi_hba *mhba,
					struct mvumi_hotplug_event *param)
{
	u16 size = param->size;
	const unsigned long *ar_bitmap;
	const unsigned long *re_bitmap;
	int index;

	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
		index = -1;
		ar_bitmap = (const unsigned long *) param->bitmap;
		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];

		mutex_lock(&mhba->sas_discovery_mutex);
		do {
			index = find_next_zero_bit(ar_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
		} while (1);

		index = -1;
		do {
			index = find_next_zero_bit(re_bitmap, size, index + 1);
			if (index >= size)
				break;
			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
		} while (1);
		mutex_unlock(&mhba->sas_discovery_mutex);
	}
}

1717 | static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) |
1718 | { |
1719 | if (msg == APICDB1_EVENT_GETEVENT) { |
1720 | int i, count; |
1721 | struct mvumi_driver_event *param = NULL; |
1722 | struct mvumi_event_req *er = buffer; |
1723 | count = er->count; |
1724 | if (count > MAX_EVENTS_RETURNED) { |
1725 | dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" |
1726 | " than max event count[0x%x].\n", |
1727 | count, MAX_EVENTS_RETURNED); |
1728 | return; |
1729 | } |
1730 | for (i = 0; i < count; i++) { |
1731 | param = &er->events[i]; |
1732 | mvumi_show_event(mhba, param); |
1733 | } |
1734 | } else if (msg == APICDB1_HOST_GETEVENT) { |
1735 | mvumi_proc_msg(mhba, buffer); |
1736 | } |
1737 | } |
1738 | |
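     | /**
     |  * mvumi_get_event - Fetch pending events from the firmware
     |  * @mhba: Adapter soft state
     |  * @msg: Event message type to request (APICDB1_*)
     |  *
     |  * Issues a blocked internal command and, on success, hands the
     |  * returned payload to mvumi_notification().
     |  */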
1739 | static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) |
1740 | { |
1741 | struct mvumi_cmd *cmd; |
1742 | struct mvumi_msg_frame *frame; |
1743 | |
1744 | cmd = mvumi_create_internal_cmd(mhba, 512); |
1745 | if (!cmd) |
1746 | return -1; |
1747 | cmd->scmd = NULL; |
1748 | cmd->cmd_status = REQ_STATUS_PENDING; |
1749 | atomic_set(&cmd->sync_cmd, 0); |
1750 | frame = cmd->frame; |
1751 | frame->device_id = 0; |
1752 | frame->cmd_flag = CMD_FLAG_DATA_IN; |
1753 | frame->req_function = CL_FUN_SCSI_CMD; |
1754 | frame->cdb_length = MAX_COMMAND_SIZE; |
1755 | frame->data_transfer_length = sizeof(struct mvumi_event_req); |
1756 | memset(frame->cdb, 0, MAX_COMMAND_SIZE); |
1757 | frame->cdb[0] = APICDB0_EVENT; |
1758 | frame->cdb[1] = msg; |
1759 | mvumi_issue_blocked_cmd(mhba, cmd); |
1760 | |
1761 | if (cmd->cmd_status != SAM_STAT_GOOD) |
1762 | dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", |
1763 | cmd->cmd_status); |
1764 | else |
1765 | mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); |
1766 | |
1767 | mvumi_delete_internal_cmd(mhba, cmd); |
1768 | return 0; |
1769 | } |
1770 | |
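     | /**
     |  * mvumi_scan_events - Deferred work that fetches firmware events
     |  * @work: work_struct embedded in the mvumi_events_wq being run
     |  */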
1771 | static void mvumi_scan_events(struct work_struct *work) |
1772 | { |
1773 | struct mvumi_events_wq *mu_ev = |
1774 | container_of(work, struct mvumi_events_wq, work_q); |
1775 | |
1776 | mvumi_get_event(mu_ev->mhba, mu_ev->event); |
1777 | kfree(mu_ev); |
1778 | } |
1779 | |
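     | /**
     |  * mvumi_launch_events - Kick off handling of doorbell event bits
     |  * @mhba: Adapter soft state
     |  * @isr_status: Doorbell bits captured by the interrupt handler
     |  *
     |  * Bus-change bits wake the rescan thread; event-notify bits are
     |  * handed to a work item since fetching events can sleep.
     |  */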
1780 | static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) |
1781 | { |
1782 | struct mvumi_events_wq *mu_ev; |
1783 | |
1784 | while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) { |
1785 | if (isr_status & DRBL_BUS_CHANGE) { |
1786 | atomic_inc(&mhba->pnp_count); |
1787 | wake_up_process(mhba->dm_thread); |
1788 | isr_status &= ~(DRBL_BUS_CHANGE); |
1789 | continue; |
1790 | } |
1791 | 
1792 | 		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
     | 		/* clear the bit even when allocation fails; otherwise this
     | 		 * loop would spin forever on DRBL_EVENT_NOTIFY */
1793 | 		isr_status &= ~(DRBL_EVENT_NOTIFY);
1794 | 		if (mu_ev) {
1795 | 			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1796 | 			mu_ev->mhba = mhba;
1797 | 			mu_ev->event = APICDB1_EVENT_GETEVENT;
1798 | 			mu_ev->param = NULL;
1799 | 			schedule_work(&mu_ev->work_q);
1800 | 		}
1801 | } |
1802 | } |
1803 | |
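     | /**
     |  * mvumi_handle_clob - Complete commands from the outbound list
     |  * @mhba: Adapter soft state
     |  *
     |  * For each received response frame, releases the tag, completes
     |  * the owning command and finally fires any waiting requests.
     |  */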
1804 | static void mvumi_handle_clob(struct mvumi_hba *mhba) |
1805 | { |
1806 | struct mvumi_rsp_frame *ob_frame; |
1807 | struct mvumi_cmd *cmd; |
1808 | struct mvumi_ob_data *pool; |
1809 | |
1810 | while (!list_empty(&mhba->free_ob_list)) { |
1811 | pool = list_first_entry(&mhba->free_ob_list, |
1812 | struct mvumi_ob_data, list); |
1813 | list_del_init(&pool->list); |
1814 | list_add_tail(&pool->list, &mhba->ob_data_list); |
1815 | |
1816 | ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; |
1817 | cmd = mhba->tag_cmd[ob_frame->tag]; |
1818 | |
1819 | atomic_dec(&mhba->fw_outstanding); |
1820 | 		mhba->tag_cmd[ob_frame->tag] = NULL;
1821 | tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); |
1822 | if (cmd->scmd) |
1823 | mvumi_complete_cmd(mhba, cmd, ob_frame); |
1824 | else |
1825 | mvumi_complete_internal_cmd(mhba, cmd, ob_frame); |
1826 | } |
1827 | mhba->instancet->fire_cmd(mhba, NULL); |
1828 | } |
1829 | |
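     | /**
     |  * mvumi_isr_handler - Interrupt service routine
     |  * @irq: IRQ number
     |  * @devp: Adapter soft state passed to request_irq()
     |  */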
1830 | static irqreturn_t mvumi_isr_handler(int irq, void *devp) |
1831 | { |
1832 | struct mvumi_hba *mhba = (struct mvumi_hba *) devp; |
1833 | unsigned long flags; |
1834 | |
1835 | spin_lock_irqsave(mhba->shost->host_lock, flags); |
1836 | if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { |
1837 | spin_unlock_irqrestore(mhba->shost->host_lock, flags); |
1838 | return IRQ_NONE; |
1839 | } |
1840 | |
1841 | if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { |
1842 | if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) |
1843 | mvumi_launch_events(mhba, mhba->isr_status); |
1844 | if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { |
1845 | dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); |
1846 | mvumi_handshake(mhba); |
1847 | } |
1848 | |
1849 | } |
1850 | |
1851 | if (mhba->global_isr & mhba->regs->int_comaout) |
1852 | mvumi_receive_ob_list_entry(mhba); |
1853 | |
1854 | mhba->global_isr = 0; |
1855 | mhba->isr_status = 0; |
1856 | if (mhba->fw_state == FW_STATE_STARTED) |
1857 | mvumi_handle_clob(mhba); |
1858 | spin_unlock_irqrestore(mhba->shost->host_lock, flags); |
1859 | return IRQ_HANDLED; |
1860 | } |
1861 | |
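     | /**
     |  * mvumi_send_command - Put one command frame on the inbound list
     |  * @mhba: Adapter soft state
     |  * @cmd: Command to be sent to the firmware
     |  *
     |  * Allocates a tag and either links the frame as a dynamic source
     |  * entry or copies it into the inbound slot, depending on the
     |  * HS_CAPABILITY_SUPPORT_DYN_SRC capability.
     |  */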
1862 | static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, |
1863 | struct mvumi_cmd *cmd) |
1864 | { |
1865 | void *ib_entry; |
1866 | struct mvumi_msg_frame *ib_frame; |
1867 | unsigned int frame_len; |
1868 | |
1869 | ib_frame = cmd->frame; |
1870 | if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { |
1871 | dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); |
1872 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; |
1873 | } |
1874 | if (tag_is_empty(&mhba->tag_pool)) { |
1875 | dev_dbg(&mhba->pdev->dev, "no free tag.\n"); |
1876 | return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; |
1877 | } |
1878 | mvumi_get_ib_list_entry(mhba, &ib_entry); |
1879 | |
1880 | cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); |
1881 | cmd->frame->request_id = mhba->io_seq++; |
1882 | cmd->request_id = cmd->frame->request_id; |
1883 | mhba->tag_cmd[cmd->frame->tag] = cmd; |
1884 | frame_len = sizeof(*ib_frame) - 4 + |
1885 | ib_frame->sg_counts * sizeof(struct mvumi_sgl); |
1886 | if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { |
1887 | struct mvumi_dyn_list_entry *dle; |
1888 | dle = ib_entry; |
1889 | dle->src_low_addr = |
1890 | cpu_to_le32(lower_32_bits(cmd->frame_phys)); |
1891 | dle->src_high_addr = |
1892 | cpu_to_le32(upper_32_bits(cmd->frame_phys)); |
1893 | dle->if_length = (frame_len >> 2) & 0xFFF; |
1894 | } else { |
1895 | memcpy(ib_entry, ib_frame, frame_len); |
1896 | } |
1897 | return MV_QUEUE_COMMAND_RESULT_SENT; |
1898 | } |
1899 | |
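     | /**
     |  * mvumi_fire_cmd - Issue queued commands to the firmware
     |  * @mhba: Adapter soft state
     |  * @cmd: New command to queue, or NULL to only flush the backlog
     |  *
     |  * Sends as many waiting commands as the inbound list allows and
     |  * then notifies the firmware about the new entries.
     |  */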
1900 | static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) |
1901 | { |
1902 | unsigned short num_of_cl_sent = 0; |
1903 | unsigned int count; |
1904 | enum mvumi_qc_result result; |
1905 | |
1906 | if (cmd) |
1907 | list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); |
1908 | count = mhba->instancet->check_ib_list(mhba); |
1909 | if (list_empty(&mhba->waiting_req_list) || !count) |
1910 | return; |
1911 | |
1912 | do { |
1913 | cmd = list_first_entry(&mhba->waiting_req_list, |
1914 | struct mvumi_cmd, queue_pointer); |
1915 | list_del_init(&cmd->queue_pointer); |
1916 | result = mvumi_send_command(mhba, cmd); |
1917 | switch (result) { |
1918 | case MV_QUEUE_COMMAND_RESULT_SENT: |
1919 | num_of_cl_sent++; |
1920 | break; |
1921 | case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: |
1922 | list_add(&cmd->queue_pointer, &mhba->waiting_req_list); |
1923 | if (num_of_cl_sent > 0) |
1924 | mvumi_send_ib_list_entry(mhba); |
1925 | |
1926 | return; |
1927 | } |
1928 | } while (!list_empty(&mhba->waiting_req_list) && count--); |
1929 | |
1930 | if (num_of_cl_sent > 0) |
1931 | mvumi_send_ib_list_entry(mhba); |
1932 | } |
1933 | |
1934 | /** |
1935 | * mvumi_enable_intr - Enables interrupts |
1936 | * @mhba: Adapter soft state |
1937 | */ |
1938 | static void mvumi_enable_intr(struct mvumi_hba *mhba) |
1939 | { |
1940 | unsigned int mask; |
1941 | struct mvumi_hw_regs *regs = mhba->regs; |
1942 | |
1943 | iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg); |
1944 | mask = ioread32(regs->enpointa_mask_reg); |
1945 | mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr; |
1946 | iowrite32(mask, regs->enpointa_mask_reg); |
1947 | } |
1948 | |
1949 | /** |
1950 |  * mvumi_disable_intr - Disables interrupts
1951 | * @mhba: Adapter soft state |
1952 | */ |
1953 | static void mvumi_disable_intr(struct mvumi_hba *mhba) |
1954 | { |
1955 | unsigned int mask; |
1956 | struct mvumi_hw_regs *regs = mhba->regs; |
1957 | |
1958 | iowrite32(0, regs->arm_to_pciea_mask_reg); |
1959 | mask = ioread32(regs->enpointa_mask_reg); |
1960 | mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout | |
1961 | regs->int_comaerr); |
1962 | iowrite32(mask, regs->enpointa_mask_reg); |
1963 | } |
1964 | |
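     | /**
     |  * mvumi_clear_intr - Read and acknowledge pending interrupt causes
     |  * @extend: Adapter soft state (struct mvumi_hba *)
     |  *
     |  * Returns 1 when the interrupt did not originate from this
     |  * adapter; otherwise saves the causes in global_isr/isr_status.
     |  */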
1965 | static int mvumi_clear_intr(void *extend) |
1966 | { |
1967 | struct mvumi_hba *mhba = (struct mvumi_hba *) extend; |
1968 | unsigned int status, isr_status = 0, tmp = 0; |
1969 | struct mvumi_hw_regs *regs = mhba->regs; |
1970 | |
1971 | status = ioread32(regs->main_int_cause_reg); |
1972 | if (!(status & regs->int_mu) || status == 0xFFFFFFFF) |
1973 | return 1; |
1974 | if (unlikely(status & regs->int_comaerr)) { |
1975 | tmp = ioread32(regs->outb_isr_cause); |
1976 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { |
1977 | if (tmp & regs->clic_out_err) { |
1978 | iowrite32(tmp & regs->clic_out_err, |
1979 | regs->outb_isr_cause); |
1980 | } |
1981 | } else { |
1982 | if (tmp & (regs->clic_in_err | regs->clic_out_err)) |
1983 | iowrite32(tmp & (regs->clic_in_err | |
1984 | regs->clic_out_err), |
1985 | regs->outb_isr_cause); |
1986 | } |
1987 | status ^= mhba->regs->int_comaerr; |
1988 | /* inbound or outbound parity error, command will timeout */ |
1989 | } |
1990 | if (status & regs->int_comaout) { |
1991 | tmp = ioread32(regs->outb_isr_cause); |
1992 | if (tmp & regs->clic_irq) |
1993 | iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause); |
1994 | } |
1995 | if (status & regs->int_dl_cpu2pciea) { |
1996 | isr_status = ioread32(regs->arm_to_pciea_drbl_reg); |
1997 | if (isr_status) |
1998 | iowrite32(isr_status, regs->arm_to_pciea_drbl_reg); |
1999 | } |
2000 | |
2001 | mhba->global_isr = status; |
2002 | mhba->isr_status = isr_status; |
2003 | |
2004 | return 0; |
2005 | } |
2006 | |
2007 | /** |
2008 | * mvumi_read_fw_status_reg - returns the current FW status value |
2009 | * @mhba: Adapter soft state |
2010 | */ |
2011 | static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) |
2012 | { |
2013 | unsigned int status; |
2014 | |
2015 | status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); |
2016 | if (status) |
2017 | iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); |
2018 | return status; |
2019 | } |
2020 | |
2021 | static struct mvumi_instance_template mvumi_instance_9143 = { |
2022 | .fire_cmd = mvumi_fire_cmd, |
2023 | .enable_intr = mvumi_enable_intr, |
2024 | .disable_intr = mvumi_disable_intr, |
2025 | .clear_intr = mvumi_clear_intr, |
2026 | .read_fw_status_reg = mvumi_read_fw_status_reg, |
2027 | .check_ib_list = mvumi_check_ib_list_9143, |
2028 | .check_ob_list = mvumi_check_ob_list_9143, |
2029 | .reset_host = mvumi_reset_host_9143, |
2030 | }; |
2031 | |
2032 | static struct mvumi_instance_template mvumi_instance_9580 = { |
2033 | .fire_cmd = mvumi_fire_cmd, |
2034 | .enable_intr = mvumi_enable_intr, |
2035 | .disable_intr = mvumi_disable_intr, |
2036 | .clear_intr = mvumi_clear_intr, |
2037 | .read_fw_status_reg = mvumi_read_fw_status_reg, |
2038 | .check_ib_list = mvumi_check_ib_list_9580, |
2039 | .check_ob_list = mvumi_check_ob_list_9580, |
2040 | .reset_host = mvumi_reset_host_9580, |
2041 | }; |
2042 | |
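     | /**
     |  * mvumi_slave_configure - Record a configured target in the map
     |  * @sdev: SCSI device being configured
     |  */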
2043 | static int mvumi_slave_configure(struct scsi_device *sdev) |
2044 | { |
2045 | struct mvumi_hba *mhba; |
2046 | unsigned char bitcount = sizeof(unsigned char) * 8; |
2047 | |
2048 | mhba = (struct mvumi_hba *) sdev->host->hostdata; |
2049 | if (sdev->id >= mhba->max_target_id) |
2050 | return -EINVAL; |
2051 | |
2052 | mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); |
2053 | return 0; |
2054 | } |
2055 | |
2056 | /** |
2057 | * mvumi_build_frame - Prepares a direct cdb (DCDB) command |
2058 | * @mhba: Adapter soft state |
2059 | * @scmd: SCSI command |
2060 | * @cmd: Command to be prepared in |
2061 | * |
2062 |  * This function prepares CDB commands. These are typically pass-through
2063 | * commands to the devices. |
2064 | */ |
2065 | static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, |
2066 | struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) |
2067 | { |
2068 | struct mvumi_msg_frame *pframe; |
2069 | |
2070 | cmd->scmd = scmd; |
2071 | cmd->cmd_status = REQ_STATUS_PENDING; |
2072 | pframe = cmd->frame; |
2073 | pframe->device_id = ((unsigned short) scmd->device->id) | |
2074 | (((unsigned short) scmd->device->lun) << 8); |
2075 | pframe->cmd_flag = 0; |
2076 | |
2077 | switch (scmd->sc_data_direction) { |
2078 | case DMA_NONE: |
2079 | pframe->cmd_flag |= CMD_FLAG_NON_DATA; |
2080 | break; |
2081 | case DMA_FROM_DEVICE: |
2082 | pframe->cmd_flag |= CMD_FLAG_DATA_IN; |
2083 | break; |
2084 | case DMA_TO_DEVICE: |
2085 | pframe->cmd_flag |= CMD_FLAG_DATA_OUT; |
2086 | break; |
2087 | case DMA_BIDIRECTIONAL: |
2088 | default: |
2089 | dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " |
2090 | "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]); |
2091 | goto error; |
2092 | } |
2093 | |
2094 | pframe->cdb_length = scmd->cmd_len; |
2095 | memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); |
2096 | pframe->req_function = CL_FUN_SCSI_CMD; |
2097 | if (scsi_bufflen(scmd)) { |
2098 | if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], |
2099 | &pframe->sg_counts)) |
2100 | goto error; |
2101 | |
2102 | pframe->data_transfer_length = scsi_bufflen(scmd); |
2103 | } else { |
2104 | pframe->sg_counts = 0; |
2105 | pframe->data_transfer_length = 0; |
2106 | } |
2107 | return 0; |
2108 | |
2109 | error: |
2110 | scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) | |
2111 | SAM_STAT_CHECK_CONDITION; |
2112 | scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24, |
2113 | 0); |
2114 | return -1; |
2115 | } |
2116 | |
2117 | /** |
2118 | * mvumi_queue_command - Queue entry point |
2119 |  * @shost: SCSI host to which the command is queued
2120 |  * @scmd: SCSI command to be queued
2121 | */ |
2122 | static int mvumi_queue_command(struct Scsi_Host *shost, |
2123 | struct scsi_cmnd *scmd) |
2124 | { |
2125 | struct mvumi_cmd *cmd; |
2126 | struct mvumi_hba *mhba; |
2127 | unsigned long irq_flags; |
2128 | |
2129 | spin_lock_irqsave(shost->host_lock, irq_flags); |
2130 | scsi_cmd_get_serial(shost, scmd); |
2131 | |
2132 | mhba = (struct mvumi_hba *) shost->hostdata; |
2133 | scmd->result = 0; |
2134 | cmd = mvumi_get_cmd(mhba); |
2135 | if (unlikely(!cmd)) { |
2136 | spin_unlock_irqrestore(shost->host_lock, irq_flags); |
2137 | return SCSI_MLQUEUE_HOST_BUSY; |
2138 | } |
2139 | |
2140 | if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) |
2141 | goto out_return_cmd; |
2142 | |
2143 | cmd->scmd = scmd; |
2144 | scmd->SCp.ptr = (char *) cmd; |
2145 | mhba->instancet->fire_cmd(mhba, cmd); |
2146 | spin_unlock_irqrestore(shost->host_lock, irq_flags); |
2147 | return 0; |
2148 | |
2149 | out_return_cmd: |
2150 | mvumi_return_cmd(mhba, cmd); |
2151 | scmd->scsi_done(scmd); |
2152 | spin_unlock_irqrestore(shost->host_lock, irq_flags); |
2153 | return 0; |
2154 | } |
2155 | |
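     | /**
     |  * mvumi_timed_out - SCSI command timeout handler
     |  * @scmd: Command that timed out
     |  *
     |  * Releases the tag and any DMA mappings held by the command and
     |  * fails it with DID_ABORT before returning it to the pool.
     |  */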
2156 | static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) |
2157 | { |
2158 | struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr; |
2159 | struct Scsi_Host *host = scmd->device->host; |
2160 | struct mvumi_hba *mhba = shost_priv(host); |
2161 | unsigned long flags; |
2162 | |
2163 | spin_lock_irqsave(mhba->shost->host_lock, flags); |
2164 | |
2165 | if (mhba->tag_cmd[cmd->frame->tag]) { |
2166 | 		mhba->tag_cmd[cmd->frame->tag] = NULL;
2167 | tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); |
2168 | } |
2169 | if (!list_empty(&cmd->queue_pointer)) |
2170 | list_del_init(&cmd->queue_pointer); |
2171 | else |
2172 | atomic_dec(&mhba->fw_outstanding); |
2173 | |
2174 | scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16); |
2175 | scmd->SCp.ptr = NULL; |
2176 | if (scsi_bufflen(scmd)) { |
2177 | if (scsi_sg_count(scmd)) { |
2178 | pci_unmap_sg(mhba->pdev, |
2179 | scsi_sglist(scmd), |
2180 | scsi_sg_count(scmd), |
2181 | (int)scmd->sc_data_direction); |
2182 | } else { |
2183 | pci_unmap_single(mhba->pdev, |
2184 | scmd->SCp.dma_handle, |
2185 | scsi_bufflen(scmd), |
2186 | (int)scmd->sc_data_direction); |
2187 | |
2188 | scmd->SCp.dma_handle = 0; |
2189 | } |
2190 | } |
2191 | mvumi_return_cmd(mhba, cmd); |
2192 | spin_unlock_irqrestore(mhba->shost->host_lock, flags); |
2193 | |
2194 | return BLK_EH_NOT_HANDLED; |
2195 | } |
2196 | |
2197 | static int |
2198 | mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, |
2199 | sector_t capacity, int geom[]) |
2200 | { |
2201 | int heads, sectors; |
2202 | sector_t cylinders; |
2203 | unsigned long tmp; |
2204 | |
2205 | heads = 64; |
2206 | sectors = 32; |
2207 | tmp = heads * sectors; |
2208 | cylinders = capacity; |
2209 | sector_div(cylinders, tmp); |
2210 | |
2211 | if (capacity >= 0x200000) { |
2212 | heads = 255; |
2213 | sectors = 63; |
2214 | tmp = heads * sectors; |
2215 | cylinders = capacity; |
2216 | sector_div(cylinders, tmp); |
2217 | } |
2218 | geom[0] = heads; |
2219 | geom[1] = sectors; |
2220 | geom[2] = cylinders; |
2221 | |
2222 | return 0; |
2223 | } |
2224 | |
2225 | static struct scsi_host_template mvumi_template = { |
2227 | .module = THIS_MODULE, |
2228 | .name = "Marvell Storage Controller", |
2229 | .slave_configure = mvumi_slave_configure, |
2230 | .queuecommand = mvumi_queue_command, |
2231 | .eh_host_reset_handler = mvumi_host_reset, |
2232 | .bios_param = mvumi_bios_param, |
2233 | .this_id = -1, |
2234 | }; |
2235 | |
2236 | static struct scsi_transport_template mvumi_transport_template = { |
2237 | .eh_timed_out = mvumi_timed_out, |
2238 | }; |
2239 | |
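     | /**
     |  * mvumi_cfg_hw_reg - Set up per-chip register map and bit masks
     |  * @mhba: Adapter soft state
     |  *
     |  * Fills mhba->regs with the BAR offsets and interrupt bits that
     |  * differ between the 9143 and 9580 controllers.
     |  */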
2240 | static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) |
2241 | { |
2242 | void *base = NULL; |
2243 | struct mvumi_hw_regs *regs; |
2244 | |
2245 | switch (mhba->pdev->device) { |
2246 | case PCI_DEVICE_ID_MARVELL_MV9143: |
2247 | mhba->mmio = mhba->base_addr[0]; |
2248 | base = mhba->mmio; |
2249 | if (!mhba->regs) { |
2250 | mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); |
2251 | if (mhba->regs == NULL) |
2252 | return -ENOMEM; |
2253 | } |
2254 | regs = mhba->regs; |
2255 | |
2256 | /* For Arm */ |
2257 | regs->ctrl_sts_reg = base + 0x20104; |
2258 | regs->rstoutn_mask_reg = base + 0x20108; |
2259 | regs->sys_soft_rst_reg = base + 0x2010C; |
2260 | regs->main_int_cause_reg = base + 0x20200; |
2261 | regs->enpointa_mask_reg = base + 0x2020C; |
2262 | regs->rstoutn_en_reg = base + 0xF1400; |
2263 | /* For Doorbell */ |
2264 | regs->pciea_to_arm_drbl_reg = base + 0x20400; |
2265 | regs->arm_to_pciea_drbl_reg = base + 0x20408; |
2266 | regs->arm_to_pciea_mask_reg = base + 0x2040C; |
2267 | regs->pciea_to_arm_msg0 = base + 0x20430; |
2268 | regs->pciea_to_arm_msg1 = base + 0x20434; |
2269 | regs->arm_to_pciea_msg0 = base + 0x20438; |
2270 | regs->arm_to_pciea_msg1 = base + 0x2043C; |
2271 | |
2272 | /* For Message Unit */ |
2273 | |
2274 | regs->inb_aval_count_basel = base + 0x508; |
2275 | regs->inb_aval_count_baseh = base + 0x50C; |
2276 | regs->inb_write_pointer = base + 0x518; |
2277 | regs->inb_read_pointer = base + 0x51C; |
2279 | regs->outb_copy_basel = base + 0x5B0; |
2280 | regs->outb_copy_baseh = base + 0x5B4; |
2281 | regs->outb_copy_pointer = base + 0x544; |
2282 | regs->outb_read_pointer = base + 0x548; |
2283 | regs->outb_isr_cause = base + 0x560; |
2284 | regs->outb_coal_cfg = base + 0x568; |
2285 | /* Bit setting for HW */ |
2286 | regs->int_comaout = 1 << 8; |
2287 | regs->int_comaerr = 1 << 6; |
2288 | regs->int_dl_cpu2pciea = 1 << 1; |
2289 | regs->cl_pointer_toggle = 1 << 12; |
2290 | regs->clic_irq = 1 << 1; |
2291 | regs->clic_in_err = 1 << 8; |
2292 | regs->clic_out_err = 1 << 12; |
2293 | regs->cl_slot_num_mask = 0xFFF; |
2294 | regs->int_drbl_int_mask = 0x3FFFFFFF; |
2295 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout | |
2296 | regs->int_comaerr; |
2297 | break; |
2298 | case PCI_DEVICE_ID_MARVELL_MV9580: |
2299 | mhba->mmio = mhba->base_addr[2]; |
2300 | base = mhba->mmio; |
2301 | if (!mhba->regs) { |
2302 | mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); |
2303 | if (mhba->regs == NULL) |
2304 | return -ENOMEM; |
2305 | } |
2306 | regs = mhba->regs; |
2307 | /* For Arm */ |
2308 | regs->ctrl_sts_reg = base + 0x20104; |
2309 | regs->rstoutn_mask_reg = base + 0x1010C; |
2310 | regs->sys_soft_rst_reg = base + 0x10108; |
2311 | regs->main_int_cause_reg = base + 0x10200; |
2312 | regs->enpointa_mask_reg = base + 0x1020C; |
2313 | regs->rstoutn_en_reg = base + 0xF1400; |
2314 | |
2315 | /* For Doorbell */ |
2316 | regs->pciea_to_arm_drbl_reg = base + 0x10460; |
2317 | regs->arm_to_pciea_drbl_reg = base + 0x10480; |
2318 | regs->arm_to_pciea_mask_reg = base + 0x10484; |
2319 | regs->pciea_to_arm_msg0 = base + 0x10400; |
2320 | regs->pciea_to_arm_msg1 = base + 0x10404; |
2321 | regs->arm_to_pciea_msg0 = base + 0x10420; |
2322 | regs->arm_to_pciea_msg1 = base + 0x10424; |
2323 | |
2324 | /* For reset*/ |
2325 | regs->reset_request = base + 0x10108; |
2326 | regs->reset_enable = base + 0x1010c; |
2327 | |
2328 | /* For Message Unit */ |
2329 | regs->inb_aval_count_basel = base + 0x4008; |
2330 | regs->inb_aval_count_baseh = base + 0x400C; |
2331 | regs->inb_write_pointer = base + 0x4018; |
2332 | regs->inb_read_pointer = base + 0x401C; |
2333 | regs->outb_copy_basel = base + 0x4058; |
2334 | regs->outb_copy_baseh = base + 0x405C; |
2335 | regs->outb_copy_pointer = base + 0x406C; |
2336 | regs->outb_read_pointer = base + 0x4070; |
2337 | regs->outb_coal_cfg = base + 0x4080; |
2338 | regs->outb_isr_cause = base + 0x4088; |
2339 | /* Bit setting for HW */ |
2340 | regs->int_comaout = 1 << 4; |
2341 | regs->int_dl_cpu2pciea = 1 << 12; |
2342 | regs->int_comaerr = 1 << 29; |
2343 | regs->cl_pointer_toggle = 1 << 14; |
2344 | regs->cl_slot_num_mask = 0x3FFF; |
2345 | regs->clic_irq = 1 << 0; |
2346 | regs->clic_out_err = 1 << 1; |
2347 | regs->int_drbl_int_mask = 0x3FFFFFFF; |
2348 | regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout; |
2349 | break; |
2350 | default: |
2351 | return -1; |
2353 | } |
2354 | |
2355 | return 0; |
2356 | } |
2357 | |
2358 | /** |
2359 | * mvumi_init_fw - Initializes the FW |
2360 | * @mhba: Adapter soft state |
2361 | * |
2362 | * This is the main function for initializing firmware. |
2363 | */ |
2364 | static int mvumi_init_fw(struct mvumi_hba *mhba) |
2365 | { |
2366 | int ret = 0; |
2367 | |
2368 | if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { |
2369 | dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); |
2370 | return -EBUSY; |
2371 | } |
2372 | ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); |
2373 | if (ret) |
2374 | goto fail_ioremap; |
2375 | |
2376 | switch (mhba->pdev->device) { |
2377 | case PCI_DEVICE_ID_MARVELL_MV9143: |
2378 | mhba->instancet = &mvumi_instance_9143; |
2379 | mhba->io_seq = 0; |
2380 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; |
2381 | mhba->request_id_enabled = 1; |
2382 | break; |
2383 | case PCI_DEVICE_ID_MARVELL_MV9580: |
2384 | mhba->instancet = &mvumi_instance_9580; |
2385 | mhba->io_seq = 0; |
2386 | mhba->max_sge = MVUMI_MAX_SG_ENTRY; |
2387 | break; |
2388 | default: |
2389 | dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", |
2390 | mhba->pdev->device); |
2391 | mhba->instancet = NULL; |
2392 | ret = -EINVAL; |
2393 | goto fail_alloc_mem; |
2394 | } |
2395 | 	dev_dbg(&mhba->pdev->dev, "device id: %04X found.\n",
2396 | mhba->pdev->device); |
2397 | ret = mvumi_cfg_hw_reg(mhba); |
2398 | if (ret) { |
2399 | dev_err(&mhba->pdev->dev, |
2400 | "failed to allocate memory for reg\n"); |
2401 | ret = -ENOMEM; |
2402 | goto fail_alloc_mem; |
2403 | } |
2404 | mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE, |
2405 | &mhba->handshake_page_phys); |
2406 | if (!mhba->handshake_page) { |
2407 | dev_err(&mhba->pdev->dev, |
2408 | "failed to allocate memory for handshake\n"); |
2409 | ret = -ENOMEM; |
2410 | goto fail_alloc_page; |
2411 | } |
2412 | |
2413 | if (mvumi_start(mhba)) { |
2414 | ret = -EINVAL; |
2415 | goto fail_ready_state; |
2416 | } |
2417 | ret = mvumi_alloc_cmds(mhba); |
2418 | if (ret) |
2419 | goto fail_ready_state; |
2420 | |
2421 | return 0; |
2422 | |
2423 | fail_ready_state: |
2424 | mvumi_release_mem_resource(mhba); |
2425 | pci_free_consistent(mhba->pdev, HSP_MAX_SIZE, |
2426 | mhba->handshake_page, mhba->handshake_page_phys); |
2427 | fail_alloc_page: |
2428 | kfree(mhba->regs); |
2429 | fail_alloc_mem: |
2430 | mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); |
2431 | fail_ioremap: |
2432 | pci_release_regions(mhba->pdev); |
2433 | |
2434 | return ret; |
2435 | } |
2436 | |
2437 | /** |
2438 | * mvumi_io_attach - Attaches this driver to SCSI mid-layer |
2439 | * @mhba: Adapter soft state |
2440 | */ |
2441 | static int mvumi_io_attach(struct mvumi_hba *mhba) |
2442 | { |
2443 | struct Scsi_Host *host = mhba->shost; |
2444 | struct scsi_device *sdev = NULL; |
2445 | int ret; |
2446 | unsigned int max_sg = (mhba->ib_max_size + 4 - |
2447 | sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); |
2448 | |
2449 | host->irq = mhba->pdev->irq; |
2450 | host->unique_id = mhba->unique_id; |
2451 | host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
2452 | host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; |
2453 | host->max_sectors = mhba->max_transfer_size / 512; |
2454 | host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; |
2455 | host->max_id = mhba->max_target_id; |
2456 | host->max_cmd_len = MAX_COMMAND_SIZE; |
2457 | host->transportt = &mvumi_transport_template; |
2458 | |
2459 | ret = scsi_add_host(host, &mhba->pdev->dev); |
2460 | if (ret) { |
2461 | dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); |
2462 | return ret; |
2463 | } |
2464 | mhba->fw_flag |= MVUMI_FW_ATTACH; |
2465 | |
2466 | mutex_lock(&mhba->sas_discovery_mutex); |
2467 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) |
2468 | ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); |
2469 | else |
2470 | ret = 0; |
2471 | if (ret) { |
2472 | dev_err(&mhba->pdev->dev, "add virtual device failed\n"); |
2473 | mutex_unlock(&mhba->sas_discovery_mutex); |
2474 | goto fail_add_device; |
2475 | } |
2476 | |
2477 | mhba->dm_thread = kthread_create(mvumi_rescan_bus, |
2478 | mhba, "mvumi_scanthread"); |
2479 | if (IS_ERR(mhba->dm_thread)) { |
2480 | dev_err(&mhba->pdev->dev, |
2481 | "failed to create device scan thread\n"); |
2482 | mutex_unlock(&mhba->sas_discovery_mutex); |
2483 | goto fail_create_thread; |
2484 | } |
2485 | atomic_set(&mhba->pnp_count, 1); |
2486 | wake_up_process(mhba->dm_thread); |
2487 | |
2488 | mutex_unlock(&mhba->sas_discovery_mutex); |
2489 | return 0; |
2490 | |
2491 | fail_create_thread: |
2492 | if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) |
2493 | sdev = scsi_device_lookup(mhba->shost, 0, |
2494 | mhba->max_target_id - 1, 0); |
2495 | if (sdev) { |
2496 | scsi_remove_device(sdev); |
2497 | scsi_device_put(sdev); |
2498 | } |
2499 | fail_add_device: |
2500 | scsi_remove_host(mhba->shost); |
2501 | return ret; |
2502 | } |
2503 | |
2504 | /** |
2505 | * mvumi_probe_one - PCI hotplug entry point |
2506 | * @pdev: PCI device structure |
2507 | * @id: PCI ids of supported hotplugged adapter |
2508 | */ |
2509 | static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) |
2510 | { |
2511 | struct Scsi_Host *host; |
2512 | struct mvumi_hba *mhba; |
2513 | int ret; |
2514 | |
2515 | dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", |
2516 | pdev->vendor, pdev->device, pdev->subsystem_vendor, |
2517 | pdev->subsystem_device); |
2518 | |
2519 | ret = pci_enable_device(pdev); |
2520 | if (ret) |
2521 | return ret; |
2522 | |
2523 | pci_set_master(pdev); |
2524 | |
2525 | if (IS_DMA64) { |
2526 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
2527 | if (ret) { |
2528 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
2529 | if (ret) |
2530 | goto fail_set_dma_mask; |
2531 | } |
2532 | } else { |
2533 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
2534 | if (ret) |
2535 | goto fail_set_dma_mask; |
2536 | } |
2537 | |
2538 | host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); |
2539 | if (!host) { |
2540 | dev_err(&pdev->dev, "scsi_host_alloc failed\n"); |
2541 | ret = -ENOMEM; |
2542 | goto fail_alloc_instance; |
2543 | } |
2544 | mhba = shost_priv(host); |
2545 | |
2546 | INIT_LIST_HEAD(&mhba->cmd_pool); |
2547 | INIT_LIST_HEAD(&mhba->ob_data_list); |
2548 | INIT_LIST_HEAD(&mhba->free_ob_list); |
2549 | INIT_LIST_HEAD(&mhba->res_list); |
2550 | INIT_LIST_HEAD(&mhba->waiting_req_list); |
2551 | mutex_init(&mhba->device_lock); |
2552 | INIT_LIST_HEAD(&mhba->mhba_dev_list); |
2553 | INIT_LIST_HEAD(&mhba->shost_dev_list); |
2554 | atomic_set(&mhba->fw_outstanding, 0); |
2555 | init_waitqueue_head(&mhba->int_cmd_wait_q); |
2556 | mutex_init(&mhba->sas_discovery_mutex); |
2557 | |
2558 | mhba->pdev = pdev; |
2559 | mhba->shost = host; |
2560 | mhba->unique_id = pdev->bus->number << 8 | pdev->devfn; |
2561 | |
2562 | ret = mvumi_init_fw(mhba); |
2563 | if (ret) |
2564 | goto fail_init_fw; |
2565 | |
2566 | ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, |
2567 | "mvumi", mhba); |
2568 | if (ret) { |
2569 | dev_err(&pdev->dev, "failed to register IRQ\n"); |
2570 | goto fail_init_irq; |
2571 | } |
2572 | |
2573 | mhba->instancet->enable_intr(mhba); |
2574 | pci_set_drvdata(pdev, mhba); |
2575 | |
2576 | ret = mvumi_io_attach(mhba); |
2577 | if (ret) |
2578 | goto fail_io_attach; |
2579 | |
2580 | mvumi_backup_bar_addr(mhba); |
2581 | 	dev_dbg(&pdev->dev, "mvumi driver probed successfully.\n");
2582 | |
2583 | return 0; |
2584 | |
2585 | fail_io_attach: |
2586 | pci_set_drvdata(pdev, NULL); |
2587 | mhba->instancet->disable_intr(mhba); |
2588 | free_irq(mhba->pdev->irq, mhba); |
2589 | fail_init_irq: |
2590 | mvumi_release_fw(mhba); |
2591 | fail_init_fw: |
2592 | scsi_host_put(host); |
2593 | |
2594 | fail_alloc_instance: |
2595 | fail_set_dma_mask: |
2596 | pci_disable_device(pdev); |
2597 | |
2598 | return ret; |
2599 | } |
2600 | |
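     | /**
     |  * mvumi_detach_one - PCI device removal entry point
     |  * @pdev: PCI device structure
     |  */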
2601 | static void mvumi_detach_one(struct pci_dev *pdev) |
2602 | { |
2603 | struct Scsi_Host *host; |
2604 | struct mvumi_hba *mhba; |
2605 | |
2606 | mhba = pci_get_drvdata(pdev); |
2607 | if (mhba->dm_thread) { |
2608 | kthread_stop(mhba->dm_thread); |
2609 | mhba->dm_thread = NULL; |
2610 | } |
2611 | |
2612 | mvumi_detach_devices(mhba); |
2613 | host = mhba->shost; |
2614 | scsi_remove_host(mhba->shost); |
2615 | mvumi_flush_cache(mhba); |
2616 | |
2617 | mhba->instancet->disable_intr(mhba); |
2618 | free_irq(mhba->pdev->irq, mhba); |
2619 | mvumi_release_fw(mhba); |
2620 | scsi_host_put(host); |
2621 | pci_set_drvdata(pdev, NULL); |
2622 | pci_disable_device(pdev); |
2623 | dev_dbg(&pdev->dev, "driver is removed!\n"); |
2624 | } |
2625 | |
2626 | /** |
2627 | * mvumi_shutdown - Shutdown entry point |
2628 |  * @pdev: PCI device structure
2629 | */ |
2630 | static void mvumi_shutdown(struct pci_dev *pdev) |
2631 | { |
2632 | struct mvumi_hba *mhba = pci_get_drvdata(pdev); |
2633 | |
2634 | mvumi_flush_cache(mhba); |
2635 | } |
2636 | |
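     | /**
     |  * mvumi_suspend - Power management suspend entry point
     |  * @pdev: PCI device structure
     |  * @state: Power state the device is entering
     |  */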
2637 | static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) |
2638 | { |
2639 | struct mvumi_hba *mhba = NULL; |
2640 | |
2641 | mhba = pci_get_drvdata(pdev); |
2642 | mvumi_flush_cache(mhba); |
2643 | |
2645 | mhba->instancet->disable_intr(mhba); |
2646 | free_irq(mhba->pdev->irq, mhba); |
2647 | mvumi_unmap_pci_addr(pdev, mhba->base_addr); |
2648 | pci_release_regions(pdev); |
2649 | pci_save_state(pdev); |
2650 | pci_disable_device(pdev); |
2651 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
2652 | |
2653 | return 0; |
2654 | } |
2655 | |
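     | /**
     |  * mvumi_resume - Power management resume entry point
     |  * @pdev: PCI device structure
     |  *
     |  * Re-enables the device, remaps the BARs, restarts the firmware
     |  * and re-registers the interrupt handler.
     |  */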
2656 | static int mvumi_resume(struct pci_dev *pdev) |
2657 | { |
2658 | int ret; |
2659 | struct mvumi_hba *mhba = NULL; |
2660 | |
2661 | mhba = pci_get_drvdata(pdev); |
2662 | |
2663 | pci_set_power_state(pdev, PCI_D0); |
2664 | pci_enable_wake(pdev, PCI_D0, 0); |
2665 | pci_restore_state(pdev); |
2666 | |
2667 | ret = pci_enable_device(pdev); |
2668 | if (ret) { |
2669 | dev_err(&pdev->dev, "enable device failed\n"); |
2670 | return ret; |
2671 | } |
2672 | pci_set_master(pdev); |
2673 | if (IS_DMA64) { |
2674 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
2675 | if (ret) { |
2676 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
2677 | if (ret) |
2678 | goto fail; |
2679 | } |
2680 | } else { |
2681 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
2682 | if (ret) |
2683 | goto fail; |
2684 | } |
2685 | ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); |
2686 | if (ret) |
2687 | goto fail; |
2688 | ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); |
2689 | if (ret) |
2690 | goto release_regions; |
2691 | |
2692 | if (mvumi_cfg_hw_reg(mhba)) { |
2693 | ret = -EINVAL; |
2694 | goto unmap_pci_addr; |
2695 | } |
2696 | |
2697 | mhba->mmio = mhba->base_addr[0]; |
2698 | mvumi_reset(mhba); |
2699 | |
2700 | if (mvumi_start(mhba)) { |
2701 | ret = -EINVAL; |
2702 | goto unmap_pci_addr; |
2703 | } |
2704 | |
2705 | ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, |
2706 | "mvumi", mhba); |
2707 | if (ret) { |
2708 | dev_err(&pdev->dev, "failed to register IRQ\n"); |
2709 | goto unmap_pci_addr; |
2710 | } |
2711 | mhba->instancet->enable_intr(mhba); |
2712 | |
2713 | return 0; |
2714 | |
2715 | unmap_pci_addr: |
2716 | mvumi_unmap_pci_addr(pdev, mhba->base_addr); |
2717 | release_regions: |
2718 | pci_release_regions(pdev); |
2719 | fail: |
2720 | pci_disable_device(pdev); |
2721 | |
2722 | return ret; |
2723 | } |
2724 | |
2725 | static struct pci_driver mvumi_pci_driver = { |
2727 | .name = MV_DRIVER_NAME, |
2728 | .id_table = mvumi_pci_table, |
2729 | .probe = mvumi_probe_one, |
2730 | .remove = mvumi_detach_one, |
2731 | .shutdown = mvumi_shutdown, |
2732 | #ifdef CONFIG_PM |
2733 | .suspend = mvumi_suspend, |
2734 | .resume = mvumi_resume, |
2735 | #endif |
2736 | }; |
2737 | |
2738 | /** |
2739 | * mvumi_init - Driver load entry point |
2740 | */ |
2741 | static int __init mvumi_init(void) |
2742 | { |
2743 | return pci_register_driver(&mvumi_pci_driver); |
2744 | } |
2745 | |
2746 | /** |
2747 | * mvumi_exit - Driver unload entry point |
2748 | */ |
2749 | static void __exit mvumi_exit(void) |
2750 | { |
2752 | pci_unregister_driver(&mvumi_pci_driver); |
2753 | } |
2754 | |
2755 | module_init(mvumi_init); |
2756 | module_exit(mvumi_exit); |
2757 |