Root/
1 | /* |
2 | * libata-core.c - helper library for ATA |
3 | * |
4 | * Maintained by: Jeff Garzik <jgarzik@pobox.com> |
5 | * Please ALWAYS copy linux-ide@vger.kernel.org |
6 | * on emails. |
7 | * |
8 | * Copyright 2003-2004 Red Hat, Inc. All rights reserved. |
9 | * Copyright 2003-2004 Jeff Garzik |
10 | * |
11 | * |
12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License as published by |
14 | * the Free Software Foundation; either version 2, or (at your option) |
15 | * any later version. |
16 | * |
17 | * This program is distributed in the hope that it will be useful, |
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
20 | * GNU General Public License for more details. |
21 | * |
22 | * You should have received a copy of the GNU General Public License |
23 | * along with this program; see the file COPYING. If not, write to |
24 | * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. |
25 | * |
26 | * |
27 | * libata documentation is available via 'make {ps|pdf}docs', |
28 | * as Documentation/DocBook/libata.* |
29 | * |
30 | * Hardware documentation available from http://www.t13.org/ and |
31 | * http://www.sata-io.org/ |
32 | * |
33 | * Standards documents from: |
34 | * http://www.t13.org (ATA standards, PCI DMA IDE spec) |
35 | * http://www.t10.org (SCSI MMC - for ATAPI MMC) |
36 | * http://www.sata-io.org (SATA) |
37 | * http://www.compactflash.org (CF) |
38 | * http://www.qic.org (QIC157 - Tape and DSC) |
39 | * http://www.ce-ata.org (CE-ATA: not supported) |
40 | * |
41 | */ |
42 | |
43 | #include <linux/kernel.h> |
44 | #include <linux/module.h> |
45 | #include <linux/pci.h> |
46 | #include <linux/init.h> |
47 | #include <linux/list.h> |
48 | #include <linux/mm.h> |
49 | #include <linux/spinlock.h> |
50 | #include <linux/blkdev.h> |
51 | #include <linux/delay.h> |
52 | #include <linux/timer.h> |
53 | #include <linux/interrupt.h> |
54 | #include <linux/completion.h> |
55 | #include <linux/suspend.h> |
56 | #include <linux/workqueue.h> |
57 | #include <linux/scatterlist.h> |
58 | #include <linux/io.h> |
59 | #include <linux/async.h> |
60 | #include <linux/log2.h> |
61 | #include <linux/slab.h> |
62 | #include <scsi/scsi.h> |
63 | #include <scsi/scsi_cmnd.h> |
64 | #include <scsi/scsi_host.h> |
65 | #include <linux/libata.h> |
66 | #include <asm/byteorder.h> |
67 | #include <linux/cdrom.h> |
68 | #include <linux/ratelimit.h> |
69 | #include <linux/pm_runtime.h> |
70 | #include <linux/platform_device.h> |
71 | |
72 | #include "libata.h" |
73 | #include "libata-transport.h" |
74 | |
/* debounce timing parameters in msecs { interval, duration, timeout } */
/* normal: default SATA PHY debounce used for ordinary resets */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
/* hotplug: coarser sampling with a longer stable period for hotplug events */
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
/* long: most tolerant variant, used when links are known to be flaky */
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
79 | |
/*
 * Base port operations shared by all libata drivers: standard
 * prereset/postreset handlers and the standard EH entry points.
 * Drivers inherit from this (directly or via sata_port_ops).
 */
const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

/*
 * Port operations for SATA controllers: base ops plus the standard
 * NCQ qc_defer policy and SATA hardreset.
 */
const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};
94 | |
/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* monotonically increasing id source for port/host print ids */
atomic_t ata_print_id = ATOMIC_INIT(0);
102 | |
/* one parsed "libata.force" parameter value */
struct ata_force_param {
	const char	*name;		/* human readable name, used in FORCE messages */
	unsigned int	cbl;		/* forced cable type, ATA_CBL_NONE if unset */
	int		spd_limit;	/* forced SATA spd limit, 0 if unset */
	unsigned long	xfer_mask;	/* forced transfer mode mask, 0 if unset */
	unsigned int	horkage_on;	/* horkage flags to set */
	unsigned int	horkage_off;	/* horkage flags to clear */
	unsigned int	lflags;		/* link flags to OR in */
};

/* a force-table entry: which port/device a parameter applies to */
struct ata_force_ent {
	int			port;	/* port number, -1 matches any port */
	int			device;	/* device number, -1 matches any device */
	struct ata_force_param	param;	/* the forced configuration */
};
118 | |
/* table of parsed libata.force entries, filled during module init */
static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
126 | |
/* global behavior knobs, all settable on the kernel command line */

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
162 | |
163 | static int atapi_an; |
164 | module_param(atapi_an, int, 0444); |
165 | MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)"); |
166 | |
/* module identity */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
171 | |
172 | |
173 | static bool ata_sstatus_online(u32 sstatus) |
174 | { |
175 | return (sstatus & 0xf) == 0x3; |
176 | } |
177 | |
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Walk the links of @ap in the order selected by @mode:
 *	HOST_FIRST visits the host link before any PMP fan-out links,
 *	PMP_FIRST visits fan-out links (and the slave link, if any)
 *	before the host link, and EDGE visits only edge links (fan-out
 *	links when a PMP is attached, otherwise the host link).
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link, or NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance within the fan-out array */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* fan-out links exhausted; PMP_FIRST still owes the host link */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
236 | |
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Walk the devices of @link forward (ENABLED/ALL) or backward
 *	(*_REVERSE).  The ENABLED modes skip devices which are not
 *	currently enabled.
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device, or NULL when iteration is complete.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			/* forward: start at the first device */
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			/* reverse: start at the last device */
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes keep advancing until an enabled device is found */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
289 | |
290 | /** |
291 | * ata_dev_phys_link - find physical link for a device |
292 | * @dev: ATA device to look up physical link for |
293 | * |
294 | * Look up physical link which @dev is attached to. Note that |
295 | * this is different from @dev->link only when @dev is on slave |
296 | * link. For all other cases, it's the same as @dev->link. |
297 | * |
298 | * LOCKING: |
299 | * Don't care. |
300 | * |
301 | * RETURNS: |
302 | * Pointer to the found physical link. |
303 | */ |
304 | struct ata_link *ata_dev_phys_link(struct ata_device *dev) |
305 | { |
306 | struct ata_port *ap = dev->link->ap; |
307 | |
308 | if (!ap->slave_link) |
309 | return dev->link; |
310 | if (!dev->devno) |
311 | return &ap->link; |
312 | return ap->slave_link; |
313 | } |
314 | |
315 | /** |
316 | * ata_force_cbl - force cable type according to libata.force |
317 | * @ap: ATA port of interest |
318 | * |
319 | * Force cable type according to libata.force and whine about it. |
320 | * The last entry which has matching port number is used, so it |
321 | * can be specified as part of device force parameters. For |
322 | * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the |
323 | * same effect. |
324 | * |
325 | * LOCKING: |
326 | * EH context. |
327 | */ |
328 | void ata_force_cbl(struct ata_port *ap) |
329 | { |
330 | int i; |
331 | |
332 | for (i = ata_force_tbl_size - 1; i >= 0; i--) { |
333 | const struct ata_force_ent *fe = &ata_force_tbl[i]; |
334 | |
335 | if (fe->port != -1 && fe->port != ap->print_id) |
336 | continue; |
337 | |
338 | if (fe->param.cbl == ATA_CBL_NONE) |
339 | continue; |
340 | |
341 | ap->cbl = fe->param.cbl; |
342 | ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); |
343 | return; |
344 | } |
345 | } |
346 | |
/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	/* host link is addressed as device 15 (16 for slave) */
	if (ata_is_host_link(link))
		linkno += 15;

	/* scan backwards so the last matching entry takes precedence */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
398 | |
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	/* scan backwards; the last matching entry wins */
	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		/*
		 * Apply the highest forced mode class only: forcing UDMA
		 * leaves MWDMA/PIO masks alone, forcing MWDMA clears UDMA,
		 * and forcing PIO clears both DMA classes.
		 */
		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}
452 | |
/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	/* forward scan: later entries may adjust flags set by earlier ones */
	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		/* skip entries which wouldn't change any flag */
		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}
495 | |
496 | /** |
497 | * atapi_cmd_type - Determine ATAPI command type from SCSI opcode |
498 | * @opcode: SCSI opcode |
499 | * |
500 | * Determine ATAPI command type from @opcode. |
501 | * |
502 | * LOCKING: |
503 | * None. |
504 | * |
505 | * RETURNS: |
506 | * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} |
507 | */ |
508 | int atapi_cmd_type(u8 opcode) |
509 | { |
510 | switch (opcode) { |
511 | case GPCMD_READ_10: |
512 | case GPCMD_READ_12: |
513 | return ATAPI_READ; |
514 | |
515 | case GPCMD_WRITE_10: |
516 | case GPCMD_WRITE_12: |
517 | case GPCMD_WRITE_AND_VERIFY_10: |
518 | return ATAPI_WRITE; |
519 | |
520 | case GPCMD_READ_CD: |
521 | case GPCMD_READ_CD_MSF: |
522 | return ATAPI_READ_CD; |
523 | |
524 | case ATA_16: |
525 | case ATA_12: |
526 | if (atapi_passthru16) |
527 | return ATAPI_PASS_THRU; |
528 | /* fall thru */ |
529 | default: |
530 | return ATAPI_MISC; |
531 | } |
532 | } |
533 | |
534 | /** |
535 | * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure |
536 | * @tf: Taskfile to convert |
537 | * @pmp: Port multiplier port |
538 | * @is_cmd: This FIS is for command |
539 | * @fis: Buffer into which data will output |
540 | * |
541 | * Converts a standard ATA taskfile to a Serial ATA |
542 | * FIS structure (Register - Host to Device). |
543 | * |
544 | * LOCKING: |
545 | * Inherited from caller. |
546 | */ |
547 | void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) |
548 | { |
549 | fis[0] = 0x27; /* Register - Host to Device FIS */ |
550 | fis[1] = pmp & 0xf; /* Port multiplier number*/ |
551 | if (is_cmd) |
552 | fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ |
553 | |
554 | fis[2] = tf->command; |
555 | fis[3] = tf->feature; |
556 | |
557 | fis[4] = tf->lbal; |
558 | fis[5] = tf->lbam; |
559 | fis[6] = tf->lbah; |
560 | fis[7] = tf->device; |
561 | |
562 | fis[8] = tf->hob_lbal; |
563 | fis[9] = tf->hob_lbam; |
564 | fis[10] = tf->hob_lbah; |
565 | fis[11] = tf->hob_feature; |
566 | |
567 | fis[12] = tf->nsect; |
568 | fis[13] = tf->hob_nsect; |
569 | fis[14] = 0; |
570 | fis[15] = tf->ctl; |
571 | |
572 | fis[16] = 0; |
573 | fis[17] = 0; |
574 | fis[18] = 0; |
575 | fis[19] = 0; |
576 | } |
577 | |
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure (Register - Device to
 *	Host) to a standard ATA taskfile.  Only the fields present
 *	in a D2H FIS are written; other @tf fields are left untouched.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	/* high-order bytes for 48-bit results */
	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
606 | |
/*
 * Read/write command lookup table, indexed by
 * base + fua(4) + lba48(2) + write(1) where base is 0 for PIO
 * multi-sector, 8 for plain PIO and 16 for DMA.  Zero entries mark
 * unsupported combinations (e.g. FUA without LBA48).
 * See ata_rwcmd_protocol() for the index computation.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
636 | |
637 | /** |
638 | * ata_rwcmd_protocol - set taskfile r/w commands and protocol |
639 | * @tf: command to examine and configure |
640 | * @dev: device tf belongs to |
641 | * |
642 | * Examine the device configuration and tf->flags to calculate |
643 | * the proper read/write commands and protocol to use. |
644 | * |
645 | * LOCKING: |
646 | * caller. |
647 | */ |
648 | static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) |
649 | { |
650 | u8 cmd; |
651 | |
652 | int index, fua, lba48, write; |
653 | |
654 | fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; |
655 | lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; |
656 | write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; |
657 | |
658 | if (dev->flags & ATA_DFLAG_PIO) { |
659 | tf->protocol = ATA_PROT_PIO; |
660 | index = dev->multi_count ? 0 : 8; |
661 | } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { |
662 | /* Unable to use DMA due to host limitation */ |
663 | tf->protocol = ATA_PROT_PIO; |
664 | index = dev->multi_count ? 0 : 8; |
665 | } else { |
666 | tf->protocol = ATA_PROT_DMA; |
667 | index = 16; |
668 | } |
669 | |
670 | cmd = ata_rw_cmds[index + fua + lba48 + write]; |
671 | if (cmd) { |
672 | tf->command = cmd; |
673 | return 0; |
674 | } |
675 | return -1; |
676 | } |
677 | |
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* LBA48: bits 47:24 come from the HOB registers */
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			/* LBA28: bits 27:24 live in the device register */
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		/* CHS: cylinder in lbam/lbah, head in device, sector in lbal */
		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		/* CHS sector numbers are 1-based; 0 is invalid */
		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}
726 | |
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Addressing mode
 *	is chosen in order of preference: NCQ, LBA28, LBA48, CHS.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands bypass NCQ even when it is enabled */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/*
		 * NCQ register layout: tag lives in nsect bits 7:3,
		 * sector count in the feature registers.
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		/* FUA is requested via device register bit 7 for NCQ */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		/* pick command and protocol; fails for e.g. FUA w/o LBA48 */
		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
849 | |
850 | /** |
851 | * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask |
852 | * @pio_mask: pio_mask |
853 | * @mwdma_mask: mwdma_mask |
854 | * @udma_mask: udma_mask |
855 | * |
856 | * Pack @pio_mask, @mwdma_mask and @udma_mask into a single |
857 | * unsigned int xfer_mask. |
858 | * |
859 | * LOCKING: |
860 | * None. |
861 | * |
862 | * RETURNS: |
863 | * Packed xfer_mask. |
864 | */ |
865 | unsigned long ata_pack_xfermask(unsigned long pio_mask, |
866 | unsigned long mwdma_mask, |
867 | unsigned long udma_mask) |
868 | { |
869 | return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | |
870 | ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | |
871 | ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); |
872 | } |
873 | |
/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 *
 *	LOCKING:
 *	None.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
894 | |
/*
 * Mapping between xfer_mask bit ranges and XFER_* mode bases.
 * Each entry covers @bits mask bits starting at @shift, whose
 * corresponding XFER_* values start at @base.  Terminated by a
 * negative shift.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
904 | |
905 | /** |
906 | * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask |
907 | * @xfer_mask: xfer_mask of interest |
908 | * |
909 | * Return matching XFER_* value for @xfer_mask. Only the highest |
910 | * bit of @xfer_mask is considered. |
911 | * |
912 | * LOCKING: |
913 | * None. |
914 | * |
915 | * RETURNS: |
916 | * Matching XFER_* value, 0xff if no match found. |
917 | */ |
918 | u8 ata_xfer_mask2mode(unsigned long xfer_mask) |
919 | { |
920 | int highbit = fls(xfer_mask) - 1; |
921 | const struct ata_xfer_ent *ent; |
922 | |
923 | for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) |
924 | if (highbit >= ent->shift && highbit < ent->shift + ent->bits) |
925 | return ent->base + highbit - ent->shift; |
926 | return 0xff; |
927 | } |
928 | |
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.  The result includes
 *	@xfer_mode itself and all lower modes of the same class (mask
 *	bits below the class's shift are excluded).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			/* all bits up to the mode's bit, minus lower classes */
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}
951 | |
952 | /** |
953 | * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* |
954 | * @xfer_mode: XFER_* of interest |
955 | * |
956 | * Return matching xfer_shift for @xfer_mode. |
957 | * |
958 | * LOCKING: |
959 | * None. |
960 | * |
961 | * RETURNS: |
962 | * Matching xfer_shift, -1 if no match found. |
963 | */ |
964 | int ata_xfer_mode2shift(unsigned long xfer_mode) |
965 | { |
966 | const struct ata_xfer_ent *ent; |
967 | |
968 | for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) |
969 | if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) |
970 | return ent->shift; |
971 | return -1; |
972 | } |
973 | |
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by bit position in xfer_mask; must stay in sync with
	 * the ATA_SHIFT_* / ATA_NR_*_MODES layout */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
1019 | |
const char *sata_spd_string(unsigned int spd)
{
	/* table is indexed by (spd - 1); spd 0 means "no restriction/unknown" */
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};
	const unsigned int nr_spds = sizeof(spd_str) / sizeof(spd_str[0]);

	if (spd < 1 || spd > nr_spds)
		return "<unknown>";
	return spd_str[spd - 1];
}
1032 | |
1033 | /** |
1034 | * ata_dev_classify - determine device type based on ATA-spec signature |
1035 | * @tf: ATA taskfile register set for device to be identified |
1036 | * |
1037 | * Determine from taskfile register contents whether a device is |
1038 | * ATA or ATAPI, as per "Signature and persistence" section |
1039 | * of ATA/PI spec (volume 1, sect 5.14). |
1040 | * |
1041 | * LOCKING: |
1042 | * None. |
1043 | * |
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
1047 | */ |
1048 | unsigned int ata_dev_classify(const struct ata_taskfile *tf) |
1049 | { |
1050 | /* Apple's open source Darwin code hints that some devices only |
1051 | * put a proper signature into the LBA mid/high registers, |
1052 | * So, we only check those. It's sufficient for uniqueness. |
1053 | * |
1054 | * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate |
1055 | * signatures for ATA and ATAPI devices attached on SerialATA, |
1056 | * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA |
1057 | * spec has never mentioned about using different signatures |
1058 | * for ATA/ATAPI devices. Then, Serial ATA II: Port |
1059 | * Multiplier specification began to use 0x69/0x96 to identify |
1060 | * port multpliers and 0x3c/0xc3 to identify SEMB device. |
1061 | * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and |
1062 | * 0x69/0x96 shortly and described them as reserved for |
1063 | * SerialATA. |
1064 | * |
1065 | * We follow the current spec and consider that 0x69/0x96 |
1066 | * identifies a port multiplier and 0x3c/0xc3 a SEMB device. |
1067 | * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports |
1068 | * SEMB signature. This is worked around in |
1069 | * ata_dev_read_id(). |
1070 | */ |
1071 | if ((tf->lbam == 0) && (tf->lbah == 0)) { |
1072 | DPRINTK("found ATA device by sig\n"); |
1073 | return ATA_DEV_ATA; |
1074 | } |
1075 | |
1076 | if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { |
1077 | DPRINTK("found ATAPI device by sig\n"); |
1078 | return ATA_DEV_ATAPI; |
1079 | } |
1080 | |
1081 | if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { |
1082 | DPRINTK("found PMP device by sig\n"); |
1083 | return ATA_DEV_PMP; |
1084 | } |
1085 | |
1086 | if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { |
1087 | DPRINTK("found SEMB device by sig (could be ATA device)\n"); |
1088 | return ATA_DEV_SEMB; |
1089 | } |
1090 | |
1091 | DPRINTK("unknown device\n"); |
1092 | return ATA_DEV_UNKNOWN; |
1093 | } |
1094 | |
1095 | /** |
1096 | * ata_id_string - Convert IDENTIFY DEVICE page into string |
1097 | * @id: IDENTIFY DEVICE results we will examine |
1098 | * @s: string into which data is output |
1099 | * @ofs: offset into identify device page |
1100 | * @len: length of string to return. must be an even number. |
1101 | * |
1102 | * The strings in the IDENTIFY DEVICE page are broken up into |
1103 | * 16-bit chunks. Run through the string, and output each |
1104 | * 8-bit chunk linearly, regardless of platform. |
1105 | * |
1106 | * LOCKING: |
1107 | * caller. |
1108 | */ |
1109 | |
1110 | void ata_id_string(const u16 *id, unsigned char *s, |
1111 | unsigned int ofs, unsigned int len) |
1112 | { |
1113 | unsigned int c; |
1114 | |
1115 | BUG_ON(len & 1); |
1116 | |
1117 | while (len > 0) { |
1118 | c = id[ofs] >> 8; |
1119 | *s = c; |
1120 | s++; |
1121 | |
1122 | c = id[ofs] & 0xff; |
1123 | *s = c; |
1124 | s++; |
1125 | |
1126 | ofs++; |
1127 | len -= 2; |
1128 | } |
1129 | } |
1130 | |
1131 | /** |
1132 | * ata_id_c_string - Convert IDENTIFY DEVICE page into C string |
1133 | * @id: IDENTIFY DEVICE results we will examine |
1134 | * @s: string into which data is output |
1135 | * @ofs: offset into identify device page |
1136 | * @len: length of string to return. must be an odd number. |
1137 | * |
1138 | * This function is identical to ata_id_string except that it |
1139 | * trims trailing spaces and terminates the resulting string with |
1140 | * null. @len must be actual maximum length (even number) + 1. |
1141 | * |
1142 | * LOCKING: |
1143 | * caller. |
1144 | */ |
1145 | void ata_id_c_string(const u16 *id, unsigned char *s, |
1146 | unsigned int ofs, unsigned int len) |
1147 | { |
1148 | unsigned char *p; |
1149 | |
1150 | ata_id_string(id, s, ofs, len - 1); |
1151 | |
1152 | p = s + strnlen(s, len - 1); |
1153 | while (p > s && p[-1] == ' ') |
1154 | p--; |
1155 | *p = '\0'; |
1156 | } |
1157 | |
1158 | static u64 ata_id_n_sectors(const u16 *id) |
1159 | { |
1160 | if (ata_id_has_lba(id)) { |
1161 | if (ata_id_has_lba48(id)) |
1162 | return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); |
1163 | else |
1164 | return ata_id_u32(id, ATA_ID_LBA_CAPACITY); |
1165 | } else { |
1166 | if (ata_id_current_chs_valid(id)) |
1167 | return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * |
1168 | id[ATA_ID_CUR_SECTORS]; |
1169 | else |
1170 | return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * |
1171 | id[ATA_ID_SECTORS]; |
1172 | } |
1173 | } |
1174 | |
1175 | u64 ata_tf_to_lba48(const struct ata_taskfile *tf) |
1176 | { |
1177 | u64 sectors = 0; |
1178 | |
1179 | sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; |
1180 | sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; |
1181 | sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; |
1182 | sectors |= (tf->lbah & 0xff) << 16; |
1183 | sectors |= (tf->lbam & 0xff) << 8; |
1184 | sectors |= (tf->lbal & 0xff); |
1185 | |
1186 | return sectors; |
1187 | } |
1188 | |
1189 | u64 ata_tf_to_lba(const struct ata_taskfile *tf) |
1190 | { |
1191 | u64 sectors = 0; |
1192 | |
1193 | sectors |= (tf->device & 0x0f) << 24; |
1194 | sectors |= (tf->lbah & 0xff) << 16; |
1195 | sectors |= (tf->lbam & 0xff) << 8; |
1196 | sectors |= (tf->lbal & 0xff); |
1197 | |
1198 | return sectors; |
1199 | } |
1200 | |
1201 | /** |
1202 | * ata_read_native_max_address - Read native max address |
1203 | * @dev: target device |
1204 | * @max_sectors: out parameter for the result native max address |
1205 | * |
1206 | * Perform an LBA48 or LBA28 native size query upon the device in |
1207 | * question. |
1208 | * |
1209 | * RETURNS: |
1210 | * 0 on success, -EACCES if command is aborted by the drive. |
1211 | * -EIO on other errors. |
1212 | */ |
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* LBA48-capable drives need the EXT variant of the command */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	/* non-data command, LBA addressing mode selected in the device reg */
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		/* device-side ABORTED means the command was refused,
		 * not that the transport failed */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* the result taskfile holds the max addressable LBA;
	 * sector count is that value plus one */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* quirk: devices flagged ATA_HORKAGE_HPA_SIZE report a native
	 * size that is off by one, so compensate */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1251 | |
1252 | /** |
1253 | * ata_set_max_sectors - Set max sectors |
1254 | * @dev: target device |
1255 | * @new_sectors: new max sectors value to set for the device |
1256 | * |
1257 | * Set max sectors of @dev to @new_sectors. |
1258 | * |
1259 | * RETURNS: |
1260 | * 0 on success, -EACCES if command is aborted or denied (due to |
1261 | * previous non-volatile SET_MAX) by the drive. -EIO on other |
1262 | * errors. |
1263 | */ |
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest usable LBA, i.e. sector count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* address bits 24-47 go into the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 live in the device register's low nibble */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* low 24 address bits are packed the same way for both variants */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		/* ABORTED/IDNF from the device means denial (e.g. a
		 * previous non-volatile SET MAX), not an I/O failure */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1309 | |
1310 | /** |
1311 | * ata_hpa_resize - Resize a device with an HPA set |
1312 | * @dev: Device to resize |
1313 | * |
1314 | * Read the size of an LBA28 or LBA48 disk with HPA features and resize |
1315 | * it if required to the full size of the media. The caller must check |
1316 | * the drive has the HPA feature set enabled. |
1317 | * |
1318 | * RETURNS: |
1319 | * 0 on success, -errno on failure. |
1320 | */ |
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	/* unlock if requested globally (module param) or per-device */
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only for ATA disks with LBA and HPA enabled,
	 * and not for devices already known to have broken HPA support */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? (no hidden area, or we're not allowed to unlock) */
	if (native_sectors <= sectors || !unlock_hpa) {
		/* only report during probe (EHI_PRINTINFO) and only if
		 * the two sizes actually differ */
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so the cached capacity reflects the unlock */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1406 | |
1407 | /** |
1408 | * ata_dump_id - IDENTIFY DEVICE info debugging output |
1409 | * @id: IDENTIFY DEVICE page to dump |
1410 | * |
1411 | * Dump selected 16-bit words from the given IDENTIFY DEVICE |
1412 | * page. |
1413 | * |
1414 | * LOCKING: |
1415 | * caller. |
1416 | */ |
1417 | |
static inline void ata_dump_id(const u16 *id)
{
	/* Dump the IDENTIFY words most relevant to transfer-mode and
	 * capability negotiation (word numbers per the ATA IDENTIFY
	 * DEVICE layout; see the ATA/ATAPI specification). */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1445 | |
1446 | /** |
1447 | * ata_id_xfermask - Compute xfermask from the given IDENTIFY data |
1448 | * @id: IDENTIFY data to compute xfer mask from |
1449 | * |
1450 | * Compute the xfermask for this device. This is not as trivial |
1451 | * as it seems if we must consider early devices correctly. |
1452 | * |
1453 | * FIXME: pre IDE drive timing (do we care ?). |
1454 | * |
1455 | * LOCKING: |
1456 | * None. |
1457 | * |
1458 | * RETURNS: |
1459 | * Computed xfermask |
1460 | */ |
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/PIO4 support in its low bits;
		 * PIO0-2 are always assumed supported */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		/* any nonzero CFA field unlocks PIO5/MWDMA3; a value
		 * above one additionally unlocks PIO6/MWDMA4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* word 53 bit 2 indicates the UDMA modes word is valid */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1514 | |
1515 | static void ata_qc_complete_internal(struct ata_queued_cmd *qc) |
1516 | { |
1517 | struct completion *waiting = qc->private_data; |
1518 | |
1519 | complete(waiting); |
1520 | } |
1521 | |
1522 | /** |
1523 | * ata_exec_internal_sg - execute libata internal command |
1524 | * @dev: Device to which the command is sent |
1525 | * @tf: Taskfile registers for the command and the result |
1526 | * @cdb: CDB for packet command |
 * @dma_dir: Data transfer direction of the command
1528 | * @sgl: sg list for the data buffer of the command |
1529 | * @n_elem: Number of sg entries |
1530 | * @timeout: Timeout in msecs (0 for default) |
1531 | * |
1532 | * Executes libata internal command with timeout. @tf contains |
1533 | * command on entry and result on return. Timeout and error |
1534 | * conditions are reported via return value. No recovery action |
1535 | * is taken after a command times out. It's caller's duty to |
1536 | * clean up after timeout. |
1537 | * |
1538 | * LOCKING: |
1539 | * None. Should be called with kernel context, might sleep. |
1540 | * |
1541 | * RETURNS: |
1542 | * Zero on success, AC_ERR_* mask on failure |
1543 | */ |
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the tag must be free; a collision indicates a libata bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the link/port active state and clear it so the internal
	 * command runs alone; everything is restored before returning */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		/* total transfer length is the sum of all sg segments */
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	/* completion is signalled via ata_qc_complete_internal() */
	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* no explicit timeout given: use the module-wide override if
	 * set, otherwise derive a per-command default */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	/* drop EH exclusion while sleeping so EH can make progress */
	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	/* rc == 0 means the wait timed out */
	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up: hand the result taskfile back to the caller and
	 * restore the preempted link/port state saved above */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	/* feed timeout statistics back only for auto-derived timeouts */
	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1703 | |
1704 | /** |
1705 | * ata_exec_internal - execute libata internal command |
1706 | * @dev: Device to which the command is sent |
1707 | * @tf: Taskfile registers for the command and the result |
1708 | * @cdb: CDB for packet command |
 * @dma_dir: Data transfer direction of the command
1710 | * @buf: Data buffer of the command |
1711 | * @buflen: Length of data buffer |
1712 | * @timeout: Timeout in msecs (0 for default) |
1713 | * |
1714 | * Wrapper around ata_exec_internal_sg() which takes simple |
1715 | * buffer instead of sg list. |
1716 | * |
1717 | * LOCKING: |
1718 | * None. Should be called with kernel context, might sleep. |
1719 | * |
1720 | * RETURNS: |
1721 | * Zero on success, AC_ERR_* mask on failure |
1722 | */ |
1723 | unsigned ata_exec_internal(struct ata_device *dev, |
1724 | struct ata_taskfile *tf, const u8 *cdb, |
1725 | int dma_dir, void *buf, unsigned int buflen, |
1726 | unsigned long timeout) |
1727 | { |
1728 | struct scatterlist *psg = NULL, sg; |
1729 | unsigned int n_elem = 0; |
1730 | |
1731 | if (dma_dir != DMA_NONE) { |
1732 | WARN_ON(!buf); |
1733 | sg_init_one(&sg, buf, buflen); |
1734 | psg = &sg; |
1735 | n_elem++; |
1736 | } |
1737 | |
1738 | return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, |
1739 | timeout); |
1740 | } |
1741 | |
1742 | /** |
1743 | * ata_do_simple_cmd - execute simple internal command |
1744 | * @dev: Device to which the command is sent |
1745 | * @cmd: Opcode to execute |
1746 | * |
1747 | * Execute a 'simple' command, that only consists of the opcode |
1748 | * 'cmd' itself, without filling any other registers |
1749 | * |
1750 | * LOCKING: |
1751 | * Kernel thread context (may sleep). |
1752 | * |
1753 | * RETURNS: |
1754 | * Zero on success, AC_ERR_* mask on failure |
1755 | */ |
1756 | unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) |
1757 | { |
1758 | struct ata_taskfile tf; |
1759 | |
1760 | ata_tf_init(dev, &tf); |
1761 | |
1762 | tf.command = cmd; |
1763 | tf.flags |= ATA_TFLAG_DEVICE; |
1764 | tf.protocol = ATA_PROT_NODATA; |
1765 | |
1766 | return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
1767 | } |
1768 | |
1769 | /** |
1770 | * ata_pio_need_iordy - check if iordy needed |
1771 | * @adev: ATA device |
1772 | * |
1773 | * Check if the current speed of the device requires IORDY. Used |
1774 | * by various controllers for chip configuration. |
1775 | */ |
1776 | unsigned int ata_pio_need_iordy(const struct ata_device *adev) |
1777 | { |
1778 | /* Don't set IORDY if we're preparing for reset. IORDY may |
1779 | * lead to controller lock up on certain controllers if the |
1780 | * port is not occupied. See bko#11703 for details. |
1781 | */ |
1782 | if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) |
1783 | return 0; |
1784 | /* Controller doesn't support IORDY. Probably a pointless |
1785 | * check as the caller should know this. |
1786 | */ |
1787 | if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) |
1788 | return 0; |
1789 | /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ |
1790 | if (ata_id_is_cfa(adev->id) |
1791 | && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) |
1792 | return 0; |
1793 | /* PIO3 and higher it is mandatory */ |
1794 | if (adev->pio_mode > XFER_PIO_2) |
1795 | return 1; |
1796 | /* We turn it on when possible */ |
1797 | if (ata_id_has_iordy(adev->id)) |
1798 | return 1; |
1799 | return 0; |
1800 | } |
1801 | |
1802 | /** |
1803 | * ata_pio_mask_no_iordy - Return the non IORDY mask |
1804 | * @adev: ATA device |
1805 | * |
1806 | * Compute the highest mode possible if we are not using iordy. Return |
1807 | * -1 if no iordy mode is available. |
1808 | */ |
1809 | static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) |
1810 | { |
1811 | /* If we have no drive specific rule, then PIO 2 is non IORDY */ |
1812 | if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ |
1813 | u16 pio = adev->id[ATA_ID_EIDE_PIO]; |
1814 | /* Is the speed faster than the drive allows non IORDY ? */ |
1815 | if (pio) { |
1816 | /* This is cycle times not frequency - watch the logic! */ |
1817 | if (pio > 240) /* PIO2 is 240nS per cycle */ |
1818 | return 3 << ATA_SHIFT_PIO; |
1819 | return 7 << ATA_SHIFT_PIO; |
1820 | } |
1821 | } |
1822 | return 3 << ATA_SHIFT_PIO; |
1823 | } |
1824 | |
1825 | /** |
1826 | * ata_do_dev_read_id - default ID read method |
1827 | * @dev: device |
1828 | * @tf: proposed taskfile |
1829 | * @id: data buffer |
1830 | * |
1831 | * Issue the identify taskfile and hand back the buffer containing |
1832 | * identify data. For some RAID controllers and for pre ATA devices |
1833 | * this function is wrapped or replaced by the driver |
1834 | */ |
1835 | unsigned int ata_do_dev_read_id(struct ata_device *dev, |
1836 | struct ata_taskfile *tf, u16 *id) |
1837 | { |
1838 | return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, |
1839 | id, sizeof(id[0]) * ATA_ID_WORDS, 0); |
1840 | } |
1841 | |
1842 | /** |
1843 | * ata_dev_read_id - Read ID data from the specified device |
1844 | * @dev: target device |
1845 | * @p_class: pointer to class of the target device (may be changed) |
1846 | * @flags: ATA_READID_* flags |
1847 | * @id: buffer to read IDENTIFY data into |
1848 | * |
1849 | * Read ID data from the specified device. ATA_CMD_ID_ATA is |
1850 | * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI |
1851 | * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS |
1852 | * for pre-ATA4 drives. |
1853 | * |
1854 | * FIXME: ATA_CMD_ID_ATA is optional for early drives and right |
1855 | * now we abort if we hit that case. |
1856 | * |
1857 | * LOCKING: |
1858 | * Kernel thread context (may sleep) |
1859 | * |
1860 | * RETURNS: |
1861 | * 0 on success, -errno otherwise. |
1862 | */ |
1863 | int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, |
1864 | unsigned int flags, u16 *id) |
1865 | { |
1866 | struct ata_port *ap = dev->link->ap; |
1867 | unsigned int class = *p_class; |
1868 | struct ata_taskfile tf; |
1869 | unsigned int err_mask = 0; |
1870 | const char *reason; |
1871 | bool is_semb = class == ATA_DEV_SEMB; |
1872 | int may_fallback = 1, tried_spinup = 0; |
1873 | int rc; |
1874 | |
1875 | if (ata_msg_ctl(ap)) |
1876 | ata_dev_dbg(dev, "%s: ENTER\n", __func__); |
1877 | |
1878 | retry: |
1879 | ata_tf_init(dev, &tf); |
1880 | |
1881 | switch (class) { |
1882 | case ATA_DEV_SEMB: |
1883 | class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ |
1884 | case ATA_DEV_ATA: |
1885 | tf.command = ATA_CMD_ID_ATA; |
1886 | break; |
1887 | case ATA_DEV_ATAPI: |
1888 | tf.command = ATA_CMD_ID_ATAPI; |
1889 | break; |
1890 | default: |
1891 | rc = -ENODEV; |
1892 | reason = "unsupported class"; |
1893 | goto err_out; |
1894 | } |
1895 | |
1896 | tf.protocol = ATA_PROT_PIO; |
1897 | |
1898 | /* Some devices choke if TF registers contain garbage. Make |
1899 | * sure those are properly initialized. |
1900 | */ |
1901 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
1902 | |
1903 | /* Device presence detection is unreliable on some |
1904 | * controllers. Always poll IDENTIFY if available. |
1905 | */ |
1906 | tf.flags |= ATA_TFLAG_POLLING; |
1907 | |
1908 | if (ap->ops->read_id) |
1909 | err_mask = ap->ops->read_id(dev, &tf, id); |
1910 | else |
1911 | err_mask = ata_do_dev_read_id(dev, &tf, id); |
1912 | |
1913 | if (err_mask) { |
1914 | if (err_mask & AC_ERR_NODEV_HINT) { |
1915 | ata_dev_dbg(dev, "NODEV after polling detection\n"); |
1916 | return -ENOENT; |
1917 | } |
1918 | |
1919 | if (is_semb) { |
1920 | ata_dev_info(dev, |
1921 | "IDENTIFY failed on device w/ SEMB sig, disabled\n"); |
1922 | /* SEMB is not supported yet */ |
1923 | *p_class = ATA_DEV_SEMB_UNSUP; |
1924 | return 0; |
1925 | } |
1926 | |
1927 | if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { |
1928 | /* Device or controller might have reported |
1929 | * the wrong device class. Give a shot at the |
1930 | * other IDENTIFY if the current one is |
1931 | * aborted by the device. |
1932 | */ |
1933 | if (may_fallback) { |
1934 | may_fallback = 0; |
1935 | |
1936 | if (class == ATA_DEV_ATA) |
1937 | class = ATA_DEV_ATAPI; |
1938 | else |
1939 | class = ATA_DEV_ATA; |
1940 | goto retry; |
1941 | } |
1942 | |
1943 | /* Control reaches here iff the device aborted |
1944 | * both flavors of IDENTIFYs which happens |
1945 | * sometimes with phantom devices. |
1946 | */ |
1947 | ata_dev_dbg(dev, |
1948 | "both IDENTIFYs aborted, assuming NODEV\n"); |
1949 | return -ENOENT; |
1950 | } |
1951 | |
1952 | rc = -EIO; |
1953 | reason = "I/O error"; |
1954 | goto err_out; |
1955 | } |
1956 | |
1957 | if (dev->horkage & ATA_HORKAGE_DUMP_ID) { |
1958 | ata_dev_dbg(dev, "dumping IDENTIFY data, " |
1959 | "class=%d may_fallback=%d tried_spinup=%d\n", |
1960 | class, may_fallback, tried_spinup); |
1961 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, |
1962 | 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); |
1963 | } |
1964 | |
1965 | /* Falling back doesn't make sense if ID data was read |
1966 | * successfully at least once. |
1967 | */ |
1968 | may_fallback = 0; |
1969 | |
1970 | swap_buf_le16(id, ATA_ID_WORDS); |
1971 | |
1972 | /* sanity check */ |
1973 | rc = -EINVAL; |
1974 | reason = "device reports invalid type"; |
1975 | |
1976 | if (class == ATA_DEV_ATA) { |
1977 | if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) |
1978 | goto err_out; |
1979 | if (ap->host->flags & ATA_HOST_IGNORE_ATA && |
1980 | ata_id_is_ata(id)) { |
1981 | ata_dev_dbg(dev, |
1982 | "host indicates ignore ATA devices, ignored\n"); |
1983 | return -ENOENT; |
1984 | } |
1985 | } else { |
1986 | if (ata_id_is_ata(id)) |
1987 | goto err_out; |
1988 | } |
1989 | |
1990 | if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { |
1991 | tried_spinup = 1; |
1992 | /* |
1993 | * Drive powered-up in standby mode, and requires a specific |
1994 | * SET_FEATURES spin-up subcommand before it will accept |
1995 | * anything other than the original IDENTIFY command. |
1996 | */ |
1997 | err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); |
1998 | if (err_mask && id[2] != 0x738c) { |
1999 | rc = -EIO; |
2000 | reason = "SPINUP failed"; |
2001 | goto err_out; |
2002 | } |
2003 | /* |
2004 | * If the drive initially returned incomplete IDENTIFY info, |
2005 | * we now must reissue the IDENTIFY command. |
2006 | */ |
2007 | if (id[2] == 0x37c8) |
2008 | goto retry; |
2009 | } |
2010 | |
2011 | if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { |
2012 | /* |
2013 | * The exact sequence expected by certain pre-ATA4 drives is: |
2014 | * SRST RESET |
2015 | * IDENTIFY (optional in early ATA) |
2016 | * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) |
2017 | * anything else.. |
2018 | * Some drives were very specific about that exact sequence. |
2019 | * |
2020 | * Note that ATA4 says lba is mandatory so the second check |
2021 | * should never trigger. |
2022 | */ |
2023 | if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { |
2024 | err_mask = ata_dev_init_params(dev, id[3], id[6]); |
2025 | if (err_mask) { |
2026 | rc = -EIO; |
2027 | reason = "INIT_DEV_PARAMS failed"; |
2028 | goto err_out; |
2029 | } |
2030 | |
2031 | /* current CHS translation info (id[53-58]) might be |
2032 | * changed. reread the identify device info. |
2033 | */ |
2034 | flags &= ~ATA_READID_POSTRESET; |
2035 | goto retry; |
2036 | } |
2037 | } |
2038 | |
2039 | *p_class = class; |
2040 | |
2041 | return 0; |
2042 | |
2043 | err_out: |
2044 | if (ata_msg_warn(ap)) |
2045 | ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", |
2046 | reason, err_mask); |
2047 | return rc; |
2048 | } |
2049 | |
2050 | static int ata_do_link_spd_horkage(struct ata_device *dev) |
2051 | { |
2052 | struct ata_link *plink = ata_dev_phys_link(dev); |
2053 | u32 target, target_limit; |
2054 | |
2055 | if (!sata_scr_valid(plink)) |
2056 | return 0; |
2057 | |
2058 | if (dev->horkage & ATA_HORKAGE_1_5_GBPS) |
2059 | target = 1; |
2060 | else |
2061 | return 0; |
2062 | |
2063 | target_limit = (1 << target) - 1; |
2064 | |
2065 | /* if already on stricter limit, no need to push further */ |
2066 | if (plink->sata_spd_limit <= target_limit) |
2067 | return 0; |
2068 | |
2069 | plink->sata_spd_limit = target_limit; |
2070 | |
2071 | /* Request another EH round by returning -EAGAIN if link is |
2072 | * going faster than the target speed. Forward progress is |
2073 | * guaranteed by setting sata_spd_limit to target_limit above. |
2074 | */ |
2075 | if (plink->sata_spd > target) { |
2076 | ata_dev_info(dev, "applying link speed limit horkage to %s\n", |
2077 | sata_spd_string(target)); |
2078 | return -EAGAIN; |
2079 | } |
2080 | return 0; |
2081 | } |
2082 | |
2083 | static inline u8 ata_dev_knobble(struct ata_device *dev) |
2084 | { |
2085 | struct ata_port *ap = dev->link->ap; |
2086 | |
2087 | if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) |
2088 | return 0; |
2089 | |
2090 | return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); |
2091 | } |
2092 | |
2093 | static int ata_dev_config_ncq(struct ata_device *dev, |
2094 | char *desc, size_t desc_sz) |
2095 | { |
2096 | struct ata_port *ap = dev->link->ap; |
2097 | int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); |
2098 | unsigned int err_mask; |
2099 | char *aa_desc = ""; |
2100 | |
2101 | if (!ata_id_has_ncq(dev->id)) { |
2102 | desc[0] = '\0'; |
2103 | return 0; |
2104 | } |
2105 | if (dev->horkage & ATA_HORKAGE_NONCQ) { |
2106 | snprintf(desc, desc_sz, "NCQ (not used)"); |
2107 | return 0; |
2108 | } |
2109 | if (ap->flags & ATA_FLAG_NCQ) { |
2110 | hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); |
2111 | dev->flags |= ATA_DFLAG_NCQ; |
2112 | } |
2113 | |
2114 | if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && |
2115 | (ap->flags & ATA_FLAG_FPDMA_AA) && |
2116 | ata_id_has_fpdma_aa(dev->id)) { |
2117 | err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, |
2118 | SATA_FPDMA_AA); |
2119 | if (err_mask) { |
2120 | ata_dev_err(dev, |
2121 | "failed to enable AA (error_mask=0x%x)\n", |
2122 | err_mask); |
2123 | if (err_mask != AC_ERR_DEV) { |
2124 | dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; |
2125 | return -EIO; |
2126 | } |
2127 | } else |
2128 | aa_desc = ", AA"; |
2129 | } |
2130 | |
2131 | if (hdepth >= ddepth) |
2132 | snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); |
2133 | else |
2134 | snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, |
2135 | ddepth, aa_desc); |
2136 | return 0; |
2137 | } |
2138 | |
2139 | /** |
2140 | * ata_dev_configure - Configure the specified ATA/ATAPI device |
2141 | * @dev: Target device to configure |
2142 | * |
2143 | * Configure @dev according to @dev->id. Generic and low-level |
2144 | * driver specific fixups are also applied. |
2145 | * |
2146 | * LOCKING: |
2147 | * Kernel thread context (may sleep) |
2148 | * |
2149 | * RETURNS: |
2150 | * 0 on success, -errno otherwise |
2151 | */ |
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* Nothing to configure on a disabled/absent device. */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

	/* set horkage: blacklist entries first, then user overrides */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	/* ATAPI may be off globally (module param) or per-port (flag). */
	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	/* may return -EAGAIN to request another EH round */
	rc = ata_do_link_spd_horkage(dev);
	if (rc)
		return rc;

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev,
			    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			    "85:%04x 86:%04x 87:%04x 88:%04x\n",
			    __func__,
			    id[49], id[82], id[83], id[84],
			    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
	"supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting
		 * (word 47 low byte = max, word 59 bit 8 = valid,
		 * word 59 low byte = current count) */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[24];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* 1<<28 sectors is the LBA28 capacity limit */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
			if (rc)
				return rc;

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders = id[1];
			dev->heads = id[3];
			dev->sectors = id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads = id[55];
				dev->sectors = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_info(dev, "%s: %s, %s, max %s\n",
					     revbuf, modelbuf, fwrevbuf,
					     ata_mode_string(xfer_mask));
				ata_dev_info(dev,
					     "%llu sectors, multi %u, CHS %u/%u/%u\n",
					     (unsigned long long)dev->n_sectors,
					     dev->multi_count, dev->cylinders,
					     dev->heads, dev->sectors);
			}
		}

		/* Check and mark DevSlp capability. Get DevSlp timing variables
		 * from SATA Settings page of Identify Device Data Log.
		 */
		if (ata_id_has_devslp(dev->id)) {
			u8 *sata_setting = ap->sector_buf;
			int i, j;

			dev->flags |= ATA_DFLAG_DEVSLP;
			err_mask = ata_read_log_page(dev,
						     ATA_LOG_SATA_ID_DEV_DATA,
						     ATA_LOG_SATA_SETTINGS,
						     sata_setting,
						     1);
			if (err_mask)
				ata_dev_dbg(dev,
					    "failed to get Identify Device Data, Emask 0x%x\n",
					    err_mask);
			else
				for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
					j = ATA_LOG_DEVSLP_OFFSET + i;
					dev->devslp_timing[i] = sata_setting[j];
				}
		}

		/* NOTE(review): 16 presumably matches the 16-byte ATA
		 * PASS-THROUGH CDB used for SCSI translation -- confirm
		 * against libata-scsi. */
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support. If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* give the LLDD a chance to apply its own fixups last */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, "         contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
	return rc;
}
2475 | |
2476 | /** |
2477 | * ata_cable_40wire - return 40 wire cable type |
2478 | * @ap: port |
2479 | * |
2480 | * Helper method for drivers which want to hardwire 40 wire cable |
2481 | * detection. |
2482 | */ |
2483 | |
2484 | int ata_cable_40wire(struct ata_port *ap) |
2485 | { |
2486 | return ATA_CBL_PATA40; |
2487 | } |
2488 | |
2489 | /** |
2490 | * ata_cable_80wire - return 80 wire cable type |
2491 | * @ap: port |
2492 | * |
2493 | * Helper method for drivers which want to hardwire 80 wire cable |
2494 | * detection. |
2495 | */ |
2496 | |
2497 | int ata_cable_80wire(struct ata_port *ap) |
2498 | { |
2499 | return ATA_CBL_PATA80; |
2500 | } |
2501 | |
2502 | /** |
2503 | * ata_cable_unknown - return unknown PATA cable. |
2504 | * @ap: port |
2505 | * |
2506 | * Helper method for drivers which have no PATA cable detection. |
2507 | */ |
2508 | |
2509 | int ata_cable_unknown(struct ata_port *ap) |
2510 | { |
2511 | return ATA_CBL_PATA_UNK; |
2512 | } |
2513 | |
2514 | /** |
2515 | * ata_cable_ignore - return ignored PATA cable. |
2516 | * @ap: port |
2517 | * |
2518 | * Helper method for drivers which don't use cable type to limit |
2519 | * transfer mode. |
2520 | */ |
2521 | int ata_cable_ignore(struct ata_port *ap) |
2522 | { |
2523 | return ATA_CBL_PATA_IGN; |
2524 | } |
2525 | |
2526 | /** |
2527 | * ata_cable_sata - return SATA cable type |
2528 | * @ap: port |
2529 | * |
2530 | * Helper method for drivers which have SATA cables |
2531 | */ |
2532 | |
2533 | int ata_cable_sata(struct ata_port *ap) |
2534 | { |
2535 | return ATA_CBL_SATA; |
2536 | } |
2537 | |
2538 | /** |
2539 | * ata_bus_probe - Reset and probe ATA bus |
2540 | * @ap: Bus to probe |
2541 | * |
2542 | * Master ATA bus probing function. Initiates a hardware-dependent |
2543 | * bus reset, then attempts to identify any devices found on |
2544 | * the bus. |
2545 | * |
2546 | * LOCKING: |
2547 | * PCI/etc. bus probe sem. |
2548 | * |
2549 | * RETURNS: |
2550 | * Zero on success, negative errno otherwise. |
2551 | */ |
2552 | |
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	/* each device gets ATA_PROBE_MAX_TRIES attempts before being
	 * permanently disabled (see the fail: path below) */
	ata_for_each_dev(dev, &ap->link, ALL)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

retry:
	ata_for_each_dev(dev, &ap->link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		/* NOTE(review): 0xff appears to mean "no DMA mode
		 * selected yet" -- confirm against mode-setting code */
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* stash the classes the reset reported; each device's class is
	 * restored below only while it still has probe tries left */
	ata_for_each_dev(dev, &ap->link, ALL) {
		if (dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of
	 * the reported cable types and sensed types. When SATA
	 * drives indicate we have a bridge, we don't know which end
	 * of the link the bridge is which is a problem.
	 */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_for_each_dev(dev, &ap->link, ENABLED) {
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success iff at least one device is left enabled */
	ata_for_each_dev(dev, &ap->link, ENABLED)
		return 0;

	return -ENODEV;

fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through -- shares the last-chance slowdown below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link, 0);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2674 | |
2675 | /** |
2676 | * sata_print_link_status - Print SATA link status |
2677 | * @link: SATA link to printk link status about |
2678 | * |
2679 | * This function prints link speed and status of a SATA link. |
2680 | * |
2681 | * LOCKING: |
2682 | * None. |
2683 | */ |
2684 | static void sata_print_link_status(struct ata_link *link) |
2685 | { |
2686 | u32 sstatus, scontrol, tmp; |
2687 | |
2688 | if (sata_scr_read(link, SCR_STATUS, &sstatus)) |
2689 | return; |
2690 | sata_scr_read(link, SCR_CONTROL, &scontrol); |
2691 | |
2692 | if (ata_phys_link_online(link)) { |
2693 | tmp = (sstatus >> 4) & 0xf; |
2694 | ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", |
2695 | sata_spd_string(tmp), sstatus, scontrol); |
2696 | } else { |
2697 | ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", |
2698 | sstatus, scontrol); |
2699 | } |
2700 | } |
2701 | |
2702 | /** |
2703 | * ata_dev_pair - return other device on cable |
2704 | * @adev: device |
2705 | * |
2706 | * Obtain the other device on the same cable, or if none is |
2707 | * present NULL is returned |
2708 | */ |
2709 | |
2710 | struct ata_device *ata_dev_pair(struct ata_device *adev) |
2711 | { |
2712 | struct ata_link *link = adev->link; |
2713 | struct ata_device *pair = &link->device[1 - adev->devno]; |
2714 | if (!ata_dev_enabled(pair)) |
2715 | return NULL; |
2716 | return pair; |
2717 | } |
2718 | |
2719 | /** |
2720 | * sata_down_spd_limit - adjust SATA spd limit downward |
2721 | * @link: Link to adjust SATA spd limit for |
2722 | * @spd_limit: Additional limit |
2723 | * |
2724 | * Adjust SATA spd limit of @link downward. Note that this |
2725 | * function only adjusts the limit. The change must be applied |
2726 | * using sata_set_spd(). |
2727 | * |
2728 | * If @spd_limit is non-zero, the speed is limited to equal to or |
2729 | * lower than @spd_limit if such speed is supported. If |
2730 | * @spd_limit is slower than any supported speed, only the lowest |
2731 | * supported speed is allowed. |
2732 | * |
2733 | * LOCKING: |
2734 | * Inherited from caller. |
2735 | * |
2736 | * RETURNS: |
2737 | * 0 on success, negative errno on failure |
2738 | */ |
2739 | int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) |
2740 | { |
2741 | u32 sstatus, spd, mask; |
2742 | int rc, bit; |
2743 | |
2744 | if (!sata_scr_valid(link)) |
2745 | return -EOPNOTSUPP; |
2746 | |
2747 | /* If SCR can be read, use it to determine the current SPD. |
2748 | * If not, use cached value in link->sata_spd. |
2749 | */ |
2750 | rc = sata_scr_read(link, SCR_STATUS, &sstatus); |
2751 | if (rc == 0 && ata_sstatus_online(sstatus)) |
2752 | spd = (sstatus >> 4) & 0xf; |
2753 | else |
2754 | spd = link->sata_spd; |
2755 | |
2756 | mask = link->sata_spd_limit; |
2757 | if (mask <= 1) |
2758 | return -EINVAL; |
2759 | |
2760 | /* unconditionally mask off the highest bit */ |
2761 | bit = fls(mask) - 1; |
2762 | mask &= ~(1 << bit); |
2763 | |
2764 | /* Mask off all speeds higher than or equal to the current |
2765 | * one. Force 1.5Gbps if current SPD is not available. |
2766 | */ |
2767 | if (spd > 1) |
2768 | mask &= (1 << (spd - 1)) - 1; |
2769 | else |
2770 | mask &= 1; |
2771 | |
2772 | /* were we already at the bottom? */ |
2773 | if (!mask) |
2774 | return -EINVAL; |
2775 | |
2776 | if (spd_limit) { |
2777 | if (mask & ((1 << spd_limit) - 1)) |
2778 | mask &= (1 << spd_limit) - 1; |
2779 | else { |
2780 | bit = ffs(mask) - 1; |
2781 | mask = 1 << bit; |
2782 | } |
2783 | } |
2784 | |
2785 | link->sata_spd_limit = mask; |
2786 | |
2787 | ata_link_warn(link, "limiting SATA link speed to %s\n", |
2788 | sata_spd_string(fls(mask))); |
2789 | |
2790 | return 0; |
2791 | } |
2792 | |
2793 | static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) |
2794 | { |
2795 | struct ata_link *host_link = &link->ap->link; |
2796 | u32 limit, target, spd; |
2797 | |
2798 | limit = link->sata_spd_limit; |
2799 | |
2800 | /* Don't configure downstream link faster than upstream link. |
2801 | * It doesn't speed up anything and some PMPs choke on such |
2802 | * configuration. |
2803 | */ |
2804 | if (!ata_is_host_link(link) && host_link->sata_spd) |
2805 | limit &= (1 << host_link->sata_spd) - 1; |
2806 | |
2807 | if (limit == UINT_MAX) |
2808 | target = 0; |
2809 | else |
2810 | target = fls(limit); |
2811 | |
2812 | spd = (*scontrol >> 4) & 0xf; |
2813 | *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); |
2814 | |
2815 | return spd != target; |
2816 | } |
2817 | |
2818 | /** |
2819 | * sata_set_spd_needed - is SATA spd configuration needed |
2820 | * @link: Link in question |
2821 | * |
2822 | * Test whether the spd limit in SControl matches |
2823 | * @link->sata_spd_limit. This function is used to determine |
2824 | * whether hardreset is necessary to apply SATA spd |
2825 | * configuration. |
2826 | * |
2827 | * LOCKING: |
2828 | * Inherited from caller. |
2829 | * |
2830 | * RETURNS: |
2831 | * 1 if SATA spd configuration is needed, 0 otherwise. |
2832 | */ |
2833 | static int sata_set_spd_needed(struct ata_link *link) |
2834 | { |
2835 | u32 scontrol; |
2836 | |
2837 | if (sata_scr_read(link, SCR_CONTROL, &scontrol)) |
2838 | return 1; |
2839 | |
2840 | return __sata_set_spd_needed(link, &scontrol); |
2841 | } |
2842 | |
2843 | /** |
2844 | * sata_set_spd - set SATA spd according to spd limit |
2845 | * @link: Link to set SATA spd for |
2846 | * |
2847 | * Set SATA spd of @link according to sata_spd_limit. |
2848 | * |
2849 | * LOCKING: |
2850 | * Inherited from caller. |
2851 | * |
2852 | * RETURNS: |
2853 | * 0 if spd doesn't need to be changed, 1 if spd has been |
2854 | * changed. Negative errno if SCR registers are inaccessible. |
2855 | */ |
2856 | int sata_set_spd(struct ata_link *link) |
2857 | { |
2858 | u32 scontrol; |
2859 | int rc; |
2860 | |
2861 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
2862 | return rc; |
2863 | |
2864 | if (!__sata_set_spd_needed(link, &scontrol)) |
2865 | return 0; |
2866 | |
2867 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) |
2868 | return rc; |
2869 | |
2870 | return 1; |
2871 | } |
2872 | |
2873 | /* |
2874 | * This mode timing computation functionality is ported over from |
2875 | * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik |
2876 | */ |
2877 | /* |
2878 | * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). |
2879 | * These were taken from ATA/ATAPI-6 standard, rev 0a, except |
2880 | * for UDMA6, which is currently supported only by Maxtor drives. |
2881 | * |
2882 | * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. |
2883 | */ |
2884 | |
static const struct ata_timing ata_timing[] = {
	/* Columns after the mode are nanosecond values; judging by
	 * ata_timing_quantize() the order is presumably setup, act8b,
	 * rec8b, cyc8b, active, recover, dmack_hold, cycle, udma --
	 * confirm against struct ata_timing's field order. */
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 },

	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 },

	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 },

	/* UDMA modes carry only the last (udma cycle) value. */
/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },

	/* 0xFF sentinel terminates lookup in ata_timing_find_mode() */
	{ 0xFF }
};
2916 | |
/*
 * ENOUGH(v, unit): smallest count of 'unit's covering v (ceiling
 * division, for v >= 1).  EZ(v, unit): same, but maps v == 0
 * (unspecified timing) to 0.
 * NOTE(review): EZ evaluates @v twice -- only safe for side-effect-free
 * arguments, as used in ata_timing_quantize() below.
 */
#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
2919 | |
/*
 * Convert timing @t (nanoseconds) into clock counts in @q: every field
 * is scaled by 1000 and ceiling-divided by the bus clock period @T
 * (@UT for the udma field), with zero "unspecified" values kept zero.
 * @q may alias @t (ata_timing_compute() passes the same pointer twice).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->dmack_hold = EZ(t->dmack_hold * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2932 | |
2933 | void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, |
2934 | struct ata_timing *m, unsigned int what) |
2935 | { |
2936 | if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); |
2937 | if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); |
2938 | if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); |
2939 | if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); |
2940 | if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); |
2941 | if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); |
2942 | if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); |
2943 | if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); |
2944 | if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); |
2945 | } |
2946 | |
2947 | const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) |
2948 | { |
2949 | const struct ata_timing *t = ata_timing; |
2950 | |
2951 | while (xfer_mode > t->mode) |
2952 | t++; |
2953 | |
2954 | if (xfer_mode == t->mode) |
2955 | return t; |
2956 | |
2957 | WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", |
2958 | __func__, xfer_mode); |
2959 | |
2960 | return NULL; |
2961 | } |
2962 | |
/**
 *	ata_timing_compute - compute bus-clock timings for a transfer mode
 *	@adev: target device (its IDENTIFY data may supply EIDE overrides)
 *	@speed: XFER_* transfer mode to compute timings for
 *	@t: out parameter, resulting timing in bus clock counts
 *	@UT: UDMA clock period in ns; @T: PIO/MWDMA clock period in ns
 *
 *	RETURNS:
 *	0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const u16 *id = adev->id;
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		/* take the slower of table timing and drive-reported timing */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3038 | |
3039 | /** |
3040 | * ata_timing_cycle2mode - find xfer mode for the specified cycle duration |
3041 | * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. |
3042 | * @cycle: cycle duration in ns |
3043 | * |
3044 | * Return matching xfer mode for @cycle. The returned mode is of |
3045 | * the transfer type specified by @xfer_shift. If @cycle is too |
3046 | * slow for @xfer_shift, 0xff is returned. If @cycle is faster |
3047 | * than the fastest known mode, the fasted mode is returned. |
3048 | * |
3049 | * LOCKING: |
3050 | * None. |
3051 | * |
3052 | * RETURNS: |
3053 | * Matching xfer_mode, 0xff if no match found. |
3054 | */ |
3055 | u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) |
3056 | { |
3057 | u8 base_mode = 0xff, last_mode = 0xff; |
3058 | const struct ata_xfer_ent *ent; |
3059 | const struct ata_timing *t; |
3060 | |
3061 | for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) |
3062 | if (ent->shift == xfer_shift) |
3063 | base_mode = ent->base; |
3064 | |
3065 | for (t = ata_timing_find_mode(base_mode); |
3066 | t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { |
3067 | unsigned short this_cycle; |
3068 | |
3069 | switch (xfer_shift) { |
3070 | case ATA_SHIFT_PIO: |
3071 | case ATA_SHIFT_MWDMA: |
3072 | this_cycle = t->cycle; |
3073 | break; |
3074 | case ATA_SHIFT_UDMA: |
3075 | this_cycle = t->udma; |
3076 | break; |
3077 | default: |
3078 | return 0xff; |
3079 | } |
3080 | |
3081 | if (cycle > this_cycle) |
3082 | break; |
3083 | |
3084 | last_mode = t->mode; |
3085 | } |
3086 | |
3087 | return last_mode; |
3088 | } |
3089 | |
3090 | /** |
3091 | * ata_down_xfermask_limit - adjust dev xfer masks downward |
3092 | * @dev: Device to adjust xfer masks |
3093 | * @sel: ATA_DNXFER_* selector |
3094 | * |
3095 | * Adjust xfer masks of @dev downward. Note that this function |
3096 | * does not apply the change. Invoking ata_set_mode() afterwards |
3097 | * will apply the limit. |
3098 | * |
3099 | * LOCKING: |
3100 | * Inherited from caller. |
3101 | * |
3102 | * RETURNS: |
3103 | * 0 on success, negative errno on failure |
3104 | */ |
3105 | int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) |
3106 | { |
3107 | char buf[32]; |
3108 | unsigned long orig_mask, xfer_mask; |
3109 | unsigned long pio_mask, mwdma_mask, udma_mask; |
3110 | int quiet, highbit; |
3111 | |
3112 | quiet = !!(sel & ATA_DNXFER_QUIET); |
3113 | sel &= ~ATA_DNXFER_QUIET; |
3114 | |
3115 | xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, |
3116 | dev->mwdma_mask, |
3117 | dev->udma_mask); |
3118 | ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); |
3119 | |
3120 | switch (sel) { |
3121 | case ATA_DNXFER_PIO: |
3122 | highbit = fls(pio_mask) - 1; |
3123 | pio_mask &= ~(1 << highbit); |
3124 | break; |
3125 | |
3126 | case ATA_DNXFER_DMA: |
3127 | if (udma_mask) { |
3128 | highbit = fls(udma_mask) - 1; |
3129 | udma_mask &= ~(1 << highbit); |
3130 | if (!udma_mask) |
3131 | return -ENOENT; |
3132 | } else if (mwdma_mask) { |
3133 | highbit = fls(mwdma_mask) - 1; |
3134 | mwdma_mask &= ~(1 << highbit); |
3135 | if (!mwdma_mask) |
3136 | return -ENOENT; |
3137 | } |
3138 | break; |
3139 | |
3140 | case ATA_DNXFER_40C: |
3141 | udma_mask &= ATA_UDMA_MASK_40C; |
3142 | break; |
3143 | |
3144 | case ATA_DNXFER_FORCE_PIO0: |
3145 | pio_mask &= 1; |
3146 | case ATA_DNXFER_FORCE_PIO: |
3147 | mwdma_mask = 0; |
3148 | udma_mask = 0; |
3149 | break; |
3150 | |
3151 | default: |
3152 | BUG(); |
3153 | } |
3154 | |
3155 | xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); |
3156 | |
3157 | if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) |
3158 | return -ENOENT; |
3159 | |
3160 | if (!quiet) { |
3161 | if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) |
3162 | snprintf(buf, sizeof(buf), "%s:%s", |
3163 | ata_mode_string(xfer_mask), |
3164 | ata_mode_string(xfer_mask & ATA_MASK_PIO)); |
3165 | else |
3166 | snprintf(buf, sizeof(buf), "%s", |
3167 | ata_mode_string(xfer_mask)); |
3168 | |
3169 | ata_dev_warn(dev, "limiting speed to %s\n", buf); |
3170 | } |
3171 | |
3172 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, |
3173 | &dev->udma_mask); |
3174 | |
3175 | return 0; |
3176 | } |
3177 | |
/*
 *	ata_dev_set_mode - program xfer mode on the device and revalidate
 *	@dev: device whose transfer mode is being programmed
 *
 *	Issue SET FEATURES - XFER MODE for @dev (skipped on SATA when the
 *	NOSETXFER horkage applies), then revalidate the device.  Several
 *	classes of broken/old devices error the command even though the
 *	mode is configured correctly; such device errors are ignored.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* non-device errors are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_info(dev, "configured for %s%s\n",
		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		     dev_err_whine);

	return 0;

fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3258 | |
3259 | /** |
3260 | * ata_do_set_mode - Program timings and issue SET FEATURES - XFER |
3261 | * @link: link on which timings will be programmed |
3262 | * @r_failed_dev: out parameter for failed device |
3263 | * |
3264 | * Standard implementation of the function used to tune and set |
3265 | * ATA device disk transfer mode (PIO3, UDMA6, etc.). If |
3266 | * ata_dev_set_mode() fails, pointer to the failing device is |
3267 | * returned in @r_failed_dev. |
3268 | * |
3269 | * LOCKING: |
3270 | * PCI/etc. bus probe sem. |
3271 | * |
3272 | * RETURNS: |
3273 | * 0 on success, negative errno otherwise |
3274 | */ |
3275 | |
3276 | int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) |
3277 | { |
3278 | struct ata_port *ap = link->ap; |
3279 | struct ata_device *dev; |
3280 | int rc = 0, used_dma = 0, found = 0; |
3281 | |
3282 | /* step 1: calculate xfer_mask */ |
3283 | ata_for_each_dev(dev, link, ENABLED) { |
3284 | unsigned long pio_mask, dma_mask; |
3285 | unsigned int mode_mask; |
3286 | |
3287 | mode_mask = ATA_DMA_MASK_ATA; |
3288 | if (dev->class == ATA_DEV_ATAPI) |
3289 | mode_mask = ATA_DMA_MASK_ATAPI; |
3290 | else if (ata_id_is_cfa(dev->id)) |
3291 | mode_mask = ATA_DMA_MASK_CFA; |
3292 | |
3293 | ata_dev_xfermask(dev); |
3294 | ata_force_xfermask(dev); |
3295 | |
3296 | pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); |
3297 | |
3298 | if (libata_dma_mask & mode_mask) |
3299 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, |
3300 | dev->udma_mask); |
3301 | else |
3302 | dma_mask = 0; |
3303 | |
3304 | dev->pio_mode = ata_xfer_mask2mode(pio_mask); |
3305 | dev->dma_mode = ata_xfer_mask2mode(dma_mask); |
3306 | |
3307 | found = 1; |
3308 | if (ata_dma_enabled(dev)) |
3309 | used_dma = 1; |
3310 | } |
3311 | if (!found) |
3312 | goto out; |
3313 | |
3314 | /* step 2: always set host PIO timings */ |
3315 | ata_for_each_dev(dev, link, ENABLED) { |
3316 | if (dev->pio_mode == 0xff) { |
3317 | ata_dev_warn(dev, "no PIO support\n"); |
3318 | rc = -EINVAL; |
3319 | goto out; |
3320 | } |
3321 | |
3322 | dev->xfer_mode = dev->pio_mode; |
3323 | dev->xfer_shift = ATA_SHIFT_PIO; |
3324 | if (ap->ops->set_piomode) |
3325 | ap->ops->set_piomode(ap, dev); |
3326 | } |
3327 | |
3328 | /* step 3: set host DMA timings */ |
3329 | ata_for_each_dev(dev, link, ENABLED) { |
3330 | if (!ata_dma_enabled(dev)) |
3331 | continue; |
3332 | |
3333 | dev->xfer_mode = dev->dma_mode; |
3334 | dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); |
3335 | if (ap->ops->set_dmamode) |
3336 | ap->ops->set_dmamode(ap, dev); |
3337 | } |
3338 | |
3339 | /* step 4: update devices' xfer mode */ |
3340 | ata_for_each_dev(dev, link, ENABLED) { |
3341 | rc = ata_dev_set_mode(dev); |
3342 | if (rc) |
3343 | goto out; |
3344 | } |
3345 | |
3346 | /* Record simplex status. If we selected DMA then the other |
3347 | * host channels are not permitted to do so. |
3348 | */ |
3349 | if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) |
3350 | ap->host->simplex_claimed = ap; |
3351 | |
3352 | out: |
3353 | if (rc) |
3354 | *r_failed_dev = dev; |
3355 | return rc; |
3356 | } |
3357 | |
3358 | /** |
3359 | * ata_wait_ready - wait for link to become ready |
3360 | * @link: link to be waited on |
3361 | * @deadline: deadline jiffies for the operation |
3362 | * @check_ready: callback to check link readiness |
3363 | * |
3364 | * Wait for @link to become ready. @check_ready should return |
3365 | * positive number if @link is ready, 0 if it isn't, -ENODEV if |
3366 | * link doesn't seem to be occupied, other errno for other error |
3367 | * conditions. |
3368 | * |
3369 | * Transient -ENODEV conditions are allowed for |
3370 | * ATA_TMOUT_FF_WAIT. |
3371 | * |
3372 | * LOCKING: |
3373 | * EH context. |
3374 | * |
3375 | * RETURNS: |
3376 | * 0 if @linke is ready before @deadline; otherwise, -errno. |
3377 | */ |
3378 | int ata_wait_ready(struct ata_link *link, unsigned long deadline, |
3379 | int (*check_ready)(struct ata_link *link)) |
3380 | { |
3381 | unsigned long start = jiffies; |
3382 | unsigned long nodev_deadline; |
3383 | int warned = 0; |
3384 | |
3385 | /* choose which 0xff timeout to use, read comment in libata.h */ |
3386 | if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) |
3387 | nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); |
3388 | else |
3389 | nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); |
3390 | |
3391 | /* Slave readiness can't be tested separately from master. On |
3392 | * M/S emulation configuration, this function should be called |
3393 | * only on the master and it will handle both master and slave. |
3394 | */ |
3395 | WARN_ON(link == link->ap->slave_link); |
3396 | |
3397 | if (time_after(nodev_deadline, deadline)) |
3398 | nodev_deadline = deadline; |
3399 | |
3400 | while (1) { |
3401 | unsigned long now = jiffies; |
3402 | int ready, tmp; |
3403 | |
3404 | ready = tmp = check_ready(link); |
3405 | if (ready > 0) |
3406 | return 0; |
3407 | |
3408 | /* |
3409 | * -ENODEV could be transient. Ignore -ENODEV if link |
3410 | * is online. Also, some SATA devices take a long |
3411 | * time to clear 0xff after reset. Wait for |
3412 | * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't |
3413 | * offline. |
3414 | * |
3415 | * Note that some PATA controllers (pata_ali) explode |
3416 | * if status register is read more than once when |
3417 | * there's no device attached. |
3418 | */ |
3419 | if (ready == -ENODEV) { |
3420 | if (ata_link_online(link)) |
3421 | ready = 0; |
3422 | else if ((link->ap->flags & ATA_FLAG_SATA) && |
3423 | !ata_link_offline(link) && |
3424 | time_before(now, nodev_deadline)) |
3425 | ready = 0; |
3426 | } |
3427 | |
3428 | if (ready) |
3429 | return ready; |
3430 | if (time_after(now, deadline)) |
3431 | return -EBUSY; |
3432 | |
3433 | if (!warned && time_after(now, start + 5 * HZ) && |
3434 | (deadline - now > 3 * HZ)) { |
3435 | ata_link_warn(link, |
3436 | "link is slow to respond, please be patient " |
3437 | "(ready=%d)\n", tmp); |
3438 | warned = 1; |
3439 | } |
3440 | |
3441 | ata_msleep(link->ap, 50); |
3442 | } |
3443 | } |
3444 | |
3445 | /** |
3446 | * ata_wait_after_reset - wait for link to become ready after reset |
3447 | * @link: link to be waited on |
3448 | * @deadline: deadline jiffies for the operation |
3449 | * @check_ready: callback to check link readiness |
3450 | * |
3451 | * Wait for @link to become ready after reset. |
3452 | * |
3453 | * LOCKING: |
3454 | * EH context. |
3455 | * |
3456 | * RETURNS: |
3457 | * 0 if @linke is ready before @deadline; otherwise, -errno. |
3458 | */ |
3459 | int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, |
3460 | int (*check_ready)(struct ata_link *link)) |
3461 | { |
3462 | ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); |
3463 | |
3464 | return ata_wait_ready(link, deadline, check_ready); |
3465 | } |
3466 | |
3467 | /** |
3468 | * sata_link_debounce - debounce SATA phy status |
3469 | * @link: ATA link to debounce SATA phy status for |
3470 | * @params: timing parameters { interval, duratinon, timeout } in msec |
3471 | * @deadline: deadline jiffies for the operation |
3472 | * |
3473 | * Make sure SStatus of @link reaches stable state, determined by |
3474 | * holding the same value where DET is not 1 for @duration polled |
3475 | * every @interval, before @timeout. Timeout constraints the |
3476 | * beginning of the stable state. Because DET gets stuck at 1 on |
3477 | * some controllers after hot unplugging, this functions waits |
3478 | * until timeout then returns 0 if DET is stable at 1. |
3479 | * |
3480 | * @timeout is further limited by @deadline. The sooner of the |
3481 | * two is used. |
3482 | * |
3483 | * LOCKING: |
3484 | * Kernel thread context (may sleep) |
3485 | * |
3486 | * RETURNS: |
3487 | * 0 on success, -errno on failure. |
3488 | */ |
3489 | int sata_link_debounce(struct ata_link *link, const unsigned long *params, |
3490 | unsigned long deadline) |
3491 | { |
3492 | unsigned long interval = params[0]; |
3493 | unsigned long duration = params[1]; |
3494 | unsigned long last_jiffies, t; |
3495 | u32 last, cur; |
3496 | int rc; |
3497 | |
3498 | t = ata_deadline(jiffies, params[2]); |
3499 | if (time_before(t, deadline)) |
3500 | deadline = t; |
3501 | |
3502 | if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) |
3503 | return rc; |
3504 | cur &= 0xf; |
3505 | |
3506 | last = cur; |
3507 | last_jiffies = jiffies; |
3508 | |
3509 | while (1) { |
3510 | ata_msleep(link->ap, interval); |
3511 | if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) |
3512 | return rc; |
3513 | cur &= 0xf; |
3514 | |
3515 | /* DET stable? */ |
3516 | if (cur == last) { |
3517 | if (cur == 1 && time_before(jiffies, deadline)) |
3518 | continue; |
3519 | if (time_after(jiffies, |
3520 | ata_deadline(last_jiffies, duration))) |
3521 | return 0; |
3522 | continue; |
3523 | } |
3524 | |
3525 | /* unstable, start over */ |
3526 | last = cur; |
3527 | last_jiffies = jiffies; |
3528 | |
3529 | /* Check deadline. If debouncing failed, return |
3530 | * -EPIPE to tell upper layer to lower link speed. |
3531 | */ |
3532 | if (time_after(jiffies, deadline)) |
3533 | return -EPIPE; |
3534 | } |
3535 | } |
3536 | |
3537 | /** |
3538 | * sata_link_resume - resume SATA link |
3539 | * @link: ATA link to resume SATA |
3540 | * @params: timing parameters { interval, duratinon, timeout } in msec |
3541 | * @deadline: deadline jiffies for the operation |
3542 | * |
3543 | * Resume SATA phy @link and debounce it. |
3544 | * |
3545 | * LOCKING: |
3546 | * Kernel thread context (may sleep) |
3547 | * |
3548 | * RETURNS: |
3549 | * 0 on success, -errno on failure. |
3550 | */ |
3551 | int sata_link_resume(struct ata_link *link, const unsigned long *params, |
3552 | unsigned long deadline) |
3553 | { |
3554 | int tries = ATA_LINK_RESUME_TRIES; |
3555 | u32 scontrol, serror; |
3556 | int rc; |
3557 | |
3558 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3559 | return rc; |
3560 | |
3561 | /* |
3562 | * Writes to SControl sometimes get ignored under certain |
3563 | * controllers (ata_piix SIDPR). Make sure DET actually is |
3564 | * cleared. |
3565 | */ |
3566 | do { |
3567 | scontrol = (scontrol & 0x0f0) | 0x300; |
3568 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) |
3569 | return rc; |
3570 | /* |
3571 | * Some PHYs react badly if SStatus is pounded |
3572 | * immediately after resuming. Delay 200ms before |
3573 | * debouncing. |
3574 | */ |
3575 | ata_msleep(link->ap, 200); |
3576 | |
3577 | /* is SControl restored correctly? */ |
3578 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3579 | return rc; |
3580 | } while ((scontrol & 0xf0f) != 0x300 && --tries); |
3581 | |
3582 | if ((scontrol & 0xf0f) != 0x300) { |
3583 | ata_link_warn(link, "failed to resume link (SControl %X)\n", |
3584 | scontrol); |
3585 | return 0; |
3586 | } |
3587 | |
3588 | if (tries < ATA_LINK_RESUME_TRIES) |
3589 | ata_link_warn(link, "link resume succeeded after %d retries\n", |
3590 | ATA_LINK_RESUME_TRIES - tries); |
3591 | |
3592 | if ((rc = sata_link_debounce(link, params, deadline))) |
3593 | return rc; |
3594 | |
3595 | /* clear SError, some PHYs require this even for SRST to work */ |
3596 | if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) |
3597 | rc = sata_scr_write(link, SCR_ERROR, serror); |
3598 | |
3599 | return rc != -EINVAL ? rc : 0; |
3600 | } |
3601 | |
3602 | /** |
3603 | * sata_link_scr_lpm - manipulate SControl IPM and SPM fields |
3604 | * @link: ATA link to manipulate SControl for |
3605 | * @policy: LPM policy to configure |
3606 | * @spm_wakeup: initiate LPM transition to active state |
3607 | * |
3608 | * Manipulate the IPM field of the SControl register of @link |
3609 | * according to @policy. If @policy is ATA_LPM_MAX_POWER and |
3610 | * @spm_wakeup is %true, the SPM field is manipulated to wake up |
3611 | * the link. This function also clears PHYRDY_CHG before |
3612 | * returning. |
3613 | * |
3614 | * LOCKING: |
3615 | * EH context. |
3616 | * |
3617 | * RETURNS: |
3618 | * 0 on succes, -errno otherwise. |
3619 | */ |
3620 | int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, |
3621 | bool spm_wakeup) |
3622 | { |
3623 | struct ata_eh_context *ehc = &link->eh_context; |
3624 | bool woken_up = false; |
3625 | u32 scontrol; |
3626 | int rc; |
3627 | |
3628 | rc = sata_scr_read(link, SCR_CONTROL, &scontrol); |
3629 | if (rc) |
3630 | return rc; |
3631 | |
3632 | switch (policy) { |
3633 | case ATA_LPM_MAX_POWER: |
3634 | /* disable all LPM transitions */ |
3635 | scontrol |= (0x7 << 8); |
3636 | /* initiate transition to active state */ |
3637 | if (spm_wakeup) { |
3638 | scontrol |= (0x4 << 12); |
3639 | woken_up = true; |
3640 | } |
3641 | break; |
3642 | case ATA_LPM_MED_POWER: |
3643 | /* allow LPM to PARTIAL */ |
3644 | scontrol &= ~(0x1 << 8); |
3645 | scontrol |= (0x6 << 8); |
3646 | break; |
3647 | case ATA_LPM_MIN_POWER: |
3648 | if (ata_link_nr_enabled(link) > 0) |
3649 | /* no restrictions on LPM transitions */ |
3650 | scontrol &= ~(0x7 << 8); |
3651 | else { |
3652 | /* empty port, power off */ |
3653 | scontrol &= ~0xf; |
3654 | scontrol |= (0x1 << 2); |
3655 | } |
3656 | break; |
3657 | default: |
3658 | WARN_ON(1); |
3659 | } |
3660 | |
3661 | rc = sata_scr_write(link, SCR_CONTROL, scontrol); |
3662 | if (rc) |
3663 | return rc; |
3664 | |
3665 | /* give the link time to transit out of LPM state */ |
3666 | if (woken_up) |
3667 | msleep(10); |
3668 | |
3669 | /* clear PHYRDY_CHG from SError */ |
3670 | ehc->i.serror &= ~SERR_PHYRDY_CHG; |
3671 | return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); |
3672 | } |
3673 | |
3674 | /** |
3675 | * ata_std_prereset - prepare for reset |
3676 | * @link: ATA link to be reset |
3677 | * @deadline: deadline jiffies for the operation |
3678 | * |
3679 | * @link is about to be reset. Initialize it. Failure from |
3680 | * prereset makes libata abort whole reset sequence and give up |
3681 | * that port, so prereset should be best-effort. It does its |
3682 | * best to prepare for reset sequence but if things go wrong, it |
3683 | * should just whine, not fail. |
3684 | * |
3685 | * LOCKING: |
3686 | * Kernel thread context (may sleep) |
3687 | * |
3688 | * RETURNS: |
3689 | * 0 on success, -errno otherwise. |
3690 | */ |
3691 | int ata_std_prereset(struct ata_link *link, unsigned long deadline) |
3692 | { |
3693 | struct ata_port *ap = link->ap; |
3694 | struct ata_eh_context *ehc = &link->eh_context; |
3695 | const unsigned long *timing = sata_ehc_deb_timing(ehc); |
3696 | int rc; |
3697 | |
3698 | /* if we're about to do hardreset, nothing more to do */ |
3699 | if (ehc->i.action & ATA_EH_HARDRESET) |
3700 | return 0; |
3701 | |
3702 | /* if SATA, resume link */ |
3703 | if (ap->flags & ATA_FLAG_SATA) { |
3704 | rc = sata_link_resume(link, timing, deadline); |
3705 | /* whine about phy resume failure but proceed */ |
3706 | if (rc && rc != -EOPNOTSUPP) |
3707 | ata_link_warn(link, |
3708 | "failed to resume link for reset (errno=%d)\n", |
3709 | rc); |
3710 | } |
3711 | |
3712 | /* no point in trying softreset on offline link */ |
3713 | if (ata_phys_link_offline(link)) |
3714 | ehc->i.action &= ~ATA_EH_SOFTRESET; |
3715 | |
3716 | return 0; |
3717 | } |
3718 | |
3719 | /** |
3720 | * sata_link_hardreset - reset link via SATA phy reset |
3721 | * @link: link to reset |
3722 | * @timing: timing parameters { interval, duratinon, timeout } in msec |
3723 | * @deadline: deadline jiffies for the operation |
3724 | * @online: optional out parameter indicating link onlineness |
3725 | * @check_ready: optional callback to check link readiness |
3726 | * |
3727 | * SATA phy-reset @link using DET bits of SControl register. |
3728 | * After hardreset, link readiness is waited upon using |
3729 | * ata_wait_ready() if @check_ready is specified. LLDs are |
3730 | * allowed to not specify @check_ready and wait itself after this |
3731 | * function returns. Device classification is LLD's |
3732 | * responsibility. |
3733 | * |
3734 | * *@online is set to one iff reset succeeded and @link is online |
3735 | * after reset. |
3736 | * |
3737 | * LOCKING: |
3738 | * Kernel thread context (may sleep) |
3739 | * |
3740 | * RETURNS: |
3741 | * 0 on success, -errno otherwise. |
3742 | */ |
3743 | int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, |
3744 | unsigned long deadline, |
3745 | bool *online, int (*check_ready)(struct ata_link *)) |
3746 | { |
3747 | u32 scontrol; |
3748 | int rc; |
3749 | |
3750 | DPRINTK("ENTER\n"); |
3751 | |
3752 | if (online) |
3753 | *online = false; |
3754 | |
3755 | if (sata_set_spd_needed(link)) { |
3756 | /* SATA spec says nothing about how to reconfigure |
3757 | * spd. To be on the safe side, turn off phy during |
3758 | * reconfiguration. This works for at least ICH7 AHCI |
3759 | * and Sil3124. |
3760 | */ |
3761 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3762 | goto out; |
3763 | |
3764 | scontrol = (scontrol & 0x0f0) | 0x304; |
3765 | |
3766 | if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) |
3767 | goto out; |
3768 | |
3769 | sata_set_spd(link); |
3770 | } |
3771 | |
3772 | /* issue phy wake/reset */ |
3773 | if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) |
3774 | goto out; |
3775 | |
3776 | scontrol = (scontrol & 0x0f0) | 0x301; |
3777 | |
3778 | if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) |
3779 | goto out; |
3780 | |
3781 | /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 |
3782 | * 10.4.2 says at least 1 ms. |
3783 | */ |
3784 | ata_msleep(link->ap, 1); |
3785 | |
3786 | /* bring link back */ |
3787 | rc = sata_link_resume(link, timing, deadline); |
3788 | if (rc) |
3789 | goto out; |
3790 | /* if link is offline nothing more to do */ |
3791 | if (ata_phys_link_offline(link)) |
3792 | goto out; |
3793 | |
3794 | /* Link is online. From this point, -ENODEV too is an error. */ |
3795 | if (online) |
3796 | *online = true; |
3797 | |
3798 | if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { |
3799 | /* If PMP is supported, we have to do follow-up SRST. |
3800 | * Some PMPs don't send D2H Reg FIS after hardreset if |
3801 | * the first port is empty. Wait only for |
3802 | * ATA_TMOUT_PMP_SRST_WAIT. |
3803 | */ |
3804 | if (check_ready) { |
3805 | unsigned long pmp_deadline; |
3806 | |
3807 | pmp_deadline = ata_deadline(jiffies, |
3808 | ATA_TMOUT_PMP_SRST_WAIT); |
3809 | if (time_after(pmp_deadline, deadline)) |
3810 | pmp_deadline = deadline; |
3811 | ata_wait_ready(link, pmp_deadline, check_ready); |
3812 | } |
3813 | rc = -EAGAIN; |
3814 | goto out; |
3815 | } |
3816 | |
3817 | rc = 0; |
3818 | if (check_ready) |
3819 | rc = ata_wait_ready(link, deadline, check_ready); |
3820 | out: |
3821 | if (rc && rc != -EAGAIN) { |
3822 | /* online is set iff link is online && reset succeeded */ |
3823 | if (online) |
3824 | *online = false; |
3825 | ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); |
3826 | } |
3827 | DPRINTK("EXIT, rc=%d\n", rc); |
3828 | return rc; |
3829 | } |
3830 | |
3831 | /** |
3832 | * sata_std_hardreset - COMRESET w/o waiting or classification |
3833 | * @link: link to reset |
3834 | * @class: resulting class of attached device |
3835 | * @deadline: deadline jiffies for the operation |
3836 | * |
3837 | * Standard SATA COMRESET w/o waiting or classification. |
3838 | * |
3839 | * LOCKING: |
3840 | * Kernel thread context (may sleep) |
3841 | * |
3842 | * RETURNS: |
3843 | * 0 if link offline, -EAGAIN if link online, -errno on errors. |
3844 | */ |
3845 | int sata_std_hardreset(struct ata_link *link, unsigned int *class, |
3846 | unsigned long deadline) |
3847 | { |
3848 | const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); |
3849 | bool online; |
3850 | int rc; |
3851 | |
3852 | /* do hardreset */ |
3853 | rc = sata_link_hardreset(link, timing, deadline, &online, NULL); |
3854 | return online ? -EAGAIN : rc; |
3855 | } |
3856 | |
3857 | /** |
3858 | * ata_std_postreset - standard postreset callback |
3859 | * @link: the target ata_link |
3860 | * @classes: classes of attached devices |
3861 | * |
3862 | * This function is invoked after a successful reset. Note that |
3863 | * the device might have been reset more than once using |
3864 | * different reset methods before postreset is invoked. |
3865 | * |
3866 | * LOCKING: |
3867 | * Kernel thread context (may sleep) |
3868 | */ |
3869 | void ata_std_postreset(struct ata_link *link, unsigned int *classes) |
3870 | { |
3871 | u32 serror; |
3872 | |
3873 | DPRINTK("ENTER\n"); |
3874 | |
3875 | /* reset complete, clear SError */ |
3876 | if (!sata_scr_read(link, SCR_ERROR, &serror)) |
3877 | sata_scr_write(link, SCR_ERROR, serror); |
3878 | |
3879 | /* print link status */ |
3880 | sata_print_link_status(link); |
3881 | |
3882 | DPRINTK("EXIT\n"); |
3883 | } |
3884 | |
3885 | /** |
3886 | * ata_dev_same_device - Determine whether new ID matches configured device |
3887 | * @dev: device to compare against |
3888 | * @new_class: class of the new device |
3889 | * @new_id: IDENTIFY page of the new device |
3890 | * |
3891 | * Compare @new_class and @new_id against @dev and determine |
3892 | * whether @dev is the device indicated by @new_class and |
3893 | * @new_id. |
3894 | * |
3895 | * LOCKING: |
3896 | * None. |
3897 | * |
3898 | * RETURNS: |
3899 | * 1 if @dev matches @new_class and @new_id, 0 otherwise. |
3900 | */ |
3901 | static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, |
3902 | const u16 *new_id) |
3903 | { |
3904 | const u16 *old_id = dev->id; |
3905 | unsigned char model[2][ATA_ID_PROD_LEN + 1]; |
3906 | unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; |
3907 | |
3908 | if (dev->class != new_class) { |
3909 | ata_dev_info(dev, "class mismatch %d != %d\n", |
3910 | dev->class, new_class); |
3911 | return 0; |
3912 | } |
3913 | |
3914 | ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); |
3915 | ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); |
3916 | ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); |
3917 | ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); |
3918 | |
3919 | if (strcmp(model[0], model[1])) { |
3920 | ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", |
3921 | model[0], model[1]); |
3922 | return 0; |
3923 | } |
3924 | |
3925 | if (strcmp(serial[0], serial[1])) { |
3926 | ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", |
3927 | serial[0], serial[1]); |
3928 | return 0; |
3929 | } |
3930 | |
3931 | return 1; |
3932 | } |
3933 | |
3934 | /** |
3935 | * ata_dev_reread_id - Re-read IDENTIFY data |
3936 | * @dev: target ATA device |
3937 | * @readid_flags: read ID flags |
3938 | * |
3939 | * Re-read IDENTIFY page and make sure @dev is still attached to |
3940 | * the port. |
3941 | * |
3942 | * LOCKING: |
3943 | * Kernel thread context (may sleep) |
3944 | * |
3945 | * RETURNS: |
3946 | * 0 on success, negative errno otherwise |
3947 | */ |
3948 | int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) |
3949 | { |
3950 | unsigned int class = dev->class; |
3951 | u16 *id = (void *)dev->link->ap->sector_buf; |
3952 | int rc; |
3953 | |
3954 | /* read ID data */ |
3955 | rc = ata_dev_read_id(dev, &class, readid_flags, id); |
3956 | if (rc) |
3957 | return rc; |
3958 | |
3959 | /* is the device still there? */ |
3960 | if (!ata_dev_same_device(dev, class, id)) |
3961 | return -ENODEV; |
3962 | |
3963 | memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); |
3964 | return 0; |
3965 | } |
3966 | |
3967 | /** |
3968 | * ata_dev_revalidate - Revalidate ATA device |
3969 | * @dev: device to revalidate |
3970 | * @new_class: new class code |
3971 | * @readid_flags: read ID flags |
3972 | * |
3973 | * Re-read IDENTIFY page, make sure @dev is still attached to the |
3974 | * port and reconfigure it according to the new IDENTIFY page. |
3975 | * |
3976 | * LOCKING: |
3977 | * Kernel thread context (may sleep) |
3978 | * |
3979 | * RETURNS: |
3980 | * 0 on success, negative errno otherwise |
3981 | */ |
3982 | int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, |
3983 | unsigned int readid_flags) |
3984 | { |
3985 | u64 n_sectors = dev->n_sectors; |
3986 | u64 n_native_sectors = dev->n_native_sectors; |
3987 | int rc; |
3988 | |
3989 | if (!ata_dev_enabled(dev)) |
3990 | return -ENODEV; |
3991 | |
3992 | /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ |
3993 | if (ata_class_enabled(new_class) && |
3994 | new_class != ATA_DEV_ATA && |
3995 | new_class != ATA_DEV_ATAPI && |
3996 | new_class != ATA_DEV_SEMB) { |
3997 | ata_dev_info(dev, "class mismatch %u != %u\n", |
3998 | dev->class, new_class); |
3999 | rc = -ENODEV; |
4000 | goto fail; |
4001 | } |
4002 | |
4003 | /* re-read ID */ |
4004 | rc = ata_dev_reread_id(dev, readid_flags); |
4005 | if (rc) |
4006 | goto fail; |
4007 | |
4008 | /* configure device according to the new ID */ |
4009 | rc = ata_dev_configure(dev); |
4010 | if (rc) |
4011 | goto fail; |
4012 | |
4013 | /* verify n_sectors hasn't changed */ |
4014 | if (dev->class != ATA_DEV_ATA || !n_sectors || |
4015 | dev->n_sectors == n_sectors) |
4016 | return 0; |
4017 | |
4018 | /* n_sectors has changed */ |
4019 | ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", |
4020 | (unsigned long long)n_sectors, |
4021 | (unsigned long long)dev->n_sectors); |
4022 | |
4023 | /* |
4024 | * Something could have caused HPA to be unlocked |
4025 | * involuntarily. If n_native_sectors hasn't changed and the |
4026 | * new size matches it, keep the device. |
4027 | */ |
4028 | if (dev->n_native_sectors == n_native_sectors && |
4029 | dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { |
4030 | ata_dev_warn(dev, |
4031 | "new n_sectors matches native, probably " |
4032 | "late HPA unlock, n_sectors updated\n"); |
4033 | /* use the larger n_sectors */ |
4034 | return 0; |
4035 | } |
4036 | |
4037 | /* |
4038 | * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try |
4039 | * unlocking HPA in those cases. |
4040 | * |
4041 | * https://bugzilla.kernel.org/show_bug.cgi?id=15396 |
4042 | */ |
4043 | if (dev->n_native_sectors == n_native_sectors && |
4044 | dev->n_sectors < n_sectors && n_sectors == n_native_sectors && |
4045 | !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { |
4046 | ata_dev_warn(dev, |
4047 | "old n_sectors matches native, probably " |
4048 | "late HPA lock, will try to unlock HPA\n"); |
4049 | /* try unlocking HPA */ |
4050 | dev->flags |= ATA_DFLAG_UNLOCK_HPA; |
4051 | rc = -EIO; |
4052 | } else |
4053 | rc = -ENODEV; |
4054 | |
4055 | /* restore original n_[native_]sectors and fail */ |
4056 | dev->n_native_sectors = n_native_sectors; |
4057 | dev->n_sectors = n_sectors; |
4058 | fail: |
4059 | ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); |
4060 | return rc; |
4061 | } |
4062 | |
/* One quirk entry: model (and optionally revision) glob patterns plus the
 * horkage flags to apply when a device matches (see ata_dev_blacklisted()). */
struct ata_blacklist_entry {
	const char *model_num;	/* glob matched against IDENTIFY model string */
	const char *model_rev;	/* optional glob for firmware rev; NULL = any */
	unsigned long horkage;	/* ATA_HORKAGE_* flags applied on match */
};
4068 | |
/*
 * Table of known-broken devices.  Matched by ata_dev_blacklisted() using
 * glob_match() on the model number (and, when non-NULL, firmware revision);
 * the first matching entry's horkage flags win.  Terminated by an empty
 * entry.
 */
static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8[34]35",NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_HORKAGE_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_HORKAGE_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
						ATA_HORKAGE_FIRMWARE_WARN },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_HORKAGE_IVB, },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",		NULL,	ATA_HORKAGE_BRIDGE_OK, },
	{ "BUFFALO HD-QSU2/R5",		NULL,	ATA_HORKAGE_BRIDGE_OK, },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",			NULL,	ATA_HORKAGE_1_5_GBPS, },
	{ "Seagate FreeAgent GoFlex",	NULL,	ATA_HORKAGE_1_5_GBPS, },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },

	/* End Marker */
	{ }
};
4185 | |
4186 | /** |
4187 | * glob_match - match a text string against a glob-style pattern |
4188 | * @text: the string to be examined |
4189 | * @pattern: the glob-style pattern to be matched against |
4190 | * |
4191 | * Either/both of text and pattern can be empty strings. |
4192 | * |
4193 | * Match text against a glob-style pattern, with wildcards and simple sets: |
4194 | * |
4195 | * ? matches any single character. |
4196 | * * matches any run of characters. |
4197 | * [xyz] matches a single character from the set: x, y, or z. |
4198 | * [a-d] matches a single character from the range: a, b, c, or d. |
4199 | * [a-d0-9] matches a single character from either range. |
4200 | * |
4201 | * The special characters ?, [, -, or *, can be matched using a set, eg. [*] |
4202 | * Behaviour with malformed patterns is undefined, though generally reasonable. |
4203 | * |
4204 | * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" |
4205 | * |
4206 | * This function uses one level of recursion per '*' in pattern. |
4207 | * Since it calls _nothing_ else, and has _no_ explicit local variables, |
4208 | * this will not cause stack problems for any reasonable use here. |
4209 | * |
4210 | * RETURNS: |
4211 | * 0 on match, 1 otherwise. |
4212 | */ |
4213 | static int glob_match (const char *text, const char *pattern) |
4214 | { |
4215 | do { |
4216 | /* Match single character or a '?' wildcard */ |
4217 | if (*text == *pattern || *pattern == '?') { |
4218 | if (!*pattern++) |
4219 | return 0; /* End of both strings: match */ |
4220 | } else { |
4221 | /* Match single char against a '[' bracketed ']' pattern set */ |
4222 | if (!*text || *pattern != '[') |
4223 | break; /* Not a pattern set */ |
4224 | while (*++pattern && *pattern != ']' && *text != *pattern) { |
4225 | if (*pattern == '-' && *(pattern - 1) != '[') |
4226 | if (*text > *(pattern - 1) && *text < *(pattern + 1)) { |
4227 | ++pattern; |
4228 | break; |
4229 | } |
4230 | } |
4231 | if (!*pattern || *pattern == ']') |
4232 | return 1; /* No match */ |
4233 | while (*pattern && *pattern++ != ']'); |
4234 | } |
4235 | } while (*++text && *pattern); |
4236 | |
4237 | /* Match any run of chars against a '*' wildcard */ |
4238 | if (*pattern == '*') { |
4239 | if (!*++pattern) |
4240 | return 0; /* Match: avoid recursion at end of pattern */ |
4241 | /* Loop to handle additional pattern chars after the wildcard */ |
4242 | while (*text) { |
4243 | if (glob_match(text, pattern) == 0) |
4244 | return 0; /* Remainder matched */ |
4245 | ++text; /* Absorb (match) this char and try again */ |
4246 | } |
4247 | } |
4248 | if (!*text && !*pattern) |
4249 | return 0; /* End of both strings: match */ |
4250 | return 1; /* No match */ |
4251 | } |
4252 | |
4253 | static unsigned long ata_dev_blacklisted(const struct ata_device *dev) |
4254 | { |
4255 | unsigned char model_num[ATA_ID_PROD_LEN + 1]; |
4256 | unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; |
4257 | const struct ata_blacklist_entry *ad = ata_device_blacklist; |
4258 | |
4259 | ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); |
4260 | ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); |
4261 | |
4262 | while (ad->model_num) { |
4263 | if (!glob_match(model_num, ad->model_num)) { |
4264 | if (ad->model_rev == NULL) |
4265 | return ad->horkage; |
4266 | if (!glob_match(model_rev, ad->model_rev)) |
4267 | return ad->horkage; |
4268 | } |
4269 | ad++; |
4270 | } |
4271 | return 0; |
4272 | } |
4273 | |
4274 | static int ata_dma_blacklisted(const struct ata_device *dev) |
4275 | { |
4276 | /* We don't support polling DMA. |
4277 | * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) |
4278 | * if the LLDD handles only interrupts in the HSM_ST_LAST state. |
4279 | */ |
4280 | if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && |
4281 | (dev->flags & ATA_DFLAG_CDB_INTR)) |
4282 | return 1; |
4283 | return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; |
4284 | } |
4285 | |
4286 | /** |
4287 | * ata_is_40wire - check drive side detection |
4288 | * @dev: device |
4289 | * |
4290 | * Perform drive side detection decoding, allowing for device vendors |
4291 | * who can't follow the documentation. |
4292 | */ |
4293 | |
4294 | static int ata_is_40wire(struct ata_device *dev) |
4295 | { |
4296 | if (dev->horkage & ATA_HORKAGE_IVB) |
4297 | return ata_drive_40wire_relaxed(dev->id); |
4298 | return ata_drive_40wire(dev->id); |
4299 | } |
4300 | |
4301 | /** |
4302 | * cable_is_40wire - 40/80/SATA decider |
4303 | * @ap: port to consider |
4304 | * |
4305 | * This function encapsulates the policy for speed management |
4306 | * in one place. At the moment we don't cache the result but |
4307 | * there is a good case for setting ap->cbl to the result when |
4308 | * we are called with unknown cables (and figuring out if it |
4309 | * impacts hotplug at all). |
4310 | * |
4311 | * Return 1 if the cable appears to be 40 wire. |
4312 | */ |
4313 | |
4314 | static int cable_is_40wire(struct ata_port *ap) |
4315 | { |
4316 | struct ata_link *link; |
4317 | struct ata_device *dev; |
4318 | |
4319 | /* If the controller thinks we are 40 wire, we are. */ |
4320 | if (ap->cbl == ATA_CBL_PATA40) |
4321 | return 1; |
4322 | |
4323 | /* If the controller thinks we are 80 wire, we are. */ |
4324 | if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) |
4325 | return 0; |
4326 | |
4327 | /* If the system is known to be 40 wire short cable (eg |
4328 | * laptop), then we allow 80 wire modes even if the drive |
4329 | * isn't sure. |
4330 | */ |
4331 | if (ap->cbl == ATA_CBL_PATA40_SHORT) |
4332 | return 0; |
4333 | |
4334 | /* If the controller doesn't know, we scan. |
4335 | * |
4336 | * Note: We look for all 40 wire detects at this point. Any |
4337 | * 80 wire detect is taken to be 80 wire cable because |
4338 | * - in many setups only the one drive (slave if present) will |
4339 | * give a valid detect |
4340 | * - if you have a non detect capable drive you don't want it |
4341 | * to colour the choice |
4342 | */ |
4343 | ata_for_each_link(link, ap, EDGE) { |
4344 | ata_for_each_dev(dev, link, ENABLED) { |
4345 | if (!ata_is_40wire(dev)) |
4346 | return 0; |
4347 | } |
4348 | } |
4349 | return 1; |
4350 | } |
4351 | |
4352 | /** |
4353 | * ata_dev_xfermask - Compute supported xfermask of the given device |
4354 | * @dev: Device to compute xfermask for |
4355 | * |
4356 | * Compute supported xfermask of @dev and store it in |
4357 | * dev->*_mask. This function is responsible for applying all |
4358 | * known limits including host controller limits, device |
4359 | * blacklist, etc... |
4360 | * |
4361 | * LOCKING: |
4362 | * None. |
4363 | */ |
4364 | static void ata_dev_xfermask(struct ata_device *dev) |
4365 | { |
4366 | struct ata_link *link = dev->link; |
4367 | struct ata_port *ap = link->ap; |
4368 | struct ata_host *host = ap->host; |
4369 | unsigned long xfer_mask; |
4370 | |
4371 | /* controller modes available */ |
4372 | xfer_mask = ata_pack_xfermask(ap->pio_mask, |
4373 | ap->mwdma_mask, ap->udma_mask); |
4374 | |
4375 | /* drive modes available */ |
4376 | xfer_mask &= ata_pack_xfermask(dev->pio_mask, |
4377 | dev->mwdma_mask, dev->udma_mask); |
4378 | xfer_mask &= ata_id_xfermask(dev->id); |
4379 | |
4380 | /* |
4381 | * CFA Advanced TrueIDE timings are not allowed on a shared |
4382 | * cable |
4383 | */ |
4384 | if (ata_dev_pair(dev)) { |
4385 | /* No PIO5 or PIO6 */ |
4386 | xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); |
4387 | /* No MWDMA3 or MWDMA 4 */ |
4388 | xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); |
4389 | } |
4390 | |
4391 | if (ata_dma_blacklisted(dev)) { |
4392 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
4393 | ata_dev_warn(dev, |
4394 | "device is on DMA blacklist, disabling DMA\n"); |
4395 | } |
4396 | |
4397 | if ((host->flags & ATA_HOST_SIMPLEX) && |
4398 | host->simplex_claimed && host->simplex_claimed != ap) { |
4399 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
4400 | ata_dev_warn(dev, |
4401 | "simplex DMA is claimed by other device, disabling DMA\n"); |
4402 | } |
4403 | |
4404 | if (ap->flags & ATA_FLAG_NO_IORDY) |
4405 | xfer_mask &= ata_pio_mask_no_iordy(dev); |
4406 | |
4407 | if (ap->ops->mode_filter) |
4408 | xfer_mask = ap->ops->mode_filter(dev, xfer_mask); |
4409 | |
4410 | /* Apply cable rule here. Don't apply it early because when |
4411 | * we handle hot plug the cable type can itself change. |
4412 | * Check this last so that we know if the transfer rate was |
4413 | * solely limited by the cable. |
4414 | * Unknown or 80 wire cables reported host side are checked |
4415 | * drive side as well. Cases where we know a 40wire cable |
4416 | * is used safely for 80 are not checked here. |
4417 | */ |
4418 | if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) |
4419 | /* UDMA/44 or higher would be available */ |
4420 | if (cable_is_40wire(ap)) { |
4421 | ata_dev_warn(dev, |
4422 | "limited to UDMA/33 due to 40-wire cable\n"); |
4423 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
4424 | } |
4425 | |
4426 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, |
4427 | &dev->mwdma_mask, &dev->udma_mask); |
4428 | } |
4429 | |
4430 | /** |
4431 | * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command |
4432 | * @dev: Device to which command will be sent |
4433 | * |
4434 | * Issue SET FEATURES - XFER MODE command to device @dev |
4435 | * on port @ap. |
4436 | * |
4437 | * LOCKING: |
4438 | * PCI/etc. bus probe sem. |
4439 | * |
4440 | * RETURNS: |
4441 | * 0 on success, AC_ERR_* mask otherwise. |
4442 | */ |
4443 | |
4444 | static unsigned int ata_dev_set_xfermode(struct ata_device *dev) |
4445 | { |
4446 | struct ata_taskfile tf; |
4447 | unsigned int err_mask; |
4448 | |
4449 | /* set up set-features taskfile */ |
4450 | DPRINTK("set features - xfer mode\n"); |
4451 | |
4452 | /* Some controllers and ATAPI devices show flaky interrupt |
4453 | * behavior after setting xfer mode. Use polling instead. |
4454 | */ |
4455 | ata_tf_init(dev, &tf); |
4456 | tf.command = ATA_CMD_SET_FEATURES; |
4457 | tf.feature = SETFEATURES_XFER; |
4458 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; |
4459 | tf.protocol = ATA_PROT_NODATA; |
4460 | /* If we are using IORDY we must send the mode setting command */ |
4461 | if (ata_pio_need_iordy(dev)) |
4462 | tf.nsect = dev->xfer_mode; |
4463 | /* If the device has IORDY and the controller does not - turn it off */ |
4464 | else if (ata_id_has_iordy(dev->id)) |
4465 | tf.nsect = 0x01; |
4466 | else /* In the ancient relic department - skip all of this */ |
4467 | return 0; |
4468 | |
4469 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
4470 | |
4471 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4472 | return err_mask; |
4473 | } |
4474 | |
4475 | /** |
4476 | * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES |
4477 | * @dev: Device to which command will be sent |
4478 | * @enable: Whether to enable or disable the feature |
4479 | * @feature: The sector count represents the feature to set |
4480 | * |
4481 | * Issue SET FEATURES - SATA FEATURES command to device @dev |
4482 | * on port @ap with sector count |
4483 | * |
4484 | * LOCKING: |
4485 | * PCI/etc. bus probe sem. |
4486 | * |
4487 | * RETURNS: |
4488 | * 0 on success, AC_ERR_* mask otherwise. |
4489 | */ |
4490 | unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) |
4491 | { |
4492 | struct ata_taskfile tf; |
4493 | unsigned int err_mask; |
4494 | |
4495 | /* set up set-features taskfile */ |
4496 | DPRINTK("set features - SATA features\n"); |
4497 | |
4498 | ata_tf_init(dev, &tf); |
4499 | tf.command = ATA_CMD_SET_FEATURES; |
4500 | tf.feature = enable; |
4501 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
4502 | tf.protocol = ATA_PROT_NODATA; |
4503 | tf.nsect = feature; |
4504 | |
4505 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
4506 | |
4507 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4508 | return err_mask; |
4509 | } |
4510 | EXPORT_SYMBOL_GPL(ata_dev_set_feature); |
4511 | |
4512 | /** |
4513 | * ata_dev_init_params - Issue INIT DEV PARAMS command |
4514 | * @dev: Device to which command will be sent |
4515 | * @heads: Number of heads (taskfile parameter) |
4516 | * @sectors: Number of sectors (taskfile parameter) |
4517 | * |
4518 | * LOCKING: |
4519 | * Kernel thread context (may sleep) |
4520 | * |
4521 | * RETURNS: |
4522 | * 0 on success, AC_ERR_* mask otherwise. |
4523 | */ |
4524 | static unsigned int ata_dev_init_params(struct ata_device *dev, |
4525 | u16 heads, u16 sectors) |
4526 | { |
4527 | struct ata_taskfile tf; |
4528 | unsigned int err_mask; |
4529 | |
4530 | /* Number of sectors per track 1-255. Number of heads 1-16 */ |
4531 | if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) |
4532 | return AC_ERR_INVALID; |
4533 | |
4534 | /* set up init dev params taskfile */ |
4535 | DPRINTK("init dev params \n"); |
4536 | |
4537 | ata_tf_init(dev, &tf); |
4538 | tf.command = ATA_CMD_INIT_DEV_PARAMS; |
4539 | tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; |
4540 | tf.protocol = ATA_PROT_NODATA; |
4541 | tf.nsect = sectors; |
4542 | tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ |
4543 | |
4544 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); |
4545 | /* A clean abort indicates an original or just out of spec drive |
4546 | and we should continue as we issue the setup based on the |
4547 | drive reported working geometry */ |
4548 | if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) |
4549 | err_mask = 0; |
4550 | |
4551 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4552 | return err_mask; |
4553 | } |
4554 | |
4555 | /** |
4556 | * ata_sg_clean - Unmap DMA memory associated with command |
4557 | * @qc: Command containing DMA memory to be released |
4558 | * |
4559 | * Unmap all mapped DMA memory associated with this command. |
4560 | * |
4561 | * LOCKING: |
4562 | * spin_lock_irqsave(host lock) |
4563 | */ |
4564 | void ata_sg_clean(struct ata_queued_cmd *qc) |
4565 | { |
4566 | struct ata_port *ap = qc->ap; |
4567 | struct scatterlist *sg = qc->sg; |
4568 | int dir = qc->dma_dir; |
4569 | |
4570 | WARN_ON_ONCE(sg == NULL); |
4571 | |
4572 | VPRINTK("unmapping %u sg elements\n", qc->n_elem); |
4573 | |
4574 | if (qc->n_elem) |
4575 | dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); |
4576 | |
4577 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
4578 | qc->sg = NULL; |
4579 | } |
4580 | |
4581 | /** |
4582 | * atapi_check_dma - Check whether ATAPI DMA can be supported |
4583 | * @qc: Metadata associated with taskfile to check |
4584 | * |
4585 | * Allow low-level driver to filter ATA PACKET commands, returning |
4586 | * a status indicating whether or not it is OK to use DMA for the |
4587 | * supplied PACKET command. |
4588 | * |
4589 | * LOCKING: |
4590 | * spin_lock_irqsave(host lock) |
4591 | * |
4592 | * RETURNS: 0 when ATAPI DMA can be used |
4593 | * nonzero otherwise |
4594 | */ |
4595 | int atapi_check_dma(struct ata_queued_cmd *qc) |
4596 | { |
4597 | struct ata_port *ap = qc->ap; |
4598 | |
4599 | /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a |
4600 | * few ATAPI devices choke on such DMA requests. |
4601 | */ |
4602 | if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && |
4603 | unlikely(qc->nbytes & 15)) |
4604 | return 1; |
4605 | |
4606 | if (ap->ops->check_atapi_dma) |
4607 | return ap->ops->check_atapi_dma(qc); |
4608 | |
4609 | return 0; |
4610 | } |
4611 | |
4612 | /** |
4613 | * ata_std_qc_defer - Check whether a qc needs to be deferred |
4614 | * @qc: ATA command in question |
4615 | * |
4616 | * Non-NCQ commands cannot run with any other command, NCQ or |
4617 | * not. As upper layer only knows the queue depth, we are |
4618 | * responsible for maintaining exclusion. This function checks |
4619 | * whether a new command @qc can be issued. |
4620 | * |
4621 | * LOCKING: |
4622 | * spin_lock_irqsave(host lock) |
4623 | * |
4624 | * RETURNS: |
4625 | * ATA_DEFER_* if deferring is needed, 0 otherwise. |
4626 | */ |
4627 | int ata_std_qc_defer(struct ata_queued_cmd *qc) |
4628 | { |
4629 | struct ata_link *link = qc->dev->link; |
4630 | |
4631 | if (qc->tf.protocol == ATA_PROT_NCQ) { |
4632 | if (!ata_tag_valid(link->active_tag)) |
4633 | return 0; |
4634 | } else { |
4635 | if (!ata_tag_valid(link->active_tag) && !link->sactive) |
4636 | return 0; |
4637 | } |
4638 | |
4639 | return ATA_DEFER_LINK; |
4640 | } |
4641 | |
/* Do-nothing qc preparation hook — NOTE(review): presumably plugged into
 * ->qc_prep by drivers that need no per-command setup; confirm at call sites. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4643 | |
4644 | /** |
4645 | * ata_sg_init - Associate command with scatter-gather table. |
4646 | * @qc: Command to be associated |
4647 | * @sg: Scatter-gather table. |
4648 | * @n_elem: Number of elements in s/g table. |
4649 | * |
4650 | * Initialize the data-related elements of queued_cmd @qc |
4651 | * to point to a scatter-gather table @sg, containing @n_elem |
4652 | * elements. |
4653 | * |
4654 | * LOCKING: |
4655 | * spin_lock_irqsave(host lock) |
4656 | */ |
4657 | void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, |
4658 | unsigned int n_elem) |
4659 | { |
4660 | qc->sg = sg; |
4661 | qc->n_elem = n_elem; |
4662 | qc->cursg = qc->sg; |
4663 | } |
4664 | |
4665 | /** |
4666 | * ata_sg_setup - DMA-map the scatter-gather table associated with a command. |
4667 | * @qc: Command with scatter-gather table to be mapped. |
4668 | * |
4669 | * DMA-map the scatter-gather table associated with queued_cmd @qc. |
4670 | * |
4671 | * LOCKING: |
4672 | * spin_lock_irqsave(host lock) |
4673 | * |
4674 | * RETURNS: |
4675 | * Zero on success, negative on error. |
4676 | * |
4677 | */ |
4678 | static int ata_sg_setup(struct ata_queued_cmd *qc) |
4679 | { |
4680 | struct ata_port *ap = qc->ap; |
4681 | unsigned int n_elem; |
4682 | |
4683 | VPRINTK("ENTER, ata%u\n", ap->print_id); |
4684 | |
4685 | n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); |
4686 | if (n_elem < 1) |
4687 | return -1; |
4688 | |
4689 | DPRINTK("%d sg elements mapped\n", n_elem); |
4690 | qc->orig_n_elem = qc->n_elem; |
4691 | qc->n_elem = n_elem; |
4692 | qc->flags |= ATA_QCFLAG_DMAMAP; |
4693 | |
4694 | return 0; |
4695 | } |
4696 | |
4697 | /** |
4698 | * swap_buf_le16 - swap halves of 16-bit words in place |
4699 | * @buf: Buffer to swap |
4700 | * @buf_words: Number of 16-bit words in buffer. |
4701 | * |
4702 | * Swap halves of 16-bit words if needed to convert from |
4703 | * little-endian byte order to native cpu byte order, or |
4704 | * vice-versa. |
4705 | * |
4706 | * LOCKING: |
4707 | * Inherited from caller. |
4708 | */ |
4709 | void swap_buf_le16(u16 *buf, unsigned int buf_words) |
4710 | { |
4711 | #ifdef __BIG_ENDIAN |
4712 | unsigned int i; |
4713 | |
4714 | for (i = 0; i < buf_words; i++) |
4715 | buf[i] = le16_to_cpu(buf[i]); |
4716 | #endif /* __BIG_ENDIAN */ |
4717 | } |
4718 | |
4719 | /** |
4720 | * ata_qc_new - Request an available ATA command, for queueing |
4721 | * @ap: target port |
4722 | * |
4723 | * LOCKING: |
4724 | * None. |
4725 | */ |
4726 | |
4727 | static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) |
4728 | { |
4729 | struct ata_queued_cmd *qc = NULL; |
4730 | unsigned int i; |
4731 | |
4732 | /* no command while frozen */ |
4733 | if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) |
4734 | return NULL; |
4735 | |
4736 | /* the last tag is reserved for internal command. */ |
4737 | for (i = 0; i < ATA_MAX_QUEUE - 1; i++) |
4738 | if (!test_and_set_bit(i, &ap->qc_allocated)) { |
4739 | qc = __ata_qc_from_tag(ap, i); |
4740 | break; |
4741 | } |
4742 | |
4743 | if (qc) |
4744 | qc->tag = i; |
4745 | |
4746 | return qc; |
4747 | } |
4748 | |
4749 | /** |
4750 | * ata_qc_new_init - Request an available ATA command, and initialize it |
4751 | * @dev: Device from whom we request an available command structure |
4752 | * |
4753 | * LOCKING: |
4754 | * None. |
4755 | */ |
4756 | |
4757 | struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) |
4758 | { |
4759 | struct ata_port *ap = dev->link->ap; |
4760 | struct ata_queued_cmd *qc; |
4761 | |
4762 | qc = ata_qc_new(ap); |
4763 | if (qc) { |
4764 | qc->scsicmd = NULL; |
4765 | qc->ap = ap; |
4766 | qc->dev = dev; |
4767 | |
4768 | ata_qc_reinit(qc); |
4769 | } |
4770 | |
4771 | return qc; |
4772 | } |
4773 | |
4774 | /** |
4775 | * ata_qc_free - free unused ata_queued_cmd |
4776 | * @qc: Command to complete |
4777 | * |
4778 | * Designed to free unused ata_queued_cmd object |
4779 | * in case something prevents using it. |
4780 | * |
4781 | * LOCKING: |
4782 | * spin_lock_irqsave(host lock) |
4783 | */ |
4784 | void ata_qc_free(struct ata_queued_cmd *qc) |
4785 | { |
4786 | struct ata_port *ap; |
4787 | unsigned int tag; |
4788 | |
4789 | WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ |
4790 | ap = qc->ap; |
4791 | |
4792 | qc->flags = 0; |
4793 | tag = qc->tag; |
4794 | if (likely(ata_tag_valid(tag))) { |
4795 | qc->tag = ATA_TAG_POISON; |
4796 | clear_bit(tag, &ap->qc_allocated); |
4797 | } |
4798 | } |
4799 | |
/**
 *	__ata_qc_complete - low-level qc completion
 *	@qc: Command to complete
 *
 *	Tear down the DMA mapping (if any), clear the active-tag
 *	bookkeeping on the link and port, and invoke the qc's
 *	completion callback.  Callers are responsible for the EH
 *	checks performed by ata_qc_complete().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* last outstanding NCQ command on this link went away */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4838 | |
4839 | static void fill_result_tf(struct ata_queued_cmd *qc) |
4840 | { |
4841 | struct ata_port *ap = qc->ap; |
4842 | |
4843 | qc->result_tf.flags = qc->tf.flags; |
4844 | ap->ops->qc_fill_rtf(qc); |
4845 | } |
4846 | |
4847 | static void ata_verify_xfer(struct ata_queued_cmd *qc) |
4848 | { |
4849 | struct ata_device *dev = qc->dev; |
4850 | |
4851 | if (ata_is_nodata(qc->tf.protocol)) |
4852 | return; |
4853 | |
4854 | if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) |
4855 | return; |
4856 | |
4857 | dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; |
4858 | } |
4859 | |
4860 | /** |
4861 | * ata_qc_complete - Complete an active ATA command |
4862 | * @qc: Command to complete |
4863 | * |
4864 | * Indicate to the mid and upper layers that an ATA command has |
4865 | * completed, with either an ok or not-ok status. |
4866 | * |
4867 | * Refrain from calling this function multiple times when |
4868 | * successfully completing multiple NCQ commands. |
4869 | * ata_qc_complete_multiple() should be used instead, which will |
4870 | * properly update IRQ expect state. |
4871 | * |
4872 | * LOCKING: |
4873 | * spin_lock_irqsave(host lock) |
4874 | */ |
4875 | void ata_qc_complete(struct ata_queued_cmd *qc) |
4876 | { |
4877 | struct ata_port *ap = qc->ap; |
4878 | |
4879 | /* XXX: New EH and old EH use different mechanisms to |
4880 | * synchronize EH with regular execution path. |
4881 | * |
4882 | * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. |
4883 | * Normal execution path is responsible for not accessing a |
4884 | * failed qc. libata core enforces the rule by returning NULL |
4885 | * from ata_qc_from_tag() for failed qcs. |
4886 | * |
4887 | * Old EH depends on ata_qc_complete() nullifying completion |
4888 | * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does |
4889 | * not synchronize with interrupt handler. Only PIO task is |
4890 | * taken care of. |
4891 | */ |
4892 | if (ap->ops->error_handler) { |
4893 | struct ata_device *dev = qc->dev; |
4894 | struct ata_eh_info *ehi = &dev->link->eh_info; |
4895 | |
4896 | if (unlikely(qc->err_mask)) |
4897 | qc->flags |= ATA_QCFLAG_FAILED; |
4898 | |
4899 | /* |
4900 | * Finish internal commands without any further processing |
4901 | * and always with the result TF filled. |
4902 | */ |
4903 | if (unlikely(ata_tag_internal(qc->tag))) { |
4904 | fill_result_tf(qc); |
4905 | __ata_qc_complete(qc); |
4906 | return; |
4907 | } |
4908 | |
4909 | /* |
4910 | * Non-internal qc has failed. Fill the result TF and |
4911 | * summon EH. |
4912 | */ |
4913 | if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { |
4914 | fill_result_tf(qc); |
4915 | ata_qc_schedule_eh(qc); |
4916 | return; |
4917 | } |
4918 | |
4919 | WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); |
4920 | |
4921 | /* read result TF if requested */ |
4922 | if (qc->flags & ATA_QCFLAG_RESULT_TF) |
4923 | fill_result_tf(qc); |
4924 | |
4925 | /* Some commands need post-processing after successful |
4926 | * completion. |
4927 | */ |
4928 | switch (qc->tf.command) { |
4929 | case ATA_CMD_SET_FEATURES: |
4930 | if (qc->tf.feature != SETFEATURES_WC_ON && |
4931 | qc->tf.feature != SETFEATURES_WC_OFF) |
4932 | break; |
4933 | /* fall through */ |
4934 | case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ |
4935 | case ATA_CMD_SET_MULTI: /* multi_count changed */ |
4936 | /* revalidate device */ |
4937 | ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; |
4938 | ata_port_schedule_eh(ap); |
4939 | break; |
4940 | |
4941 | case ATA_CMD_SLEEP: |
4942 | dev->flags |= ATA_DFLAG_SLEEPING; |
4943 | break; |
4944 | } |
4945 | |
4946 | if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) |
4947 | ata_verify_xfer(qc); |
4948 | |
4949 | __ata_qc_complete(qc); |
4950 | } else { |
4951 | if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) |
4952 | return; |
4953 | |
4954 | /* read result TF if failed or requested */ |
4955 | if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) |
4956 | fill_result_tf(qc); |
4957 | |
4958 | __ata_qc_complete(qc); |
4959 | } |
4960 | } |
4961 | |
4962 | /** |
4963 | * ata_qc_complete_multiple - Complete multiple qcs successfully |
4964 | * @ap: port in question |
4965 | * @qc_active: new qc_active mask |
4966 | * |
4967 | * Complete in-flight commands. This functions is meant to be |
4968 | * called from low-level driver's interrupt routine to complete |
4969 | * requests normally. ap->qc_active and @qc_active is compared |
4970 | * and commands are completed accordingly. |
4971 | * |
4972 | * Always use this function when completing multiple NCQ commands |
4973 | * from IRQ handlers instead of calling ata_qc_complete() |
4974 | * multiple times to keep IRQ expect status properly in sync. |
4975 | * |
4976 | * LOCKING: |
4977 | * spin_lock_irqsave(host lock) |
4978 | * |
4979 | * RETURNS: |
4980 | * Number of completed commands on success, -errno otherwise. |
4981 | */ |
4982 | int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) |
4983 | { |
4984 | int nr_done = 0; |
4985 | u32 done_mask; |
4986 | |
4987 | done_mask = ap->qc_active ^ qc_active; |
4988 | |
4989 | if (unlikely(done_mask & qc_active)) { |
4990 | ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", |
4991 | ap->qc_active, qc_active); |
4992 | return -EINVAL; |
4993 | } |
4994 | |
4995 | while (done_mask) { |
4996 | struct ata_queued_cmd *qc; |
4997 | unsigned int tag = __ffs(done_mask); |
4998 | |
4999 | qc = ata_qc_from_tag(ap, tag); |
5000 | if (qc) { |
5001 | ata_qc_complete(qc); |
5002 | nr_done++; |
5003 | } |
5004 | done_mask &= ~(1 << tag); |
5005 | } |
5006 | |
5007 | return nr_done; |
5008 | } |
5009 | |
5010 | /** |
5011 | * ata_qc_issue - issue taskfile to device |
5012 | * @qc: command to issue to device |
5013 | * |
5014 | * Prepare an ATA command to submission to device. |
5015 | * This includes mapping the data into a DMA-able |
5016 | * area, filling in the S/G table, and finally |
5017 | * writing the taskfile to hardware, starting the command. |
5018 | * |
5019 | * LOCKING: |
5020 | * spin_lock_irqsave(host lock) |
5021 | */ |
5022 | void ata_qc_issue(struct ata_queued_cmd *qc) |
5023 | { |
5024 | struct ata_port *ap = qc->ap; |
5025 | struct ata_link *link = qc->dev->link; |
5026 | u8 prot = qc->tf.protocol; |
5027 | |
5028 | /* Make sure only one non-NCQ command is outstanding. The |
5029 | * check is skipped for old EH because it reuses active qc to |
5030 | * request ATAPI sense. |
5031 | */ |
5032 | WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); |
5033 | |
5034 | if (ata_is_ncq(prot)) { |
5035 | WARN_ON_ONCE(link->sactive & (1 << qc->tag)); |
5036 | |
5037 | if (!link->sactive) |
5038 | ap->nr_active_links++; |
5039 | link->sactive |= 1 << qc->tag; |
5040 | } else { |
5041 | WARN_ON_ONCE(link->sactive); |
5042 | |
5043 | ap->nr_active_links++; |
5044 | link->active_tag = qc->tag; |
5045 | } |
5046 | |
5047 | qc->flags |= ATA_QCFLAG_ACTIVE; |
5048 | ap->qc_active |= 1 << qc->tag; |
5049 | |
5050 | /* |
5051 | * We guarantee to LLDs that they will have at least one |
5052 | * non-zero sg if the command is a data command. |
5053 | */ |
5054 | if (WARN_ON_ONCE(ata_is_data(prot) && |
5055 | (!qc->sg || !qc->n_elem || !qc->nbytes))) |
5056 | goto sys_err; |
5057 | |
5058 | if (ata_is_dma(prot) || (ata_is_pio(prot) && |
5059 | (ap->flags & ATA_FLAG_PIO_DMA))) |
5060 | if (ata_sg_setup(qc)) |
5061 | goto sys_err; |
5062 | |
5063 | /* if device is sleeping, schedule reset and abort the link */ |
5064 | if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { |
5065 | link->eh_info.action |= ATA_EH_RESET; |
5066 | ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); |
5067 | ata_link_abort(link); |
5068 | return; |
5069 | } |
5070 | |
5071 | ap->ops->qc_prep(qc); |
5072 | |
5073 | qc->err_mask |= ap->ops->qc_issue(qc); |
5074 | if (unlikely(qc->err_mask)) |
5075 | goto err; |
5076 | return; |
5077 | |
5078 | sys_err: |
5079 | qc->err_mask |= AC_ERR_SYSTEM; |
5080 | err: |
5081 | ata_qc_complete(qc); |
5082 | } |
5083 | |
5084 | /** |
5085 | * sata_scr_valid - test whether SCRs are accessible |
5086 | * @link: ATA link to test SCR accessibility for |
5087 | * |
5088 | * Test whether SCRs are accessible for @link. |
5089 | * |
5090 | * LOCKING: |
5091 | * None. |
5092 | * |
5093 | * RETURNS: |
5094 | * 1 if SCRs are accessible, 0 otherwise. |
5095 | */ |
5096 | int sata_scr_valid(struct ata_link *link) |
5097 | { |
5098 | struct ata_port *ap = link->ap; |
5099 | |
5100 | return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; |
5101 | } |
5102 | |
5103 | /** |
5104 | * sata_scr_read - read SCR register of the specified port |
5105 | * @link: ATA link to read SCR for |
5106 | * @reg: SCR to read |
5107 | * @val: Place to store read value |
5108 | * |
5109 | * Read SCR register @reg of @link into *@val. This function is |
5110 | * guaranteed to succeed if @link is ap->link, the cable type of |
5111 | * the port is SATA and the port implements ->scr_read. |
5112 | * |
5113 | * LOCKING: |
5114 | * None if @link is ap->link. Kernel thread context otherwise. |
5115 | * |
5116 | * RETURNS: |
5117 | * 0 on success, negative errno on failure. |
5118 | */ |
5119 | int sata_scr_read(struct ata_link *link, int reg, u32 *val) |
5120 | { |
5121 | if (ata_is_host_link(link)) { |
5122 | if (sata_scr_valid(link)) |
5123 | return link->ap->ops->scr_read(link, reg, val); |
5124 | return -EOPNOTSUPP; |
5125 | } |
5126 | |
5127 | return sata_pmp_scr_read(link, reg, val); |
5128 | } |
5129 | |
5130 | /** |
5131 | * sata_scr_write - write SCR register of the specified port |
5132 | * @link: ATA link to write SCR for |
5133 | * @reg: SCR to write |
5134 | * @val: value to write |
5135 | * |
5136 | * Write @val to SCR register @reg of @link. This function is |
5137 | * guaranteed to succeed if @link is ap->link, the cable type of |
5138 | * the port is SATA and the port implements ->scr_read. |
5139 | * |
5140 | * LOCKING: |
5141 | * None if @link is ap->link. Kernel thread context otherwise. |
5142 | * |
5143 | * RETURNS: |
5144 | * 0 on success, negative errno on failure. |
5145 | */ |
5146 | int sata_scr_write(struct ata_link *link, int reg, u32 val) |
5147 | { |
5148 | if (ata_is_host_link(link)) { |
5149 | if (sata_scr_valid(link)) |
5150 | return link->ap->ops->scr_write(link, reg, val); |
5151 | return -EOPNOTSUPP; |
5152 | } |
5153 | |
5154 | return sata_pmp_scr_write(link, reg, val); |
5155 | } |
5156 | |
5157 | /** |
5158 | * sata_scr_write_flush - write SCR register of the specified port and flush |
5159 | * @link: ATA link to write SCR for |
5160 | * @reg: SCR to write |
5161 | * @val: value to write |
5162 | * |
5163 | * This function is identical to sata_scr_write() except that this |
5164 | * function performs flush after writing to the register. |
5165 | * |
5166 | * LOCKING: |
5167 | * None if @link is ap->link. Kernel thread context otherwise. |
5168 | * |
5169 | * RETURNS: |
5170 | * 0 on success, negative errno on failure. |
5171 | */ |
5172 | int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) |
5173 | { |
5174 | if (ata_is_host_link(link)) { |
5175 | int rc; |
5176 | |
5177 | if (sata_scr_valid(link)) { |
5178 | rc = link->ap->ops->scr_write(link, reg, val); |
5179 | if (rc == 0) |
5180 | rc = link->ap->ops->scr_read(link, reg, &val); |
5181 | return rc; |
5182 | } |
5183 | return -EOPNOTSUPP; |
5184 | } |
5185 | |
5186 | return sata_pmp_scr_write(link, reg, val); |
5187 | } |
5188 | |
5189 | /** |
5190 | * ata_phys_link_online - test whether the given link is online |
5191 | * @link: ATA link to test |
5192 | * |
5193 | * Test whether @link is online. Note that this function returns |
5194 | * 0 if online status of @link cannot be obtained, so |
5195 | * ata_link_online(link) != !ata_link_offline(link). |
5196 | * |
5197 | * LOCKING: |
5198 | * None. |
5199 | * |
5200 | * RETURNS: |
5201 | * True if the port online status is available and online. |
5202 | */ |
5203 | bool ata_phys_link_online(struct ata_link *link) |
5204 | { |
5205 | u32 sstatus; |
5206 | |
5207 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && |
5208 | ata_sstatus_online(sstatus)) |
5209 | return true; |
5210 | return false; |
5211 | } |
5212 | |
5213 | /** |
5214 | * ata_phys_link_offline - test whether the given link is offline |
5215 | * @link: ATA link to test |
5216 | * |
5217 | * Test whether @link is offline. Note that this function |
5218 | * returns 0 if offline status of @link cannot be obtained, so |
5219 | * ata_link_online(link) != !ata_link_offline(link). |
5220 | * |
5221 | * LOCKING: |
5222 | * None. |
5223 | * |
5224 | * RETURNS: |
5225 | * True if the port offline status is available and offline. |
5226 | */ |
5227 | bool ata_phys_link_offline(struct ata_link *link) |
5228 | { |
5229 | u32 sstatus; |
5230 | |
5231 | if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && |
5232 | !ata_sstatus_online(sstatus)) |
5233 | return true; |
5234 | return false; |
5235 | } |
5236 | |
5237 | /** |
5238 | * ata_link_online - test whether the given link is online |
5239 | * @link: ATA link to test |
5240 | * |
5241 | * Test whether @link is online. This is identical to |
5242 | * ata_phys_link_online() when there's no slave link. When |
5243 | * there's a slave link, this function should only be called on |
5244 | * the master link and will return true if any of M/S links is |
5245 | * online. |
5246 | * |
5247 | * LOCKING: |
5248 | * None. |
5249 | * |
5250 | * RETURNS: |
5251 | * True if the port online status is available and online. |
5252 | */ |
5253 | bool ata_link_online(struct ata_link *link) |
5254 | { |
5255 | struct ata_link *slave = link->ap->slave_link; |
5256 | |
5257 | WARN_ON(link == slave); /* shouldn't be called on slave link */ |
5258 | |
5259 | return ata_phys_link_online(link) || |
5260 | (slave && ata_phys_link_online(slave)); |
5261 | } |
5262 | |
5263 | /** |
5264 | * ata_link_offline - test whether the given link is offline |
5265 | * @link: ATA link to test |
5266 | * |
5267 | * Test whether @link is offline. This is identical to |
5268 | * ata_phys_link_offline() when there's no slave link. When |
5269 | * there's a slave link, this function should only be called on |
5270 | * the master link and will return true if both M/S links are |
5271 | * offline. |
5272 | * |
5273 | * LOCKING: |
5274 | * None. |
5275 | * |
5276 | * RETURNS: |
5277 | * True if the port offline status is available and offline. |
5278 | */ |
5279 | bool ata_link_offline(struct ata_link *link) |
5280 | { |
5281 | struct ata_link *slave = link->ap->slave_link; |
5282 | |
5283 | WARN_ON(link == slave); /* shouldn't be called on slave link */ |
5284 | |
5285 | return ata_phys_link_offline(link) && |
5286 | (!slave || ata_phys_link_offline(slave)); |
5287 | } |
5288 | |
5289 | #ifdef CONFIG_PM |
/**
 *	ata_port_request_pm - request a PM operation to be performed by EH
 *	@ap: port to operate on
 *	@mesg: PM message to record in ap->pm_mesg
 *	@action: EH action to request on each link (e.g. ATA_EH_RESET)
 *	@ehi_flags: EH info flags to set on each link
 *	@async: if non-NULL, return immediately and let EH report the
 *		result through this pointer; if NULL, wait for EH and
 *		return the result
 *
 *	Schedules EH on @ap with the given action/flags and either
 *	waits for completion (synchronous) or returns immediately
 *	(asynchronous).
 *
 *	RETURNS:
 *	0 on async request or successful sync completion, -errno from
 *	EH otherwise.
 */
static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int *async)
{
	struct ata_link *link;
	unsigned long flags;
	int rc = 0;

	/* Previous resume operation might still be in
	 * progress.  Wait for PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		/* async callers can't sleep here; ask them to retry */
		if (async) {
			*async = -EAGAIN;
			return 0;
		}
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	/* request PM ops to EH */
	spin_lock_irqsave(ap->lock, flags);

	ap->pm_mesg = mesg;
	/* EH writes its result through pm_result; for sync callers
	 * that is the on-stack rc below, valid because we wait.
	 */
	if (async)
		ap->pm_result = async;
	else
		ap->pm_result = &rc;

	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	/* wait and check result */
	if (!async) {
		ata_port_wait_eh(ap);
		WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
	}

	return rc;
}
5337 | |
5338 | static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async) |
5339 | { |
5340 | /* |
5341 | * On some hardware, device fails to respond after spun down |
5342 | * for suspend. As the device won't be used before being |
5343 | * resumed, we don't need to touch the device. Ask EH to skip |
5344 | * the usual stuff and proceed directly to suspend. |
5345 | * |
5346 | * http://thread.gmane.org/gmane.linux.ide/46764 |
5347 | */ |
5348 | unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY | |
5349 | ATA_EHI_NO_RECOVERY; |
5350 | return ata_port_request_pm(ap, mesg, 0, ehi_flags, async); |
5351 | } |
5352 | |
5353 | static int ata_port_suspend_common(struct device *dev, pm_message_t mesg) |
5354 | { |
5355 | struct ata_port *ap = to_ata_port(dev); |
5356 | |
5357 | return __ata_port_suspend_common(ap, mesg, NULL); |
5358 | } |
5359 | |
5360 | static int ata_port_suspend(struct device *dev) |
5361 | { |
5362 | if (pm_runtime_suspended(dev)) |
5363 | return 0; |
5364 | |
5365 | return ata_port_suspend_common(dev, PMSG_SUSPEND); |
5366 | } |
5367 | |
5368 | static int ata_port_do_freeze(struct device *dev) |
5369 | { |
5370 | if (pm_runtime_suspended(dev)) |
5371 | return 0; |
5372 | |
5373 | return ata_port_suspend_common(dev, PMSG_FREEZE); |
5374 | } |
5375 | |
/* Hibernation poweroff: always suspend, regardless of runtime PM state. */
static int ata_port_poweroff(struct device *dev)
{
	return ata_port_suspend_common(dev, PMSG_HIBERNATE);
}
5380 | |
5381 | static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg, |
5382 | int *async) |
5383 | { |
5384 | int rc; |
5385 | |
5386 | rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET, |
5387 | ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async); |
5388 | return rc; |
5389 | } |
5390 | |
5391 | static int ata_port_resume_common(struct device *dev, pm_message_t mesg) |
5392 | { |
5393 | struct ata_port *ap = to_ata_port(dev); |
5394 | |
5395 | return __ata_port_resume_common(ap, mesg, NULL); |
5396 | } |
5397 | |
5398 | static int ata_port_resume(struct device *dev) |
5399 | { |
5400 | int rc; |
5401 | |
5402 | rc = ata_port_resume_common(dev, PMSG_RESUME); |
5403 | if (!rc) { |
5404 | pm_runtime_disable(dev); |
5405 | pm_runtime_set_active(dev); |
5406 | pm_runtime_enable(dev); |
5407 | } |
5408 | |
5409 | return rc; |
5410 | } |
5411 | |
5412 | /* |
5413 | * For ODDs, the upper layer will poll for media change every few seconds, |
5414 | * which will make it enter and leave suspend state every few seconds. And |
5415 | * as each suspend will cause a hard/soft reset, the gain of runtime suspend |
5416 | * is very little and the ODD may malfunction after constantly being reset. |
5417 | * So the idle callback here will not proceed to suspend if a non-ZPODD capable |
5418 | * ODD is attached to the port. |
5419 | */ |
5420 | static int ata_port_runtime_idle(struct device *dev) |
5421 | { |
5422 | struct ata_port *ap = to_ata_port(dev); |
5423 | struct ata_link *link; |
5424 | struct ata_device *adev; |
5425 | |
5426 | ata_for_each_link(link, ap, HOST_FIRST) { |
5427 | ata_for_each_dev(adev, link, ENABLED) |
5428 | if (adev->class == ATA_DEV_ATAPI && |
5429 | !zpodd_dev_enabled(adev)) |
5430 | return -EBUSY; |
5431 | } |
5432 | |
5433 | return pm_runtime_suspend(dev); |
5434 | } |
5435 | |
/* Runtime PM: suspend the port with an auto-suspend message. */
static int ata_port_runtime_suspend(struct device *dev)
{
	return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND);
}
5440 | |
/* Runtime PM: resume the port with an auto-resume message. */
static int ata_port_runtime_resume(struct device *dev)
{
	return ata_port_resume_common(dev, PMSG_AUTO_RESUME);
}
5445 | |
/* PM callbacks for ata_port devices; thaw/restore reuse the resume path. */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_suspend,
	.resume = ata_port_resume,
	.freeze = ata_port_do_freeze,
	.thaw = ata_port_resume,
	.poweroff = ata_port_poweroff,
	.restore = ata_port_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
5458 | |
5459 | /* sas ports don't participate in pm runtime management of ata_ports, |
5460 | * and need to resume ata devices at the domain level, not the per-port |
5461 | * level. sas suspend/resume is async to allow parallel port recovery |
5462 | * since sas has multiple ata_port instances per Scsi_Host. |
5463 | */ |
/* Async suspend entry point for SAS-attached ata_ports (see note above). */
int ata_sas_port_async_suspend(struct ata_port *ap, int *async)
{
	return __ata_port_suspend_common(ap, PMSG_SUSPEND, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend);
5469 | |
/* Async resume entry point for SAS-attached ata_ports. */
int ata_sas_port_async_resume(struct ata_port *ap, int *async)
{
	return __ata_port_resume_common(ap, PMSG_RESUME, async);
}
EXPORT_SYMBOL_GPL(ata_sas_port_async_resume);
5475 | |
5476 | |
5477 | /** |
5478 | * ata_host_suspend - suspend host |
5479 | * @host: host to suspend |
5480 | * @mesg: PM message |
5481 | * |
5482 | * Suspend @host. Actual operation is performed by port suspend. |
5483 | */ |
5484 | int ata_host_suspend(struct ata_host *host, pm_message_t mesg) |
5485 | { |
5486 | host->dev->power.power_state = mesg; |
5487 | return 0; |
5488 | } |
5489 | |
5490 | /** |
5491 | * ata_host_resume - resume host |
5492 | * @host: host to resume |
5493 | * |
5494 | * Resume @host. Actual operation is performed by port resume. |
5495 | */ |
5496 | void ata_host_resume(struct ata_host *host) |
5497 | { |
5498 | host->dev->power.power_state = PMSG_ON; |
5499 | } |
5500 | #endif |
5501 | |
/* Device type for ata_port devices; hooks the PM callbacks above. */
struct device_type ata_port_type = {
	.name = "ata_port",
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
5508 | |
5509 | /** |
5510 | * ata_dev_init - Initialize an ata_device structure |
5511 | * @dev: Device structure to initialize |
5512 | * |
5513 | * Initialize @dev in preparation for probing. |
5514 | * |
5515 | * LOCKING: |
5516 | * Inherited from caller. |
5517 | */ |
5518 | void ata_dev_init(struct ata_device *dev) |
5519 | { |
5520 | struct ata_link *link = ata_dev_phys_link(dev); |
5521 | struct ata_port *ap = link->ap; |
5522 | unsigned long flags; |
5523 | |
5524 | /* SATA spd limit is bound to the attached device, reset together */ |
5525 | link->sata_spd_limit = link->hw_sata_spd_limit; |
5526 | link->sata_spd = 0; |
5527 | |
5528 | /* High bits of dev->flags are used to record warm plug |
5529 | * requests which occur asynchronously. Synchronize using |
5530 | * host lock. |
5531 | */ |
5532 | spin_lock_irqsave(ap->lock, flags); |
5533 | dev->flags &= ~ATA_DFLAG_INIT_MASK; |
5534 | dev->horkage = 0; |
5535 | spin_unlock_irqrestore(ap->lock, flags); |
5536 | |
5537 | memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, |
5538 | ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); |
5539 | dev->pio_mask = UINT_MAX; |
5540 | dev->mwdma_mask = UINT_MAX; |
5541 | dev->udma_mask = UINT_MAX; |
5542 | } |
5543 | |
5544 | /** |
5545 | * ata_link_init - Initialize an ata_link structure |
5546 | * @ap: ATA port link is attached to |
5547 | * @link: Link structure to initialize |
5548 | * @pmp: Port multiplier port number |
5549 | * |
5550 | * Initialize @link. |
5551 | * |
5552 | * LOCKING: |
5553 | * Kernel thread context (may sleep) |
5554 | */ |
5555 | void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) |
5556 | { |
5557 | int i; |
5558 | |
5559 | /* clear everything except for devices */ |
5560 | memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, |
5561 | ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); |
5562 | |
5563 | link->ap = ap; |
5564 | link->pmp = pmp; |
5565 | link->active_tag = ATA_TAG_POISON; |
5566 | link->hw_sata_spd_limit = UINT_MAX; |
5567 | |
5568 | /* can't use iterator, ap isn't initialized yet */ |
5569 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
5570 | struct ata_device *dev = &link->device[i]; |
5571 | |
5572 | dev->link = link; |
5573 | dev->devno = dev - link->device; |
5574 | #ifdef CONFIG_ATA_ACPI |
5575 | dev->gtf_filter = ata_acpi_gtf_filter; |
5576 | #endif |
5577 | ata_dev_init(dev); |
5578 | } |
5579 | } |
5580 | |
5581 | /** |
5582 | * sata_link_init_spd - Initialize link->sata_spd_limit |
5583 | * @link: Link to configure sata_spd_limit for |
5584 | * |
5585 | * Initialize @link->[hw_]sata_spd_limit to the currently |
5586 | * configured value. |
5587 | * |
5588 | * LOCKING: |
5589 | * Kernel thread context (may sleep). |
5590 | * |
5591 | * RETURNS: |
5592 | * 0 on success, -errno on failure. |
5593 | */ |
5594 | int sata_link_init_spd(struct ata_link *link) |
5595 | { |
5596 | u8 spd; |
5597 | int rc; |
5598 | |
5599 | rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); |
5600 | if (rc) |
5601 | return rc; |
5602 | |
5603 | spd = (link->saved_scontrol >> 4) & 0xf; |
5604 | if (spd) |
5605 | link->hw_sata_spd_limit &= (1 << spd) - 1; |
5606 | |
5607 | ata_force_link_limits(link); |
5608 | |
5609 | link->sata_spd_limit = link->hw_sata_spd_limit; |
5610 | |
5611 | return 0; |
5612 | } |
5613 | |
5614 | /** |
5615 | * ata_port_alloc - allocate and initialize basic ATA port resources |
5616 | * @host: ATA host this allocated port belongs to |
5617 | * |
5618 | * Allocate and initialize basic ATA port resources. |
5619 | * |
5620 | * RETURNS: |
5621 | * Allocate ATA port on success, NULL on failure. |
5622 | * |
5623 | * LOCKING: |
5624 | * Inherited from calling layer (may sleep). |
5625 | */ |
5626 | struct ata_port *ata_port_alloc(struct ata_host *host) |
5627 | { |
5628 | struct ata_port *ap; |
5629 | |
5630 | DPRINTK("ENTER\n"); |
5631 | |
5632 | ap = kzalloc(sizeof(*ap), GFP_KERNEL); |
5633 | if (!ap) |
5634 | return NULL; |
5635 | |
5636 | ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; |
5637 | ap->lock = &host->lock; |
5638 | ap->print_id = -1; |
5639 | ap->host = host; |
5640 | ap->dev = host->dev; |
5641 | |
5642 | #if defined(ATA_VERBOSE_DEBUG) |
5643 | /* turn on all debugging levels */ |
5644 | ap->msg_enable = 0x00FF; |
5645 | #elif defined(ATA_DEBUG) |
5646 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; |
5647 | #else |
5648 | ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; |
5649 | #endif |
5650 | |
5651 | mutex_init(&ap->scsi_scan_mutex); |
5652 | INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); |
5653 | INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); |
5654 | INIT_LIST_HEAD(&ap->eh_done_q); |
5655 | init_waitqueue_head(&ap->eh_wait_q); |
5656 | init_completion(&ap->park_req_pending); |
5657 | init_timer_deferrable(&ap->fastdrain_timer); |
5658 | ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; |
5659 | ap->fastdrain_timer.data = (unsigned long)ap; |
5660 | |
5661 | ap->cbl = ATA_CBL_NONE; |
5662 | |
5663 | ata_link_init(ap, &ap->link, 0); |
5664 | |
5665 | #ifdef ATA_IRQ_TRAP |
5666 | ap->stats.unhandled_irq = 1; |
5667 | ap->stats.idle_irq = 1; |
5668 | #endif |
5669 | ata_sff_port_init(ap); |
5670 | |
5671 | return ap; |
5672 | } |
5673 | |
/* devres release callback for ata_host: free every port allocated by
 * ata_port_alloc() along with its PMP/slave links and SCSI host ref.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap->slave_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
5696 | |
5697 | /** |
5698 | * ata_host_alloc - allocate and init basic ATA host resources |
5699 | * @dev: generic device this host is associated with |
5700 | * @max_ports: maximum number of ATA ports associated with this host |
5701 | * |
5702 | * Allocate and initialize basic ATA host resources. LLD calls |
5703 | * this function to allocate a host, initializes it fully and |
5704 | * attaches it using ata_host_register(). |
5705 | * |
5706 | * @max_ports ports are allocated and host->n_ports is |
5707 | * initialized to @max_ports. The caller is allowed to decrease |
5708 | * host->n_ports before calling ata_host_register(). The unused |
5709 | * ports will be automatically freed on registration. |
5710 | * |
5711 | * RETURNS: |
5712 | * Allocate ATA host on success, NULL on failure. |
5713 | * |
5714 | * LOCKING: |
5715 | * Inherited from calling layer (may sleep). |
5716 | */ |
5717 | struct ata_host *ata_host_alloc(struct device *dev, int max_ports) |
5718 | { |
5719 | struct ata_host *host; |
5720 | size_t sz; |
5721 | int i; |
5722 | |
5723 | DPRINTK("ENTER\n"); |
5724 | |
5725 | if (!devres_open_group(dev, NULL, GFP_KERNEL)) |
5726 | return NULL; |
5727 | |
5728 | /* alloc a container for our list of ATA ports (buses) */ |
5729 | sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); |
5730 | /* alloc a container for our list of ATA ports (buses) */ |
5731 | host = devres_alloc(ata_host_release, sz, GFP_KERNEL); |
5732 | if (!host) |
5733 | goto err_out; |
5734 | |
5735 | devres_add(dev, host); |
5736 | dev_set_drvdata(dev, host); |
5737 | |
5738 | spin_lock_init(&host->lock); |
5739 | mutex_init(&host->eh_mutex); |
5740 | host->dev = dev; |
5741 | host->n_ports = max_ports; |
5742 | |
5743 | /* allocate ports bound to this host */ |
5744 | for (i = 0; i < max_ports; i++) { |
5745 | struct ata_port *ap; |
5746 | |
5747 | ap = ata_port_alloc(host); |
5748 | if (!ap) |
5749 | goto err_out; |
5750 | |
5751 | ap->port_no = i; |
5752 | host->ports[i] = ap; |
5753 | } |
5754 | |
5755 | devres_remove_group(dev, NULL); |
5756 | return host; |
5757 | |
5758 | err_out: |
5759 | devres_release_group(dev, NULL); |
5760 | return NULL; |
5761 | } |
5762 | |
5763 | /** |
5764 | * ata_host_alloc_pinfo - alloc host and init with port_info array |
5765 | * @dev: generic device this host is associated with |
5766 | * @ppi: array of ATA port_info to initialize host with |
5767 | * @n_ports: number of ATA ports attached to this host |
5768 | * |
5769 | * Allocate ATA host and initialize with info from @ppi. If NULL |
5770 | * terminated, @ppi may contain fewer entries than @n_ports. The |
5771 | * last entry will be used for the remaining ports. |
5772 | * |
5773 | * RETURNS: |
 *	Allocated ATA host on success, NULL on failure.
5775 | * |
5776 | * LOCKING: |
5777 | * Inherited from calling layer (may sleep). |
5778 | */ |
5779 | struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
5780 | const struct ata_port_info * const * ppi, |
5781 | int n_ports) |
5782 | { |
5783 | const struct ata_port_info *pi; |
5784 | struct ata_host *host; |
5785 | int i, j; |
5786 | |
5787 | host = ata_host_alloc(dev, n_ports); |
5788 | if (!host) |
5789 | return NULL; |
5790 | |
5791 | for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { |
5792 | struct ata_port *ap = host->ports[i]; |
5793 | |
5794 | if (ppi[j]) |
5795 | pi = ppi[j++]; |
5796 | |
5797 | ap->pio_mask = pi->pio_mask; |
5798 | ap->mwdma_mask = pi->mwdma_mask; |
5799 | ap->udma_mask = pi->udma_mask; |
5800 | ap->flags |= pi->flags; |
5801 | ap->link.flags |= pi->link_flags; |
5802 | ap->ops = pi->port_ops; |
5803 | |
5804 | if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) |
5805 | host->ops = pi->port_ops; |
5806 | } |
5807 | |
5808 | return host; |
5809 | } |
5810 | |
5811 | /** |
5812 | * ata_slave_link_init - initialize slave link |
5813 | * @ap: port to initialize slave link for |
5814 | * |
5815 | * Create and initialize slave link for @ap. This enables slave |
5816 | * link handling on the port. |
5817 | * |
5818 | * In libata, a port contains links and a link contains devices. |
5819 | * There is single host link but if a PMP is attached to it, |
5820 | * there can be multiple fan-out links. On SATA, there's usually |
5821 | * a single device connected to a link but PATA and SATA |
5822 | * controllers emulating TF based interface can have two - master |
5823 | * and slave. |
5824 | * |
5825 | * However, there are a few controllers which don't fit into this |
5826 | * abstraction too well - SATA controllers which emulate TF |
5827 | * interface with both master and slave devices but also have |
5828 | * separate SCR register sets for each device. These controllers |
5829 | * need separate links for physical link handling |
5830 | * (e.g. onlineness, link speed) but should be treated like a |
5831 | * traditional M/S controller for everything else (e.g. command |
5832 | * issue, softreset). |
5833 | * |
5834 | * slave_link is libata's way of handling this class of |
5835 | * controllers without impacting core layer too much. For |
5836 | * anything other than physical link handling, the default host |
5837 | * link is used for both master and slave. For physical link |
5838 | * handling, separate @ap->slave_link is used. All dirty details |
5839 | * are implemented inside libata core layer. From LLD's POV, the |
5840 | * only difference is that prereset, hardreset and postreset are |
5841 | * called once more for the slave link, so the reset sequence |
5842 | * looks like the following. |
5843 | * |
5844 | * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> |
5845 | * softreset(M) -> postreset(M) -> postreset(S) |
5846 | * |
5847 | * Note that softreset is called only for the master. Softreset |
5848 | * resets both M/S by definition, so SRST on master should handle |
5849 | * both (the standard method will work just fine). |
5850 | * |
5851 | * LOCKING: |
5852 | * Should be called before host is registered. |
5853 | * |
5854 | * RETURNS: |
5855 | * 0 on success, -errno on failure. |
5856 | */ |
5857 | int ata_slave_link_init(struct ata_port *ap) |
5858 | { |
5859 | struct ata_link *link; |
5860 | |
5861 | WARN_ON(ap->slave_link); |
5862 | WARN_ON(ap->flags & ATA_FLAG_PMP); |
5863 | |
5864 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
5865 | if (!link) |
5866 | return -ENOMEM; |
5867 | |
5868 | ata_link_init(ap, link, 1); |
5869 | ap->slave_link = link; |
5870 | return 0; |
5871 | } |
5872 | |
5873 | static void ata_host_stop(struct device *gendev, void *res) |
5874 | { |
5875 | struct ata_host *host = dev_get_drvdata(gendev); |
5876 | int i; |
5877 | |
5878 | WARN_ON(!(host->flags & ATA_HOST_STARTED)); |
5879 | |
5880 | for (i = 0; i < host->n_ports; i++) { |
5881 | struct ata_port *ap = host->ports[i]; |
5882 | |
5883 | if (ap->ops->port_stop) |
5884 | ap->ops->port_stop(ap); |
5885 | } |
5886 | |
5887 | if (host->ops->host_stop) |
5888 | host->ops->host_stop(host); |
5889 | } |
5890 | |
5891 | /** |
5892 | * ata_finalize_port_ops - finalize ata_port_operations |
5893 | * @ops: ata_port_operations to finalize |
5894 | * |
5895 | * An ata_port_operations can inherit from another ops and that |
5896 | * ops can again inherit from another. This can go on as many |
5897 | * times as necessary as long as there is no loop in the |
5898 | * inheritance chain. |
5899 | * |
5900 | * Ops tables are finalized when the host is started. NULL or |
 *	unspecified entries are inherited from the closest ancestor
5902 | * which has the method and the entry is populated with it. |
5903 | * After finalization, the ops table directly points to all the |
5904 | * methods and ->inherits is no longer necessary and cleared. |
5905 | * |
5906 | * Using ATA_OP_NULL, inheriting ops can force a method to NULL. |
5907 | * |
5908 | * LOCKING: |
5909 | * None. |
5910 | */ |
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	/* serializes concurrent finalization of shared ops tables */
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the ops struct as a flat array of method pointers:
	 * every slot from the start of the struct up to (but not
	 * including) ->inherits is a function pointer
	 */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	/* already finalized (or nothing to do) if ->inherits is NULL */
	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	/* walk up the inheritance chain; an unset (NULL) slot is filled
	 * from the closest ancestor which has the method
	 */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL entries are ERR_PTR values used to force a
	 * method back to NULL despite inheritance
	 */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	/* clearing ->inherits marks the table as finalized */
	ops->inherits = NULL;

	spin_unlock(&lock);
}
5940 | |
5941 | /** |
5942 | * ata_host_start - start and freeze ports of an ATA host |
5943 | * @host: ATA host to start ports for |
5944 | * |
5945 | * Start and then freeze ports of @host. Started status is |
5946 | * recorded in host->flags, so this function can be called |
5947 | * multiple times. Ports are guaranteed to get started only |
 *	once. If host->ops isn't initialized yet, it's set to the
5949 | * first non-dummy port ops. |
5950 | * |
5951 | * LOCKING: |
5952 | * Inherited from calling layer (may sleep). |
5953 | * |
5954 | * RETURNS: |
5955 | * 0 if all ports are started successfully, -errno otherwise. |
5956 | */ |
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent: starting an already started host is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* default host ops to the first non-dummy port's ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* only register the devres stop hook if any stop method exists */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				/* -ENODEV means the port is simply absent;
				 * don't make noise about it
				 */
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

err_out:
	/* unwind: stop the ports already started, in reverse order */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6020 | |
6021 | /** |
6022 | * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas) |
6023 | * @host: host to initialize |
6024 | * @dev: device host is attached to |
6025 | * @ops: port_ops |
6026 | * |
6027 | */ |
6028 | void ata_host_init(struct ata_host *host, struct device *dev, |
6029 | struct ata_port_operations *ops) |
6030 | { |
6031 | spin_lock_init(&host->lock); |
6032 | mutex_init(&host->eh_mutex); |
6033 | host->dev = dev; |
6034 | host->ops = ops; |
6035 | } |
6036 | |
6037 | void __ata_port_probe(struct ata_port *ap) |
6038 | { |
6039 | struct ata_eh_info *ehi = &ap->link.eh_info; |
6040 | unsigned long flags; |
6041 | |
6042 | /* kick EH for boot probing */ |
6043 | spin_lock_irqsave(ap->lock, flags); |
6044 | |
6045 | ehi->probe_mask |= ATA_ALL_DEVICES; |
6046 | ehi->action |= ATA_EH_RESET; |
6047 | ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; |
6048 | |
6049 | ap->pflags &= ~ATA_PFLAG_INITIALIZING; |
6050 | ap->pflags |= ATA_PFLAG_LOADING; |
6051 | ata_port_schedule_eh(ap); |
6052 | |
6053 | spin_unlock_irqrestore(ap->lock, flags); |
6054 | } |
6055 | |
6056 | int ata_port_probe(struct ata_port *ap) |
6057 | { |
6058 | int rc = 0; |
6059 | |
6060 | if (ap->ops->error_handler) { |
6061 | __ata_port_probe(ap); |
6062 | ata_port_wait_eh(ap); |
6063 | } else { |
6064 | DPRINTK("ata%u: bus probe begin\n", ap->print_id); |
6065 | rc = ata_bus_probe(ap); |
6066 | DPRINTK("ata%u: bus probe end\n", ap->print_id); |
6067 | } |
6068 | return rc; |
6069 | } |
6070 | |
6071 | |
6072 | static void async_port_probe(void *data, async_cookie_t cookie) |
6073 | { |
6074 | struct ata_port *ap = data; |
6075 | |
6076 | /* |
6077 | * If we're not allowed to scan this host in parallel, |
6078 | * we need to wait until all previous scans have completed |
6079 | * before going further. |
6080 | * Jeff Garzik says this is only within a controller, so we |
6081 | * don't need to wait for port 0, only for later ports. |
6082 | */ |
6083 | if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) |
6084 | async_synchronize_cookie(cookie); |
6085 | |
6086 | (void)ata_port_probe(ap); |
6087 | |
6088 | /* in order to keep device order, we need to synchronize at this point */ |
6089 | async_synchronize_cookie(cookie); |
6090 | |
6091 | ata_scsi_scan_host(ap, 1); |
6092 | } |
6093 | |
6094 | /** |
6095 | * ata_host_register - register initialized ATA host |
6096 | * @host: ATA host to register |
6097 | * @sht: template for SCSI host |
6098 | * |
6099 | * Register initialized ATA host. @host is allocated using |
6100 | * ata_host_alloc() and fully initialized by LLD. This function |
6101 | * starts ports, registers @host with ATA and SCSI layers and |
6102 | * probe registered devices. |
6103 | * |
6104 | * LOCKING: |
6105 | * Inherited from calling layer (may sleep). |
6106 | * |
6107 | * RETURNS: |
6108 | * 0 on success, -errno otherwise. |
6109 | */ |
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  ports[] is NULL terminated, so this walks
	 * the tail beyond n_ports.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);


	/* Create associated sysfs transport objects  */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev,host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

err_tadd:
	/* unwind: remove the transport objects added so far */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;

}
6188 | |
6189 | /** |
6190 | * ata_host_activate - start host, request IRQ and register it |
6191 | * @host: target ATA host |
6192 | * @irq: IRQ to request |
6193 | * @irq_handler: irq_handler used when requesting IRQ |
6194 | * @irq_flags: irq_flags used when requesting IRQ |
6195 | * @sht: scsi_host_template to use when registering the host |
6196 | * |
6197 | * After allocating an ATA host and initializing it, most libata |
6198 | * LLDs perform three steps to activate the host - start host, |
 *	request IRQ and register it. This helper takes necessary
6200 | * arguments and performs the three steps in one go. |
6201 | * |
6202 | * An invalid IRQ skips the IRQ registration and expects the host to |
6203 | * have set polling mode on the port. In this case, @irq_handler |
6204 | * should be NULL. |
6205 | * |
6206 | * LOCKING: |
6207 | * Inherited from calling layer (may sleep). |
6208 | * |
6209 | * RETURNS: |
6210 | * 0 on success, -errno otherwise. |
6211 | */ |
6212 | int ata_host_activate(struct ata_host *host, int irq, |
6213 | irq_handler_t irq_handler, unsigned long irq_flags, |
6214 | struct scsi_host_template *sht) |
6215 | { |
6216 | int i, rc; |
6217 | |
6218 | rc = ata_host_start(host); |
6219 | if (rc) |
6220 | return rc; |
6221 | |
6222 | /* Special case for polling mode */ |
6223 | if (!irq) { |
6224 | WARN_ON(irq_handler); |
6225 | return ata_host_register(host, sht); |
6226 | } |
6227 | |
6228 | rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, |
6229 | dev_driver_string(host->dev), host); |
6230 | if (rc) |
6231 | return rc; |
6232 | |
6233 | for (i = 0; i < host->n_ports; i++) |
6234 | ata_port_desc(host->ports[i], "irq %d", irq); |
6235 | |
6236 | rc = ata_host_register(host, sht); |
6237 | /* if failed, just free the IRQ and leave ports alone */ |
6238 | if (rc) |
6239 | devm_free_irq(host->dev, irq, host); |
6240 | |
6241 | return rc; |
6242 | } |
6243 | |
6244 | /** |
 *	ata_port_detach - Detach ATA port in preparation of device removal
6246 | * @ap: ATA port to be detached |
6247 | * |
6248 | * Detach all ATA devices and the associated SCSI devices of @ap; |
6249 | * then, remove the associated SCSI host. @ap is guaranteed to |
6250 | * be quiescent on return from this function. |
6251 | * |
6252 | * LOCKING: |
6253 | * Kernel thread context (may sleep). |
6254 | */ |
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	/* old-style ports without EH have nothing to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	/* no more hotplug processing after EH has exited */
	cancel_delayed_work_sync(&ap->hotplug_task);

skip_eh:
	/* tear down sysfs transport objects: PMP links, then the port */
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6287 | |
6288 | /** |
6289 | * ata_host_detach - Detach all ports of an ATA host |
6290 | * @host: Host to detach |
6291 | * |
6292 | * Detach all ports of @host. |
6293 | * |
6294 | * LOCKING: |
6295 | * Kernel thread context (may sleep). |
6296 | */ |
6297 | void ata_host_detach(struct ata_host *host) |
6298 | { |
6299 | int i; |
6300 | |
6301 | for (i = 0; i < host->n_ports; i++) |
6302 | ata_port_detach(host->ports[i]); |
6303 | |
6304 | /* the host is dead now, dissociate ACPI */ |
6305 | ata_acpi_dissociate(host); |
6306 | } |
6307 | |
6308 | #ifdef CONFIG_PCI |
6309 | |
6310 | /** |
6311 | * ata_pci_remove_one - PCI layer callback for device removal |
6312 | * @pdev: PCI device that was removed |
6313 | * |
6314 | * PCI layer indicates to libata via this hook that hot-unplug or |
6315 | * module unload event has occurred. Detach all ports. Resource |
6316 | * release is handled via devres. |
6317 | * |
6318 | * LOCKING: |
6319 | * Inherited from PCI layer (may sleep). |
6320 | */ |
void ata_pci_remove_one(struct pci_dev *pdev)
{
	/* detach all ports; resource release is handled via devres */
	ata_host_detach(pci_get_drvdata(pdev));
}
6327 | |
6328 | /* move to PCI subsystem */ |
6329 | int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) |
6330 | { |
6331 | unsigned long tmp = 0; |
6332 | |
6333 | switch (bits->width) { |
6334 | case 1: { |
6335 | u8 tmp8 = 0; |
6336 | pci_read_config_byte(pdev, bits->reg, &tmp8); |
6337 | tmp = tmp8; |
6338 | break; |
6339 | } |
6340 | case 2: { |
6341 | u16 tmp16 = 0; |
6342 | pci_read_config_word(pdev, bits->reg, &tmp16); |
6343 | tmp = tmp16; |
6344 | break; |
6345 | } |
6346 | case 4: { |
6347 | u32 tmp32 = 0; |
6348 | pci_read_config_dword(pdev, bits->reg, &tmp32); |
6349 | tmp = tmp32; |
6350 | break; |
6351 | } |
6352 | |
6353 | default: |
6354 | return -EINVAL; |
6355 | } |
6356 | |
6357 | tmp &= bits->mask; |
6358 | |
6359 | return (tmp == bits->val) ? 1 : 0; |
6360 | } |
6361 | |
6362 | #ifdef CONFIG_PM |
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	/* save config space before the device is disabled */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only enter D3hot for real sleep events; other events
	 * (e.g. freeze) keep the device powered
	 */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
6371 | |
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	/* power up, then restore the config space saved at suspend */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	/* re-enable bus mastering for DMA */
	pci_set_master(pdev);
	return 0;
}
6389 | |
6390 | int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) |
6391 | { |
6392 | struct ata_host *host = pci_get_drvdata(pdev); |
6393 | int rc = 0; |
6394 | |
6395 | rc = ata_host_suspend(host, mesg); |
6396 | if (rc) |
6397 | return rc; |
6398 | |
6399 | ata_pci_device_do_suspend(pdev, mesg); |
6400 | |
6401 | return 0; |
6402 | } |
6403 | |
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	/* bring the PCI device back up before touching the host */
	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	ata_host_resume(host);
	return 0;
}
6414 | #endif /* CONFIG_PM */ |
6415 | |
6416 | #endif /* CONFIG_PCI */ |
6417 | |
6418 | /** |
6419 | * ata_platform_remove_one - Platform layer callback for device removal |
6420 | * @pdev: Platform device that was removed |
6421 | * |
6422 | * Platform layer indicates to libata via this hook that hot-unplug or |
6423 | * module unload event has occurred. Detach all ports. Resource |
6424 | * release is handled via devres. |
6425 | * |
6426 | * LOCKING: |
6427 | * Inherited from platform layer (may sleep). |
6428 | */ |
int ata_platform_remove_one(struct platform_device *pdev)
{
	/* detach all ports; resource release is handled via devres */
	ata_host_detach(platform_get_drvdata(pdev));

	return 0;
}
6437 | |
6438 | static int __init ata_parse_force_one(char **cur, |
6439 | struct ata_force_ent *force_ent, |
6440 | const char **reason) |
6441 | { |
6442 | /* FIXME: Currently, there's no way to tag init const data and |
6443 | * using __initdata causes build failure on some versions of |
6444 | * gcc. Once __initdataconst is implemented, add const to the |
6445 | * following structure. |
6446 | */ |
6447 | static struct ata_force_param force_tbl[] __initdata = { |
6448 | { "40c", .cbl = ATA_CBL_PATA40 }, |
6449 | { "80c", .cbl = ATA_CBL_PATA80 }, |
6450 | { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, |
6451 | { "unk", .cbl = ATA_CBL_PATA_UNK }, |
6452 | { "ign", .cbl = ATA_CBL_PATA_IGN }, |
6453 | { "sata", .cbl = ATA_CBL_SATA }, |
6454 | { "1.5Gbps", .spd_limit = 1 }, |
6455 | { "3.0Gbps", .spd_limit = 2 }, |
6456 | { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, |
6457 | { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, |
6458 | { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, |
6459 | { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, |
6460 | { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, |
6461 | { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, |
6462 | { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, |
6463 | { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, |
6464 | { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, |
6465 | { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, |
6466 | { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, |
6467 | { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, |
6468 | { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, |
6469 | { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, |
6470 | { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, |
6471 | { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, |
6472 | { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, |
6473 | { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, |
6474 | { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, |
6475 | { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, |
6476 | { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, |
6477 | { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, |
6478 | { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, |
6479 | { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, |
6480 | { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, |
6481 | { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, |
6482 | { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, |
6483 | { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, |
6484 | { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, |
6485 | { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, |
6486 | { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, |
6487 | { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, |
6488 | { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, |
6489 | { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, |
6490 | { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, |
6491 | { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, |
6492 | { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, |
6493 | { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, |
6494 | { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, |
6495 | { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, |
6496 | { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, |
6497 | }; |
6498 | char *start = *cur, *p = *cur; |
6499 | char *id, *val, *endp; |
6500 | const struct ata_force_param *match_fp = NULL; |
6501 | int nr_matches = 0, i; |
6502 | |
6503 | /* find where this param ends and update *cur */ |
6504 | while (*p != '\0' && *p != ',') |
6505 | p++; |
6506 | |
6507 | if (*p == '\0') |
6508 | *cur = p; |
6509 | else |
6510 | *cur = p + 1; |
6511 | |
6512 | *p = '\0'; |
6513 | |
6514 | /* parse */ |
6515 | p = strchr(start, ':'); |
6516 | if (!p) { |
6517 | val = strstrip(start); |
6518 | goto parse_val; |
6519 | } |
6520 | *p = '\0'; |
6521 | |
6522 | id = strstrip(start); |
6523 | val = strstrip(p + 1); |
6524 | |
6525 | /* parse id */ |
6526 | p = strchr(id, '.'); |
6527 | if (p) { |
6528 | *p++ = '\0'; |
6529 | force_ent->device = simple_strtoul(p, &endp, 10); |
6530 | if (p == endp || *endp != '\0') { |
6531 | *reason = "invalid device"; |
6532 | return -EINVAL; |
6533 | } |
6534 | } |
6535 | |
6536 | force_ent->port = simple_strtoul(id, &endp, 10); |
6537 | if (p == endp || *endp != '\0') { |
6538 | *reason = "invalid port/link"; |
6539 | return -EINVAL; |
6540 | } |
6541 | |
6542 | parse_val: |
6543 | /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ |
6544 | for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { |
6545 | const struct ata_force_param *fp = &force_tbl[i]; |
6546 | |
6547 | if (strncasecmp(val, fp->name, strlen(val))) |
6548 | continue; |
6549 | |
6550 | nr_matches++; |
6551 | match_fp = fp; |
6552 | |
6553 | if (strcasecmp(val, fp->name) == 0) { |
6554 | nr_matches = 1; |
6555 | break; |
6556 | } |
6557 | } |
6558 | |
6559 | if (!nr_matches) { |
6560 | *reason = "unknown value"; |
6561 | return -EINVAL; |
6562 | } |
6563 | if (nr_matches > 1) { |
6564 | *reason = "ambigious value"; |
6565 | return -EINVAL; |
6566 | } |
6567 | |
6568 | force_ent->param = *match_fp; |
6569 | |
6570 | return 0; |
6571 | } |
6572 | |
/* parse the whole libata.force module parameter string into
 * ata_force_tbl; individual bad entries are warned about and skipped
 */
static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		/* an entry without an explicit ID inherits the location
		 * of the previous entry
		 */
		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	/* only idx entries were actually valid */
	ata_force_tbl_size = idx;
}
6617 | |
/* module init: parse parameters, register ACPI hooks, bring up the
 * SFF helper library and the SCSI transport template
 */
static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	ata_acpi_register();

	rc = ata_sff_init();
	if (rc) {
		/* force table is useless if init fails */
		kfree(ata_force_tbl);
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

err_out:
	return rc;
}
6646 | |
static void __exit ata_exit(void)
{
	/* tear down in reverse order of ata_init() */
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_acpi_unregister();
	kfree(ata_force_tbl);
}
6655 | |
6656 | subsys_initcall(ata_init); |
6657 | module_exit(ata_exit); |
6658 | |
/* shared ratelimit state: at most 1 burst per HZ/5 interval */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* returns nonzero if the caller may print, 0 if ratelimited */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
6665 | |
6666 | /** |
6667 | * ata_msleep - ATA EH owner aware msleep |
6668 | * @ap: ATA port to attribute the sleep to |
6669 | * @msecs: duration to sleep in milliseconds |
6670 | * |
6671 | * Sleeps @msecs. If the current task is owner of @ap's EH, the |
6672 | * ownership is released before going to sleep and reacquired |
6673 | * after the sleep is complete. IOW, other ports sharing the |
6674 | * @ap->host will be allowed to own the EH while this task is |
6675 | * sleeping. |
6676 | * |
6677 | * LOCKING: |
6678 | * Might sleep. |
6679 | */ |
6680 | void ata_msleep(struct ata_port *ap, unsigned int msecs) |
6681 | { |
6682 | bool owns_eh = ap && ap->host->eh_owner == current; |
6683 | |
6684 | if (owns_eh) |
6685 | ata_eh_release(ap); |
6686 | |
6687 | msleep(msecs); |
6688 | |
6689 | if (owns_eh) |
6690 | ata_eh_acquire(ap); |
6691 | } |
6692 | |
6693 | /** |
6694 | * ata_wait_register - wait until register value changes |
6695 | * @ap: ATA port to wait register for, can be NULL |
6696 | * @reg: IO-mapped register |
6697 | * @mask: Mask to apply to read register value |
6698 | * @val: Wait condition |
6699 | * @interval: polling interval in milliseconds |
6700 | * @timeout: timeout in milliseconds |
6701 | * |
6702 | * Waiting for some bits of register to change is a common |
6703 | * operation for ATA controllers. This function reads 32bit LE |
6704 | * IO-mapped register @reg and tests for the following condition. |
6705 | * |
6706 | * (*@reg & mask) != val |
6707 | * |
6708 | * If the condition is met, it returns; otherwise, the process is |
6709 | * repeated after @interval_msec until timeout. |
6710 | * |
6711 | * LOCKING: |
6712 | * Kernel thread context (may sleep) |
6713 | * |
6714 | * RETURNS: |
6715 | * The final register value. |
6716 | */ |
6717 | u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, |
6718 | unsigned long interval, unsigned long timeout) |
6719 | { |
6720 | unsigned long deadline; |
6721 | u32 tmp; |
6722 | |
6723 | tmp = ioread32(reg); |
6724 | |
6725 | /* Calculate timeout _after_ the first read to make sure |
6726 | * preceding writes reach the controller before starting to |
6727 | * eat away the timeout. |
6728 | */ |
6729 | deadline = ata_deadline(jiffies, timeout); |
6730 | |
6731 | while ((tmp & mask) == val && time_before(jiffies, deadline)) { |
6732 | ata_msleep(ap, interval); |
6733 | tmp = ioread32(reg); |
6734 | } |
6735 | |
6736 | return tmp; |
6737 | } |
6738 | |
6739 | /* |
6740 | * Dummy port_ops |
6741 | */ |
/* qc_issue for dummy ports: every command fails immediately */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
6746 | |
/* no-op error handler for dummy ports; nothing to recover */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6751 | |
/* port_ops for placeholder (dummy) ports: commands fail on issue,
 * EH is a no-op
 */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
	.sched_eh = ata_std_sched_eh,
	.end_eh = ata_std_end_eh,
};
6759 | |
/* Port info wrapping ata_dummy_port_ops, for drivers with vacant ports. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
6763 | |
6764 | /* |
6765 | * Utility print functions |
6766 | */ |
6767 | int ata_port_printk(const struct ata_port *ap, const char *level, |
6768 | const char *fmt, ...) |
6769 | { |
6770 | struct va_format vaf; |
6771 | va_list args; |
6772 | int r; |
6773 | |
6774 | va_start(args, fmt); |
6775 | |
6776 | vaf.fmt = fmt; |
6777 | vaf.va = &args; |
6778 | |
6779 | r = printk("%sata%u: %pV", level, ap->print_id, &vaf); |
6780 | |
6781 | va_end(args); |
6782 | |
6783 | return r; |
6784 | } |
6785 | EXPORT_SYMBOL(ata_port_printk); |
6786 | |
6787 | int ata_link_printk(const struct ata_link *link, const char *level, |
6788 | const char *fmt, ...) |
6789 | { |
6790 | struct va_format vaf; |
6791 | va_list args; |
6792 | int r; |
6793 | |
6794 | va_start(args, fmt); |
6795 | |
6796 | vaf.fmt = fmt; |
6797 | vaf.va = &args; |
6798 | |
6799 | if (sata_pmp_attached(link->ap) || link->ap->slave_link) |
6800 | r = printk("%sata%u.%02u: %pV", |
6801 | level, link->ap->print_id, link->pmp, &vaf); |
6802 | else |
6803 | r = printk("%sata%u: %pV", |
6804 | level, link->ap->print_id, &vaf); |
6805 | |
6806 | va_end(args); |
6807 | |
6808 | return r; |
6809 | } |
6810 | EXPORT_SYMBOL(ata_link_printk); |
6811 | |
6812 | int ata_dev_printk(const struct ata_device *dev, const char *level, |
6813 | const char *fmt, ...) |
6814 | { |
6815 | struct va_format vaf; |
6816 | va_list args; |
6817 | int r; |
6818 | |
6819 | va_start(args, fmt); |
6820 | |
6821 | vaf.fmt = fmt; |
6822 | vaf.va = &args; |
6823 | |
6824 | r = printk("%sata%u.%02u: %pV", |
6825 | level, dev->link->ap->print_id, dev->link->pmp + dev->devno, |
6826 | &vaf); |
6827 | |
6828 | va_end(args); |
6829 | |
6830 | return r; |
6831 | } |
6832 | EXPORT_SYMBOL(ata_dev_printk); |
6833 | |
/* Print the libata/driver @version string for @dev at KERN_DEBUG level. */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
6838 | EXPORT_SYMBOL(ata_print_version); |
6839 | |
6840 | /* |
6841 | * libata is essentially a library of internal helper functions for |
6842 | * low-level ATA host controller drivers. As such, the API/ABI is |
6843 | * likely to change as new drivers are added and updated. |
6844 | * Do not depend on ABI/API stability. |
6845 | */ |
6846 | EXPORT_SYMBOL_GPL(sata_deb_timing_normal); |
6847 | EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); |
6848 | EXPORT_SYMBOL_GPL(sata_deb_timing_long); |
6849 | EXPORT_SYMBOL_GPL(ata_base_port_ops); |
6850 | EXPORT_SYMBOL_GPL(sata_port_ops); |
6851 | EXPORT_SYMBOL_GPL(ata_dummy_port_ops); |
6852 | EXPORT_SYMBOL_GPL(ata_dummy_port_info); |
6853 | EXPORT_SYMBOL_GPL(ata_link_next); |
6854 | EXPORT_SYMBOL_GPL(ata_dev_next); |
6855 | EXPORT_SYMBOL_GPL(ata_std_bios_param); |
6856 | EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); |
6857 | EXPORT_SYMBOL_GPL(ata_host_init); |
6858 | EXPORT_SYMBOL_GPL(ata_host_alloc); |
6859 | EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); |
6860 | EXPORT_SYMBOL_GPL(ata_slave_link_init); |
6861 | EXPORT_SYMBOL_GPL(ata_host_start); |
6862 | EXPORT_SYMBOL_GPL(ata_host_register); |
6863 | EXPORT_SYMBOL_GPL(ata_host_activate); |
6864 | EXPORT_SYMBOL_GPL(ata_host_detach); |
6865 | EXPORT_SYMBOL_GPL(ata_sg_init); |
6866 | EXPORT_SYMBOL_GPL(ata_qc_complete); |
6867 | EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); |
6868 | EXPORT_SYMBOL_GPL(atapi_cmd_type); |
6869 | EXPORT_SYMBOL_GPL(ata_tf_to_fis); |
6870 | EXPORT_SYMBOL_GPL(ata_tf_from_fis); |
6871 | EXPORT_SYMBOL_GPL(ata_pack_xfermask); |
6872 | EXPORT_SYMBOL_GPL(ata_unpack_xfermask); |
6873 | EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); |
6874 | EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); |
6875 | EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); |
6876 | EXPORT_SYMBOL_GPL(ata_mode_string); |
6877 | EXPORT_SYMBOL_GPL(ata_id_xfermask); |
6878 | EXPORT_SYMBOL_GPL(ata_do_set_mode); |
6879 | EXPORT_SYMBOL_GPL(ata_std_qc_defer); |
6880 | EXPORT_SYMBOL_GPL(ata_noop_qc_prep); |
6881 | EXPORT_SYMBOL_GPL(ata_dev_disable); |
6882 | EXPORT_SYMBOL_GPL(sata_set_spd); |
6883 | EXPORT_SYMBOL_GPL(ata_wait_after_reset); |
6884 | EXPORT_SYMBOL_GPL(sata_link_debounce); |
6885 | EXPORT_SYMBOL_GPL(sata_link_resume); |
6886 | EXPORT_SYMBOL_GPL(sata_link_scr_lpm); |
6887 | EXPORT_SYMBOL_GPL(ata_std_prereset); |
6888 | EXPORT_SYMBOL_GPL(sata_link_hardreset); |
6889 | EXPORT_SYMBOL_GPL(sata_std_hardreset); |
6890 | EXPORT_SYMBOL_GPL(ata_std_postreset); |
6891 | EXPORT_SYMBOL_GPL(ata_dev_classify); |
6892 | EXPORT_SYMBOL_GPL(ata_dev_pair); |
6893 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
6894 | EXPORT_SYMBOL_GPL(ata_msleep); |
6895 | EXPORT_SYMBOL_GPL(ata_wait_register); |
6896 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); |
6897 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); |
6898 | EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); |
6899 | EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); |
6900 | EXPORT_SYMBOL_GPL(__ata_change_queue_depth); |
6901 | EXPORT_SYMBOL_GPL(sata_scr_valid); |
6902 | EXPORT_SYMBOL_GPL(sata_scr_read); |
6903 | EXPORT_SYMBOL_GPL(sata_scr_write); |
6904 | EXPORT_SYMBOL_GPL(sata_scr_write_flush); |
6905 | EXPORT_SYMBOL_GPL(ata_link_online); |
6906 | EXPORT_SYMBOL_GPL(ata_link_offline); |
6907 | #ifdef CONFIG_PM |
6908 | EXPORT_SYMBOL_GPL(ata_host_suspend); |
6909 | EXPORT_SYMBOL_GPL(ata_host_resume); |
6910 | #endif /* CONFIG_PM */ |
6911 | EXPORT_SYMBOL_GPL(ata_id_string); |
6912 | EXPORT_SYMBOL_GPL(ata_id_c_string); |
6913 | EXPORT_SYMBOL_GPL(ata_do_dev_read_id); |
6914 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
6915 | |
6916 | EXPORT_SYMBOL_GPL(ata_pio_need_iordy); |
6917 | EXPORT_SYMBOL_GPL(ata_timing_find_mode); |
6918 | EXPORT_SYMBOL_GPL(ata_timing_compute); |
6919 | EXPORT_SYMBOL_GPL(ata_timing_merge); |
6920 | EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); |
6921 | |
6922 | #ifdef CONFIG_PCI |
6923 | EXPORT_SYMBOL_GPL(pci_test_config_bits); |
6924 | EXPORT_SYMBOL_GPL(ata_pci_remove_one); |
6925 | #ifdef CONFIG_PM |
6926 | EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); |
6927 | EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); |
6928 | EXPORT_SYMBOL_GPL(ata_pci_device_suspend); |
6929 | EXPORT_SYMBOL_GPL(ata_pci_device_resume); |
6930 | #endif /* CONFIG_PM */ |
6931 | #endif /* CONFIG_PCI */ |
6932 | |
6933 | EXPORT_SYMBOL_GPL(ata_platform_remove_one); |
6934 | |
6935 | EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); |
6936 | EXPORT_SYMBOL_GPL(ata_ehi_push_desc); |
6937 | EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); |
6938 | EXPORT_SYMBOL_GPL(ata_port_desc); |
6939 | #ifdef CONFIG_PCI |
6940 | EXPORT_SYMBOL_GPL(ata_port_pbar_desc); |
6941 | #endif /* CONFIG_PCI */ |
6942 | EXPORT_SYMBOL_GPL(ata_port_schedule_eh); |
6943 | EXPORT_SYMBOL_GPL(ata_link_abort); |
6944 | EXPORT_SYMBOL_GPL(ata_port_abort); |
6945 | EXPORT_SYMBOL_GPL(ata_port_freeze); |
6946 | EXPORT_SYMBOL_GPL(sata_async_notification); |
6947 | EXPORT_SYMBOL_GPL(ata_eh_freeze_port); |
6948 | EXPORT_SYMBOL_GPL(ata_eh_thaw_port); |
6949 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); |
6950 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); |
6951 | EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); |
6952 | EXPORT_SYMBOL_GPL(ata_do_eh); |
6953 | EXPORT_SYMBOL_GPL(ata_std_error_handler); |
6954 | |
6955 | EXPORT_SYMBOL_GPL(ata_cable_40wire); |
6956 | EXPORT_SYMBOL_GPL(ata_cable_80wire); |
6957 | EXPORT_SYMBOL_GPL(ata_cable_unknown); |
6958 | EXPORT_SYMBOL_GPL(ata_cable_ignore); |
6959 | EXPORT_SYMBOL_GPL(ata_cable_sata); |
6960 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9