target/linux/ixp4xx/patches-2.6.32/050-disable_dmabounce.patch

--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -418,7 +418,6 @@ config ARCH_IXP4XX
 	select GENERIC_GPIO
 	select GENERIC_TIME
 	select GENERIC_CLOCKEVENTS
-	select DMABOUNCE if PCI
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
 
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -199,6 +199,45 @@ config IXP4XX_INDIRECT_PCI
 	  need to use the indirect method instead. If you don't know
 	  what you need, leave this option unselected.
 
+config IXP4XX_LEGACY_DMABOUNCE
+	bool "legacy PCI DMA bounce support"
+	depends on PCI
+	default n
+	select DMABOUNCE
+	help
+	  The IXP4xx is limited to a 64MB window for PCI DMA, which
+	  requires that PCI accesses above 64MB are bounced via buffers
+	  below 64MB. Furthermore the IXP4xx has an erratum where PCI
+	  read prefetches just below the 64MB limit can trigger lockups.
+
+	  The kernel has traditionally handled these two issues by using
+	  ARM-specific DMA bounce support code for all accesses >= 64MB.
+	  That code causes problems of its own, so it is desirable to
+	  disable it. As the kernel now has a workaround for the PCI read
+	  prefetch erratum, it no longer requires the ARM bounce code.
+
+	  Enabling this option makes IXP4xx continue to use the problematic
+	  ARM DMA bounce code. Disabling this option makes IXP4xx use the
+	  kernel's generic bounce code.
+
+	  Say 'N'.
+
+config IXP4XX_ZONE_DMA
+	bool "Support > 64MB RAM"
+	depends on !IXP4XX_LEGACY_DMABOUNCE
+	default y
+	select ZONE_DMA
+	help
+	  The IXP4xx is limited to a 64MB window for PCI DMA, which
+	  requires that PCI accesses above 64MB are bounced via buffers
+	  below 64MB.
+
+	  Disabling this option allows you to omit the support code for
+	  DMA-able memory allocations and DMA bouncing, but the kernel
+	  will then not work properly if more than 64MB of RAM is present.
+
+	  Say 'Y' unless your platform is limited to <= 64MB of RAM.
+
 config IXP4XX_QMGR
 	tristate "IXP4xx Queue Manager support"
 	help
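
Illustration (not part of the patch): with IXP4XX_ZONE_DMA enabled, the 64MB window described in the help text above is handled transparently for coherent allocations, because ixp4xx_pci_platform_notify() (patched below) caps coherent_dma_mask at SZ_64M - 1 and the ARM allocator then has to satisfy the request from ZONE_DMA. A minimal sketch, using a hypothetical helper name:

/* Sketch only, not part of this patch. With coherent_dma_mask limited to
 * SZ_64M - 1, the coherent allocation must land in the first 64MB, i.e.
 * within the PCI-reachable window, without any driver-side changes. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *ixp4xx_safe_coherent(struct pci_dev *pdev, size_t len,
				  dma_addr_t *bus_addr)
{
	return dma_alloc_coherent(&pdev->dev, len, bus_addr, GFP_KERNEL);
}
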
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -321,27 +321,38 @@ static int abort_handler(unsigned long a
  */
 static int ixp4xx_pci_platform_notify(struct device *dev)
 {
-	if(dev->bus == &pci_bus_type) {
-		*dev->dma_mask =  SZ_64M - 1;
+	if (dev->bus == &pci_bus_type) {
+		*dev->dma_mask = SZ_64M - 1;
 		dev->coherent_dma_mask = SZ_64M - 1;
+#ifdef CONFIG_DMABOUNCE
 		dmabounce_register_dev(dev, 2048, 4096);
+#endif
 	}
 	return 0;
 }
 
 static int ixp4xx_pci_platform_notify_remove(struct device *dev)
 {
-	if(dev->bus == &pci_bus_type) {
+#ifdef CONFIG_DMABOUNCE
+	if (dev->bus == &pci_bus_type)
 		dmabounce_unregister_dev(dev);
-	}
+#endif
 	return 0;
 }
 
+#ifdef CONFIG_DMABOUNCE
 int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
+	/* Note that this returns true for the last page below 64M due to
+	 * IXP4xx erratum 15 (SCR 1289), which states that PCI prefetches
+	 * can cross the boundary between valid memory and a reserved region
+	 * causing AHB bus errors and a lock-up.
+	 */
 	return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
 }
+#endif
 
+#ifdef CONFIG_ZONE_DMA
 /*
  * Only first 64MB of memory can be accessed via PCI.
  * We use GFP_DMA to allocate safe buffers to do map/unmap.
@@ -364,6 +375,7 @@ void __init ixp4xx_adjust_zones(int node
 	zhole_size[1] = zhole_size[0];
 	zhole_size[0] = 0;
 }
+#endif
 
 void __init ixp4xx_pci_preinit(void)
 {
@@ -517,19 +529,35 @@ struct pci_bus * __devinit ixp4xx_scan_b
 int
 pci_set_dma_mask(struct pci_dev *dev, u64 mask)
 {
-	if (mask >= SZ_64M - 1 )
+#ifdef CONFIG_DMABOUNCE
+	if (mask >= SZ_64M - 1)
 		return 0;
 
 	return -EIO;
+#else
+	/* Only honour masks < SZ_64M. Silently ignore masks >= SZ_64M
+	   as generic drivers do not know about IXP4xx PCI DMA quirks. */
+	if (mask < SZ_64M)
+		dev->dma_mask = mask;
+	return 0;
+#endif
 }
 
 int
 pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
 {
-	if (mask >= SZ_64M - 1 )
+#ifdef CONFIG_DMABOUNCE
+	if (mask >= SZ_64M - 1)
 		return 0;
 
 	return -EIO;
+#else
+	/* Only honour masks < SZ_64M. Silently ignore masks >= SZ_64M
+	   as generic drivers do not know about IXP4xx PCI DMA quirks. */
+	if (mask < SZ_64M)
+		dev->dev.coherent_dma_mask = mask;
+	return 0;
+#endif
 }
 
 EXPORT_SYMBOL(ixp4xx_pci_read);
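
Usage sketch (not part of the patch): with IXP4XX_LEGACY_DMABOUNCE disabled, the replacement pci_set_dma_mask() above always succeeds but only honours masks below SZ_64M, so an unmodified driver's probe path keeps working while the device stays limited to the first 64MB. The example_probe() function below is hypothetical and for illustration only:

/* Sketch only, not part of this patch. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_probe(struct pci_dev *pdev)
{
	/* Succeeds (returns 0), but the 32-bit mask is silently ignored:
	 * pdev->dma_mask remains SZ_64M - 1 as set at bus notify time. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -EIO;

	/* A mask below 64MB is honoured, e.g. for a device that can only
	 * address the first 16MB. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(24)))
		return -EIO;

	return 0;
}
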
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -16,10 +16,12 @@
 
 #if !defined(__ASSEMBLY__) && defined(CONFIG_PCI)
 
+#ifdef CONFIG_ZONE_DMA
 void ixp4xx_adjust_zones(int node, unsigned long *size, unsigned long *holes);
 
 #define arch_adjust_zones(node, size, holes) \
 	ixp4xx_adjust_zones(node, size, holes)
+#endif
 
 #define ISA_DMA_THRESHOLD (SZ_64M - 1)
 #define MAX_DMA_ADDRESS	(PAGE_OFFSET + SZ_64M)
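
For reference (not part of the patch): the second common-pci.c hunk only shows the tail of ixp4xx_adjust_zones(), which arch_adjust_zones() in memory.h maps onto. A rough, hedged sketch of the whole function (exact code in the tree may differ) shows how memory is split at the 64MB PCI boundary, so that e.g. a 128MB board ends up with a 64MB ZONE_DMA and a 64MB ZONE_NORMAL:

/* Rough reconstruction for reference only; see the real function in
 * arch/arm/mach-ixp4xx/common-pci.c. Zone sizes and holes are in pages. */
void __init ixp4xx_adjust_zones(int node, unsigned long *zone_size,
				unsigned long *zhole_size)
{
	unsigned long sz = SZ_64M >> PAGE_SHIFT;	/* 64MB in pages */

	/* Nothing to do for secondary nodes or boards with <= 64MB RAM. */
	if (node || (zone_size[0] <= sz))
		return;

	zone_size[1] = zone_size[0] - sz;	/* remainder -> ZONE_NORMAL */
	zone_size[0] = sz;			/* first 64MB -> ZONE_DMA */
	zhole_size[1] = zhole_size[0];
	zhole_size[0] = 0;
}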
