Root/
1 | /* |
2 | * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller. |
3 | * |
4 | * Copyright (c) 2008 Wind River Systems, Inc. |
5 | * |
6 | * Authors: Cao Qingtao <qingtao.cao@windriver.com> |
7 | * |
8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as |
10 | * published by the Free Software Foundation. |
11 | * |
12 | * This program is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. |
15 | * See the GNU General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU General Public License |
18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ |
21 | |
22 | #include <linux/module.h> |
23 | #include <linux/init.h> |
24 | #include <linux/io.h> |
25 | #include <linux/edac.h> |
26 | #include <linux/of.h> |
27 | #include <linux/platform_device.h> |
28 | #include <linux/gfp.h> |
29 | |
30 | #include "edac_core.h" |
31 | #include "edac_module.h" |
32 | |
33 | #define CPC925_EDAC_REVISION " Ver: 1.0.0" |
34 | #define CPC925_EDAC_MOD_STR "cpc925_edac" |
35 | |
36 | #define cpc925_printk(level, fmt, arg...) \ |
37 | edac_printk(level, "CPC925", fmt, ##arg) |
38 | |
39 | #define cpc925_mc_printk(mci, level, fmt, arg...) \ |
40 | edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg) |
41 | |
42 | /* |
43 | * CPC925 registers are of 32 bits with bit0 defined at the |
44 | * most significant bit and bit31 at that of least significant. |
45 | */ |
46 | #define CPC925_BITS_PER_REG 32 |
47 | #define CPC925_BIT(nr) (1UL << (CPC925_BITS_PER_REG - 1 - nr)) |
48 | |
49 | /* |
50 | * EDAC device names for the error detections of |
51 | * CPU Interface and Hypertransport Link. |
52 | */ |
53 | #define CPC925_CPU_ERR_DEV "cpu" |
54 | #define CPC925_HT_LINK_DEV "htlink" |
55 | |
56 | /* Suppose DDR Refresh cycle is 15.6 microsecond */ |
57 | #define CPC925_REF_FREQ 0xFA69 |
58 | #define CPC925_SCRUB_BLOCK_SIZE 64 /* bytes */ |
59 | #define CPC925_NR_CSROWS 8 |
60 | |
61 | /* |
62 | * All registers and bits definitions are taken from |
63 | * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02". |
64 | */ |
65 | |
66 | /* |
67 | * CPU and Memory Controller Registers |
68 | */ |
69 | /************************************************************ |
70 | * Processor Interface Exception Mask Register (APIMASK) |
71 | ************************************************************/ |
72 | #define REG_APIMASK_OFFSET 0x30070 |
73 | enum apimask_bits { |
74 | APIMASK_DART = CPC925_BIT(0), /* DART Exception */ |
75 | APIMASK_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ |
76 | APIMASK_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ |
77 | APIMASK_STAT = CPC925_BIT(3), /* Status Exception */ |
78 | APIMASK_DERR = CPC925_BIT(4), /* Data Error Exception */ |
79 | APIMASK_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ |
80 | APIMASK_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ |
81 | /* BIT(7) Reserved */ |
82 | APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ |
83 | APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ |
84 | APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ |
85 | APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ |
86 | |
87 | CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 | |
88 | APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 | |
89 | APIMASK_ADRS1), |
90 | ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H | |
91 | APIMASK_ECC_UE_L | APIMASK_ECC_CE_L), |
92 | }; |
93 | #define APIMASK_ADI(n) CPC925_BIT(((n)+1)) |
94 | |
95 | /************************************************************ |
96 | * Processor Interface Exception Register (APIEXCP) |
97 | ************************************************************/ |
98 | #define REG_APIEXCP_OFFSET 0x30060 |
99 | enum apiexcp_bits { |
100 | APIEXCP_DART = CPC925_BIT(0), /* DART Exception */ |
101 | APIEXCP_ADI0 = CPC925_BIT(1), /* Handshake Error on PI0_ADI */ |
102 | APIEXCP_ADI1 = CPC925_BIT(2), /* Handshake Error on PI1_ADI */ |
103 | APIEXCP_STAT = CPC925_BIT(3), /* Status Exception */ |
104 | APIEXCP_DERR = CPC925_BIT(4), /* Data Error Exception */ |
105 | APIEXCP_ADRS0 = CPC925_BIT(5), /* Addressing Exception on PI0 */ |
106 | APIEXCP_ADRS1 = CPC925_BIT(6), /* Addressing Exception on PI1 */ |
107 | /* BIT(7) Reserved */ |
108 | APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */ |
109 | APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */ |
110 | APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */ |
111 | APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */ |
112 | |
113 | CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 | |
114 | APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 | |
115 | APIEXCP_ADRS1), |
116 | UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L), |
117 | CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L), |
118 | ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED), |
119 | }; |
120 | |
121 | /************************************************************ |
122 | * Memory Bus Configuration Register (MBCR) |
123 | ************************************************************/ |
124 | #define REG_MBCR_OFFSET 0x2190 |
125 | #define MBCR_64BITCFG_SHIFT 23 |
126 | #define MBCR_64BITCFG_MASK (1UL << MBCR_64BITCFG_SHIFT) |
127 | #define MBCR_64BITBUS_SHIFT 22 |
128 | #define MBCR_64BITBUS_MASK (1UL << MBCR_64BITBUS_SHIFT) |
129 | |
130 | /************************************************************ |
131 | * Memory Bank Mode Register (MBMR) |
132 | ************************************************************/ |
133 | #define REG_MBMR_OFFSET 0x21C0 |
134 | #define MBMR_MODE_MAX_VALUE 0xF |
135 | #define MBMR_MODE_SHIFT 25 |
136 | #define MBMR_MODE_MASK (MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT) |
137 | #define MBMR_BBA_SHIFT 24 |
138 | #define MBMR_BBA_MASK (1UL << MBMR_BBA_SHIFT) |
139 | |
140 | /************************************************************ |
141 | * Memory Bank Boundary Address Register (MBBAR) |
142 | ************************************************************/ |
143 | #define REG_MBBAR_OFFSET 0x21D0 |
144 | #define MBBAR_BBA_MAX_VALUE 0xFF |
145 | #define MBBAR_BBA_SHIFT 24 |
146 | #define MBBAR_BBA_MASK (MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT) |
147 | |
148 | /************************************************************ |
149 | * Memory Scrub Control Register (MSCR) |
150 | ************************************************************/ |
151 | #define REG_MSCR_OFFSET 0x2400 |
152 | #define MSCR_SCRUB_MOD_MASK 0xC0000000 /* scrub_mod - bit0:1*/ |
153 | #define MSCR_BACKGR_SCRUB 0x40000000 /* 01 */ |
154 | #define MSCR_SI_SHIFT 16 /* si - bit8:15*/ |
155 | #define MSCR_SI_MAX_VALUE 0xFF |
156 | #define MSCR_SI_MASK (MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT) |
157 | |
158 | /************************************************************ |
159 | * Memory Scrub Range Start Register (MSRSR) |
160 | ************************************************************/ |
161 | #define REG_MSRSR_OFFSET 0x2410 |
162 | |
163 | /************************************************************ |
164 | * Memory Scrub Range End Register (MSRER) |
165 | ************************************************************/ |
166 | #define REG_MSRER_OFFSET 0x2420 |
167 | |
168 | /************************************************************ |
169 | * Memory Scrub Pattern Register (MSPR) |
170 | ************************************************************/ |
171 | #define REG_MSPR_OFFSET 0x2430 |
172 | |
173 | /************************************************************ |
174 | * Memory Check Control Register (MCCR) |
175 | ************************************************************/ |
176 | #define REG_MCCR_OFFSET 0x2440 |
177 | enum mccr_bits { |
178 | MCCR_ECC_EN = CPC925_BIT(0), /* ECC high and low check */ |
179 | }; |
180 | |
181 | /************************************************************ |
182 | * Memory Check Range End Register (MCRER) |
183 | ************************************************************/ |
184 | #define REG_MCRER_OFFSET 0x2450 |
185 | |
186 | /************************************************************ |
187 | * Memory Error Address Register (MEAR) |
188 | ************************************************************/ |
189 | #define REG_MEAR_OFFSET 0x2460 |
190 | #define MEAR_BCNT_MAX_VALUE 0x3 |
191 | #define MEAR_BCNT_SHIFT 30 |
192 | #define MEAR_BCNT_MASK (MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT) |
193 | #define MEAR_RANK_MAX_VALUE 0x7 |
194 | #define MEAR_RANK_SHIFT 27 |
195 | #define MEAR_RANK_MASK (MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT) |
196 | #define MEAR_COL_MAX_VALUE 0x7FF |
197 | #define MEAR_COL_SHIFT 16 |
198 | #define MEAR_COL_MASK (MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT) |
199 | #define MEAR_BANK_MAX_VALUE 0x3 |
200 | #define MEAR_BANK_SHIFT 14 |
201 | #define MEAR_BANK_MASK (MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT) |
202 | #define MEAR_ROW_MASK 0x00003FFF |
203 | |
204 | /************************************************************ |
205 | * Memory Error Syndrome Register (MESR) |
206 | ************************************************************/ |
207 | #define REG_MESR_OFFSET 0x2470 |
208 | #define MESR_ECC_SYN_H_MASK 0xFF00 |
209 | #define MESR_ECC_SYN_L_MASK 0x00FF |
210 | |
211 | /************************************************************ |
212 | * Memory Mode Control Register (MMCR) |
213 | ************************************************************/ |
214 | #define REG_MMCR_OFFSET 0x2500 |
215 | enum mmcr_bits { |
216 | MMCR_REG_DIMM_MODE = CPC925_BIT(3), |
217 | }; |
218 | |
219 | /* |
220 | * HyperTransport Link Registers |
221 | */ |
222 | /************************************************************ |
223 | * Error Handling/Enumeration Scratch Pad Register (ERRCTRL) |
224 | ************************************************************/ |
225 | #define REG_ERRCTRL_OFFSET 0x70140 |
226 | enum errctrl_bits { /* nonfatal interrupts for */ |
227 | ERRCTRL_SERR_NF = CPC925_BIT(0), /* system error */ |
228 | ERRCTRL_CRC_NF = CPC925_BIT(1), /* CRC error */ |
229 | ERRCTRL_RSP_NF = CPC925_BIT(2), /* Response error */ |
230 | ERRCTRL_EOC_NF = CPC925_BIT(3), /* End-Of-Chain error */ |
231 | ERRCTRL_OVF_NF = CPC925_BIT(4), /* Overflow error */ |
232 | ERRCTRL_PROT_NF = CPC925_BIT(5), /* Protocol error */ |
233 | |
234 | ERRCTRL_RSP_ERR = CPC925_BIT(6), /* Response error received */ |
235 | ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */ |
236 | |
237 | HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF | |
238 | ERRCTRL_RSP_NF | ERRCTRL_EOC_NF | |
239 | ERRCTRL_OVF_NF | ERRCTRL_PROT_NF), |
240 | HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL), |
241 | }; |
242 | |
243 | /************************************************************ |
244 | * Link Configuration and Link Control Register (LINKCTRL) |
245 | ************************************************************/ |
246 | #define REG_LINKCTRL_OFFSET 0x70110 |
247 | enum linkctrl_bits { |
248 | LINKCTRL_CRC_ERR = (CPC925_BIT(22) | CPC925_BIT(23)), |
249 | LINKCTRL_LINK_FAIL = CPC925_BIT(27), |
250 | |
251 | HT_LINKCTRL_DETECTED = (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL), |
252 | }; |
253 | |
254 | /************************************************************ |
255 | * Link FreqCap/Error/Freq/Revision ID Register (LINKERR) |
256 | ************************************************************/ |
257 | #define REG_LINKERR_OFFSET 0x70120 |
258 | enum linkerr_bits { |
259 | LINKERR_EOC_ERR = CPC925_BIT(17), /* End-Of-Chain error */ |
260 | LINKERR_OVF_ERR = CPC925_BIT(18), /* Receive Buffer Overflow */ |
261 | LINKERR_PROT_ERR = CPC925_BIT(19), /* Protocol error */ |
262 | |
263 | HT_LINKERR_DETECTED = (LINKERR_EOC_ERR | LINKERR_OVF_ERR | |
264 | LINKERR_PROT_ERR), |
265 | }; |
266 | |
267 | /************************************************************ |
268 | * Bridge Control Register (BRGCTRL) |
269 | ************************************************************/ |
270 | #define REG_BRGCTRL_OFFSET 0x70300 |
271 | enum brgctrl_bits { |
272 | BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */ |
273 | BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */ |
274 | }; |
275 | |
/* Private structure for edac memory controller */
struct cpc925_mc_pdata {
	void __iomem *vbase;		/* remapped CPC925 register window */
	unsigned long total_mem;	/* total RAM size, summed from DTB /memory */
	const char *name;		/* controller name for EDAC reporting */
	int edac_idx;			/* index handed to edac_mc_alloc() */
};
283 | |
/* Private structure for common edac device */
struct cpc925_dev_info {
	void __iomem *vbase;		/* shared MMIO base (same as MC's) */
	struct platform_device *pdev;	/* platform device registered for this dev */
	char *ctl_name;			/* "cpu" or "htlink" */
	int edac_idx;			/* index from edac_device_alloc_index() */
	struct edac_device_ctl_info *edac_dev;
	void (*init)(struct cpc925_dev_info *dev_info);	 /* enable detection */
	void (*exit)(struct cpc925_dev_info *dev_info);	 /* disable detection */
	void (*check)(struct edac_device_ctl_info *edac_dev); /* poll handler */
};
295 | |
296 | /* Get total memory size from Open Firmware DTB */ |
297 | static void get_total_mem(struct cpc925_mc_pdata *pdata) |
298 | { |
299 | struct device_node *np = NULL; |
300 | const unsigned int *reg, *reg_end; |
301 | int len, sw, aw; |
302 | unsigned long start, size; |
303 | |
304 | np = of_find_node_by_type(NULL, "memory"); |
305 | if (!np) |
306 | return; |
307 | |
308 | aw = of_n_addr_cells(np); |
309 | sw = of_n_size_cells(np); |
310 | reg = (const unsigned int *)of_get_property(np, "reg", &len); |
311 | reg_end = reg + len/4; |
312 | |
313 | pdata->total_mem = 0; |
314 | do { |
315 | start = of_read_number(reg, aw); |
316 | reg += aw; |
317 | size = of_read_number(reg, sw); |
318 | reg += sw; |
319 | edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size); |
320 | pdata->total_mem += size; |
321 | } while (reg < reg_end); |
322 | |
323 | of_node_put(np); |
324 | edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem); |
325 | } |
326 | |
327 | static void cpc925_init_csrows(struct mem_ctl_info *mci) |
328 | { |
329 | struct cpc925_mc_pdata *pdata = mci->pvt_info; |
330 | struct csrow_info *csrow; |
331 | struct dimm_info *dimm; |
332 | enum dev_type dtype; |
333 | int index, j; |
334 | u32 mbmr, mbbar, bba, grain; |
335 | unsigned long row_size, nr_pages, last_nr_pages = 0; |
336 | |
337 | get_total_mem(pdata); |
338 | |
339 | for (index = 0; index < mci->nr_csrows; index++) { |
340 | mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET + |
341 | 0x20 * index); |
342 | mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET + |
343 | 0x20 + index); |
344 | bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) | |
345 | ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT); |
346 | |
347 | if (bba == 0) |
348 | continue; /* not populated */ |
349 | |
350 | csrow = mci->csrows[index]; |
351 | |
352 | row_size = bba * (1UL << 28); /* 256M */ |
353 | csrow->first_page = last_nr_pages; |
354 | nr_pages = row_size >> PAGE_SHIFT; |
355 | csrow->last_page = csrow->first_page + nr_pages - 1; |
356 | last_nr_pages = csrow->last_page + 1; |
357 | |
358 | switch (csrow->nr_channels) { |
359 | case 1: /* Single channel */ |
360 | grain = 32; /* four-beat burst of 32 bytes */ |
361 | break; |
362 | case 2: /* Dual channel */ |
363 | default: |
364 | grain = 64; /* four-beat burst of 64 bytes */ |
365 | break; |
366 | } |
367 | switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { |
368 | case 6: /* 0110, no way to differentiate X8 VS X16 */ |
369 | case 5: /* 0101 */ |
370 | case 8: /* 1000 */ |
371 | dtype = DEV_X16; |
372 | break; |
373 | case 7: /* 0111 */ |
374 | case 9: /* 1001 */ |
375 | dtype = DEV_X8; |
376 | break; |
377 | default: |
378 | dtype = DEV_UNKNOWN; |
379 | break; |
380 | } |
381 | for (j = 0; j < csrow->nr_channels; j++) { |
382 | dimm = csrow->channels[j]->dimm; |
383 | dimm->nr_pages = nr_pages / csrow->nr_channels; |
384 | dimm->mtype = MEM_RDDR; |
385 | dimm->edac_mode = EDAC_SECDED; |
386 | dimm->grain = grain; |
387 | dimm->dtype = dtype; |
388 | } |
389 | } |
390 | } |
391 | |
392 | /* Enable memory controller ECC detection */ |
393 | static void cpc925_mc_init(struct mem_ctl_info *mci) |
394 | { |
395 | struct cpc925_mc_pdata *pdata = mci->pvt_info; |
396 | u32 apimask; |
397 | u32 mccr; |
398 | |
399 | /* Enable various ECC error exceptions */ |
400 | apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET); |
401 | if ((apimask & ECC_MASK_ENABLE) == 0) { |
402 | apimask |= ECC_MASK_ENABLE; |
403 | __raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET); |
404 | } |
405 | |
406 | /* Enable ECC detection */ |
407 | mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET); |
408 | if ((mccr & MCCR_ECC_EN) == 0) { |
409 | mccr |= MCCR_ECC_EN; |
410 | __raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET); |
411 | } |
412 | } |
413 | |
/* Disable memory controller ECC detection */
static void cpc925_mc_exit(struct mem_ctl_info *mci)
{
	/*
	 * Intentionally a no-op.  Clearing the ECC error detection bits
	 * here would be easy, but once cleared, re-enabling them in
	 * cpc925_mc_init() on a later module re-load triggers a machine
	 * check exception.  Leaving them set is safe: they default to 1
	 * out of reset and the boot loader sets them up anyway.
	 */
}
430 | |
/*
 * Revert DDR column/row/bank addresses into page frame number and
 * offset in page.
 *
 * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs),
 * physical address(PA) bits to column address(CA) bits mappings are:
 * CA	0	1	2	3	4	5	6	7	8	9	10
 * PA	59	58	57	56	55	54	53	52	51	50	49
 *
 * physical address(PA) bits to bank address(BA) bits mappings are:
 * BA	0	1
 * PA	43	44
 *
 * physical address(PA) bits to row address(RA) bits mappings are:
 * RA	0	1	2	3	4	5	6	7	8	9	10	11	12
 * PA	36	35	34	48	47	46	45	40	41	42	39	38	37
 */
static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
		unsigned long *pfn, unsigned long *offset, int *csrow)
{
	u32 bcnt, rank, col, bank, row;
	u32 c;
	unsigned long pa;
	int i;

	/* Decode the fields latched in the Memory Error Address Register */
	bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
	rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
	col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
	bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
	row = mear & MEAR_ROW_MASK;

	/* The rank field directly identifies the failing csrow */
	*csrow = rank;

#ifdef CONFIG_EDAC_DEBUG
	if (mci->csrows[rank]->first_page == 0) {
		cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
			"non-populated csrow, broken hardware?\n");
		return;
	}
#endif

	/* Revert csrow number: start from the csrow's base address */
	pa = mci->csrows[rank]->first_page << PAGE_SHIFT;

	/* Revert column address: LSB-first, scattered into PA bits 14..4
	 * per the mapping table above (MSB0 numbering in the manual) */
	col += bcnt;
	for (i = 0; i < 11; i++) {
		c = col & 0x1;
		col >>= 1;
		pa |= c << (14 - i);
	}

	/* Revert bank address */
	pa |= bank << 19;

	/* Revert row address, in 4 steps — each step places a run of row
	 * bits into its (non-contiguous) physical-address positions */
	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (26 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (21 + i);
	}

	for (i = 0; i < 4; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (18 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (29 - i);
	}

	/* Split the reconstructed physical address into pfn + page offset */
	*offset = pa & (PAGE_SIZE - 1);
	*pfn = pa >> PAGE_SHIFT;

	edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
516 | |
517 | static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome) |
518 | { |
519 | if ((syndrome & MESR_ECC_SYN_H_MASK) == 0) |
520 | return 0; |
521 | |
522 | if ((syndrome & MESR_ECC_SYN_L_MASK) == 0) |
523 | return 1; |
524 | |
525 | cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n", |
526 | syndrome); |
527 | return 1; |
528 | } |
529 | |
530 | /* Check memory controller registers for ECC errors */ |
531 | static void cpc925_mc_check(struct mem_ctl_info *mci) |
532 | { |
533 | struct cpc925_mc_pdata *pdata = mci->pvt_info; |
534 | u32 apiexcp; |
535 | u32 mear; |
536 | u32 mesr; |
537 | u16 syndrome; |
538 | unsigned long pfn = 0, offset = 0; |
539 | int csrow = 0, channel = 0; |
540 | |
541 | /* APIEXCP is cleared when read */ |
542 | apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET); |
543 | if ((apiexcp & ECC_EXCP_DETECTED) == 0) |
544 | return; |
545 | |
546 | mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET); |
547 | syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK); |
548 | |
549 | mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET); |
550 | |
551 | /* Revert column/row addresses into page frame number, etc */ |
552 | cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow); |
553 | |
554 | if (apiexcp & CECC_EXCP_DETECTED) { |
555 | cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); |
556 | channel = cpc925_mc_find_channel(mci, syndrome); |
557 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, |
558 | pfn, offset, syndrome, |
559 | csrow, channel, -1, |
560 | mci->ctl_name, ""); |
561 | } |
562 | |
563 | if (apiexcp & UECC_EXCP_DETECTED) { |
564 | cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); |
565 | edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, |
566 | pfn, offset, 0, |
567 | csrow, -1, -1, |
568 | mci->ctl_name, ""); |
569 | } |
570 | |
571 | cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); |
572 | cpc925_mc_printk(mci, KERN_INFO, "APIMASK 0x%08x\n", |
573 | __raw_readl(pdata->vbase + REG_APIMASK_OFFSET)); |
574 | cpc925_mc_printk(mci, KERN_INFO, "APIEXCP 0x%08x\n", |
575 | apiexcp); |
576 | cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl 0x%08x\n", |
577 | __raw_readl(pdata->vbase + REG_MSCR_OFFSET)); |
578 | cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start 0x%08x\n", |
579 | __raw_readl(pdata->vbase + REG_MSRSR_OFFSET)); |
580 | cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End 0x%08x\n", |
581 | __raw_readl(pdata->vbase + REG_MSRER_OFFSET)); |
582 | cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern 0x%08x\n", |
583 | __raw_readl(pdata->vbase + REG_MSPR_OFFSET)); |
584 | cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl 0x%08x\n", |
585 | __raw_readl(pdata->vbase + REG_MCCR_OFFSET)); |
586 | cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End 0x%08x\n", |
587 | __raw_readl(pdata->vbase + REG_MCRER_OFFSET)); |
588 | cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address 0x%08x\n", |
589 | mesr); |
590 | cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome 0x%08x\n", |
591 | syndrome); |
592 | } |
593 | |
/******************** CPU err device********************************/
/*
 * Compute the APIMASK bits for processor interfaces with no CPU
 * present, by walking the /cpus DT children.  Starts from "both PIs
 * disabled" and clears the bit for each CPU found.  The result is
 * cached in a static after the first successful computation.
 * Returns 0 (nothing disabled) if the /cpus node is missing.
 */
static u32 cpc925_cpu_mask_disabled(void)
{
	struct device_node *cpus;
	struct device_node *cpunode = NULL;
	static u32 mask = 0;	/* 0 doubles as "not yet computed" */

	/* use cached value if available */
	if (mask != 0)
		return mask;

	mask = APIMASK_ADI0 | APIMASK_ADI1;

	cpus = of_find_node_by_path("/cpus");
	if (cpus == NULL) {
		cpc925_printk(KERN_DEBUG, "No /cpus node !\n");
		return 0;
	}

	while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
		const u32 *reg = of_get_property(cpunode, "reg", NULL);

		/* Skip /cpus children that are not actual cpu nodes */
		if (strcmp(cpunode->type, "cpu")) {
			cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
			continue;
		}

		/* reg (the CPU id) must exist and map to PI 0..2 */
		if (reg == NULL || *reg > 2) {
			cpc925_printk(KERN_ERR, "Bad reg value at %s\n", cpunode->full_name);
			continue;
		}

		/* CPU present on this PI: clear its "disabled" bit */
		mask &= ~APIMASK_ADI(*reg);
	}

	if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
		/* We assume that each CPU sits on it's own PI and that
		 * for present CPUs the reg property equals to the PI
		 * interface id */
		cpc925_printk(KERN_WARNING,
				"Assuming PI id is equal to CPU MPIC id!\n");
	}

	/* cpunode is NULL here (loop exhausted); put is a safe no-op */
	of_node_put(cpunode);
	of_node_put(cpus);

	return mask;
}
642 | |
643 | /* Enable CPU Errors detection */ |
644 | static void cpc925_cpu_init(struct cpc925_dev_info *dev_info) |
645 | { |
646 | u32 apimask; |
647 | u32 cpumask; |
648 | |
649 | apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); |
650 | |
651 | cpumask = cpc925_cpu_mask_disabled(); |
652 | if (apimask & cpumask) { |
653 | cpc925_printk(KERN_WARNING, "CPU(s) not present, " |
654 | "but enabled in APIMASK, disabling\n"); |
655 | apimask &= ~cpumask; |
656 | } |
657 | |
658 | if ((apimask & CPU_MASK_ENABLE) == 0) |
659 | apimask |= CPU_MASK_ENABLE; |
660 | |
661 | __raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET); |
662 | } |
663 | |
/* Disable CPU Errors detection */
static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
{
	/*
	 * Intentionally a no-op.  Clearing the CPU error detection bits
	 * would be straightforward, but re-enabling them afterwards in
	 * cpc925_cpu_init() on a module re-load triggers a machine check
	 * exception.  They reset to 1 by default, so leaving them alone
	 * is safe.
	 */
}
680 | |
681 | /* Check for CPU Errors */ |
682 | static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev) |
683 | { |
684 | struct cpc925_dev_info *dev_info = edac_dev->pvt_info; |
685 | u32 apiexcp; |
686 | u32 apimask; |
687 | |
688 | /* APIEXCP is cleared when read */ |
689 | apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET); |
690 | if ((apiexcp & CPU_EXCP_DETECTED) == 0) |
691 | return; |
692 | |
693 | if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0) |
694 | return; |
695 | |
696 | apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET); |
697 | cpc925_printk(KERN_INFO, "Processor Interface Fault\n" |
698 | "Processor Interface register dump:\n"); |
699 | cpc925_printk(KERN_INFO, "APIMASK 0x%08x\n", apimask); |
700 | cpc925_printk(KERN_INFO, "APIEXCP 0x%08x\n", apiexcp); |
701 | |
702 | edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name); |
703 | } |
704 | |
705 | /******************** HT Link err device****************************/ |
706 | /* Enable HyperTransport Link Error detection */ |
707 | static void cpc925_htlink_init(struct cpc925_dev_info *dev_info) |
708 | { |
709 | u32 ht_errctrl; |
710 | |
711 | ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); |
712 | if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) { |
713 | ht_errctrl |= HT_ERRCTRL_ENABLE; |
714 | __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); |
715 | } |
716 | } |
717 | |
718 | /* Disable HyperTransport Link Error detection */ |
719 | static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info) |
720 | { |
721 | u32 ht_errctrl; |
722 | |
723 | ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET); |
724 | ht_errctrl &= ~HT_ERRCTRL_ENABLE; |
725 | __raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET); |
726 | } |
727 | |
/* Check for HyperTransport Link errors */
static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
{
	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
	u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
	u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
	u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);

	/* Bail out fast when no error condition is latched anywhere */
	if (!((brgctrl & BRGCTRL_DETSERR) ||
	      (linkctrl & HT_LINKCTRL_DETECTED) ||
	      (errctrl & HT_ERRCTRL_DETECTED) ||
	      (linkerr & HT_LINKERR_DETECTED)))
		return;

	cpc925_printk(KERN_INFO, "HT Link Fault\n"
			"HT register dump:\n");
	cpc925_printk(KERN_INFO, "Bridge Ctrl 0x%08x\n",
			brgctrl);
	cpc925_printk(KERN_INFO, "Link Config Ctrl 0x%08x\n",
			linkctrl);
	cpc925_printk(KERN_INFO, "Error Enum and Ctrl 0x%08x\n",
			errctrl);
	cpc925_printk(KERN_INFO, "Link Error 0x%08x\n",
			linkerr);

	/* Clear by write 1 */
	if (brgctrl & BRGCTRL_DETSERR)
		__raw_writel(BRGCTRL_DETSERR,
			dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (linkctrl & HT_LINKCTRL_DETECTED)
		__raw_writel(HT_LINKCTRL_DETECTED,
			dev_info->vbase + REG_LINKCTRL_OFFSET);

	/* Initiate Secondary Bus Reset to clear the chain failure */
	if (errctrl & ERRCTRL_CHN_FAL)
		__raw_writel(BRGCTRL_SECBUSRESET,
			dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (errctrl & ERRCTRL_RSP_ERR)
		__raw_writel(ERRCTRL_RSP_ERR,
			dev_info->vbase + REG_ERRCTRL_OFFSET);

	if (linkerr & HT_LINKERR_DETECTED)
		__raw_writel(HT_LINKERR_DETECTED,
			dev_info->vbase + REG_LINKERR_OFFSET);

	/* NOTE(review): all HT link faults are reported as CE here —
	 * presumably because they are recoverable after the clears
	 * above; confirm against the CPC925 manual before changing. */
	edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
778 | |
/* Table of the two auxiliary EDAC devices (CPU interface, HT link);
 * iterated by cpc925_add_edac_devices()/cpc925_del_edac_devices(),
 * which stop at the entry whose .init is NULL. */
static struct cpc925_dev_info cpc925_devs[] = {
	{
	.ctl_name = CPC925_CPU_ERR_DEV,
	.init = cpc925_cpu_init,
	.exit = cpc925_cpu_exit,
	.check = cpc925_cpu_check,
	},
	{
	.ctl_name = CPC925_HT_LINK_DEV,
	.init = cpc925_htlink_init,
	.exit = cpc925_htlink_exit,
	.check = cpc925_htlink_check,
	},
	{0}, /* Terminated by NULL */
};
794 | |
/*
 * Add CPU Err detection and HyperTransport Link Err detection
 * as common "edac_device", they have no corresponding device
 * nodes in the Open Firmware DTB and we have to add platform
 * devices for them. Also, they will share the MMIO with that
 * of memory controller.
 */
static void cpc925_add_edac_devices(void __iomem *vbase)
{
	struct cpc925_dev_info *dev_info;

	if (!vbase) {
		cpc925_printk(KERN_ERR, "MMIO not established yet\n");
		return;
	}

	/* One pass per table entry; failures skip to the next device */
	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		dev_info->vbase = vbase;
		/* Each aux device needs its own platform device to hang
		 * the edac_device sysfs entries off */
		dev_info->pdev = platform_device_register_simple(
					dev_info->ctl_name, 0, NULL, 0);
		if (IS_ERR(dev_info->pdev)) {
			cpc925_printk(KERN_ERR,
				"Can't register platform device for %s\n",
				dev_info->ctl_name);
			continue;
		}

		/*
		 * Don't have to allocate private structure but
		 * make use of cpc925_devs[] instead.
		 */
		dev_info->edac_idx = edac_device_alloc_index();
		dev_info->edac_dev =
			edac_device_alloc_ctl_info(0, dev_info->ctl_name,
				1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
		if (!dev_info->edac_dev) {
			cpc925_printk(KERN_ERR, "No memory for edac device\n");
			goto err1;
		}

		dev_info->edac_dev->pvt_info = dev_info;
		dev_info->edac_dev->dev = &dev_info->pdev->dev;
		dev_info->edac_dev->ctl_name = dev_info->ctl_name;
		dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
		dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);

		/* Polling mode only: install the check callback */
		if (edac_op_state == EDAC_OPSTATE_POLL)
			dev_info->edac_dev->edac_check = dev_info->check;

		/* Enable hardware error detection before registering */
		if (dev_info->init)
			dev_info->init(dev_info);

		/* NOTE(review): "> 0" treats a negative return as success;
		 * confirm edac_device_add_device() cannot return < 0 */
		if (edac_device_add_device(dev_info->edac_dev) > 0) {
			cpc925_printk(KERN_ERR,
				"Unable to add edac device for %s\n",
				dev_info->ctl_name);
			goto err2;
		}

		edac_dbg(0, "Successfully added edac device for %s\n",
			 dev_info->ctl_name);

		continue;

		/* Unwind in reverse order of acquisition */
err2:
		if (dev_info->exit)
			dev_info->exit(dev_info);
		edac_device_free_ctl_info(dev_info->edac_dev);
err1:
		platform_device_unregister(dev_info->pdev);
	}
}
867 | |
868 | /* |
869 | * Delete the common "edac_device" for CPU Err Detection |
870 | * and HyperTransport Link Err Detection |
871 | */ |
static void cpc925_del_edac_devices(void)
{
	struct cpc925_dev_info *dev_info;

	/* Walk the same table cpc925_add_edac_devices() populated; the
	 * sentinel entry has a NULL ->init, terminating the loop. */
	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		/* Only tear down entries whose edac device was actually
		 * allocated; ->edac_dev stays NULL (static storage) when
		 * registration was never attempted. */
		if (dev_info->edac_dev) {
			edac_device_del_device(dev_info->edac_dev->dev);
			edac_device_free_ctl_info(dev_info->edac_dev);
			platform_device_unregister(dev_info->pdev);
		}

		/* NOTE(review): ->exit runs even when ->edac_dev is NULL,
		 * i.e. possibly without a matching ->init call — confirm
		 * the exit hooks tolerate that. */
		if (dev_info->exit)
			dev_info->exit(dev_info);

		edac_dbg(0, "Successfully deleted edac device for %s\n",
			 dev_info->ctl_name);
	}
}
890 | |
891 | /* Convert current back-ground scrub rate into byte/sec bandwidth */ |
892 | static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci) |
893 | { |
894 | struct cpc925_mc_pdata *pdata = mci->pvt_info; |
895 | int bw; |
896 | u32 mscr; |
897 | u8 si; |
898 | |
899 | mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET); |
900 | si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT; |
901 | |
902 | edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr); |
903 | |
904 | if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) || |
905 | (si == 0)) { |
906 | cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n"); |
907 | bw = 0; |
908 | } else |
909 | bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si; |
910 | |
911 | return bw; |
912 | } |
913 | |
914 | /* Return 0 for single channel; 1 for dual channel */ |
915 | static int cpc925_mc_get_channels(void __iomem *vbase) |
916 | { |
917 | int dual = 0; |
918 | u32 mbcr; |
919 | |
920 | mbcr = __raw_readl(vbase + REG_MBCR_OFFSET); |
921 | |
922 | /* |
923 | * Dual channel only when 128-bit wide physical bus |
924 | * and 128-bit configuration. |
925 | */ |
926 | if (((mbcr & MBCR_64BITCFG_MASK) == 0) && |
927 | ((mbcr & MBCR_64BITBUS_MASK) == 0)) |
928 | dual = 1; |
929 | |
930 | edac_dbg(0, "%s channel\n", (dual > 0) ? "Dual" : "Single"); |
931 | |
932 | return dual; |
933 | } |
934 | |
935 | static int cpc925_probe(struct platform_device *pdev) |
936 | { |
937 | static int edac_mc_idx; |
938 | struct mem_ctl_info *mci; |
939 | struct edac_mc_layer layers[2]; |
940 | void __iomem *vbase; |
941 | struct cpc925_mc_pdata *pdata; |
942 | struct resource *r; |
943 | int res = 0, nr_channels; |
944 | |
945 | edac_dbg(0, "%s platform device found!\n", pdev->name); |
946 | |
947 | if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) { |
948 | res = -ENOMEM; |
949 | goto out; |
950 | } |
951 | |
952 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
953 | if (!r) { |
954 | cpc925_printk(KERN_ERR, "Unable to get resource\n"); |
955 | res = -ENOENT; |
956 | goto err1; |
957 | } |
958 | |
959 | if (!devm_request_mem_region(&pdev->dev, |
960 | r->start, |
961 | resource_size(r), |
962 | pdev->name)) { |
963 | cpc925_printk(KERN_ERR, "Unable to request mem region\n"); |
964 | res = -EBUSY; |
965 | goto err1; |
966 | } |
967 | |
968 | vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r)); |
969 | if (!vbase) { |
970 | cpc925_printk(KERN_ERR, "Unable to ioremap device\n"); |
971 | res = -ENOMEM; |
972 | goto err2; |
973 | } |
974 | |
975 | nr_channels = cpc925_mc_get_channels(vbase) + 1; |
976 | |
977 | layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; |
978 | layers[0].size = CPC925_NR_CSROWS; |
979 | layers[0].is_virt_csrow = true; |
980 | layers[1].type = EDAC_MC_LAYER_CHANNEL; |
981 | layers[1].size = nr_channels; |
982 | layers[1].is_virt_csrow = false; |
983 | mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers, |
984 | sizeof(struct cpc925_mc_pdata)); |
985 | if (!mci) { |
986 | cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); |
987 | res = -ENOMEM; |
988 | goto err2; |
989 | } |
990 | |
991 | pdata = mci->pvt_info; |
992 | pdata->vbase = vbase; |
993 | pdata->edac_idx = edac_mc_idx++; |
994 | pdata->name = pdev->name; |
995 | |
996 | mci->pdev = &pdev->dev; |
997 | platform_set_drvdata(pdev, mci); |
998 | mci->dev_name = dev_name(&pdev->dev); |
999 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; |
1000 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
1001 | mci->edac_cap = EDAC_FLAG_SECDED; |
1002 | mci->mod_name = CPC925_EDAC_MOD_STR; |
1003 | mci->mod_ver = CPC925_EDAC_REVISION; |
1004 | mci->ctl_name = pdev->name; |
1005 | |
1006 | if (edac_op_state == EDAC_OPSTATE_POLL) |
1007 | mci->edac_check = cpc925_mc_check; |
1008 | |
1009 | mci->ctl_page_to_phys = NULL; |
1010 | mci->scrub_mode = SCRUB_SW_SRC; |
1011 | mci->set_sdram_scrub_rate = NULL; |
1012 | mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate; |
1013 | |
1014 | cpc925_init_csrows(mci); |
1015 | |
1016 | /* Setup memory controller registers */ |
1017 | cpc925_mc_init(mci); |
1018 | |
1019 | if (edac_mc_add_mc(mci) > 0) { |
1020 | cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n"); |
1021 | goto err3; |
1022 | } |
1023 | |
1024 | cpc925_add_edac_devices(vbase); |
1025 | |
1026 | /* get this far and it's successful */ |
1027 | edac_dbg(0, "success\n"); |
1028 | |
1029 | res = 0; |
1030 | goto out; |
1031 | |
1032 | err3: |
1033 | cpc925_mc_exit(mci); |
1034 | edac_mc_free(mci); |
1035 | err2: |
1036 | devm_release_mem_region(&pdev->dev, r->start, resource_size(r)); |
1037 | err1: |
1038 | devres_release_group(&pdev->dev, cpc925_probe); |
1039 | out: |
1040 | return res; |
1041 | } |
1042 | |
/* Tear down everything cpc925_probe() set up, in reverse dependency order. */
static int cpc925_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	/*
	 * Delete common edac devices before edac mc, because
	 * the former share the MMIO of the latter.
	 */
	cpc925_del_edac_devices();
	cpc925_mc_exit(mci);

	/* Unregister from the EDAC core, then release the mem_ctl_info. */
	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
1059 | |
/* Platform driver binding for the CPC925 memory-controller device. */
static struct platform_driver cpc925_edac_driver = {
	.probe = cpc925_probe,
	.remove = cpc925_remove,
	.driver = {
		   .name = "cpc925_edac",
	}
};
1067 | |
1068 | static int __init cpc925_edac_init(void) |
1069 | { |
1070 | int ret = 0; |
1071 | |
1072 | printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n"); |
1073 | printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n"); |
1074 | |
1075 | /* Only support POLL mode so far */ |
1076 | edac_op_state = EDAC_OPSTATE_POLL; |
1077 | |
1078 | ret = platform_driver_register(&cpc925_edac_driver); |
1079 | if (ret) { |
1080 | printk(KERN_WARNING "Failed to register %s\n", |
1081 | CPC925_EDAC_MOD_STR); |
1082 | } |
1083 | |
1084 | return ret; |
1085 | } |
1086 | |
/* Module exit point: unregister the platform driver. */
static void __exit cpc925_edac_exit(void)
{
	platform_driver_unregister(&cpc925_edac_driver);
}
1091 | |
/* Standard module hookup and metadata. */
module_init(cpc925_edac_init);
module_exit(cpc925_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
1098 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9