drivers/edac/amd64_edac.c

#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

/*
 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent re-enabling the hardware by this driver.
 */
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/*
 * count successfully initialized driver instances for setup_pci_device()
 */
static atomic_t drv_instances = ATOMIC_INIT(0);

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct ecc_settings **ecc_stngs;

/*
 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the closest
 * bandwidth that does not exceed the requested value, i.e. the matching or
 * next-higher scrubval encoding.
 *
 * FIXME: Produce a better mapping/linearisation.
 */
static struct scrubrate {
    u32 scrubval;  /* bit pattern for scrub rate */
    u32 bandwidth; /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
    { 0x01, 1600000000UL},
    { 0x02, 800000000UL},
    { 0x03, 400000000UL},
    { 0x04, 200000000UL},
    { 0x05, 100000000UL},
    { 0x06, 50000000UL},
    { 0x07, 25000000UL},
    { 0x08, 12284069UL},
    { 0x09, 6274509UL},
    { 0x0A, 3121951UL},
    { 0x0B, 1560975UL},
    { 0x0C, 781440UL},
    { 0x0D, 390720UL},
    { 0x0E, 195300UL},
    { 0x0F, 97650UL},
    { 0x10, 48854UL},
    { 0x11, 24427UL},
    { 0x12, 12213UL},
    { 0x13, 6101UL},
    { 0x14, 3051UL},
    { 0x15, 1523UL},
    { 0x16, 761UL},
    { 0x00, 0UL},        /* scrubbing off */
};

static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
                                      u32 *val, const char *func)
{
    int err = 0;

    err = pci_read_config_dword(pdev, offset, val);
    if (err)
        amd64_warn("%s: error reading F%dx%03x.\n",
                   func, PCI_FUNC(pdev->devfn), offset);

    return err;
}

int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
                                u32 val, const char *func)
{
    int err = 0;

    err = pci_write_config_dword(pdev, offset, val);
    if (err)
        amd64_warn("%s: error writing to F%dx%03x.\n",
                   func, PCI_FUNC(pdev->devfn), offset);

    return err;
}

/*
 * Depending on the family, F2 DCT reads need special handling:
 *
 * K8: has a single DCT only
 *
 * F10h: each DCT has its own set of regs
 *  DCT0 -> F2x040..
 *  DCT1 -> F2x140..
 *
 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
 */
static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
                               const char *func)
{
    if (addr >= 0x100)
        return -EINVAL;

    return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
                                const char *func)
{
    return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Select DCT to which PCI cfg accesses are routed
 */
static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
{
    u32 reg = 0;

    amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
    reg &= 0xfffffffe;
    reg |= dct;
    amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}

static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
                                const char *func)
{
    u8 dct = 0;

    if (addr >= 0x140 && addr <= 0x1a0) {
        dct = 1;
        addr -= 0x100;
    }

    f15h_select_dct(pvt, dct);

    return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
}

/*
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * functionality.
 *
 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
 * (dram) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 *
 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.
 */

/*
 * Scan the scrub rate mapping table for a bandwidth value close to the
 * requested one. If the requested rate is higher than any table entry, the
 * highest eligible rate is used.
 */
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
{
    u32 scrubval;
    int i;

    /*
     * Map the configured rate (new_bw) to a value specific to the AMD64
     * memory controller and program it into the register. The table is
     * sorted from highest to lowest bandwidth, so pick the first entry
     * whose bandwidth does not exceed the requested setting. If no entry
     * is suitable, fall to the last entry, which turns DRAM scrubbing
     * off; iterating only to ARRAY_SIZE - 1 guarantees @i never indexes
     * past the table.
     */
    for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
        /*
         * skip scrub rates which aren't recommended
         * (see F10 BKDG, F3x58)
         */
        if (scrubrates[i].scrubval < min_rate)
            continue;

        if (scrubrates[i].bandwidth <= new_bw)
            break;
    }

    scrubval = scrubrates[i].scrubval;

    pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);

    if (scrubval)
        return scrubrates[i].bandwidth;

    return 0;
}
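
/*
 * Worked example for the selection loop above: with min_rate == 0, a
 * request of 600000000 bytes/sec skips 0x01 (1.6 GB/s) and 0x02 (800 MB/s)
 * because both exceed the request, then stops at 0x03 (400 MB/s), the
 * first entry not above the requested bandwidth; 0x03 is programmed into
 * the low five bits of SCRCTRL and 400000000 is returned as the effective
 * rate. With min_rate == 0x5 (families 0x10 and later), entries 0x01-0x04
 * are skipped up front, so the same request settles on 0x05 (100 MB/s).
 */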

static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
{
    struct amd64_pvt *pvt = mci->pvt_info;
    u32 min_scrubrate = 0x5;

    if (boot_cpu_data.x86 == 0xf)
        min_scrubrate = 0x0;

    /* F15h Erratum #505 */
    if (boot_cpu_data.x86 == 0x15)
        f15h_select_dct(pvt, 0);

    return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
}

static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
{
    struct amd64_pvt *pvt = mci->pvt_info;
    u32 scrubval = 0;
    int i, retval = -EINVAL;

    /* F15h Erratum #505 */
    if (boot_cpu_data.x86 == 0x15)
        f15h_select_dct(pvt, 0);

    amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);

    scrubval = scrubval & 0x001F;

    for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
        if (scrubrates[i].scrubval == scrubval) {
            retval = scrubrates[i].bandwidth;
            break;
        }
    }
    return retval;
}

/*
 * returns true if the SysAddr given by sys_addr matches the
 * DRAM base/limit associated with node_id
 */
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
                                   unsigned nid)
{
    u64 addr;

    /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
     * all ones if the most significant implemented address bit is 1.
     * Here we discard bits 63-40. See section 3.4.2 of AMD publication
     * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
     * Application Programming.
     */
    addr = sys_addr & 0x000000ffffffffffull;

    return ((addr >= get_dram_base(pvt, nid)) &&
            (addr <= get_dram_limit(pvt, nid)));
}

/*
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 *
 * On failure, return NULL.
 */
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
                                                u64 sys_addr)
{
    struct amd64_pvt *pvt;
    unsigned node_id;
    u32 intlv_en, bits;

    /*
     * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
     * 3.4.4.2) registers to map the SysAddr to a node ID.
     */
    pvt = mci->pvt_info;

    /*
     * The value of this field should be the same for all DRAM Base
     * registers. Therefore we arbitrarily choose to read it from the
     * register for node 0.
     */
    intlv_en = dram_intlv_en(pvt, 0);

    if (intlv_en == 0) {
        for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
            if (amd64_base_limit_match(pvt, sys_addr, node_id))
                goto found;
        }
        goto err_no_match;
    }

    if (unlikely((intlv_en != 0x01) &&
                 (intlv_en != 0x03) &&
                 (intlv_en != 0x07))) {
        amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
        return NULL;
    }

    bits = (((u32) sys_addr) >> 12) & intlv_en;

    for (node_id = 0; ; ) {
        if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
            break; /* intlv_sel field matches */

        if (++node_id >= DRAM_RANGES)
            goto err_no_match;
    }

    /* sanity test for sys_addr */
    if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
        amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
                   "range for node %d with node interleaving enabled.\n",
                   __func__, sys_addr, node_id);
        return NULL;
    }

found:
    return edac_mc_find((int)node_id);

err_no_match:
    edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
             (unsigned long)sys_addr);

    return NULL;
}
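
/*
 * Worked example for the interleave path above (numbers are illustrative,
 * not from real hardware): with intlv_en == 0x03 the system interleaves
 * across four nodes on SysAddr bits [13:12]. For sys_addr == 0x2000,
 * bits = (0x2000 >> 12) & 0x3 = 2, so the node whose DRAM Limit[IntlvSel]
 * field reads 2 claims the address.
 */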

/*
 * compute the CS base address of the @csrow on the DRAM controller @dct.
 * For details see F2x[5C:40] in the processor's BKDG
 */
static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
                                 u64 *base, u64 *mask)
{
    u64 csbase, csmask, base_bits, mask_bits;
    u8 addr_shift;

    if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
        csbase = pvt->csels[dct].csbases[csrow];
        csmask = pvt->csels[dct].csmasks[csrow];
        base_bits = GENMASK(21, 31) | GENMASK(9, 15);
        mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
        addr_shift = 4;
    } else {
        csbase = pvt->csels[dct].csbases[csrow];
        csmask = pvt->csels[dct].csmasks[csrow >> 1];
        addr_shift = 8;

        if (boot_cpu_data.x86 == 0x15)
            base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
        else
            base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
    }

    *base = (csbase & base_bits) << addr_shift;

    *mask = ~0ULL;
    /* poke holes for the csmask */
    *mask &= ~(mask_bits << addr_shift);
    /* OR them in */
    *mask |= (csmask & mask_bits) << addr_shift;
}
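
/*
 * Worked example (illustrative; note GENMASK() here is the driver's local
 * GENMASK(lo, hi) helper, low bit first): on F10h, addr_shift is 8, so a
 * DCSB value with only bit 19 set yields a CS base of
 * 1 << (19 + 8) = 0x8000000, i.e. 128 MB. The mask starts as all ones,
 * the register's base/mask bit positions are punched out, and the csmask
 * bits are OR-ed back in at the same shifted positions.
 */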

#define for_each_chip_select(i, dct, pvt) \
    for (i = 0; i < pvt->csels[dct].b_cnt; i++)

#define chip_select_base(i, dct, pvt) \
    pvt->csels[dct].csbases[i]

#define for_each_chip_select_mask(i, dct, pvt) \
    for (i = 0; i < pvt->csels[dct].m_cnt; i++)

/*
 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
 */
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
{
    struct amd64_pvt *pvt;
    int csrow;
    u64 base, mask;

    pvt = mci->pvt_info;

    for_each_chip_select(csrow, 0, pvt) {
        if (!csrow_enabled(csrow, 0, pvt))
            continue;

        get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);

        mask = ~mask;

        if ((input_addr & mask) == (base & mask)) {
            edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
                     (unsigned long)input_addr, csrow,
                     pvt->mc_node_id);

            return csrow;
        }
    }
    edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
             (unsigned long)input_addr, pvt->mc_node_id);

    return -1;
}

/*
 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
 */
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
                             u64 *hole_offset, u64 *hole_size)
{
    struct amd64_pvt *pvt = mci->pvt_info;
    u64 base;

    /* only revE and later have the DRAM Hole Address Register */
    if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
        edac_dbg(1, " revision %d for node %d does not support DHAR\n",
                 pvt->ext_model, pvt->mc_node_id);
        return 1;
    }

    /* valid for Fam10h and above */
    if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
        edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
        return 1;
    }

    if (!dhar_valid(pvt)) {
        edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
                 pvt->mc_node_id);
        return 1;
    }

    /* This node has Memory Hoisting */

    /* +------------------+--------------------+--------------------+-----
     * | memory           | DRAM hole          | relocated          |
     * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
     * |                  |                    | DRAM hole          |
     * |                  |                    | [0x100000000,      |
     * |                  |                    |  (0x100000000+     |
     * |                  |                    |   (0xffffffff-x))] |
     * +------------------+--------------------+--------------------+-----
     *
     * Above is a diagram of physical memory showing the DRAM hole and the
     * relocated addresses from the DRAM hole. As shown, the DRAM hole
     * starts at address x (the base address) and extends through address
     * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
     * addresses in the hole so that they start at 0x100000000.
     */

    base = dhar_base(pvt);

    *hole_base = base;
    *hole_size = (0x1ull << 32) - base;

    if (boot_cpu_data.x86 > 0xf)
        *hole_offset = f10_dhar_offset(pvt);
    else
        *hole_offset = k8_dhar_offset(pvt);

    edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
             pvt->mc_node_id, (unsigned long)*hole_base,
             (unsigned long)*hole_offset, (unsigned long)*hole_size);

    return 0;
}
EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
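
/*
 * Worked example (numbers are illustrative): a DHAR base of 0xc0000000
 * (3 GB) yields hole_base = 0xc0000000 and
 * hole_size = 0x100000000 - 0xc0000000 = 0x40000000 (1 GB). The DRAM
 * behind the hole is then reachable at SysAddrs
 * [0x100000000, 0x140000000), and hole_offset is the amount to subtract
 * from such a SysAddr to get back into the DRAM address space.
 */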

/*
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p. 70).
 */
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
    struct amd64_pvt *pvt = mci->pvt_info;
    u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
    int ret = 0;

    dram_base = get_dram_base(pvt, pvt->mc_node_id);

    ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
                                   &hole_size);
    if (!ret) {
        if ((sys_addr >= (1ull << 32)) &&
            (sys_addr < ((1ull << 32) + hole_size))) {
            /* use DHAR to translate SysAddr to DramAddr */
            dram_addr = sys_addr - hole_offset;

            edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
                     (unsigned long)sys_addr,
                     (unsigned long)dram_addr);

            return dram_addr;
        }
    }

    /*
     * Translate the SysAddr to a DramAddr as shown near the start of
     * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
     * only deals with 40-bit values. Therefore we discard bits 63-40 of
     * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
     * discard are all 1s. Otherwise the bits we discard are all 0s. See
     * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
     * Programmer's Manual Volume 1 Application Programming.
     */
    dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;

    edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
             (unsigned long)sys_addr, (unsigned long)dram_addr);
    return dram_addr;
}
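
/*
 * Worked example (illustrative, continuing the DHAR numbers above):
 * assume node 0 with DRAM base 0, a hole based at 0xc0000000 and
 * hole_offset = 0x40000000. A SysAddr of 0x100000000 lies inside the
 * relocated window, so the hoisting path returns
 * 0x100000000 - 0x40000000 = 0xc0000000, which is exactly where the hole
 * begins in DramAddr space.
 */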

/*
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
 */
static int num_node_interleave_bits(unsigned intlv_en)
{
    static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
    int n;

    BUG_ON(intlv_en > 7);
    n = intlv_shift_table[intlv_en];
    return n;
}
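
/*
 * The only valid IntlvEn encodings are 0, 1, 3 and 7: e.g. intlv_en == 0x7
 * means SysAddr bits [14:12] select one of eight nodes, so three bits are
 * consumed by node interleaving.
 */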

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
    struct amd64_pvt *pvt;
    int intlv_shift;
    u64 input_addr;

    pvt = mci->pvt_info;

    /*
     * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
     * concerning translating a DramAddr to an InputAddr.
     */
    intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
    input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
                 (dram_addr & 0xfff);

    edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
             intlv_shift, (unsigned long)dram_addr,
             (unsigned long)input_addr);

    return input_addr;
}

/*
 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
 */
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
{
    u64 input_addr;

    input_addr =
        dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

    edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
             (unsigned long)sys_addr, (unsigned long)input_addr);

    return input_addr;
}

/*
 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
 */
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
{
    struct amd64_pvt *pvt;
    unsigned node_id, intlv_shift;
    u64 bits, dram_addr;
    u32 intlv_sel;

    /*
     * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
     * shows how to translate a DramAddr to an InputAddr. Here we reverse
     * this procedure. When translating from a DramAddr to an InputAddr, the
     * bits used for node interleaving are discarded. Here we recover these
     * bits from the IntlvSel field of the DRAM Limit register (section
     * 3.4.4.2) for the node that input_addr is associated with.
     */
    pvt = mci->pvt_info;
    node_id = pvt->mc_node_id;

    BUG_ON(node_id > 7);

    intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
    if (intlv_shift == 0) {
        edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
                 (unsigned long)input_addr);

        return input_addr;
    }

    bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
           (input_addr & 0xfff);

    intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
    dram_addr = bits + (intlv_sel << 12);

    edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
             (unsigned long)input_addr,
             (unsigned long)dram_addr, intlv_shift);

    return dram_addr;
}

/*
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
 */
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
{
    struct amd64_pvt *pvt = mci->pvt_info;
    u64 hole_base, hole_offset, hole_size, base, sys_addr;
    int ret = 0;

    ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
                                   &hole_size);
    if (!ret) {
        if ((dram_addr >= hole_base) &&
            (dram_addr < (hole_base + hole_size))) {
            sys_addr = dram_addr + hole_offset;

            edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
                     (unsigned long)dram_addr,
                     (unsigned long)sys_addr);

            return sys_addr;
        }
    }

    base = get_dram_base(pvt, pvt->mc_node_id);
    sys_addr = dram_addr + base;

    /*
     * The sys_addr we have computed up to this point is a 40-bit value
     * because the k8 deals with 40-bit values. However, the value we are
     * supposed to return is a full 64-bit physical address. The AMD
     * x86-64 architecture specifies that the most significant implemented
     * address bit through bit 63 of a physical address must be either all
     * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
     * 64-bit value below. See section 3.4.2 of AMD publication 24592:
     * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
     * Programming.
     */
    sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

    edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
             pvt->mc_node_id, (unsigned long)dram_addr,
             (unsigned long)sys_addr);

    return sys_addr;
}
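
/*
 * Sign-extension worked example: if the 40-bit result has bit 39 set,
 * e.g. sys_addr == 0x8000000000, then (sys_addr & (1ull << 39)) - 1 is
 * 0x7fffffffff, its complement is 0xffffff8000000000, and OR-ing that in
 * gives 0xffffff8000000000. If bit 39 is clear the OR-ed mask is 0 and
 * sys_addr is returned unchanged.
 */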

/*
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
 */
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
                                         u64 input_addr)
{
    return dram_addr_to_sys_addr(mci,
                                 input_addr_to_dram_addr(mci, input_addr));
}

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
                                                    u32 *page, u32 *offset)
{
    *page = (u32) (error_address >> PAGE_SHIFT);
    *offset = ((u32) error_address) & ~PAGE_MASK;
}
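
/*
 * E.g. with 4 KiB pages (PAGE_SHIFT == 12), an error address of
 * 0x12345678 splits into page 0x12345 and offset 0x678.
 */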

/*
 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
 * error.
 */
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
{
    int csrow;

    csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

    if (csrow == -1)
        amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
                          "address 0x%lx\n", (unsigned long)sys_addr);
    return csrow;
}

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);

/*
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
 * are ECC capable.
 */
static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
{
    u8 bit;
    unsigned long edac_cap = EDAC_FLAG_NONE;

    bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
        ? 19
        : 17;

    if (pvt->dclr0 & BIT(bit))
        edac_cap = EDAC_FLAG_SECDED;

    return edac_cap;
}

static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
{
    edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

    edac_dbg(1, " DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
             (dclr & BIT(16)) ? "un" : "",
             (dclr & BIT(19)) ? "yes" : "no");

    edac_dbg(1, " PAR/ERR parity: %s\n",
             (dclr & BIT(8)) ? "enabled" : "disabled");

    if (boot_cpu_data.x86 == 0x10)
        edac_dbg(1, " DCT 128bit mode width: %s\n",
                 (dclr & BIT(11)) ? "128b" : "64b");

    edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
             (dclr & BIT(12)) ? "yes" : "no",
             (dclr & BIT(13)) ? "yes" : "no",
             (dclr & BIT(14)) ? "yes" : "no",
             (dclr & BIT(15)) ? "yes" : "no");
}

/* Display and decode various NB registers for debug purposes. */
static void dump_misc_regs(struct amd64_pvt *pvt)
{
    edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

    edac_dbg(1, " NB two channel DRAM capable: %s\n",
             (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");

    edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
             (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
             (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");

    amd64_dump_dramcfg_low(pvt->dclr0, 0);

    edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

    edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
             pvt->dhar, dhar_base(pvt),
             (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
                                        : f10_dhar_offset(pvt));

    edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");

    amd64_debug_display_dimm_sizes(pvt, 0);

    /* everything below this point is Fam10h and above */
    if (boot_cpu_data.x86 == 0xf)
        return;

    amd64_debug_display_dimm_sizes(pvt, 1);

    amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));

    /* Only if NOT ganged does dclr1 have valid info */
    if (!dct_ganging_enabled(pvt))
        amd64_dump_dramcfg_low(pvt->dclr1, 1);
}

/*
 * see BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
 */
static void prep_chip_selects(struct amd64_pvt *pvt)
{
    if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
        pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
        pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
    } else {
        pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
        pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
    }
}

/*
 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
 */
static void read_dct_base_mask(struct amd64_pvt *pvt)
{
    int cs;

    prep_chip_selects(pvt);

    for_each_chip_select(cs, 0, pvt) {
        int reg0 = DCSB0 + (cs * 4);
        int reg1 = DCSB1 + (cs * 4);
        u32 *base0 = &pvt->csels[0].csbases[cs];
        u32 *base1 = &pvt->csels[1].csbases[cs];

        if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
            edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
                     cs, *base0, reg0);

        if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
            continue;

        if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
            edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
                     cs, *base1, reg1);
    }

    for_each_chip_select_mask(cs, 0, pvt) {
        int reg0 = DCSM0 + (cs * 4);
        int reg1 = DCSM1 + (cs * 4);
        u32 *mask0 = &pvt->csels[0].csmasks[cs];
        u32 *mask1 = &pvt->csels[1].csmasks[cs];

        if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
            edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
                     cs, *mask0, reg0);

        if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
            continue;

        if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
            edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
                     cs, *mask1, reg1);
    }
}

static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
{
    enum mem_type type;

    /* F15h supports only DDR3 */
    if (boot_cpu_data.x86 >= 0x15)
        type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
    else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
        if (pvt->dchr0 & DDR3_MODE)
            type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
        else
            type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
    } else {
        type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
    }

    amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

    return type;
}

/* Get the number of DCT channels the memory controller is using. */
static int k8_early_channel_count(struct amd64_pvt *pvt)
{
    int flag;

    if (pvt->ext_model >= K8_REV_F)
        /* RevF (NPT) and later */
        flag = pvt->dclr0 & WIDTH_128;
    else
        /* RevE and earlier */
        flag = pvt->dclr0 & REVE_WIDTH_128;

    /* not used */
    pvt->dclr1 = 0;

    return (flag) ? 2 : 1;
}

/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct mce *m)
{
    struct cpuinfo_x86 *c = &boot_cpu_data;
    u64 addr;
    u8 start_bit = 1;
    u8 end_bit = 47;

    if (c->x86 == 0xf) {
        start_bit = 3;
        end_bit = 39;
    }

    addr = m->addr & GENMASK(start_bit, end_bit);

    /*
     * Erratum 637 workaround
     */
    if (c->x86 == 0x15) {
        struct amd64_pvt *pvt;
        u64 cc6_base, tmp_addr;
        u32 tmp;
        u8 mce_nid, intlv_en;

        if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
            return addr;

        mce_nid = amd_get_nb_id(m->extcpu);
        pvt = mcis[mce_nid]->pvt_info;

        amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
        intlv_en = tmp >> 21 & 0x7;

        /* add [47:27] + 3 trailing bits */
        cc6_base = (tmp & GENMASK(0, 20)) << 3;

        /* reverse and add DramIntlvEn */
        cc6_base |= intlv_en ^ 0x7;

        /* pin at [47:24] */
        cc6_base <<= 24;

        if (!intlv_en)
            return cc6_base | (addr & GENMASK(0, 23));

        amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);

                                        /* faster log2 */
        tmp_addr = (addr & GENMASK(12, 23)) << __fls(intlv_en + 1);

        /* OR DramIntlvSel into bits [14:12] */
        tmp_addr |= (tmp & GENMASK(21, 23)) >> 9;

        /* add remaining [11:0] bits from original MC4_ADDR */
        tmp_addr |= addr & GENMASK(0, 11);

        return cc6_base | tmp_addr;
    }

    return addr;
}
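
/*
 * Example of the initial masking above (illustrative; GENMASK() here
 * takes (lo, hi)): on K8 the mask keeps MC4_ADDR bits [39:3], so an MCA
 * address of 0x123456789f is trimmed to 0x1234567898; on F10h and later,
 * bits [47:1] are kept instead.
 */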

static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
    struct cpuinfo_x86 *c = &boot_cpu_data;
    int off = range << 3;

    amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
    amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);

    if (c->x86 == 0xf)
        return;

    if (!dram_rw(pvt, range))
        return;

    amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
    amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);

    /* Factor in CC6 save area by reading dst node's limit reg */
    if (c->x86 == 0x15) {
        struct pci_dev *f1 = NULL;
        u8 nid = dram_dst_node(pvt, range);
        u32 llim;

        f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
        if (WARN_ON(!f1))
            return;

        amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);

        pvt->ranges[range].lim.lo &= GENMASK(0, 15);

                                        /* {[39:27],111b} */
        pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;

        pvt->ranges[range].lim.hi &= GENMASK(0, 7);

                                        /* [47:40] */
        pvt->ranges[range].lim.hi |= llim >> 13;

        pci_dev_put(f1);
    }
}

static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
                                    u16 syndrome)
{
    struct mem_ctl_info *src_mci;
    struct amd64_pvt *pvt = mci->pvt_info;
    int channel, csrow;
    u32 page, offset;

    error_address_to_page_and_offset(sys_addr, &page, &offset);

    /*
     * Find out which node the error address belongs to. This may be
     * different from the node that detected the error.
     */
    src_mci = find_mc_by_sys_addr(mci, sys_addr);
    if (!src_mci) {
        amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
                     (unsigned long)sys_addr);
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                             page, offset, syndrome,
                             -1, -1, -1,
                             "failed to map error addr to a node",
                             "");
        return;
    }

    /* Now map the sys_addr to a CSROW */
    csrow = sys_addr_to_csrow(src_mci, sys_addr);
    if (csrow < 0) {
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                             page, offset, syndrome,
                             -1, -1, -1,
                             "failed to map error addr to a csrow",
                             "");
        return;
    }

    /* CHIPKILL enabled */
    if (pvt->nbcfg & NBCFG_CHIPKILL) {
        channel = get_channel_from_ecc_syndrome(mci, syndrome);
        if (channel < 0) {
            /*
             * Syndrome didn't map, so we don't know which of the
             * 2 DIMMs is in error. So we need to ID 'both' of them
             * as suspect.
             */
            amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
                                   "possible error reporting race\n",
                          syndrome);
            edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                                 page, offset, syndrome,
                                 csrow, -1, -1,
                                 "unknown syndrome - possible error reporting race",
                                 "");
            return;
        }
    } else {
        /*
         * non-chipkill ecc mode
         *
         * The k8 documentation is unclear about how to determine the
         * channel number when using non-chipkill memory. This method
         * was obtained from email communication with someone at AMD.
         * (Wish the email was placed in this comment - norsk)
         */
        channel = ((sys_addr & BIT(3)) != 0);
    }

    edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci, 1,
                         page, offset, syndrome,
                         csrow, channel, -1,
                         "", "");
}

static int ddr2_cs_size(unsigned i, bool dct_width)
{
    unsigned shift = 0;

    if (i <= 2)
        shift = i;
    else if (!(i & 0x1))
        shift = i >> 1;
    else
        shift = (i + 1) >> 1;

    return 128 << (shift + !!dct_width);
}
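
/*
 * E.g. cs_mode i == 5 (odd, > 2) gives shift = (5 + 1) >> 1 = 3, so a
 * 64-bit DCT reports 128 << 3 = 1024 MB and a 128-bit DCT twice that,
 * 2048 MB.
 */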

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                                  unsigned cs_mode)
{
    u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

    if (pvt->ext_model >= K8_REV_F) {
        WARN_ON(cs_mode > 11);
        return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
    } else if (pvt->ext_model >= K8_REV_D) {
        unsigned diff;
        WARN_ON(cs_mode > 10);

        /*
         * the below calculation, besides trying to win an obfuscated C
         * contest, maps cs_mode values to DIMM chip select sizes. The
         * mappings are:
         *
         * cs_mode      CS size (mb)
         * =======      ============
         * 0            32
         * 1            64
         * 2            128
         * 3            128
         * 4            256
         * 5            512
         * 6            256
         * 7            512
         * 8            1024
         * 9            1024
         * 10           2048
         *
         * Basically, it calculates a value with which to shift the
         * smallest CS size of 32MB.
         *
         * ddr[23]_cs_size have a similar purpose.
         */
        diff = cs_mode/3 + (unsigned)(cs_mode > 5);

        return 32 << (cs_mode - diff);
    } else {
        WARN_ON(cs_mode > 6);
        return 32 << cs_mode;
    }
}

/*
 * Get the number of DCT channels in use.
 *
 * Return:
 *     number of Memory Channels in operation
 * Pass back:
 *     contents of the DCL0_LOW register
 */
static int f1x_early_channel_count(struct amd64_pvt *pvt)
{
    int i, j, channels = 0;

    /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
    if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
        return 2;

    /*
     * Need to check if in unganged mode: In such, there are 2 channels,
     * but they are not in 128 bit mode and thus the above 'dclr0' status
     * bit will be OFF.
     *
     * Need to check DCT0[0] and DCT1[0] to see if only one of them has
     * their CSEnable bit on. If so, then SINGLE DIMM case.
     */
    edac_dbg(0, "Data width is not 128 bits - need more decoding\n");

    /*
     * Check DRAM Bank Address Mapping values for each DIMM to see if there
     * is more than just one DIMM present in unganged mode. Need to check
     * both controllers since DIMMs can be placed in either one.
     */
    for (i = 0; i < 2; i++) {
        u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);

        for (j = 0; j < 4; j++) {
            if (DBAM_DIMM(j, dbam) > 0) {
                channels++;
                break;
            }
        }
    }

    if (channels > 2)
        channels = 2;

    amd64_info("MCT channel count: %d\n", channels);

    return channels;
}

static int ddr3_cs_size(unsigned i, bool dct_width)
{
    unsigned shift = 0;
    int cs_size = 0;

    if (i == 0 || i == 3 || i == 4)
        cs_size = -1;
    else if (i <= 2)
        shift = i;
    else if (i == 12)
        shift = 7;
    else if (!(i & 0x1))
        shift = i >> 1;
    else
        shift = (i + 1) >> 1;

    if (cs_size != -1)
        cs_size = (128 * (1 << !!dct_width)) << shift;

    return cs_size;
}

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                                   unsigned cs_mode)
{
    u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;

    WARN_ON(cs_mode > 11);

    if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
        return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
    else
        return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
}

/*
 * F15h supports only 64bit DCT interfaces
 */
static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
                                   unsigned cs_mode)
{
    WARN_ON(cs_mode > 12);

    return ddr3_cs_size(cs_mode, false);
}

static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
    if (boot_cpu_data.x86 == 0xf)
        return;

    if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
        edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
                 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));

        edac_dbg(0, " DCTs operate in %s mode\n",
                 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));

        if (!dct_ganging_enabled(pvt))
            edac_dbg(0, " Address range split per DCT: %s\n",
                     (dct_high_range_enabled(pvt) ? "yes" : "no"));

        edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
                 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
                 (dct_memory_cleared(pvt) ? "yes" : "no"));

        edac_dbg(0, " channel interleave: %s, "
                 "interleave bits selector: 0x%x\n",
                 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
                 dct_sel_interleave_addr(pvt));
    }

    amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
}

/*
 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9
 * Memory Interleaving Modes.
 */
static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
                                bool hi_range_sel, u8 intlv_en)
{
    u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;

    if (dct_ganging_enabled(pvt))
        return 0;

    if (hi_range_sel)
        return dct_sel_high;

    /*
     * see F2x110[DctSelIntLvAddr] - channel interleave mode
     */
    if (dct_interleave_enabled(pvt)) {
        u8 intlv_addr = dct_sel_interleave_addr(pvt);

        /* return DCT select function: 0=DCT0, 1=DCT1 */
        if (!intlv_addr)
            return sys_addr >> 6 & 1;

        if (intlv_addr & 0x2) {
            u8 shift = intlv_addr & 0x1 ? 9 : 6;
            u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

            return ((sys_addr >> shift) & 1) ^ temp;
        }

        return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
    }

    if (dct_high_range_enabled(pvt))
        return ~dct_sel_high & 1;

    return 0;
}
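
/*
 * Worked example for the hashed mode above (illustrative): with
 * intlv_addr == 0x2 the channel is A[6] XOR-ed with the parity of
 * sys_addr bits [20:16]. For sys_addr == 0x10040, bit 6 is 1 and bits
 * [20:16] contain a single set bit, so temp == 1 and the access is
 * routed to channel 1 ^ 1 = 0.
 */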

/* Convert the sys_addr to the normalized DCT address */
static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
                                 u64 sys_addr, bool hi_rng,
                                 u32 dct_sel_base_addr)
{
    u64 chan_off;
    u64 dram_base = get_dram_base(pvt, range);
    u64 hole_off = f10_dhar_offset(pvt);
    u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;

    if (hi_rng) {
        /*
         * if
         *   base address of high range is below 4Gb
         *   (bits [47:27] at [31:11])
         *   DRAM address space on this DCT is hoisted above 4Gb &&
         *   sys_addr > 4Gb
         *
         *      remove hole offset from sys_addr
         * else
         *      remove high range offset from sys_addr
         */
        if ((!(dct_sel_base_addr >> 16) ||
             dct_sel_base_addr < dhar_base(pvt)) &&
            dhar_valid(pvt) &&
            (sys_addr >= BIT_64(32)))
            chan_off = hole_off;
        else
            chan_off = dct_sel_base_off;
    } else {
        /*
         * if
         *   we have a valid hole &&
         *   sys_addr > 4Gb
         *
         *      remove hole
         * else
         *      remove dram base to normalize to DCT address
         */
        if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
            chan_off = hole_off;
        else
            chan_off = dram_base;
    }

    return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
}

/*
 * checks if the csrow passed in is marked as SPARED, if so returns the new
 * spare row
 */
static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
{
    int tmp_cs;

    if (online_spare_swap_done(pvt, dct) &&
        csrow == online_spare_bad_dramcs(pvt, dct)) {

        for_each_chip_select(tmp_cs, dct, pvt) {
            if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
                csrow = tmp_cs;
                break;
            }
        }
    }
    return csrow;
}

/*
 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 * Return:
 *     -EINVAL: NOT FOUND
 *     0..csrow = Chip-Select Row
 */
static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
{
    struct mem_ctl_info *mci;
    struct amd64_pvt *pvt;
    u64 cs_base, cs_mask;
    int cs_found = -EINVAL;
    int csrow;

    mci = mcis[nid];
    if (!mci)
        return cs_found;

    pvt = mci->pvt_info;

    edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);

    for_each_chip_select(csrow, dct, pvt) {
        if (!csrow_enabled(csrow, dct, pvt))
            continue;

        get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);

        edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
                 csrow, cs_base, cs_mask);

        cs_mask = ~cs_mask;

        edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
                 (in_addr & cs_mask), (cs_base & cs_mask));

        if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
            cs_found = f10_process_possible_spare(pvt, dct, csrow);

            edac_dbg(1, " MATCH csrow=%d\n", cs_found);
            break;
        }
    }
    return cs_found;
}

/*
 * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
 * swapped with a region located at the bottom of memory so that the GPU can use
 * the interleaved region and thus two channels.
 */
static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
    u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;

    if (boot_cpu_data.x86 == 0x10) {
        /* only revC3 and revE have that feature */
        if (boot_cpu_data.x86_model < 4 ||
            (boot_cpu_data.x86_model < 0xa &&
             boot_cpu_data.x86_mask < 3))
            return sys_addr;
    }

    amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);

    if (!(swap_reg & 0x1))
        return sys_addr;

    swap_base = (swap_reg >> 3) & 0x7f;
    swap_limit = (swap_reg >> 11) & 0x7f;
    rgn_size = (swap_reg >> 20) & 0x7f;
    tmp_addr = sys_addr >> 27;

    if (!(sys_addr >> 34) &&
        (((tmp_addr >= swap_base) &&
          (tmp_addr <= swap_limit)) ||
         (tmp_addr < rgn_size)))
        return sys_addr ^ (u64)swap_base << 27;

    return sys_addr;
}
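
/*
 * Worked example (illustrative; assumes swap_limit covers the window):
 * the fields are in 128 MB units, so swap_base == 0x2 marks a swapped
 * window starting at 2 << 27 = 0x10000000. A sys_addr of 0x10000000 has
 * tmp_addr == 0x2 == swap_base, so it is XOR-ed with swap_base << 27 and
 * comes out as 0x0, i.e. the framebuffer region and the bottom of memory
 * trade places.
 */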

/* For a given @dram_range, check if @sys_addr falls within it. */
static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
                                  u64 sys_addr, int *nid, int *chan_sel)
{
    int cs_found = -EINVAL;
    u64 chan_addr;
    u32 dct_sel_base;
    u8 channel;
    bool high_range = false;

    u8 node_id = dram_dst_node(pvt, range);
    u8 intlv_en = dram_intlv_en(pvt, range);
    u32 intlv_sel = dram_intlv_sel(pvt, range);

    edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
             range, sys_addr, get_dram_limit(pvt, range));

    if (dhar_valid(pvt) &&
        dhar_base(pvt) <= sys_addr &&
        sys_addr < BIT_64(32)) {
        amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
                   sys_addr);
        return -EINVAL;
    }

    if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
        return -EINVAL;

    sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);

    dct_sel_base = dct_sel_baseaddr(pvt);

    /*
     * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
     * select between DCT0 and DCT1.
     */
    if (dct_high_range_enabled(pvt) &&
       !dct_ganging_enabled(pvt) &&
       ((sys_addr >> 27) >= (dct_sel_base >> 11)))
        high_range = true;

    channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);

    chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
                                      high_range, dct_sel_base);

    /* Remove node interleaving, see F1x120 */
    if (intlv_en)
        chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
                    (chan_addr & 0xfff);

    /* remove channel interleave */
    if (dct_interleave_enabled(pvt) &&
       !dct_high_range_enabled(pvt) &&
       !dct_ganging_enabled(pvt)) {

        if (dct_sel_interleave_addr(pvt) != 1) {
            if (dct_sel_interleave_addr(pvt) == 0x3)
                /* hash 9 */
                chan_addr = ((chan_addr >> 10) << 9) |
                            (chan_addr & 0x1ff);
            else
                /* A[6] or hash 6 */
                chan_addr = ((chan_addr >> 7) << 6) |
                            (chan_addr & 0x3f);
        } else
            /* A[12] */
            chan_addr = ((chan_addr >> 13) << 12) |
                        (chan_addr & 0xfff);
    }

    edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);

    cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);

    if (cs_found >= 0) {
        *nid = node_id;
        *chan_sel = channel;
    }
    return cs_found;
}

static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
                                       int *node, int *chan_sel)
{
    int cs_found = -EINVAL;
    unsigned range;

    for (range = 0; range < DRAM_RANGES; range++) {

        if (!dram_rw(pvt, range))
            continue;

        if ((get_dram_base(pvt, range) <= sys_addr) &&
            (get_dram_limit(pvt, range) >= sys_addr)) {

            cs_found = f1x_match_to_this_node(pvt, range,
                                              sys_addr, node,
                                              chan_sel);
            if (cs_found >= 0)
                break;
        }
    }
    return cs_found;
}

/*
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
 * (MCX_ADDR).
 */
static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
                                     u16 syndrome)
{
    struct amd64_pvt *pvt = mci->pvt_info;
    u32 page, offset;
    int nid, csrow, chan = 0;

    error_address_to_page_and_offset(sys_addr, &page, &offset);

    csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

    if (csrow < 0) {
        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                             page, offset, syndrome,
                             -1, -1, -1,
                             "failed to map error addr to a csrow",
                             "");
        return;
    }

    /*
     * We need the syndromes for channel detection only when we're
     * ganged. Otherwise @chan should already contain the channel at
     * this point.
     */
    if (dct_ganging_enabled(pvt))
        chan = get_channel_from_ecc_syndrome(mci, syndrome);

    edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
                         page, offset, syndrome,
                         csrow, chan, -1,
                         "", "");
}

/*
 * debug routine to display the memory sizes of all logical DIMMs and their
 * CSROWs
 */
static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
{
    int dimm, size0, size1, factor = 0;
    u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
    u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;

    if (boot_cpu_data.x86 == 0xf) {
        if (pvt->dclr0 & WIDTH_128)
            factor = 1;

        /* K8 families < revF not supported yet */
        if (pvt->ext_model < K8_REV_F)
            return;
        else
            WARN_ON(ctrl != 0);
    }

    dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
    dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
                                               : pvt->csels[0].csbases;

    edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
             ctrl, dbam);

    edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

    /* Dump memory sizes for DIMM and its CSROWs */
    for (dimm = 0; dimm < 4; dimm++) {

        size0 = 0;
        if (dcsb[dimm*2] & DCSB_CS_ENABLE)
            size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
                                         DBAM_DIMM(dimm, dbam));

        size1 = 0;
        if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
            size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
                                         DBAM_DIMM(dimm, dbam));

        amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
                   dimm * 2, size0 << factor,
                   dimm * 2 + 1, size1 << factor);
    }
}
1681
1682static struct amd64_family_type amd64_family_types[] = {
1683    [K8_CPUS] = {
1684        .ctl_name = "K8",
1685        .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1686        .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1687        .ops = {
1688            .early_channel_count = k8_early_channel_count,
1689            .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1690            .dbam_to_cs = k8_dbam_to_chip_select,
1691            .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1692        }
1693    },
1694    [F10_CPUS] = {
1695        .ctl_name = "F10h",
1696        .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1697        .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1698        .ops = {
1699            .early_channel_count = f1x_early_channel_count,
1700            .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1701            .dbam_to_cs = f10_dbam_to_chip_select,
1702            .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1703        }
1704    },
1705    [F15_CPUS] = {
1706        .ctl_name = "F15h",
1707        .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1708        .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1709        .ops = {
1710            .early_channel_count = f1x_early_channel_count,
1711            .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1712            .dbam_to_cs = f15_dbam_to_chip_select,
1713            .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1714        }
1715    },
1716};
1717
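/*
 * Find the PCI device with the given vendor/device ID that sits on the same
 * bus and in the same slot as @related, i.e. another function of the same
 * northbridge. Returns a referenced pci_dev (the caller must pci_dev_put()
 * it) or NULL if no such sibling function exists.
 */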
1718static struct pci_dev *pci_get_related_function(unsigned int vendor,
1719                        unsigned int device,
1720                        struct pci_dev *related)
1721{
1722    struct pci_dev *dev = NULL;
1723
1724    dev = pci_get_device(vendor, device, dev);
1725    while (dev) {
1726        if ((dev->bus->number == related->bus->number) &&
1727            (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1728            break;
1729        dev = pci_get_device(vendor, device, dev);
1730    }
1731
1732    return dev;
1733}
1734
1735/*
1736 * These are tables of eigenvectors (one per line) which can be used for the
1737 * construction of the syndrome tables. The modified syndrome search algorithm
1738 * uses those to find the symbol in error and thus the DIMM.
1739 *
1740 * Algorithm courtesy of Ross LaFetra from AMD.
1741 */
1742static u16 x4_vectors[] = {
1743    0x2f57, 0x1afe, 0x66cc, 0xdd88,
1744    0x11eb, 0x3396, 0x7f4c, 0xeac8,
1745    0x0001, 0x0002, 0x0004, 0x0008,
1746    0x1013, 0x3032, 0x4044, 0x8088,
1747    0x106b, 0x30d6, 0x70fc, 0xe0a8,
1748    0x4857, 0xc4fe, 0x13cc, 0x3288,
1749    0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1750    0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1751    0x15c1, 0x2a42, 0x89ac, 0x4758,
1752    0x2b03, 0x1602, 0x4f0c, 0xca08,
1753    0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1754    0x8ba7, 0x465e, 0x244c, 0x1cc8,
1755    0x2b87, 0x164e, 0x642c, 0xdc18,
1756    0x40b9, 0x80de, 0x1094, 0x20e8,
1757    0x27db, 0x1eb6, 0x9dac, 0x7b58,
1758    0x11c1, 0x2242, 0x84ac, 0x4c58,
1759    0x1be5, 0x2d7a, 0x5e34, 0xa718,
1760    0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1761    0x4c97, 0xc87e, 0x11fc, 0x33a8,
1762    0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1763    0x16b3, 0x3d62, 0x4f34, 0x8518,
1764    0x1e2f, 0x391a, 0x5cac, 0xf858,
1765    0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1766    0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1767    0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1768    0x4397, 0xc27e, 0x17fc, 0x3ea8,
1769    0x1617, 0x3d3e, 0x6464, 0xb8b8,
1770    0x23ff, 0x12aa, 0xab6c, 0x56d8,
1771    0x2dfb, 0x1ba6, 0x913c, 0x7328,
1772    0x185d, 0x2ca6, 0x7914, 0x9e28,
1773    0x171b, 0x3e36, 0x7d7c, 0xebe8,
1774    0x4199, 0x82ee, 0x19f4, 0x2e58,
1775    0x4807, 0xc40e, 0x130c, 0x3208,
1776    0x1905, 0x2e0a, 0x5804, 0xac08,
1777    0x213f, 0x132a, 0xadfc, 0x5ba8,
1778    0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1779};
1780
1781static u16 x8_vectors[] = {
1782    0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1783    0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1784    0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1785    0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1786    0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1787    0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1788    0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1789    0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1790    0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1791    0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1792    0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1793    0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1794    0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1795    0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1796    0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1797    0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1798    0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1799    0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1800    0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1801};
1802
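/*
 * Worked example with made-up numbers: decoding syndrome 0x000f against the
 * x4 tables above. The first two eigenvector lines cannot cancel it, but the
 * third line, { 0x0001, 0x0002, 0x0004, 0x0008 } (err_sym == 2), XORs every
 * set syndrome bit away, leaving 0. decode_syndrome() therefore returns 2,
 * and map_err_sym_to_channel(2, 4) yields channel 0 (2 >> 4 == 0).
 */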
1803static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
1804               unsigned v_dim)
1805{
1806    unsigned int i, err_sym;
1807
1808    for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1809        u16 s = syndrome;
1810        unsigned v_idx = err_sym * v_dim;
1811        unsigned v_end = (err_sym + 1) * v_dim;
1812
1813        /* walk over all 16 bits of the syndrome */
1814        for (i = 1; i < (1U << 16); i <<= 1) {
1815
1816            /* if bit is set in that eigenvector... */
1817            if (v_idx < v_end && vectors[v_idx] & i) {
1818                u16 ev_comp = vectors[v_idx++];
1819
1820                /* ... and bit set in the modified syndrome, */
1821                if (s & i) {
1822                    /* remove it. */
1823                    s ^= ev_comp;
1824
1825                    if (!s)
1826                        return err_sym;
1827                }
1828
1829            } else if (s & i)
1830                /* can't get to zero, move to next symbol */
1831                break;
1832        }
1833    }
1834
1835    edac_dbg(0, "syndrome(%x) not found\n", syndrome);
1836    return -1;
1837}
1838
1839static int map_err_sym_to_channel(int err_sym, int sym_size)
1840{
1841    if (sym_size == 4)
1842        switch (err_sym) {
1843        case 0x20:
1844        case 0x21:
1845            return 0;
1847        case 0x22:
1848        case 0x23:
1849            return 1;
1851        default:
1852            return err_sym >> 4;
1854        }
1855    /* x8 symbols */
1856    else
1857        switch (err_sym) {
1858        /* imaginary bits not in a DIMM */
1859        case 0x10:
1860            WARN(1, "Invalid error symbol: 0x%x\n", err_sym);
1862            return -1;
1864
1865        case 0x11:
1866            return 0;
1868        case 0x12:
1869            return 1;
1871        default:
1872            return err_sym >> 3;
1874        }
1875    return -1;
1876}
1877
1878static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1879{
1880    struct amd64_pvt *pvt = mci->pvt_info;
1881    int err_sym = -1;
1882
1883    if (pvt->ecc_sym_sz == 8)
1884        err_sym = decode_syndrome(syndrome, x8_vectors,
1885                      ARRAY_SIZE(x8_vectors),
1886                      pvt->ecc_sym_sz);
1887    else if (pvt->ecc_sym_sz == 4)
1888        err_sym = decode_syndrome(syndrome, x4_vectors,
1889                      ARRAY_SIZE(x4_vectors),
1890                      pvt->ecc_sym_sz);
1891    else {
1892        amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1893        return err_sym;
1894    }
1895
1896    return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1897}
1898
1899/*
1900 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1901 * ADDRESS and process.
1902 */
1903static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1904{
1905    struct amd64_pvt *pvt = mci->pvt_info;
1906    u64 sys_addr;
1907    u16 syndrome;
1908
1909    /* Ensure that the Error Address is VALID */
1910    if (!(m->status & MCI_STATUS_ADDRV)) {
1911        amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1912        edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
1913                     0, 0, 0,
1914                     -1, -1, -1,
1915                     "HW has no ERROR_ADDRESS available",
1916                     "");
1917        return;
1918    }
1919
1920    sys_addr = get_error_address(m);
1921    syndrome = extract_syndrome(m->status);
1922
1923    amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1924
1925    pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1926}
1927
1928/* Handle any Un-correctable Errors (UEs) */
1929static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1930{
1931    struct mem_ctl_info *log_mci, *src_mci = NULL;
1932    int csrow;
1933    u64 sys_addr;
1934    u32 page, offset;
1935
1936    log_mci = mci;
1937
1938    if (!(m->status & MCI_STATUS_ADDRV)) {
1939        amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1940        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1941                     0, 0, 0,
1942                     -1, -1, -1,
1943                     "HW has no ERROR_ADDRESS available",
1944                     "");
1945        return;
1946    }
1947
1948    sys_addr = get_error_address(m);
1949    error_address_to_page_and_offset(sys_addr, &page, &offset);
1950
1951    /*
1952     * Find out which node the error address belongs to. This may be
1953     * different from the node that detected the error.
1954     */
1955    src_mci = find_mc_by_sys_addr(mci, sys_addr);
1956    if (!src_mci) {
1957        amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1958                  (unsigned long)sys_addr);
1959        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1960                     page, offset, 0,
1961                     -1, -1, -1,
1962                     "ERROR ADDRESS NOT mapped to a MC",
1963                     "");
1964        return;
1965    }
1966
1967    log_mci = src_mci;
1968
1969    csrow = sys_addr_to_csrow(log_mci, sys_addr);
1970    if (csrow < 0) {
1971        amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1972                  (unsigned long)sys_addr);
1973        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1974                     page, offset, 0,
1975                     -1, -1, -1,
1976                     "ERROR ADDRESS NOT mapped to CS",
1977                     "");
1978    } else {
1979        edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
1980                     page, offset, 0,
1981                     csrow, -1, -1,
1982                     "", "");
1983    }
1984}
1985
1986static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1987                        struct mce *m)
1988{
1989    u16 ec = EC(m->status);
1990    u8 xec = XEC(m->status, 0x1f);
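    /* MCi_STATUS[46:45]: 2 (CECC, bit 46) = correctable, 1 (UECC, bit 45) = uncorrectable */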
1991    u8 ecc_type = (m->status >> 45) & 0x3;
1992
1993    /* Bail out early if this was an 'observed' error */
1994    if (PP(ec) == NBSL_PP_OBS)
1995        return;
1996
1997    /* Do only ECC errors */
1998    if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1999        return;
2000
2001    if (ecc_type == 2)
2002        amd64_handle_ce(mci, m);
2003    else if (ecc_type == 1)
2004        amd64_handle_ue(mci, m);
2005}
2006
2007void amd64_decode_bus_error(int node_id, struct mce *m)
2008{
2009    __amd64_decode_bus_error(mcis[node_id], m);
2010}
2011
2012/*
2013 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
2014 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
2015 */
2016static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
2017{
2018    /* Reserve the ADDRESS MAP Device */
2019    pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
2020    if (!pvt->F1) {
2021        amd64_err("error address map device not found: "
2022              "vendor %x device 0x%x (broken BIOS?)\n",
2023              PCI_VENDOR_ID_AMD, f1_id);
2024        return -ENODEV;
2025    }
2026
2027    /* Reserve the MISC Device */
2028    pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
2029    if (!pvt->F3) {
2030        pci_dev_put(pvt->F1);
2031        pvt->F1 = NULL;
2032
2033        amd64_err("error F3 device not found: "
2034              "vendor %x device 0x%x (broken BIOS?)\n",
2035              PCI_VENDOR_ID_AMD, f3_id);
2036
2037        return -ENODEV;
2038    }
2039    edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2040    edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2041    edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2042
2043    return 0;
2044}
2045
2046static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2047{
2048    pci_dev_put(pvt->F1);
2049    pci_dev_put(pvt->F3);
2050}
2051
2052/*
2053 * Retrieve the hardware registers of the memory controller (this includes the
2054 * 'Address Map' and 'Misc' device regs)
2055 */
2056static void read_mc_regs(struct amd64_pvt *pvt)
2057{
2058    struct cpuinfo_x86 *c = &boot_cpu_data;
2059    u64 msr_val;
2060    u32 tmp;
2061    unsigned range;
2062
2063    /*
2064     * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2065     * those are Read-As-Zero
2066     */
2067    rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2068    edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2069
2070    /* check first whether TOP_MEM2 is enabled */
2071    rdmsrl(MSR_K8_SYSCFG, msr_val);
2072    if (msr_val & (1U << 21)) {
2073        rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2074        edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2075    } else
2076        edac_dbg(0, " TOP_MEM2 disabled\n");
2077
2078    amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2079
2080    read_dram_ctl_register(pvt);
2081
2082    for (range = 0; range < DRAM_RANGES; range++) {
2083        u8 rw;
2084
2085        /* read settings for this DRAM range */
2086        read_dram_base_limit_regs(pvt, range);
2087
2088        rw = dram_rw(pvt, range);
2089        if (!rw)
2090            continue;
2091
2092        edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2093             range,
2094             get_dram_base(pvt, range),
2095             get_dram_limit(pvt, range));
2096
2097        edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2098             dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2099             (rw & 0x1) ? "R" : "-",
2100             (rw & 0x2) ? "W" : "-",
2101             dram_intlv_sel(pvt, range),
2102             dram_dst_node(pvt, range));
2103    }
2104
2105    read_dct_base_mask(pvt);
2106
2107    amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2108    amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
2109
2110    amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2111
2112    amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
2113    amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
2114
2115    if (!dct_ganging_enabled(pvt)) {
2116        amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2117        amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2118    }
2119
2120    pvt->ecc_sym_sz = 4;
2121
2122    if (c->x86 >= 0x10) {
2123        amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2124        amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2125
2126        /* F10h, revD and later can do x8 ECC too */
2127        if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2128            pvt->ecc_sym_sz = 8;
2129    }
2130    dump_misc_regs(pvt);
2131}
2132
2133/*
2134 * NOTE: CPU Revision Dependent code
2135 *
2136 * Input:
2137 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2138 * k8 private pointer to -->
2139 * DRAM Bank Address mapping register
2140 * node_id
2141 * DCL register where dual_channel_active is
2142 *
2143 * The DBAM register consists of 4 sets of 4 bits each definitions:
2144 *
2145 * Bits: CSROWs
2146 * 0-3 CSROWs 0 and 1
2147 * 4-7 CSROWs 2 and 3
2148 * 8-11 CSROWs 4 and 5
2149 * 12-15 CSROWs 6 and 7
2150 *
2151 * Values range from: 0 to 15
2152 * The meaning of the values depends on CPU revision and dual-channel state,
2153 * see the relevant BKDG for more info.
2154 *
2155 * The memory controller provides for a total of only 8 CSROWs in its current
2156 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2157 * single channel or two (2) DIMMs in dual channel mode.
2158 *
2159 * The following code logic collapses the various tables for CSROW based on CPU
2160 * revision.
2161 *
2162 * Returns:
2163 * The number of PAGE_SIZE-sized pages that the specified CSROW
2164 * encompasses
2165 *
2166 */
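/*
 * Hypothetical example: DBAM == 0x00003210 would yield cs_mode 0 for CSROWs
 * 0/1, 1 for CSROWs 2/3, 2 for CSROWs 4/5 and 3 for CSROWs 6/7; each value is
 * then handed to ->dbam_to_cs() to look up the chip select size of that pair.
 */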
2167static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2168{
2169    u32 cs_mode, nr_pages;
2170    u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2171
2172    /*
2173     * The math on this doesn't look right on the surface because x/2*4 can
2174     * be simplified to x*2 but this expression makes use of the fact that
2175     * it is integral math where 1/2=0. This intermediate value becomes the
2176     * number of bits to shift the DBAM register to extract the proper CSROW
2177     * field.
2178     */
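    /* e.g. csrow_nr == 5: 5/2 == 2, 2*4 == 8, so cs_mode comes from DBAM bits 8-11 */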
2179    cs_mode = (dbam >> ((csrow_nr / 2) * 4)) & 0xF;
2180
2181    nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2182
2183    edac_dbg(0, " (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2184    edac_dbg(0, " nr_pages/channel= %u channel-count = %d\n",
2185         nr_pages, pvt->channel_count);
2186
2187    return nr_pages;
2188}
2189
2190/*
2191 * Initialize the array of csrow attribute instances, based on the values
2192 * from pci config hardware registers.
2193 */
2194static int init_csrows(struct mem_ctl_info *mci)
2195{
2196    struct csrow_info *csrow;
2197    struct dimm_info *dimm;
2198    struct amd64_pvt *pvt = mci->pvt_info;
2199    u64 base, mask;
2200    u32 val;
2201    int i, j, empty = 1;
2202    enum mem_type mtype;
2203    enum edac_type edac_mode;
2204    int nr_pages = 0;
2205
2206    amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2207
2208    pvt->nbcfg = val;
2209
2210    edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2211         pvt->mc_node_id, val,
2212         !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2213
2214    for_each_chip_select(i, 0, pvt) {
2215        csrow = mci->csrows[i];
2216
2217        if (!csrow_enabled(i, 0, pvt) && !csrow_enabled(i, 1, pvt)) {
2218            edac_dbg(1, "----CSROW %d EMPTY for MC node %d\n",
2219                 i, pvt->mc_node_id);
2220            continue;
2221        }
2222
2223        empty = 0;
        nr_pages = 0;
2224        if (csrow_enabled(i, 0, pvt))
2225            nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2226        if (csrow_enabled(i, 1, pvt))
2227            nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2228
2229        get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2230        /* 8 bytes of resolution */
2231
2232        mtype = amd64_determine_memory_type(pvt, i);
2233
2234        edac_dbg(1, " for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2235        edac_dbg(1, " nr_pages: %u\n",
2236             nr_pages * pvt->channel_count);
2237
2238        /*
2239         * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2240         */
2241        if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2242            edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2243                    EDAC_S4ECD4ED : EDAC_SECDED;
2244        else
2245            edac_mode = EDAC_NONE;
2246
2247        for (j = 0; j < pvt->channel_count; j++) {
2248            dimm = csrow->channels[j]->dimm;
2249            dimm->mtype = mtype;
2250            dimm->edac_mode = edac_mode;
2251            dimm->nr_pages = nr_pages;
2252        }
2253    }
2254
2255    return empty;
2256}
2257
2258/* get all cores on this DCT */
2259static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2260{
2261    int cpu;
2262
2263    for_each_online_cpu(cpu)
2264        if (amd_get_nb_id(cpu) == nid)
2265            cpumask_set_cpu(cpu, mask);
2266}
2267
2268/* check MCG_CTL on all the cpus on this node */
2269static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2270{
2271    cpumask_var_t mask;
2272    int cpu, nbe;
2273    bool ret = false;
2274
2275    if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2276        amd64_warn("%s: Error allocating mask\n", __func__);
2277        return false;
2278    }
2279
2280    get_cpus_on_this_dct_cpumask(mask, nid);
2281
2282    rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2283
2284    for_each_cpu(cpu, mask) {
2285        struct msr *reg = per_cpu_ptr(msrs, cpu);
2286        nbe = reg->l & MSR_MCGCTL_NBE;
2287
2288        edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2289             cpu, reg->q,
2290             (nbe ? "enabled" : "disabled"));
2291
2292        if (!nbe)
2293            goto out;
2294    }
2295    ret = true;
2296
2297out:
2298    free_cpumask_var(mask);
2299    return ret;
2300}
2301
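/*
 * Set or clear the NB machine check enable bit (MSR_MCGCTL_NBE) in MCG_CTL on
 * every core of node @nid. When enabling, remember whether the bit was
 * already set (in @s->flags.nb_mce_enable) so that a later restore leaves the
 * BIOS setting alone.
 */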
2302static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2303{
2304    cpumask_var_t cmask;
2305    int cpu;
2306
2307    if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2308        amd64_warn("%s: error allocating mask\n", __func__);
2309        return -ENOMEM;
2310    }
2311
2312    get_cpus_on_this_dct_cpumask(cmask, nid);
2313
2314    rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2315
2316    for_each_cpu(cpu, cmask) {
2317
2318        struct msr *reg = per_cpu_ptr(msrs, cpu);
2319
2320        if (on) {
2321            if (reg->l & MSR_MCGCTL_NBE)
2322                s->flags.nb_mce_enable = 1;
2323
2324            reg->l |= MSR_MCGCTL_NBE;
2325        } else {
2326            /*
2327             * Turn off NB MCE reporting only when it was off before
2328             */
2329            if (!s->flags.nb_mce_enable)
2330                reg->l &= ~MSR_MCGCTL_NBE;
2331        }
2332    }
2333    wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2334
2335    free_cpumask_var(cmask);
2336
2337    return 0;
2338}
2339
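/*
 * Force-enable ECC error reporting on node @nid: turn on NB MCE reporting via
 * MCG_CTL, enable UECC/CECC reporting in NBCTL (saving the previous bits in
 * @s), and, if DRAM ECC is disabled in NBCFG, try to switch it on. Returns
 * false if ECC cannot be enabled.
 */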
2340static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2341                       struct pci_dev *F3)
2342{
2343    bool ret = true;
2344    u32 value, mask = 0x3; /* UECC/CECC enable */
2345
2346    if (toggle_ecc_err_reporting(s, nid, ON)) {
2347        amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2348        return false;
2349    }
2350
2351    amd64_read_pci_cfg(F3, NBCTL, &value);
2352
2353    s->old_nbctl = value & mask;
2354    s->nbctl_valid = true;
2355
2356    value |= mask;
2357    amd64_write_pci_cfg(F3, NBCTL, value);
2358
2359    amd64_read_pci_cfg(F3, NBCFG, &value);
2360
2361    edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2362         nid, value, !!(value & NBCFG_ECC_ENABLE));
2363
2364    if (!(value & NBCFG_ECC_ENABLE)) {
2365        amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2366
2367        s->flags.nb_ecc_prev = 0;
2368
2369        /* Attempt to turn on DRAM ECC Enable */
2370        value |= NBCFG_ECC_ENABLE;
2371        amd64_write_pci_cfg(F3, NBCFG, value);
2372
2373        amd64_read_pci_cfg(F3, NBCFG, &value);
2374
2375        if (!(value & NBCFG_ECC_ENABLE)) {
2376            amd64_warn("Hardware rejected DRAM ECC enable, "
2377                   "check memory DIMM configuration.\n");
2378            ret = false;
2379        } else {
2380            amd64_info("Hardware accepted DRAM ECC Enable\n");
2381        }
2382    } else {
2383        s->flags.nb_ecc_prev = 1;
2384    }
2385
2386    edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2387         nid, value, !!(value & NBCFG_ECC_ENABLE));
2388
2389    return ret;
2390}
2391
2392static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2393                    struct pci_dev *F3)
2394{
2395    u32 value, mask = 0x3; /* UECC/CECC enable */
2396
2398    if (!s->nbctl_valid)
2399        return;
2400
2401    amd64_read_pci_cfg(F3, NBCTL, &value);
2402    value &= ~mask;
2403    value |= s->old_nbctl;
2404
2405    amd64_write_pci_cfg(F3, NBCTL, value);
2406
2407    /* restore previous BIOS DRAM ECC "off" setting we force-enabled */
2408    if (!s->flags.nb_ecc_prev) {
2409        amd64_read_pci_cfg(F3, NBCFG, &value);
2410        value &= ~NBCFG_ECC_ENABLE;
2411        amd64_write_pci_cfg(F3, NBCFG, value);
2412    }
2413
2414    /* restore the NB Enable MCGCTL bit */
2415    if (toggle_ecc_err_reporting(s, nid, OFF))
2416        amd64_warn("Error restoring NB MCGCTL settings!\n");
2417}
2418
2419/*
2420 * EDAC requires that the BIOS have ECC enabled before
2421 * taking over the processing of ECC errors. A command line
2422 * option can be used to force-enable hardware ECC later in
2423 * enable_ecc_error_reporting().
2424 */
2425static const char *ecc_msg =
2426    "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
2427    " Either enable ECC checking or force module loading by setting "
2428    "'ecc_enable_override'.\n"
2429    " (Note that use of the override may cause unknown side effects.)\n";
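/*
 * A hypothetical invocation of the override (the module name depends on how
 * the driver was built): modprobe amd64_edac ecc_enable_override=1
 */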
2430
2431static bool ecc_enabled(struct pci_dev *F3, u8 nid)
2432{
2433    u32 value;
2434    u8 ecc_en = 0;
2435    bool nb_mce_en = false;
2436
2437    amd64_read_pci_cfg(F3, NBCFG, &value);
2438
2439    ecc_en = !!(value & NBCFG_ECC_ENABLE);
2440    amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));
2441
2442    nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
2443    if (!nb_mce_en)
2444        amd64_notice("NB MCE bank disabled, set MSR "
2445                 "0x%08x[4] on node %d to enable.\n",
2446                 MSR_IA32_MCG_CTL, nid);
2447
2448    if (!ecc_en || !nb_mce_en) {
2449        amd64_notice("%s", ecc_msg);
2450        return false;
2451    }
2452    return true;
2453}
2454
2455static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
2456{
2457    int rc;
2458
2459    rc = amd64_create_sysfs_dbg_files(mci);
2460    if (rc < 0)
2461        return rc;
2462
2463    if (boot_cpu_data.x86 >= 0x10) {
2464        rc = amd64_create_sysfs_inject_files(mci);
2465        if (rc < 0)
2466            return rc;
2467    }
2468
2469    return 0;
2470}
2471
2472static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
2473{
2474    amd64_remove_sysfs_dbg_files(mci);
2475
2476    if (boot_cpu_data.x86 >= 0x10)
2477        amd64_remove_sysfs_inject_files(mci);
2478}
2479
2480static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
2481                 struct amd64_family_type *fam)
2482{
2483    struct amd64_pvt *pvt = mci->pvt_info;
2484
2485    mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
2486    mci->edac_ctl_cap = EDAC_FLAG_NONE;
2487
2488    if (pvt->nbcap & NBCAP_SECDED)
2489        mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
2490
2491    if (pvt->nbcap & NBCAP_CHIPKILL)
2492        mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
2493
2494    mci->edac_cap = amd64_determine_edac_cap(pvt);
2495    mci->mod_name = EDAC_MOD_STR;
2496    mci->mod_ver = EDAC_AMD64_VERSION;
2497    mci->ctl_name = fam->ctl_name;
2498    mci->dev_name = pci_name(pvt->F2);
2499    mci->ctl_page_to_phys = NULL;
2500
2501    /* memory scrubber interface */
2502    mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
2503    mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
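    /* typically exposed as /sys/devices/system/edac/mc/mcX/sdram_scrub_rate */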
2504}
2505
2506/*
2507 * returns a pointer to the family descriptor on success, NULL otherwise.
2508 */
2509static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
2510{
2511    u8 fam = boot_cpu_data.x86;
2512    struct amd64_family_type *fam_type = NULL;
2513
2514    switch (fam) {
2515    case 0xf:
2516        fam_type = &amd64_family_types[K8_CPUS];
2517        pvt->ops = &amd64_family_types[K8_CPUS].ops;
2518        break;
2519
2520    case 0x10:
2521        fam_type = &amd64_family_types[F10_CPUS];
2522        pvt->ops = &amd64_family_types[F10_CPUS].ops;
2523        break;
2524
2525    case 0x15:
2526        fam_type = &amd64_family_types[F15_CPUS];
2527        pvt->ops = &amd64_family_types[F15_CPUS].ops;
2528        break;
2529
2530    default:
2531        amd64_err("Unsupported family!\n");
2532        return NULL;
2533    }
2534
2535    pvt->ext_model = boot_cpu_data.x86_model >> 4;
2536
2537    amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
2538             (fam == 0xf ?
2539                (pvt->ext_model >= K8_REV_F ? "revF or later "
2540                                 : "revE or earlier ")
2541                 : ""), pvt->mc_node_id);
2542    return fam_type;
2543}
2544
2545static int amd64_init_one_instance(struct pci_dev *F2)
2546{
2547    struct amd64_pvt *pvt = NULL;
2548    struct amd64_family_type *fam_type = NULL;
2549    struct mem_ctl_info *mci = NULL;
2550    struct edac_mc_layer layers[2];
2551    int err = 0, ret;
2552    u8 nid = get_node_id(F2);
2553
2554    ret = -ENOMEM;
2555    pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
2556    if (!pvt)
2557        goto err_ret;
2558
2559    pvt->mc_node_id = nid;
2560    pvt->F2 = F2;
2561
2562    ret = -EINVAL;
2563    fam_type = amd64_per_family_init(pvt);
2564    if (!fam_type)
2565        goto err_free;
2566
2567    ret = -ENODEV;
2568    err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
2569    if (err)
2570        goto err_free;
2571
2572    read_mc_regs(pvt);
2573
2574    /*
2575     * We need to determine how many memory channels there are. Then use
2576     * that information for calculating the size of the dynamic instance
2577     * tables in the 'mci' structure.
2578     */
2579    ret = -EINVAL;
2580    pvt->channel_count = pvt->ops->early_channel_count(pvt);
2581    if (pvt->channel_count < 0)
2582        goto err_siblings;
2583
2584    ret = -ENOMEM;
2585    layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2586    layers[0].size = pvt->csels[0].b_cnt;
2587    layers[0].is_virt_csrow = true;
2588    layers[1].type = EDAC_MC_LAYER_CHANNEL;
2589    layers[1].size = pvt->channel_count;
2590    layers[1].is_virt_csrow = false;
2591    mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2592    if (!mci)
2593        goto err_siblings;
2594
2595    mci->pvt_info = pvt;
2596    mci->pdev = &pvt->F2->dev;
2597
2598    setup_mci_misc_attrs(mci, fam_type);
2599
2600    if (init_csrows(mci))
2601        mci->edac_cap = EDAC_FLAG_NONE;
2602
2603    ret = -ENODEV;
2604    if (edac_mc_add_mc(mci)) {
2605        edac_dbg(1, "failed edac_mc_add_mc()\n");
2606        goto err_add_mc;
2607    }
2608    if (set_mc_sysfs_attrs(mci)) {
2609        edac_dbg(1, "failed set_mc_sysfs_attrs()\n");
2610        goto err_add_sysfs;
2611    }
2612
2613    /* register stuff with EDAC MCE */
2614    if (report_gart_errors)
2615        amd_report_gart_errors(true);
2616
2617    amd_register_ecc_decoder(amd64_decode_bus_error);
2618
2619    mcis[nid] = mci;
2620
2621    atomic_inc(&drv_instances);
2622
2623    return 0;
2624
2625err_add_sysfs:
2626    edac_mc_del_mc(mci->pdev);
2627err_add_mc:
2628    edac_mc_free(mci);
2629
2630err_siblings:
2631    free_mc_sibling_devs(pvt);
2632
2633err_free:
2634    kfree(pvt);
2635
2636err_ret:
2637    return ret;
2638}
2639
2640static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
2641                         const struct pci_device_id *mc_type)
2642{
2643    u8 nid = get_node_id(pdev);
2644    struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2645    struct ecc_settings *s;
2646    int ret = 0;
2647
2648    ret = pci_enable_device(pdev);
2649    if (ret < 0) {
2650        edac_dbg(0, "pci_enable_device() failed: %d\n", ret);
2651        return -EIO;
2652    }
2653
2654    ret = -ENOMEM;
2655    s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
2656    if (!s)
2657        goto err_out;
2658
2659    ecc_stngs[nid] = s;
2660
2661    if (!ecc_enabled(F3, nid)) {
2662        ret = -ENODEV;
2663
2664        if (!ecc_enable_override)
2665            goto err_enable;
2666
2667        amd64_warn("Forcing ECC on!\n");
2668
2669        if (!enable_ecc_error_reporting(s, nid, F3))
2670            goto err_enable;
2671    }
2672
2673    ret = amd64_init_one_instance(pdev);
2674    if (ret < 0) {
2675        amd64_err("Error probing instance: %d\n", nid);
2676        restore_ecc_error_reporting(s, nid, F3);
2677    }
2678
2679    return ret;
2680
2681err_enable:
2682    kfree(s);
2683    ecc_stngs[nid] = NULL;
2684
2685err_out:
2686    return ret;
2687}
2688
2689static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
2690{
2691    struct mem_ctl_info *mci;
2692    struct amd64_pvt *pvt;
2693    u8 nid = get_node_id(pdev);
2694    struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
2695    struct ecc_settings *s = ecc_stngs[nid];
2696
2697    mci = find_mci_by_dev(&pdev->dev);
2698    if (mci)
        del_mc_sysfs_attrs(mci);
2699    /* Remove from EDAC CORE tracking list */
2700    mci = edac_mc_del_mc(&pdev->dev);
2701    if (!mci)
2702        return;
2703
2704    pvt = mci->pvt_info;
2705
2706    restore_ecc_error_reporting(s, nid, F3);
2707
2708    free_mc_sibling_devs(pvt);
2709
2710    /* unregister from EDAC MCE */
2711    amd_report_gart_errors(false);
2712    amd_unregister_ecc_decoder(amd64_decode_bus_error);
2713
2714    kfree(ecc_stngs[nid]);
2715    ecc_stngs[nid] = NULL;
2716
2717    /* Free the EDAC CORE resources */
2718    mci->pvt_info = NULL;
2719    mcis[nid] = NULL;
2720
2721    kfree(pvt);
2722    edac_mc_free(mci);
2723}
2724
2725/*
2726 * This table is part of the interface for loading drivers for PCI devices. The
2727 * PCI core identifies what devices are on a system during boot, and then
2728 * queries this table to see whether this driver handles a given device it found.
2729 */
2730static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
2731    {
2732        .vendor = PCI_VENDOR_ID_AMD,
2733        .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2734        .subvendor = PCI_ANY_ID,
2735        .subdevice = PCI_ANY_ID,
2736        .class = 0,
2737        .class_mask = 0,
2738    },
2739    {
2740        .vendor = PCI_VENDOR_ID_AMD,
2741        .device = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2742        .subvendor = PCI_ANY_ID,
2743        .subdevice = PCI_ANY_ID,
2744        .class = 0,
2745        .class_mask = 0,
2746    },
2747    {
2748        .vendor = PCI_VENDOR_ID_AMD,
2749        .device = PCI_DEVICE_ID_AMD_15H_NB_F2,
2750        .subvendor = PCI_ANY_ID,
2751        .subdevice = PCI_ANY_ID,
2752        .class = 0,
2753        .class_mask = 0,
2754    },
2755
2756    {0, }
2757};
2758MODULE_DEVICE_TABLE(pci, amd64_pci_table);
2759
2760static struct pci_driver amd64_pci_driver = {
2761    .name = EDAC_MOD_STR,
2762    .probe = amd64_probe_one_instance,
2763    .remove = __devexit_p(amd64_remove_one_instance),
2764    .id_table = amd64_pci_table,
2765};
2766
2767static void setup_pci_device(void)
2768{
2769    struct mem_ctl_info *mci;
2770    struct amd64_pvt *pvt;
2771
2772    if (amd64_ctl_pci)
2773        return;
2774
2775    mci = mcis[0];
2776    if (mci) {
2777
2778        pvt = mci->pvt_info;
2779        amd64_ctl_pci =
2780            edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2781
2782        if (!amd64_ctl_pci) {
2783            pr_warning("%s(): Unable to create PCI control\n",
2784                   __func__);
2785
2786            pr_warning("%s(): PCI error report via EDAC not set\n",
2787                   __func__);
2788        }
2789    }
2790}
2791
2792static int __init amd64_edac_init(void)
2793{
2794    int err = -ENODEV;
2795
2796    printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
2797
2798    opstate_init();
2799
2800    if (amd_cache_northbridges() < 0)
2801        goto err_ret;
2802
2803    err = -ENOMEM;
2804    mcis = kcalloc(amd_nb_num(), sizeof(mcis[0]), GFP_KERNEL);
2805    ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
2806    if (!(mcis && ecc_stngs))
2807        goto err_free;
2808
2809    msrs = msrs_alloc();
2810    if (!msrs)
2811        goto err_free;
2812
2813    err = pci_register_driver(&amd64_pci_driver);
2814    if (err)
2815        goto err_pci;
2816
2817    err = -ENODEV;
2818    if (!atomic_read(&drv_instances))
2819        goto err_no_instances;
2820
2821    setup_pci_device();
2822    return 0;
2823
2824err_no_instances:
2825    pci_unregister_driver(&amd64_pci_driver);
2826
2827err_pci:
2828    msrs_free(msrs);
2829    msrs = NULL;
2830
2831err_free:
2832    kfree(mcis);
2833    mcis = NULL;
2834
2835    kfree(ecc_stngs);
2836    ecc_stngs = NULL;
2837
2838err_ret:
2839    return err;
2840}
2841
2842static void __exit amd64_edac_exit(void)
2843{
2844    if (amd64_ctl_pci)
2845        edac_pci_release_generic_ctl(amd64_ctl_pci);
2846
2847    pci_unregister_driver(&amd64_pci_driver);
2848
2849    kfree(ecc_stngs);
2850    ecc_stngs = NULL;
2851
2852    kfree(mcis);
2853    mcis = NULL;
2854
2855    msrs_free(msrs);
2856    msrs = NULL;
2857}
2858
2859module_init(amd64_edac_init);
2860module_exit(amd64_edac_exit);
2861
2862MODULE_LICENSE("GPL");
2863MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
2864        "Dave Peterson, Thayne Harbaugh");
2865MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
2866        EDAC_AMD64_VERSION);
2867
2868module_param(edac_op_state, int, 0444);
2869MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
2870
