1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If BIOS has enabled the ECC, this override is
11 * cleared to prevent this driver from re-enabling the hardware.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
29 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
32 * FIXME: Produce a better mapping/linearisation.
35 u32 scrubval; /* bit pattern for scrub rate */
36 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
38 { 0x01, 1600000000UL},
60 { 0x00, 0UL}, /* scrubbing off */
63 static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
64 u32 *val, const char *func)
68 err = pci_read_config_dword(pdev, offset, val);
70 amd64_warn("%s: error reading F%dx%03x.\n",
71 func, PCI_FUNC(pdev->devfn), offset);
76 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
77 u32 val, const char *func)
81 err = pci_write_config_dword(pdev, offset, val);
83 amd64_warn("%s: error writing to F%dx%03x.\n",
84 func, PCI_FUNC(pdev->devfn), offset);
91 * Depending on the family, F2 DCT reads need special handling:
93 * K8: has a single DCT only
95 * F10h: each DCT has its own set of regs
99 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
102 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
108 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
111 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
114 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
117 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
123 if (addr >= 0x140 && addr <= 0x1a0) {
128 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
131 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
133 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
137 * Memory scrubber control interface. For K8, memory scrubbing is handled by
138 * hardware and can involve the L2 cache and dcache as well as main memory. With
139 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
142 * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
143 * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
144 * bytes/sec for the setting.
146 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
147 * other archs, we might not have access to the caches directly.
151 * scan the scrub rate mapping table for a close or matching bandwidth value to
152 * issue. If the requested rate is too big, use the last (maximum) value found.
154 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
160 * map the configured rate (new_bw) to a value specific to the AMD64
161 * memory controller and apply to register. Search for the first
162 * bandwidth entry that is greater or equal than the setting requested
163 * and program that. If at last entry, turn off DRAM scrubbing.
165 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
167 * skip scrub rates which aren't recommended
168 * (see F10 BKDG, F3x58)
170 if (scrubrates[i].scrubval < min_rate)
173 if (scrubrates[i].bandwidth <= new_bw)
177 * if no suitable bandwidth is found, turn off DRAM scrubbing
178 * entirely by falling back to the last element in the
183 scrubval = scrubrates[i].scrubval;
185 pci_write_bits32(ctl, SCRCTRL, scrubval, 0x001F);
188 return scrubrates[i].bandwidth;
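/*
 * Usage sketch (illustrative, not taken from a caller in this file):
 * given F3 of the node to program, a request of e.g. 800 MB/s walks
 * scrubrates[] from the fastest entry down and programs the first rate
 * at or below the request:
 *
 *	bw = __amd64_set_scrub_rate(pvt->F3, 800000000, min_rate);
 *
 * The return value is the bandwidth actually programmed, or 0 if the
 * fallback entry (scrubbing off) was hit.
 */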
193 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
195 struct amd64_pvt *pvt = mci->pvt_info;
196 u32 min_scrubrate = 0x5;
198 if (boot_cpu_data.x86 == 0xf)
201 return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
204 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
206 struct amd64_pvt *pvt = mci->pvt_info;
208 int i, retval = -EINVAL;
210 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
212 scrubval = scrubval & 0x001F;
214 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
216 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
217 if (scrubrates[i].scrubval == scrubval) {
218 retval = scrubrates[i].bandwidth;
226 * returns true if the SysAddr given by sys_addr matches the
227 * DRAM base/limit associated with node_id
229 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
234 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
235 * all ones if the most significant implemented address bit is 1.
236 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
237 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
238 * Application Programming.
240 addr = sys_addr & 0x000000ffffffffffull;
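	/*
	 * Worked example of the masking above: the canonical, sign-extended
	 * address 0xffffff8012345678 becomes addr = 0x0000008012345678, so
	 * only the 40 implemented address bits take part in the base/limit
	 * comparison below.
	 */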
242 return ((addr >= get_dram_base(pvt, nid)) &&
243 (addr <= get_dram_limit(pvt, nid)));
247 * Attempt to map a SysAddr to a node. On success, return a pointer to the
248 * mem_ctl_info structure for the node that the SysAddr maps to.
250 * On failure, return NULL.
252 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
255 struct amd64_pvt *pvt;
260 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
261 * 3.4.4.2) registers to map the SysAddr to a node ID.
266 * The value of this field should be the same for all DRAM Base
267 * registers. Therefore we arbitrarily choose to read it from the
268 * register for node 0.
270 intlv_en = dram_intlv_en(pvt, 0);
273 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
274 if (amd64_base_limit_match(pvt, sys_addr, node_id))
280 if (unlikely((intlv_en != 0x01) &&
281 (intlv_en != 0x03) &&
282 (intlv_en != 0x07))) {
283 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
287 bits = (((u32) sys_addr) >> 12) & intlv_en;
289 for (node_id = 0; ; ) {
290 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
291 break; /* intlv_sel field matches */
293 if (++node_id >= DRAM_RANGES)
297 /* sanity test for sys_addr */
298 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
299 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
300 "range for node %d with node interleaving enabled.\n",
301 __func__, sys_addr, node_id);
306 return edac_mc_find((int)node_id);
309 debugf2("sys_addr 0x%lx doesn't match any node\n",
310 (unsigned long)sys_addr);
316 * compute the CS base address of the @csrow on the DRAM controller @dct.
317 * For details see F2x[5C:40] in the processor's BKDG
319 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
320 u64 *base, u64 *mask)
322 u64 csbase, csmask, base_bits, mask_bits;
325 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
326 csbase = pvt->csels[dct].csbases[csrow];
327 csmask = pvt->csels[dct].csmasks[csrow];
328 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
329 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
332 csbase = pvt->csels[dct].csbases[csrow];
333 csmask = pvt->csels[dct].csmasks[csrow >> 1];
336 if (boot_cpu_data.x86 == 0x15)
337 base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
339 base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
342 *base = (csbase & base_bits) << addr_shift;
345 /* poke holes for the csmask */
346 *mask &= ~(mask_bits << addr_shift);
348 *mask |= (csmask & mask_bits) << addr_shift;
351 #define for_each_chip_select(i, dct, pvt) \
352 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
354 #define chip_select_base(i, dct, pvt) \
355 pvt->csels[dct].csbases[i]
357 #define for_each_chip_select_mask(i, dct, pvt) \
358 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
361 * @input_addr is an InputAddr associated with the node given by mci. Return the
362 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
364 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
366 struct amd64_pvt *pvt;
372 for_each_chip_select(csrow, 0, pvt) {
373 if (!csrow_enabled(csrow, 0, pvt))
376 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
380 if ((input_addr & mask) == (base & mask)) {
381 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
382 (unsigned long)input_addr, csrow,
388 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
389 (unsigned long)input_addr, pvt->mc_node_id);
395 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
396 * for the node represented by mci. Info is passed back in *hole_base,
397 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
398 * info is invalid. Info may be invalid for either of the following reasons:
400 * - The revision of the node is not E or greater. In this case, the DRAM Hole
401 * Address Register does not exist.
403 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
404 * indicating that its contents are not valid.
406 * The values passed back in *hole_base, *hole_offset, and *hole_size are
407 * complete 32-bit values despite the fact that the bitfields in the DHAR
408 * only represent bits 31-24 of the base and offset values.
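/*
 * Sketch of the helpers used below (assuming the usual DHAR layout; the
 * authoritative masks live in amd64_edac.h):
 *
 *	dhar_base(pvt):      dhar & 0xff000000           bits 31-24 of base
 *	k8_dhar_offset(pvt): (dhar & 0x0000ff00) << 16   bits 31-24 of offset
 */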
410 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
411 u64 *hole_offset, u64 *hole_size)
413 struct amd64_pvt *pvt = mci->pvt_info;
416 /* only revE and later have the DRAM Hole Address Register */
417 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
418 debugf1(" revision %d for node %d does not support DHAR\n",
419 pvt->ext_model, pvt->mc_node_id);
423 /* valid for Fam10h and above */
424 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
425 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
429 if (!dhar_valid(pvt)) {
430 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
435 /* This node has Memory Hoisting */
437 /* +------------------+--------------------+--------------------+-----
438 * | memory | DRAM hole | relocated |
439 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
441 * | | | [0x100000000, |
442 * | | | (0x100000000+ |
443 * | | | (0xffffffff-x))] |
444 * +------------------+--------------------+--------------------+-----
446 * Above is a diagram of physical memory showing the DRAM hole and the
447 * relocated addresses from the DRAM hole. As shown, the DRAM hole
448 * starts at address x (the base address) and extends through address
449 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
450 * addresses in the hole so that they start at 0x100000000.
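/*
 * Worked example (values chosen for illustration): with hole base
 * x = 0xc0000000, hole_size computes to 0x100000000 - 0xc0000000 =
 * 0x40000000 (1GB); that GB of DRAM is re-addressed at
 * [0x100000000, 0x13fffffff], and *hole_offset is what a SysAddr in that
 * window must be reduced by to recover the DramAddr.
 */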
453 base = dhar_base(pvt);
456 *hole_size = (0x1ull << 32) - base;
458 if (boot_cpu_data.x86 > 0xf)
459 *hole_offset = f10_dhar_offset(pvt);
461 *hole_offset = k8_dhar_offset(pvt);
463 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
464 pvt->mc_node_id, (unsigned long)*hole_base,
465 (unsigned long)*hole_offset, (unsigned long)*hole_size);
469 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
472 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
473 * assumed that sys_addr maps to the node given by mci.
475 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
476 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
477 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
478 * then it is also involved in translating a SysAddr to a DramAddr. Sections
479 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
480 * These parts of the documentation are unclear. I interpret them as follows:
482 * When node n receives a SysAddr, it processes the SysAddr as follows:
484 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
485 * Limit registers for node n. If the SysAddr is not within the range
486 * specified by the base and limit values, then node n ignores the Sysaddr
487 * (since it does not map to node n). Otherwise continue to step 2 below.
489 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
490 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
491 * the range of relocated addresses (starting at 0x100000000) from the DRAM
492 * hole. If not, skip to step 3 below. Else get the value of the
493 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
494 * offset defined by this value from the SysAddr.
496 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
497 * Base register for node n. To obtain the DramAddr, subtract the base
498 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
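/*
 * Worked example of the steps above (values chosen for illustration):
 * assume DRAMBase = 0 for node n and a valid DHAR with hole base
 * x = 0xe0000000, i.e. hole_offset = 0x100000000 - x = 0x20000000. Then:
 *
 *	SysAddr 0x104000000 lies in the relocated range, so step 2 gives
 *	DramAddr = 0x104000000 - 0x20000000 = 0xe4000000;
 *
 *	SysAddr 0x80000000 lies below the hole, so step 3 gives
 *	DramAddr = 0x80000000 - DRAMBase = 0x80000000.
 */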
500 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
502 struct amd64_pvt *pvt = mci->pvt_info;
503 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
506 dram_base = get_dram_base(pvt, pvt->mc_node_id);
508 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
511 if ((sys_addr >= (1ull << 32)) &&
512 (sys_addr < ((1ull << 32) + hole_size))) {
513 /* use DHAR to translate SysAddr to DramAddr */
514 dram_addr = sys_addr - hole_offset;
516 debugf2("using DHAR to translate SysAddr 0x%lx to "
518 (unsigned long)sys_addr,
519 (unsigned long)dram_addr);
526 * Translate the SysAddr to a DramAddr as shown near the start of
527 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
528 * only deals with 40-bit values. Therefore we discard bits 63-40 of
529 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
530 * discard are all 1s. Otherwise the bits we discard are all 0s. See
531 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
532 * Programmer's Manual Volume 1 Application Programming.
534 dram_addr = (sys_addr & GENMASK(0, 39)) - dram_base;
536 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
537 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
538 (unsigned long)dram_addr);
543 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
544 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
545 * for node interleaving.
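/*
 * IntlvEn is a mask over SysAddr bits 14-12, so the valid encodings map
 * as follows (anything else yields 0 and is treated as a BIOS bug, see
 * find_mc_by_sys_addr()):
 *
 *	intlv_en 0x0 -> 0 bits (no interleave)
 *	intlv_en 0x1 -> 1 bit  (2 nodes)
 *	intlv_en 0x3 -> 2 bits (4 nodes)
 *	intlv_en 0x7 -> 3 bits (8 nodes)
 */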
547 static int num_node_interleave_bits(unsigned intlv_en)
549 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
552 BUG_ON(intlv_en > 7);
553 n = intlv_shift_table[intlv_en];
557 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
558 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
560 struct amd64_pvt *pvt;
567 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
568 * concerning translating a DramAddr to an InputAddr.
570 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
571 input_addr = ((dram_addr >> intlv_shift) & GENMASK(12, 35)) +
574 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
575 intlv_shift, (unsigned long)dram_addr,
576 (unsigned long)input_addr);
582 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
583 * assumed that @sys_addr maps to the node given by mci.
585 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
590 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
592 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
593 (unsigned long)sys_addr, (unsigned long)input_addr);
600 * @input_addr is an InputAddr associated with the node represented by mci.
601 * Translate @input_addr to a DramAddr and return the result.
603 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
605 struct amd64_pvt *pvt;
606 unsigned node_id, intlv_shift;
611 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
612 * shows how to translate a DramAddr to an InputAddr. Here we reverse
613 * this procedure. When translating from a DramAddr to an InputAddr, the
614 * bits used for node interleaving are discarded. Here we recover these
615 * bits from the IntlvSel field of the DRAM Limit register (section
616 * 3.4.4.2) for the node that input_addr is associated with.
619 node_id = pvt->mc_node_id;
623 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
624 if (intlv_shift == 0) {
625 debugf1(" InputAddr 0x%lx translates to DramAddr of "
626 "same value\n", (unsigned long)input_addr);
631 bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
632 (input_addr & 0xfff);
634 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
635 dram_addr = bits + (intlv_sel << 12);
637 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
638 "(%d node interleave bits)\n", (unsigned long)input_addr,
639 (unsigned long)dram_addr, intlv_shift);
645 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
646 * @dram_addr to a SysAddr.
648 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
650 struct amd64_pvt *pvt = mci->pvt_info;
651 u64 hole_base, hole_offset, hole_size, base, sys_addr;
654 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
657 if ((dram_addr >= hole_base) &&
658 (dram_addr < (hole_base + hole_size))) {
659 sys_addr = dram_addr + hole_offset;
661 debugf1("using DHAR to translate DramAddr 0x%lx to "
662 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
663 (unsigned long)sys_addr);
669 base = get_dram_base(pvt, pvt->mc_node_id);
670 sys_addr = dram_addr + base;
673 * The sys_addr we have computed up to this point is a 40-bit value
674 * because the k8 deals with 40-bit values. However, the value we are
675 * supposed to return is a full 64-bit physical address. The AMD
676 * x86-64 architecture specifies that the most significant implemented
677 * address bit through bit 63 of a physical address must be either all
678 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
679 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
680 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
683 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
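	/*
	 * Worked example of the sign extension above: with bit 39 set,
	 * (sys_addr & (1ull << 39)) - 1 = 0x0000007fffffffff and OR-ing its
	 * complement sets bits 63-39, e.g. 0x0000008000000000 becomes
	 * 0xffffff8000000000; with bit 39 clear, the expression ORs in 0 and
	 * sys_addr is unchanged.
	 */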
685 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
686 pvt->mc_node_id, (unsigned long)dram_addr,
687 (unsigned long)sys_addr);
693 * @input_addr is an InputAddr associated with the node given by mci. Translate
694 * @input_addr to a SysAddr.
696 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
699 return dram_addr_to_sys_addr(mci,
700 input_addr_to_dram_addr(mci, input_addr));
704 * Find the minimum and maximum InputAddr values that map to the given @csrow.
705 * Pass back these values in *input_addr_min and *input_addr_max.
707 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
708 u64 *input_addr_min, u64 *input_addr_max)
710 struct amd64_pvt *pvt;
714 BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
716 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
718 *input_addr_min = base & ~mask;
719 *input_addr_max = base | mask;
722 /* Map the Error address to a PAGE and PAGE OFFSET. */
723 static inline void error_address_to_page_and_offset(u64 error_address,
724 u32 *page, u32 *offset)
726 *page = (u32) (error_address >> PAGE_SHIFT);
727 *offset = ((u32) error_address) & ~PAGE_MASK;
731 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
732 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
733 * of a node that detected an ECC memory error. mci represents the node that
734 * the error address maps to (possibly different from the node that detected
735 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
738 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
742 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
745 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
746 "address 0x%lx\n", (unsigned long)sys_addr);
750 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
753 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
756 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
759 enum edac_type edac_cap = EDAC_FLAG_NONE;
761 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
765 if (pvt->dclr0 & BIT(bit))
766 edac_cap = EDAC_FLAG_SECDED;
772 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
774 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
776 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
778 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
779 (dclr & BIT(16)) ? "un" : "",
780 (dclr & BIT(19)) ? "yes" : "no");
782 debugf1(" PAR/ERR parity: %s\n",
783 (dclr & BIT(8)) ? "enabled" : "disabled");
785 if (boot_cpu_data.x86 == 0x10)
786 debugf1(" DCT 128bit mode width: %s\n",
787 (dclr & BIT(11)) ? "128b" : "64b");
789 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
790 (dclr & BIT(12)) ? "yes" : "no",
791 (dclr & BIT(13)) ? "yes" : "no",
792 (dclr & BIT(14)) ? "yes" : "no",
793 (dclr & BIT(15)) ? "yes" : "no");
796 /* Display and decode various NB registers for debug purposes. */
797 static void dump_misc_regs(struct amd64_pvt *pvt)
799 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
801 debugf1(" NB two channel DRAM capable: %s\n",
802 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
804 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
805 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
806 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
808 amd64_dump_dramcfg_low(pvt->dclr0, 0);
810 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
812 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
814 pvt->dhar, dhar_base(pvt),
815 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
816 : f10_dhar_offset(pvt));
818 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
820 amd64_debug_display_dimm_sizes(0, pvt);
822 /* everything below this point is Fam10h and above */
823 if (boot_cpu_data.x86 == 0xf)
826 amd64_debug_display_dimm_sizes(1, pvt);
828 amd64_info("using %s syndromes.\n", ((pvt->ecc_sym_sz == 8) ? "x8" : "x4"));
830 /* Only if NOT ganged does dclr1 have valid info */
831 if (!dct_ganging_enabled(pvt))
832 amd64_dump_dramcfg_low(pvt->dclr1, 1);
836 * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
838 static void prep_chip_selects(struct amd64_pvt *pvt)
840 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
841 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
842 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
844 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
845 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
850 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
852 static void read_dct_base_mask(struct amd64_pvt *pvt)
856 prep_chip_selects(pvt);
858 for_each_chip_select(cs, 0, pvt) {
859 u32 reg0 = DCSB0 + (cs * 4);
860 u32 reg1 = DCSB1 + (cs * 4);
861 u32 *base0 = &pvt->csels[0].csbases[cs];
862 u32 *base1 = &pvt->csels[1].csbases[cs];
864 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
865 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
868 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
871 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
872 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
876 for_each_chip_select_mask(cs, 0, pvt) {
877 u32 reg0 = DCSM0 + (cs * 4);
878 u32 reg1 = DCSM1 + (cs * 4);
879 u32 *mask0 = &pvt->csels[0].csmasks[cs];
880 u32 *mask1 = &pvt->csels[1].csmasks[cs];
882 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
883 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
886 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
889 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
890 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
895 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
899 /* F15h supports only DDR3 */
900 if (boot_cpu_data.x86 >= 0x15)
901 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
902 else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
903 if (pvt->dchr0 & DDR3_MODE)
904 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
906 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
908 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
911 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
916 /* Get the number of DCT channels the memory controller is using. */
917 static int k8_early_channel_count(struct amd64_pvt *pvt)
921 if (pvt->ext_model >= K8_REV_F)
922 /* RevF (NPT) and later */
923 flag = pvt->dclr0 & WIDTH_128;
925 /* RevE and earlier */
926 flag = pvt->dclr0 & REVE_WIDTH_128;
931 return (flag) ? 2 : 1;
934 /* On F10h and later ErrAddr is MC4_ADDR[47:1] */
935 static u64 get_error_address(struct mce *m)
940 if (boot_cpu_data.x86 == 0xf) {
945 return m->addr & GENMASK(start_bit, end_bit);
948 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
950 u32 off = range << 3;
952 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
953 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
955 if (boot_cpu_data.x86 == 0xf)
958 if (!dram_rw(pvt, range))
961 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
962 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
965 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
968 struct mem_ctl_info *src_mci;
969 struct amd64_pvt *pvt = mci->pvt_info;
973 /* CHIPKILL enabled */
974 if (pvt->nbcfg & NBCFG_CHIPKILL) {
975 channel = get_channel_from_ecc_syndrome(mci, syndrome);
978 * Syndrome didn't map, so we don't know which of the
979 * 2 DIMMs is in error. So we need to ID 'both' of them
982 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
983 "error reporting race\n", syndrome);
984 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
989 * non-chipkill ecc mode
991 * The k8 documentation is unclear about how to determine the
992 * channel number when using non-chipkill memory. This method
993 * was obtained from email communication with someone at AMD.
994 * (Wish the email was placed in this comment - norsk)
996 channel = ((sys_addr & BIT(3)) != 0);
1000 * Find out which node the error address belongs to. This may be
1001 * different from the node that detected the error.
1003 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1005 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1006 (unsigned long)sys_addr);
1007 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1011 /* Now map the sys_addr to a CSROW */
1012 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1014 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1016 error_address_to_page_and_offset(sys_addr, &page, &offset);
1018 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1019 channel, EDAC_MOD_STR);
1023 static int ddr2_cs_size(unsigned i, bool dct_width)
1029 else if (!(i & 0x1))
1032 shift = (i + 1) >> 1;
1034 return 128 << (shift + !!dct_width);
1037 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1040 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1042 if (pvt->ext_model >= K8_REV_F) {
1043 WARN_ON(cs_mode > 11);
1044 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1046 else if (pvt->ext_model >= K8_REV_D) {
1047 WARN_ON(cs_mode > 10);
1049 if (cs_mode == 3 || cs_mode == 8)
1050 return 32 << (cs_mode - 1);
1052 return 32 << cs_mode;
1055 WARN_ON(cs_mode > 6);
1056 return 32 << cs_mode;
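/*
 * Examples of the mappings above (sizes in MB, see the K8 BKDG for the
 * full tables): revD/E special-cases cs_mode 3 and 8, e.g.
 * cs_mode 8 -> 32 << 7 = 4096MB, while cs_mode 5 -> 32 << 5 = 1024MB;
 * pre-revD simply gives cs_mode 4 -> 32 << 4 = 512MB.
 */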
1061 * Get the number of DCT channels in use.
1064 * number of Memory Channels in operation
1066 * contents of the DCL0_LOW register
1068 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1070 int i, j, channels = 0;
1072 /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
1073 if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
1077 * Need to check if in unganged mode: in that case, there are 2 channels,
1078 * but they are not in 128 bit mode and thus the above 'dclr0' status
1081 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1082 * their CSEnable bit on. If so, then SINGLE DIMM case.
1084 debugf0("Data width is not 128 bits - need more decoding\n");
1087 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1088 * is more than just one DIMM present in unganged mode. Need to check
1089 * both controllers since DIMMs can be placed in either one.
1091 for (i = 0; i < 2; i++) {
1092 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1094 for (j = 0; j < 4; j++) {
1095 if (DBAM_DIMM(j, dbam) > 0) {
1105 amd64_info("MCT channel count: %d\n", channels);
1110 static int ddr3_cs_size(unsigned i, bool dct_width)
1115 if (i == 0 || i == 3 || i == 4)
1121 else if (!(i & 0x1))
1124 shift = (i + 1) >> 1;
1127 cs_size = (128 * (1 << !!dct_width)) << shift;
1132 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1135 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1137 WARN_ON(cs_mode > 11);
1139 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1140 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1142 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1146 * F15h supports only 64bit DCT interfaces
1148 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1151 WARN_ON(cs_mode > 12);
1153 return ddr3_cs_size(cs_mode, false);
1156 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1159 if (boot_cpu_data.x86 == 0xf)
1162 if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1163 debugf0("F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1164 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1166 debugf0(" DCTs operate in %s mode.\n",
1167 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1169 if (!dct_ganging_enabled(pvt))
1170 debugf0(" Address range split per DCT: %s\n",
1171 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1173 debugf0(" data interleave for ECC: %s, "
1174 "DRAM cleared since last warm reset: %s\n",
1175 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1176 (dct_memory_cleared(pvt) ? "yes" : "no"));
1178 debugf0(" channel interleave: %s, "
1179 "interleave bits selector: 0x%x\n",
1180 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1181 dct_sel_interleave_addr(pvt));
1184 amd64_read_dct_pci_cfg(pvt, DCT_SEL_HI, &pvt->dct_sel_hi);
1188 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1189 * Interleaving Modes.
1191 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1192 bool hi_range_sel, u8 intlv_en)
1194 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1196 if (dct_ganging_enabled(pvt))
1200 return dct_sel_high;
1203 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1205 if (dct_interleave_enabled(pvt)) {
1206 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1208 /* return DCT select function: 0=DCT0, 1=DCT1 */
1210 return sys_addr >> 6 & 1;
1212 if (intlv_addr & 0x2) {
1213 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1214 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1216 return ((sys_addr >> shift) & 1) ^ temp;
1219 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1222 if (dct_high_range_enabled(pvt))
1223 return ~dct_sel_high & 1;
1228 /* Convert the sys_addr to the normalized DCT address */
1229 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
1230 u64 sys_addr, bool hi_rng,
1231 u32 dct_sel_base_addr)
1234 u64 dram_base = get_dram_base(pvt, range);
1235 u64 hole_off = f10_dhar_offset(pvt);
1236 u32 hole_valid = dhar_valid(pvt);
1237 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1242 * base address of high range is below 4Gb
1243 * (bits [47:27] at [31:11])
1244 * DRAM address space on this DCT is hoisted above 4Gb &&
1247 * remove hole offset from sys_addr
1249 * remove high range offset from sys_addr
1251 if ((!(dct_sel_base_addr >> 16) ||
1252 dct_sel_base_addr < dhar_base(pvt)) &&
1254 (sys_addr >= BIT_64(32)))
1255 chan_off = hole_off;
1257 chan_off = dct_sel_base_off;
1261 * we have a valid hole &&
1266 * remove dram base to normalize to DCT address
1268 if (hole_valid && (sys_addr >= BIT_64(32)))
1269 chan_off = hole_off;
1271 chan_off = dram_base;
1274 return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
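/*
 * Note: GENMASK() here is the legacy (lo, hi) variant used throughout
 * this file, so the return keeps SysAddr bits 47-6 and applies the
 * chosen chan_off (hole offset, DctSelBaseOffset or DRAM base) at bit
 * 23, i.e. 8MB, granularity.
 */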
1278 * Check if the csrow passed in is marked as SPARED; if so, return the new
1281 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1285 if (online_spare_swap_done(pvt, dct) &&
1286 csrow == online_spare_bad_dramcs(pvt, dct)) {
1288 for_each_chip_select(tmp_cs, dct, pvt) {
1289 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1299 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1300 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1303 * -EINVAL: NOT FOUND
1304 * 0..csrow = Chip-Select Row
1306 static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1308 struct mem_ctl_info *mci;
1309 struct amd64_pvt *pvt;
1310 u64 cs_base, cs_mask;
1311 int cs_found = -EINVAL;
1318 pvt = mci->pvt_info;
1320 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1322 for_each_chip_select(csrow, dct, pvt) {
1323 if (!csrow_enabled(csrow, dct, pvt))
1326 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1328 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1329 csrow, cs_base, cs_mask);
1333 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1334 "(CSBase & ~CSMask)=0x%llx\n",
1335 (in_addr & cs_mask), (cs_base & cs_mask));
1337 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1338 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1340 debugf1(" MATCH csrow=%d\n", cs_found);
1348 * See F2x10C. Non-interleaved graphics framebuffer memory below 16G is
1349 * swapped with a region located at the bottom of memory so that the GPU can use
1350 * the interleaved region and thus two channels.
1352 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1354 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1356 if (boot_cpu_data.x86 == 0x10) {
1357 /* only revC3 and revE have that feature */
1358 if (boot_cpu_data.x86_model < 4 ||
1359 (boot_cpu_data.x86_model < 0xa &&
1360 boot_cpu_data.x86_mask < 3))
1364 amd64_read_dct_pci_cfg(pvt, SWAP_INTLV_REG, &swap_reg);
1366 if (!(swap_reg & 0x1))
1369 swap_base = (swap_reg >> 3) & 0x7f;
1370 swap_limit = (swap_reg >> 11) & 0x7f;
1371 rgn_size = (swap_reg >> 20) & 0x7f;
1372 tmp_addr = sys_addr >> 27;
1374 if (!(sys_addr >> 34) &&
1375 (((tmp_addr >= swap_base) &&
1376 (tmp_addr <= swap_limit)) ||
1377 (tmp_addr < rgn_size)))
1378 return sys_addr ^ (u64)swap_base << 27;
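/*
 * Worked example (illustrative): swap_base/swap_limit/rgn_size are in
 * 128MB units (bit 27). With swap_base = 2, an address in the bottom
 * region, e.g. 0x04001000, is XOR-ed to 0x04001000 ^ (2ULL << 27) =
 * 0x14001000, and addresses inside the swapped region map back the same
 * way.
 */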
1383 /* For a given @dram_range, check if @sys_addr falls within it. */
1384 static int f1x_match_to_this_node(struct amd64_pvt *pvt, int range,
1385 u64 sys_addr, int *nid, int *chan_sel)
1387 int cs_found = -EINVAL;
1391 bool high_range = false;
1393 u8 node_id = dram_dst_node(pvt, range);
1394 u8 intlv_en = dram_intlv_en(pvt, range);
1395 u32 intlv_sel = dram_intlv_sel(pvt, range);
1397 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1398 range, sys_addr, get_dram_limit(pvt, range));
1400 if (dhar_valid(pvt) &&
1401 dhar_base(pvt) <= sys_addr &&
1402 sys_addr < BIT_64(32)) {
1403 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1409 (intlv_sel != ((sys_addr >> 12) & intlv_en))) {
1410 amd64_warn("Botched intlv bits, en: 0x%x, sel: 0x%x\n",
1411 intlv_en, intlv_sel);
1415 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1417 dct_sel_base = dct_sel_baseaddr(pvt);
1420 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1421 * select between DCT0 and DCT1.
1423 if (dct_high_range_enabled(pvt) &&
1424 !dct_ganging_enabled(pvt) &&
1425 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1428 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1430 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1431 high_range, dct_sel_base);
1433 /* Remove node interleaving, see F1x120 */
1435 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1436 (chan_addr & 0xfff);
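	/*
	 * Worked example: with intlv_en = 0x3 (hweight8() = 2) the two
	 * node-interleave bits sit at 13-12 and the shift pair above squeezes
	 * them out, e.g. chan_addr 0x1234f678 -> ((0x1234f678 >> 14) << 12) |
	 * 0x678 = 0x048d3678.
	 */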
1438 /* remove channel interleave */
1439 if (dct_interleave_enabled(pvt) &&
1440 !dct_high_range_enabled(pvt) &&
1441 !dct_ganging_enabled(pvt)) {
1443 if (dct_sel_interleave_addr(pvt) != 1) {
1444 if (dct_sel_interleave_addr(pvt) == 0x3)
1446 chan_addr = ((chan_addr >> 10) << 9) |
1447 (chan_addr & 0x1ff);
1449 /* A[6] or hash 6 */
1450 chan_addr = ((chan_addr >> 7) << 6) |
1454 chan_addr = ((chan_addr >> 13) << 12) |
1455 (chan_addr & 0xfff);
1458 debugf1(" Normalized DCT addr: 0x%llx\n", chan_addr);
1460 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1462 if (cs_found >= 0) {
1464 *chan_sel = channel;
1469 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1470 int *node, int *chan_sel)
1472 int range, cs_found = -EINVAL;
1474 for (range = 0; range < DRAM_RANGES; range++) {
1476 if (!dram_rw(pvt, range))
1479 if ((get_dram_base(pvt, range) <= sys_addr) &&
1480 (get_dram_limit(pvt, range) >= sys_addr)) {
1482 cs_found = f1x_match_to_this_node(pvt, range,
1493 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1494 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1496 * The @sys_addr is usually an error address received from the hardware
1499 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1502 struct amd64_pvt *pvt = mci->pvt_info;
1504 int nid, csrow, chan = 0;
1506 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1509 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1513 error_address_to_page_and_offset(sys_addr, &page, &offset);
1516 * We need the syndromes for channel detection only when we're
1517 * ganged. Otherwise @chan should already contain the channel at
1520 if (dct_ganging_enabled(pvt))
1521 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1524 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1528 * Channel unknown, report all channels on this CSROW as failed.
1530 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1531 edac_mc_handle_ce(mci, page, offset, syndrome,
1532 csrow, chan, EDAC_MOD_STR);
1536 * debug routine to display the memory sizes of all logical DIMMs and their
1539 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1541 int dimm, size0, size1, factor = 0;
1542 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
1543 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1545 if (boot_cpu_data.x86 == 0xf) {
1546 if (pvt->dclr0 & WIDTH_128)
1549 /* K8 families < revF not supported yet */
1550 if (pvt->ext_model < K8_REV_F)
1556 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1557 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1558 : pvt->csels[0].csbases;
1560 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1562 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1564 /* Dump memory sizes for DIMM and its CSROWs */
1565 for (dimm = 0; dimm < 4; dimm++) {
1568 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1569 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
1570 DBAM_DIMM(dimm, dbam));
1573 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1574 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
1575 DBAM_DIMM(dimm, dbam));
1577 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1578 dimm * 2, size0 << factor,
1579 dimm * 2 + 1, size1 << factor);
1583 static struct amd64_family_type amd64_family_types[] = {
1586 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1587 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1589 .early_channel_count = k8_early_channel_count,
1590 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1591 .dbam_to_cs = k8_dbam_to_chip_select,
1592 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1597 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1598 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1600 .early_channel_count = f1x_early_channel_count,
1601 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1602 .dbam_to_cs = f10_dbam_to_chip_select,
1603 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1608 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
1609 .f3_id = PCI_DEVICE_ID_AMD_15H_NB_F3,
1611 .early_channel_count = f1x_early_channel_count,
1612 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
1613 .dbam_to_cs = f15_dbam_to_chip_select,
1614 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1619 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1620 unsigned int device,
1621 struct pci_dev *related)
1623 struct pci_dev *dev = NULL;
1625 dev = pci_get_device(vendor, device, dev);
1627 if ((dev->bus->number == related->bus->number) &&
1628 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1630 dev = pci_get_device(vendor, device, dev);
1637 * These are tables of eigenvectors (one per line) which can be used for the
1638 * construction of the syndrome tables. The modified syndrome search algorithm
1639 * uses those to find the symbol in error and thus the DIMM.
1641 * Algorithm courtesy of Ross LaFetra from AMD.
1643 static u16 x4_vectors[] = {
1644 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1645 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1646 0x0001, 0x0002, 0x0004, 0x0008,
1647 0x1013, 0x3032, 0x4044, 0x8088,
1648 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1649 0x4857, 0xc4fe, 0x13cc, 0x3288,
1650 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1651 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1652 0x15c1, 0x2a42, 0x89ac, 0x4758,
1653 0x2b03, 0x1602, 0x4f0c, 0xca08,
1654 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1655 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1656 0x2b87, 0x164e, 0x642c, 0xdc18,
1657 0x40b9, 0x80de, 0x1094, 0x20e8,
1658 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1659 0x11c1, 0x2242, 0x84ac, 0x4c58,
1660 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1661 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1662 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1663 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1664 0x16b3, 0x3d62, 0x4f34, 0x8518,
1665 0x1e2f, 0x391a, 0x5cac, 0xf858,
1666 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1667 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1668 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1669 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1670 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1671 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1672 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1673 0x185d, 0x2ca6, 0x7914, 0x9e28,
1674 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1675 0x4199, 0x82ee, 0x19f4, 0x2e58,
1676 0x4807, 0xc40e, 0x130c, 0x3208,
1677 0x1905, 0x2e0a, 0x5804, 0xac08,
1678 0x213f, 0x132a, 0xadfc, 0x5ba8,
1679 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1682 static u16 x8_vectors[] = {
1683 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1684 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1685 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1686 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1687 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1688 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1689 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1690 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1691 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1692 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1693 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1694 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1695 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1696 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1697 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1698 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1699 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1700 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1701 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1704 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
1707 unsigned int i, err_sym;
1709 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1711 int v_idx = err_sym * v_dim;
1712 int v_end = (err_sym + 1) * v_dim;
1714 /* walk over all 16 bits of the syndrome */
1715 for (i = 1; i < (1U << 16); i <<= 1) {
1717 /* if bit is set in that eigenvector... */
1718 if (v_idx < v_end && vectors[v_idx] & i) {
1719 u16 ev_comp = vectors[v_idx++];
1721 /* ... and bit set in the modified syndrome, */
1731 /* can't get to zero, move to next symbol */
1736 debugf0("syndrome(%x) not found\n", syndrome);
1740 static int map_err_sym_to_channel(int err_sym, int sym_size)
1753 return err_sym >> 4;
1759 /* imaginary bits not in a DIMM */
1761 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1773 return err_sym >> 3;
1779 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1781 struct amd64_pvt *pvt = mci->pvt_info;
1784 if (pvt->ecc_sym_sz == 8)
1785 err_sym = decode_syndrome(syndrome, x8_vectors,
1786 ARRAY_SIZE(x8_vectors),
1788 else if (pvt->ecc_sym_sz == 4)
1789 err_sym = decode_syndrome(syndrome, x4_vectors,
1790 ARRAY_SIZE(x4_vectors),
1793 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
1797 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
1801 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1802 * ADDRESS and process.
1804 static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1806 struct amd64_pvt *pvt = mci->pvt_info;
1810 /* Ensure that the Error Address is VALID */
1811 if (!(m->status & MCI_STATUS_ADDRV)) {
1812 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1813 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1817 sys_addr = get_error_address(m);
1818 syndrome = extract_syndrome(m->status);
1820 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1822 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, syndrome);
1825 /* Handle any Un-correctable Errors (UEs) */
1826 static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1828 struct mem_ctl_info *log_mci, *src_mci = NULL;
1835 if (!(m->status & MCI_STATUS_ADDRV)) {
1836 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1837 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1841 sys_addr = get_error_address(m);
1844 * Find out which node the error address belongs to. This may be
1845 * different from the node that detected the error.
1847 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1849 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1850 (unsigned long)sys_addr);
1851 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1857 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1859 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1860 (unsigned long)sys_addr);
1861 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1863 error_address_to_page_and_offset(sys_addr, &page, &offset);
1864 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1868 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1871 u16 ec = EC(m->status);
1872 u8 xec = XEC(m->status, 0x1f);
1873 u8 ecc_type = (m->status >> 45) & 0x3;
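	/*
	 * Per the NB MCA layout this code relies on, MCi_STATUS bits 46-45
	 * encode the ECC error kind: 0x2 (CECC) takes the correctable path
	 * below, 0x1 (UECC) the uncorrectable one.
	 */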
1875 /* Bail out early if this was an 'observed' error */
1876 if (PP(ec) == NBSL_PP_OBS)
1879 /* Do only ECC errors */
1880 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1884 amd64_handle_ce(mci, m);
1885 else if (ecc_type == 1)
1886 amd64_handle_ue(mci, m);
1889 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1891 struct mem_ctl_info *mci = mcis[node_id];
1893 __amd64_decode_bus_error(mci, m);
1897 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
1898 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
1900 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
1902 /* Reserve the ADDRESS MAP Device */
1903 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
1905 amd64_err("error address map device not found: "
1906 "vendor %x device 0x%x (broken BIOS?)\n",
1907 PCI_VENDOR_ID_AMD, f1_id);
1911 /* Reserve the MISC Device */
1912 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
1914 pci_dev_put(pvt->F1);
1917 amd64_err("error F3 device not found: "
1918 "vendor %x device 0x%x (broken BIOS?)\n",
1919 PCI_VENDOR_ID_AMD, f3_id);
1923 debugf1("F1: %s\n", pci_name(pvt->F1));
1924 debugf1("F2: %s\n", pci_name(pvt->F2));
1925 debugf1("F3: %s\n", pci_name(pvt->F3));
1930 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
1932 pci_dev_put(pvt->F1);
1933 pci_dev_put(pvt->F3);
1937 * Retrieve the hardware registers of the memory controller (this includes the
1938 * 'Address Map' and 'Misc' device regs)
1940 static void read_mc_regs(struct amd64_pvt *pvt)
1942 struct cpuinfo_x86 *c = &boot_cpu_data;
1948 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
1949 * those are Read-As-Zero
1951 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
1952 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
1954 /* check first whether TOP_MEM2 is enabled */
1955 rdmsrl(MSR_K8_SYSCFG, msr_val);
1956 if (msr_val & (1U << 21)) {
1957 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
1958 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
1960 debugf0(" TOP_MEM2 disabled.\n");
1962 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
1964 read_dram_ctl_register(pvt);
1966 for (range = 0; range < DRAM_RANGES; range++) {
1969 /* read settings for this DRAM range */
1970 read_dram_base_limit_regs(pvt, range);
1972 rw = dram_rw(pvt, range);
1976 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
1978 get_dram_base(pvt, range),
1979 get_dram_limit(pvt, range));
1981 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
1982 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
1983 (rw & 0x1) ? "R" : "-",
1984 (rw & 0x2) ? "W" : "-",
1985 dram_intlv_sel(pvt, range),
1986 dram_dst_node(pvt, range));
1989 read_dct_base_mask(pvt);
1991 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
1992 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
1994 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
1996 amd64_read_dct_pci_cfg(pvt, DCLR0, &pvt->dclr0);
1997 amd64_read_dct_pci_cfg(pvt, DCHR0, &pvt->dchr0);
1999 if (!dct_ganging_enabled(pvt)) {
2000 amd64_read_dct_pci_cfg(pvt, DCLR1, &pvt->dclr1);
2001 amd64_read_dct_pci_cfg(pvt, DCHR1, &pvt->dchr1);
2004 pvt->ecc_sym_sz = 4;
2006 if (c->x86 >= 0x10) {
2007 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2008 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
2010 /* F10h, revD and later can do x8 ECC too */
2011 if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
2012 pvt->ecc_sym_sz = 8;
2014 dump_misc_regs(pvt);
2018 * NOTE: CPU Revision Dependent code
2021 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2022 * k8 private pointer to -->
2023 * DRAM Bank Address mapping register
2025 * DCL register where dual_channel_active is
2027 * The DBAM register consists of 4 sets of 4 bits each. Definitions:
2030 * 0-3 CSROWs 0 and 1
2031 * 4-7 CSROWs 2 and 3
2032 * 8-11 CSROWs 4 and 5
2033 * 12-15 CSROWs 6 and 7
2035 * Values range from: 0 to 15
2036 * The meaning of the values depends on CPU revision and dual-channel state,
2037 * see relevant BKDG more info.
2039 * The memory controller provides for a total of only 8 CSROWs in its current
2040 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2041 * single channel or two (2) DIMMs in dual channel mode.
2043 * The following code logic collapses the various tables for CSROW based on CPU
2047 * The number of PAGE_SIZE pages on the specified CSROW number it
2051 static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2053 u32 cs_mode, nr_pages;
2056 * The math on this doesn't look right on the surface because x/2*4 can
2057 * be simplified to x*2 but this expression makes use of the fact that
2058 * it is integer math where 1/2 = 0. This intermediate value becomes the
2059 * number of bits to shift the DBAM register to extract the proper CSROW
2062 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
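	/*
	 * Worked example: for csrow_nr = 5, (5 / 2) * 4 = 8, so the DBAM
	 * nibble at bits 11-8 (shared by csrows 4 and 5) is extracted; with
	 * DBAM = 0x00005210 that yields cs_mode = 0x2.
	 */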
2064 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2067 * If dual channel then double the memory size of single channel.
2068 * Channel count is 1 or 2
2070 nr_pages <<= (pvt->channel_count - 1);
2072 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2073 debugf0(" nr_pages= %u channel-count = %d\n",
2074 nr_pages, pvt->channel_count);
2080 * Initialize the array of csrow attribute instances, based on the values
2081 * from pci config hardware registers.
2083 static int init_csrows(struct mem_ctl_info *mci)
2085 struct csrow_info *csrow;
2086 struct amd64_pvt *pvt = mci->pvt_info;
2087 u64 input_addr_min, input_addr_max, sys_addr, base, mask;
2091 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2095 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2096 pvt->mc_node_id, val,
2097 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2099 for_each_chip_select(i, 0, pvt) {
2100 csrow = &mci->csrows[i];
2102 if (!csrow_enabled(i, 0, pvt)) {
2103 debugf1("----CSROW %d EMPTY for node %d\n", i,
2108 debugf1("----CSROW %d VALID for MC node %d\n",
2109 i, pvt->mc_node_id);
2112 csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2113 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2114 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2115 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2116 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2117 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2119 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2120 csrow->page_mask = ~mask;
2121 /* 8 bytes of resolution */
2123 csrow->mtype = amd64_determine_memory_type(pvt, i);
2125 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2126 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2127 (unsigned long)input_addr_min,
2128 (unsigned long)input_addr_max);
2129 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2130 (unsigned long)sys_addr, csrow->page_mask);
2131 debugf1(" nr_pages: %u first_page: 0x%lx "
2132 "last_page: 0x%lx\n",
2133 (unsigned)csrow->nr_pages,
2134 csrow->first_page, csrow->last_page);
2137 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2139 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2141 (pvt->nbcfg & NBCFG_CHIPKILL) ?
2142 EDAC_S4ECD4ED : EDAC_SECDED;
2144 csrow->edac_mode = EDAC_NONE;
2150 /* get all cores on this DCT */
2151 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
2155 for_each_online_cpu(cpu)
2156 if (amd_get_nb_id(cpu) == nid)
2157 cpumask_set_cpu(cpu, mask);
2160 /* check MCG_CTL on all the cpus on this node */
2161 static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
2167 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2168 amd64_warn("%s: Error allocating mask\n", __func__);
2172 get_cpus_on_this_dct_cpumask(mask, nid);
2174 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2176 for_each_cpu(cpu, mask) {
2177 struct msr *reg = per_cpu_ptr(msrs, cpu);
2178 nbe = reg->l & MSR_MCGCTL_NBE;
2180 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2182 (nbe ? "enabled" : "disabled"));
2190 free_cpumask_var(mask);
2194 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2196 cpumask_var_t cmask;
2199 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2200 amd64_warn("%s: error allocating mask\n", __func__);
2204 get_cpus_on_this_dct_cpumask(cmask, nid);
2206 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2208 for_each_cpu(cpu, cmask) {
2210 struct msr *reg = per_cpu_ptr(msrs, cpu);
2213 if (reg->l & MSR_MCGCTL_NBE)
2214 s->flags.nb_mce_enable = 1;
2216 reg->l |= MSR_MCGCTL_NBE;
2219 * Turn off NB MCE reporting only when it was off before
2221 if (!s->flags.nb_mce_enable)
2222 reg->l &= ~MSR_MCGCTL_NBE;
2225 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2227 free_cpumask_var(cmask);
2232 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2236 u32 value, mask = 0x3; /* UECC/CECC enable */
2238 if (toggle_ecc_err_reporting(s, nid, ON)) {
2239 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2243 amd64_read_pci_cfg(F3, NBCTL, &value);
2245 s->old_nbctl = value & mask;
2246 s->nbctl_valid = true;
2249 amd64_write_pci_cfg(F3, NBCTL, value);
2251 amd64_read_pci_cfg(F3, NBCFG, &value);
2253 debugf0("1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2254 nid, value, !!(value & NBCFG_ECC_ENABLE));
2256 if (!(value & NBCFG_ECC_ENABLE)) {
2257 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2259 s->flags.nb_ecc_prev = 0;
2261 /* Attempt to turn on DRAM ECC Enable */
2262 value |= NBCFG_ECC_ENABLE;
2263 amd64_write_pci_cfg(F3, NBCFG, value);
2265 amd64_read_pci_cfg(F3, NBCFG, &value);
2267 if (!(value & NBCFG_ECC_ENABLE)) {
2268 amd64_warn("Hardware rejected DRAM ECC enable, "
2269 "check memory DIMM configuration.\n");
2272 amd64_info("Hardware accepted DRAM ECC Enable\n");
2275 s->flags.nb_ecc_prev = 1;
2278 debugf0("2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
2279 nid, value, !!(value & NBCFG_ECC_ENABLE));
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = 0x3;		/* UECC/CECC enable */

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, NBCFG, &value);
		value &= ~NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
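
/*
 * Pairing sketch: enable_ecc_error_reporting() and
 * restore_ecc_error_reporting() bracket an instance's lifetime, as the
 * probe/remove paths below do:
 *
 *	if (!enable_ecc_error_reporting(s, nid, F3))
 *		goto err;			(nothing to undo yet)
 *	...
 *	restore_ecc_error_reporting(s, nid, F3);
 *
 * s->nbctl_valid gates the restore, so calling it on a node where the
 * enable never reached NBCTL is a harmless no-op.
 */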
/*
 * EDAC requires that the BIOS have ECC enabled before it takes over the
 * processing of ECC errors. A command line option allows forcing hardware
 * ECC on later, in enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, NBCFG, &value);

	ecc_en = !!(value & NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	if (boot_cpu_data.x86 >= 0x10)
		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
			sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
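
/*
 * A NULL .attr.name terminates the list for the EDAC core, which is why
 * sysfs_attrs[] reserves one extra slot for 'terminator'. Sketch of the
 * consumer-side walk (create_one_file() is a hypothetical placeholder
 * for whatever the core does per attribute):
 *
 *	struct mcidev_sysfs_attribute *a;
 *
 *	for (a = mci->mc_driver_sysfs_attributes; a->attr.name; a++)
 *		create_one_file(mci, a);
 */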
static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
				 struct amd64_family_type *fam)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= fam->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
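
/*
 * The two hooks above are what the EDAC core invokes when userspace
 * accesses the controller's sdram_scrub_rate sysfs file (under
 * /sys/devices/system/edac/mc/mc<N>/); values are bandwidth in bytes/sec.
 * In-kernel, a set/readback round trip would look like:
 *
 *	mci->set_sdram_scrub_rate(mci, new_bw);	(requested bytes/sec)
 *	cur_bw = mci->get_sdram_scrub_rate(mci);	(rate actually programmed)
 */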
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type	= &amd64_family_types[K8_CPUS];
		pvt->ops	= &amd64_family_types[K8_CPUS].ops;
		break;

	case 0x10:
		fam_type	= &amd64_family_types[F10_CPUS];
		pvt->ops	= &amd64_family_types[F10_CPUS].ops;
		break;

	case 0x15:
		fam_type	= &amd64_family_types[F15_CPUS];
		pvt->ops	= &amd64_family_types[F15_CPUS].ops;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id = nid;
	pvt->F2 = F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci, fam_type);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	set_mc_sysfs_attrs(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;
	atomic_inc(&drv_instances);

	return 0;

err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
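
/*
 * Init-order sketch: each acquisition above has a matching unwind label,
 * popped in reverse on failure:
 *
 *	kzalloc(pvt) -> reserve_mc_sibling_devs() -> edac_mc_alloc()
 *		     -> edac_mc_add_mc()
 *
 *	err_add_mc:	edac_mc_free(mci)
 *	err_siblings:	free_mc_sibling_devs(pvt)
 *	err_free:	kfree(pvt)
 */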
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;

err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see whether this driver handles a given device.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_15H_NB_F2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
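
/*
 * Equivalent shorthand: with no class filtering wanted, each entry above
 * could also be spelled with the PCI_VDEVICE() helper, e.g.
 *
 *	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL) },
 *
 * which expands to the same vendor/device pair with PCI_ANY_ID for the
 * subvendor/subdevice fields.
 */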
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	amd64_ctl_pci =
		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

	if (!amd64_ctl_pci) {
		pr_warning("%s(): Unable to create PCI control\n", __func__);
		pr_warning("%s(): PCI error report via EDAC not set\n",
			   __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(mcis);
	mcis = NULL;
	kfree(ecc_stngs);
	ecc_stngs = NULL;
err_ret:
	return err;
}
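
/*
 * Allocation note: both per-node tables scale with amd_nb_num(), the
 * northbridge count discovered by amd_cache_northbridges(). An
 * overflow-checked equivalent spelling would be
 *
 *	mcis = kcalloc(amd_nb_num(), sizeof(mcis[0]), GFP_KERNEL);
 *
 * (illustrative alternative only; behaviour is identical for these small
 * counts).
 */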
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
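
/*
 * Example invocations (module name assumed to be amd64_edac_mod):
 *
 *	modprobe amd64_edac_mod edac_op_state=1		(report via NMI)
 *	modprobe amd64_edac_mod edac_op_state=0		(polling)
 */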