1 #include "amd64_edac.h"
2 #include <asm/amd_nb.h>
4 static struct edac_pci_ctl_info *amd64_ctl_pci;
6 static int report_gart_errors;
7 module_param(report_gart_errors, int, 0644);
10 * Set by command line parameter. If the BIOS has enabled ECC, this override is
11 * cleared to prevent this driver from re-enabling the hardware.
13 static int ecc_enable_override;
14 module_param(ecc_enable_override, int, 0644);
16 static struct msr __percpu *msrs;
19 * count successfully initialized driver instances for setup_pci_device()
21 static atomic_t drv_instances = ATOMIC_INIT(0);
23 /* Per-node driver instances */
24 static struct mem_ctl_info **mcis;
25 static struct ecc_settings **ecc_stngs;
28 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
31 static int ddr2_dbam_revCG[] = {
41 static int ddr2_dbam_revD[] = {
53 static int ddr2_dbam[] = { [0] = 128,
62 static int ddr3_dbam[] = { [0] = -1,
73 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
74 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
77 * FIXME: Produce a better mapping/linearisation.
82 u32 scrubval; /* bit pattern for scrub rate */
83 u32 bandwidth; /* bandwidth consumed (bytes/sec) */
85 { 0x01, 1600000000UL},
107 { 0x00, 0UL}, /* scrubbing off */
110 static int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
111 u32 *val, const char *func)
115 err = pci_read_config_dword(pdev, offset, val);
117 amd64_warn("%s: error reading F%dx%03x.\n",
118 func, PCI_FUNC(pdev->devfn), offset);
123 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
124 u32 val, const char *func)
128 err = pci_write_config_dword(pdev, offset, val);
130 amd64_warn("%s: error writing to F%dx%03x.\n",
131 func, PCI_FUNC(pdev->devfn), offset);
138 * Depending on the family, F2 DCT reads need special handling:
140 * K8: has a single DCT only
142 * F10h: each DCT has its own set of regs
146 * F15h: we select which DCT we access using F1x10C[DctCfgSel]
149 static int k8_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
155 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
158 static int f10_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
161 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
164 static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
170 if (addr >= 0x140 && addr <= 0x1a0) {
175 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
178 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
180 return __amd64_read_pci_cfg_dword(pvt->F2, addr, val, func);
184 * Memory scrubber control interface. For K8, memory scrubbing is handled by
185 * hardware and can involve L2 cache, dcache as well as the main memory. With
186 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
189 * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
190 * (DRAM) to cache lines. This is nasty, so we will use bandwidth in
191 * bytes/sec for the setting.
193 * Currently, we only do dram scrubbing. If the scrubbing is done in software on
194 * other archs, we might not have access to the caches directly.
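/*
 * Editor's sketch (example_pick_scrubval() is an illustration, not a driver
 * function): with the descending scrubrates[] table above, picking a scrub
 * value for a requested bandwidth is a linear scan for the first entry whose
 * bandwidth does not exceed the request. E.g. asking for 1000000000 bytes/sec
 * skips the 1600000000 entry and selects the next, slower one. The
 * terminating { 0x00, 0UL } entry guarantees the scan always stops, falling
 * back to "scrubbing off".
 */
static u32 example_pick_scrubval(u32 req_bw)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++)
		if (scrubrates[i].bandwidth <= req_bw)
			break;

	return scrubrates[i].scrubval;
}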
198 * scan the scrub rate mapping table for a close or matching bandwidth value to
199 * issue. If the requested rate is too big, then use the maximum value found.
201 static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
207 * map the configured rate (new_bw) to a value specific to the AMD64
208 * memory controller and apply to register. Search for the first
209 * bandwidth entry that does not exceed the setting requested
210 * and program that. If we reach the last entry, turn off DRAM scrubbing.
212 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
214 * skip scrub rates which aren't recommended
215 * (see F10 BKDG, F3x58)
217 if (scrubrates[i].scrubval < min_rate)
220 if (scrubrates[i].bandwidth <= new_bw)
224 * if no suitable bandwidth found, turn off DRAM scrubbing
225 * entirely by falling back to the last element in the
230 scrubval = scrubrates[i].scrubval;
232 pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
235 return scrubrates[i].bandwidth;
240 static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
242 struct amd64_pvt *pvt = mci->pvt_info;
244 return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);
247 static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
249 struct amd64_pvt *pvt = mci->pvt_info;
251 int i, retval = -EINVAL;
253 amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);
255 scrubval = scrubval & 0x001F;
257 amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);
259 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
260 if (scrubrates[i].scrubval == scrubval) {
261 retval = scrubrates[i].bandwidth;
269 * returns true if the SysAddr given by sys_addr matches the
270 * DRAM base/limit associated with node_id
272 static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, int nid)
276 /* The K8 treats this as a 40-bit value. However, bits 63-40 will be
277 * all ones if the most significant implemented address bit is 1.
278 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
279 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
280 * Application Programming.
282 addr = sys_addr & 0x000000ffffffffffull;
284 return ((addr >= get_dram_base(pvt, nid)) &&
285 (addr <= get_dram_limit(pvt, nid)));
289 * Attempt to map a SysAddr to a node. On success, return a pointer to the
290 * mem_ctl_info structure for the node that the SysAddr maps to.
292 * On failure, return NULL.
294 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
297 struct amd64_pvt *pvt;
302 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
303 * 3.4.4.2) registers to map the SysAddr to a node ID.
308 * The value of this field should be the same for all DRAM Base
309 * registers. Therefore we arbitrarily choose to read it from the
310 * register for node 0.
312 intlv_en = dram_intlv_en(pvt, 0);
315 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
316 if (amd64_base_limit_match(pvt, sys_addr, node_id))
322 if (unlikely((intlv_en != 0x01) &&
323 (intlv_en != 0x03) &&
324 (intlv_en != 0x07))) {
325 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
329 bits = (((u32) sys_addr) >> 12) & intlv_en;
331 for (node_id = 0; ; ) {
332 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
333 break; /* intlv_sel field matches */
335 if (++node_id >= DRAM_RANGES)
339 /* sanity test for sys_addr */
340 if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
341 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address"
342 "range for node %d with node interleaving enabled.\n",
343 __func__, sys_addr, node_id);
348 return edac_mc_find(node_id);
351 debugf2("sys_addr 0x%lx doesn't match any node\n",
352 (unsigned long)sys_addr);
358 * compute the CS base address of the @csrow on the DRAM controller @dct.
359 * For details see F2x[5C:40] in the processor's BKDG
361 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
362 u64 *base, u64 *mask)
364 u64 csbase, csmask, base_bits, mask_bits;
367 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
368 csbase = pvt->csels[dct].csbases[csrow];
369 csmask = pvt->csels[dct].csmasks[csrow];
370 base_bits = GENMASK(21, 31) | GENMASK(9, 15);
371 mask_bits = GENMASK(21, 29) | GENMASK(9, 15);
374 csbase = pvt->csels[dct].csbases[csrow];
375 csmask = pvt->csels[dct].csmasks[csrow >> 1];
378 if (boot_cpu_data.x86 == 0x15)
379 base_bits = mask_bits = GENMASK(19, 30) | GENMASK(5, 13);
381 base_bits = mask_bits = GENMASK(19, 28) | GENMASK(5, 13);
384 *base = (csbase & base_bits) << addr_shift;
387 /* poke holes for the csmask */
388 *mask &= ~(mask_bits << addr_shift);
390 *mask |= (csmask & mask_bits) << addr_shift;
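/*
 * Editor's note -- a worked view of the result: base_bits selects which
 * csbase bits carry address information and mask_bits which csmask bits are
 * "don't care" holes. A chip select then claims an address A when
 * (A & mask) == (base & mask), i.e. A agrees with the base on every bit that
 * is not masked out; input_addr_to_csrow() and f10_lookup_addr_in_dct()
 * below perform exactly this comparison.
 */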
393 #define for_each_chip_select(i, dct, pvt) \
394 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
396 #define for_each_chip_select_mask(i, dct, pvt) \
397 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
400 * @input_addr is an InputAddr associated with the node given by mci. Return the
401 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
403 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
405 struct amd64_pvt *pvt;
411 for_each_chip_select(csrow, 0, pvt) {
412 if (!csrow_enabled(csrow, 0, pvt))
415 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
419 if ((input_addr & mask) == (base & mask)) {
420 debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
421 (unsigned long)input_addr, csrow,
427 debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
428 (unsigned long)input_addr, pvt->mc_node_id);
434 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
435 * for the node represented by mci. Info is passed back in *hole_base,
436 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
437 * info is invalid. Info may be invalid for either of the following reasons:
439 * - The revision of the node is not E or greater. In this case, the DRAM Hole
440 * Address Register does not exist.
442 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
443 * indicating that its contents are not valid.
445 * The values passed back in *hole_base, *hole_offset, and *hole_size are
446 * complete 32-bit values despite the fact that the bitfields in the DHAR
447 * only represent bits 31-24 of the base and offset values.
449 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
450 u64 *hole_offset, u64 *hole_size)
452 struct amd64_pvt *pvt = mci->pvt_info;
455 /* only revE and later have the DRAM Hole Address Register */
456 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
457 debugf1(" revision %d for node %d does not support DHAR\n",
458 pvt->ext_model, pvt->mc_node_id);
462 /* valid for Fam10h and above */
463 if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
464 debugf1(" Dram Memory Hoisting is DISABLED on this system\n");
468 if (!dhar_valid(pvt)) {
469 debugf1(" Dram Memory Hoisting is DISABLED on this node %d\n",
474 /* This node has Memory Hoisting */
476 /* +------------------+--------------------+--------------------+-----
477 * | memory | DRAM hole | relocated |
478 * | [0, (x - 1)] | [x, 0xffffffff] | addresses from |
480 * | | | [0x100000000, |
481 * | | | (0x100000000+ |
482 * | | | (0xffffffff-x))] |
483 * +------------------+--------------------+--------------------+-----
485 * Above is a diagram of physical memory showing the DRAM hole and the
486 * relocated addresses from the DRAM hole. As shown, the DRAM hole
487 * starts at address x (the base address) and extends through address
488 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
489 * addresses in the hole so that they start at 0x100000000.
492 base = dhar_base(pvt);
495 *hole_size = (0x1ull << 32) - base;
497 if (boot_cpu_data.x86 > 0xf)
498 *hole_offset = f10_dhar_offset(pvt);
500 *hole_offset = k8_dhar_offset(pvt);
502 debugf1(" DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
503 pvt->mc_node_id, (unsigned long)*hole_base,
504 (unsigned long)*hole_offset, (unsigned long)*hole_size);
508 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
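/*
 * Editor's sketch with assumed numbers, not read from hardware
 * (example_dhar_hole_size() is an illustration, not a driver function): for
 * a hole base of 0xc0000000, the hole spans [0xc0000000, 0xffffffff], so
 * hole_size = 2^32 - 0xc0000000 = 0x40000000 (1GB), and the hoisted copy of
 * that range lives at [0x100000000, 0x100000000 + 0x40000000).
 */
static u64 example_dhar_hole_size(u64 hole_base)
{
	/* mirrors the *hole_size computation above */
	return (0x1ull << 32) - hole_base;
}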
511 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
512 * assumed that sys_addr maps to the node given by mci.
514 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
515 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
516 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
517 * then it is also involved in translating a SysAddr to a DramAddr. Sections
518 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
519 * These parts of the documentation are unclear. I interpret them as follows:
521 * When node n receives a SysAddr, it processes the SysAddr as follows:
523 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
524 * Limit registers for node n. If the SysAddr is not within the range
525 * specified by the base and limit values, then node n ignores the SysAddr
526 * (since it does not map to node n). Otherwise continue to step 2 below.
528 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
529 * disabled so skip to step 3 below. Otherwise see if the SysAddr is within
530 * the range of relocated addresses (starting at 0x100000000) from the DRAM
531 * hole. If not, skip to step 3 below. Else get the value of the
532 * DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
533 * offset defined by this value from the SysAddr.
535 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
536 * Base register for node n. To obtain the DramAddr, subtract the base
537 * address from the SysAddr, as shown near the start of section 3.4.4 (p.70).
539 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
541 struct amd64_pvt *pvt = mci->pvt_info;
542 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
545 dram_base = get_dram_base(pvt, pvt->mc_node_id);
547 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
550 if ((sys_addr >= (1ull << 32)) &&
551 (sys_addr < ((1ull << 32) + hole_size))) {
552 /* use DHAR to translate SysAddr to DramAddr */
553 dram_addr = sys_addr - hole_offset;
555 debugf2("using DHAR to translate SysAddr 0x%lx to "
557 (unsigned long)sys_addr,
558 (unsigned long)dram_addr);
565 * Translate the SysAddr to a DramAddr as shown near the start of
566 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
567 * only deals with 40-bit values. Therefore we discard bits 63-40 of
568 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
569 * discard are all 1s. Otherwise the bits we discard are all 0s. See
570 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
571 * Programmer's Manual Volume 1 Application Programming.
573 dram_addr = (sys_addr & 0xffffffffffull) - dram_base;
575 debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
576 "DramAddr 0x%lx\n", (unsigned long)sys_addr,
577 (unsigned long)dram_addr);
582 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
583 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
584 * for node interleaving.
586 static int num_node_interleave_bits(unsigned intlv_en)
588 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
591 BUG_ON(intlv_en > 7);
592 n = intlv_shift_table[intlv_en];
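/*
 * Editor's note: the only valid non-zero IntlvEn encodings are 0x1, 0x3 and
 * 0x7 (find_mc_by_sys_addr() above warns on anything else), so the table
 * maps 0x1 -> 1 bit, 0x3 -> 2 bits, 0x7 -> 3 bits, and every other index
 * to 0.
 */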
596 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
597 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
599 struct amd64_pvt *pvt;
606 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
607 * concerning translating a DramAddr to an InputAddr.
609 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
610 input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +
613 debugf2(" Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
614 intlv_shift, (unsigned long)dram_addr,
615 (unsigned long)input_addr);
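/*
 * Editor's trace with an assumed address: for two interleaved nodes
 * (intlv_shift == 1) and dram_addr == 0x12345678, the low 12 bits are kept
 * and one interleave bit is squeezed out of the rest:
 *   ((0x12345678 >> 1) & 0xffffff000ull) + (0x12345678 & 0xfff)
 *   = 0x91a2000 + 0x678 = 0x91a2678
 */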
621 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
622 * assumed that @sys_addr maps to the node given by mci.
624 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
629 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
631 debugf2("SysAdddr 0x%lx translates to InputAddr 0x%lx\n",
632 (unsigned long)sys_addr, (unsigned long)input_addr);
639 * @input_addr is an InputAddr associated with the node represented by mci.
640 * Translate @input_addr to a DramAddr and return the result.
642 static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
644 struct amd64_pvt *pvt;
645 int node_id, intlv_shift;
650 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
651 * shows how to translate a DramAddr to an InputAddr. Here we reverse
652 * this procedure. When translating from a DramAddr to an InputAddr, the
653 * bits used for node interleaving are discarded. Here we recover these
654 * bits from the IntlvSel field of the DRAM Limit register (section
655 * 3.4.4.2) for the node that input_addr is associated with.
658 node_id = pvt->mc_node_id;
659 BUG_ON((node_id < 0) || (node_id > 7));
661 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
663 if (intlv_shift == 0) {
664 debugf1(" InputAddr 0x%lx translates to DramAddr of "
665 "same value\n", (unsigned long)input_addr);
670 bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
671 (input_addr & 0xfff);
673 intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
674 dram_addr = bits + (intlv_sel << 12);
676 debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
677 "(%d node interleave bits)\n", (unsigned long)input_addr,
678 (unsigned long)dram_addr, intlv_shift);
684 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
685 * @dram_addr to a SysAddr.
687 static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
689 struct amd64_pvt *pvt = mci->pvt_info;
690 u64 hole_base, hole_offset, hole_size, base, sys_addr;
693 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
696 if ((dram_addr >= hole_base) &&
697 (dram_addr < (hole_base + hole_size))) {
698 sys_addr = dram_addr + hole_offset;
700 debugf1("using DHAR to translate DramAddr 0x%lx to "
701 "SysAddr 0x%lx\n", (unsigned long)dram_addr,
702 (unsigned long)sys_addr);
708 base = get_dram_base(pvt, pvt->mc_node_id);
709 sys_addr = dram_addr + base;
712 * The sys_addr we have computed up to this point is a 40-bit value
713 * because the k8 deals with 40-bit values. However, the value we are
714 * supposed to return is a full 64-bit physical address. The AMD
715 * x86-64 architecture specifies that the most significant implemented
716 * address bit through bit 63 of a physical address must be either all
717 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
718 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
719 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
722 sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
724 debugf1(" Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
725 pvt->mc_node_id, (unsigned long)dram_addr,
726 (unsigned long)sys_addr);
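/*
 * Editor's note on the sign extension above: when bit 39 of sys_addr is set,
 * (sys_addr & (1ull << 39)) - 1 has only bits 38-0 set, so its complement
 * ORs 1s into bits 63-39. When bit 39 is clear, the term is 0 - 1 = all 1s,
 * whose complement is 0, and sys_addr is left unchanged.
 */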
732 * @input_addr is an InputAddr associated with the node given by mci. Translate
733 * @input_addr to a SysAddr.
735 static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
738 return dram_addr_to_sys_addr(mci,
739 input_addr_to_dram_addr(mci, input_addr));
743 * Find the minimum and maximum InputAddr values that map to the given @csrow.
744 * Pass back these values in *input_addr_min and *input_addr_max.
746 static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
747 u64 *input_addr_min, u64 *input_addr_max)
749 struct amd64_pvt *pvt;
753 BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
755 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
757 *input_addr_min = base & ~mask;
758 *input_addr_max = base | mask;
761 /* Map the Error address to a PAGE and PAGE OFFSET. */
762 static inline void error_address_to_page_and_offset(u64 error_address,
763 u32 *page, u32 *offset)
765 *page = (u32) (error_address >> PAGE_SHIFT);
766 *offset = ((u32) error_address) & ~PAGE_MASK;
770 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
771 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
772 * of a node that detected an ECC memory error. mci represents the node that
773 * the error address maps to (possibly different from the node that detected
774 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
777 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
781 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
784 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
785 "address 0x%lx\n", (unsigned long)sys_addr);
789 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
791 static u16 extract_syndrome(struct err_regs *err)
793 return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
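/*
 * Editor's note: the 16-bit syndrome is split across the two MCA status
 * registers -- bits [7:0] sit in NBSH[22:15] and bits [15:8] in NBSL[31:24];
 * the shifts and masks above stitch them back together.
 */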
797 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
800 static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
803 enum edac_type edac_cap = EDAC_FLAG_NONE;
805 bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
809 if (pvt->dclr0 & BIT(bit))
810 edac_cap = EDAC_FLAG_SECDED;
816 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
818 static void amd64_dump_dramcfg_low(u32 dclr, int chan)
820 debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
822 debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
823 (dclr & BIT(16)) ? "un" : "",
824 (dclr & BIT(19)) ? "yes" : "no");
826 debugf1(" PAR/ERR parity: %s\n",
827 (dclr & BIT(8)) ? "enabled" : "disabled");
829 debugf1(" DCT 128bit mode width: %s\n",
830 (dclr & BIT(11)) ? "128b" : "64b");
832 debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
833 (dclr & BIT(12)) ? "yes" : "no",
834 (dclr & BIT(13)) ? "yes" : "no",
835 (dclr & BIT(14)) ? "yes" : "no",
836 (dclr & BIT(15)) ? "yes" : "no");
839 /* Display and decode various NB registers for debug purposes. */
840 static void dump_misc_regs(struct amd64_pvt *pvt)
842 debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
844 debugf1(" NB two channel DRAM capable: %s\n",
845 (pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
847 debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
848 (pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
849 (pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
851 amd64_dump_dramcfg_low(pvt->dclr0, 0);
853 debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
855 debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
857 pvt->dhar, dhar_base(pvt),
858 (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
859 : f10_dhar_offset(pvt));
861 debugf1(" DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
863 amd64_debug_display_dimm_sizes(0, pvt);
865 /* everything below this point is Fam10h and above */
866 if (boot_cpu_data.x86 == 0xf)
869 amd64_debug_display_dimm_sizes(1, pvt);
871 amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));
873 /* Only if NOT ganged does dclr1 have valid info */
874 if (!dct_ganging_enabled(pvt))
875 amd64_dump_dramcfg_low(pvt->dclr1, 1);
878 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
880 amd64_read_dct_pci_cfg(pvt, DBAM0, &pvt->dbam0);
881 amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
885 * see BKDG, F2x[1,0][5C:40], F2x[1,0][6C:60]
887 static void prep_chip_selects(struct amd64_pvt *pvt)
889 if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
890 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
891 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
893 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
894 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
899 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask registers
901 static void read_dct_base_mask(struct amd64_pvt *pvt)
905 prep_chip_selects(pvt);
907 for_each_chip_select(cs, 0, pvt) {
908 u32 reg0 = DCSB0 + (cs * 4);
909 u32 reg1 = DCSB1 + (cs * 4);
910 u32 *base0 = &pvt->csels[0].csbases[cs];
911 u32 *base1 = &pvt->csels[1].csbases[cs];
913 if (!amd64_read_dct_pci_cfg(pvt, reg0, base0))
914 debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
917 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
920 if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
921 debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
925 for_each_chip_select_mask(cs, 0, pvt) {
926 u32 reg0 = DCSM0 + (cs * 4);
927 u32 reg1 = DCSM1 + (cs * 4);
928 u32 *mask0 = &pvt->csels[0].csmasks[cs];
929 u32 *mask1 = &pvt->csels[1].csmasks[cs];
931 if (!amd64_read_dct_pci_cfg(pvt, reg0, mask0))
932 debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
935 if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
938 if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
939 debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
944 static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
948 if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
949 if (pvt->dchr0 & DDR3_MODE)
950 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
952 type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
954 type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
957 amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);
963 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
964 * and the later RevF memory controllers (DDR vs DDR2)
967 * number of memory channels in operation
969 * contents of the DCL0_LOW register
971 static int k8_early_channel_count(struct amd64_pvt *pvt)
975 err = amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
979 if (pvt->ext_model >= K8_REV_F)
980 /* RevF (NPT) and later */
981 flag = pvt->dclr0 & F10_WIDTH_128;
983 /* RevE and earlier */
984 flag = pvt->dclr0 & REVE_WIDTH_128;
989 return (flag) ? 2 : 1;
992 /* extract the ERROR ADDRESS for the K8 CPUs */
993 static u64 k8_get_error_address(struct mem_ctl_info *mci,
994 struct err_regs *info)
996 return (((u64) (info->nbeah & 0xff)) << 32) +
997 (info->nbeal & ~0x03);
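/*
 * Editor's note: on K8 the error address is 40 bits wide -- NBEAH[7:0]
 * supplies address bits [39:32] and NBEAL supplies bits [31:2] (the low two
 * bits are reserved, hence the ~0x03 mask). Compare with
 * f10_get_error_address() below, which widens this to 48 bits.
 */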
1000 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1002 u32 off = range << 3;
1004 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1005 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1007 if (boot_cpu_data.x86 == 0xf)
1010 if (!dram_rw(pvt, range))
1013 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1014 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1017 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1018 struct err_regs *err_info, u64 sys_addr)
1020 struct mem_ctl_info *src_mci;
1025 syndrome = extract_syndrome(err_info);
1027 /* CHIPKILL enabled */
1028 if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
1029 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1032 * Syndrome didn't map, so we don't know which of the
1033 * 2 DIMMs is in error. So we need to ID 'both' of them
1036 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
1037 "error reporting race\n", syndrome);
1038 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1043 * non-chipkill ecc mode
1045 * The k8 documentation is unclear about how to determine the
1046 * channel number when using non-chipkill memory. This method
1047 * was obtained from email communication with someone at AMD.
1048 * (Wish the email was placed in this comment - norsk)
1050 channel = ((sys_addr & BIT(3)) != 0);
1054 * Find out which node the error address belongs to. This may be
1055 * different from the node that detected the error.
1057 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1059 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1060 (unsigned long)sys_addr);
1061 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1065 /* Now map the sys_addr to a CSROW */
1066 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1068 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1070 error_address_to_page_and_offset(sys_addr, &page, &offset);
1072 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1073 channel, EDAC_MOD_STR);
1077 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1081 if (pvt->ext_model >= K8_REV_F)
1082 dbam_map = ddr2_dbam;
1083 else if (pvt->ext_model >= K8_REV_D)
1084 dbam_map = ddr2_dbam_revD;
1086 dbam_map = ddr2_dbam_revCG;
1088 return dbam_map[cs_mode];
1092 * Get the number of DCT channels in use.
1095 * number of Memory Channels in operation
1097 * contents of the DCL0_LOW register
1099 static int f10_early_channel_count(struct amd64_pvt *pvt)
1101 int dbams[] = { DBAM0, DBAM1 };
1102 int i, j, channels = 0;
1105 /* If we are in 128 bit mode, then we are using 2 channels */
1106 if (pvt->dclr0 & F10_WIDTH_128) {
1112 * Need to check if in unganged mode: in that case, there are 2 channels,
1113 * but they are not in 128 bit mode and thus the above 'dclr0' status
1116 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1117 * their CSEnable bit on. If so, then SINGLE DIMM case.
1119 debugf0("Data width is not 128 bits - need more decoding\n");
1122 * Check DRAM Bank Address Mapping values for each DIMM to see if there
1123 * is more than just one DIMM present in unganged mode. Need to check
1124 * both controllers since DIMMs can be placed in either one.
1126 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1127 if (amd64_read_dct_pci_cfg(pvt, dbams[i], &dbam))
1130 for (j = 0; j < 4; j++) {
1131 if (DBAM_DIMM(j, dbam) > 0) {
1141 amd64_info("MCT channel count: %d\n", channels);
1150 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1154 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1155 dbam_map = ddr3_dbam;
1157 dbam_map = ddr2_dbam;
1159 return dbam_map[cs_mode];
1162 static u64 f10_get_error_address(struct mem_ctl_info *mci,
1163 struct err_regs *info)
1165 return (((u64) (info->nbeah & 0xffff)) << 32) +
1166 (info->nbeal & ~0x01);
1169 static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1172 if (!amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_LOW, &pvt->dct_sel_low)) {
1173 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, High range addrs at: 0x%x\n",
1174 pvt->dct_sel_low, dct_sel_baseaddr(pvt));
1176 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1177 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1178 (dct_dram_enabled(pvt) ? "yes" : "no"));
1180 if (!dct_ganging_enabled(pvt))
1181 debugf0(" Address range split per DCT: %s\n",
1182 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1184 debugf0(" DCT data interleave for ECC: %s, "
1185 "DRAM cleared since last warm reset: %s\n",
1186 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1187 (dct_memory_cleared(pvt) ? "yes" : "no"));
1189 debugf0(" DCT channel interleave: %s, "
1190 "DCT interleave bits selector: 0x%x\n",
1191 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1192 dct_sel_interleave_addr(pvt));
1195 amd64_read_dct_pci_cfg(pvt, F10_DCTL_SEL_HIGH, &pvt->dct_sel_hi);
1199 * Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
1200 * Interleaving Modes.
1202 static u8 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1203 bool hi_range_sel, u8 intlv_en)
1205 u32 dct_sel_high = (pvt->dct_sel_low >> 1) & 1;
1207 if (dct_ganging_enabled(pvt))
1211 return dct_sel_high;
1214 * see F2x110[DctSelIntLvAddr] - channel interleave mode
1216 if (dct_interleave_enabled(pvt)) {
1217 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1219 /* return DCT select function: 0=DCT0, 1=DCT1 */
1221 return sys_addr >> 6 & 1;
1223 if (intlv_addr & 0x2) {
1224 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1225 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;
1227 return ((sys_addr >> shift) & 1) ^ temp;
1230 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1233 if (dct_high_range_enabled(pvt))
1234 return ~dct_sel_high & 1;
1239 /* Convert the sys_addr to the normalized DCT address */
1240 static u64 f10_get_norm_dct_addr(struct amd64_pvt *pvt, int range,
1241 u64 sys_addr, bool hi_rng,
1242 u32 dct_sel_base_addr)
1245 u64 dram_base = get_dram_base(pvt, range);
1246 u64 hole_off = f10_dhar_offset(pvt);
1247 u32 hole_valid = dhar_valid(pvt);
1248 u64 dct_sel_base_off = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1253 * base address of high range is below 4Gb
1254 * (bits [47:27] at [31:11])
1255 * DRAM address space on this DCT is hoisted above 4Gb &&
1258 * remove hole offset from sys_addr
1260 * remove high range offset from sys_addr
1262 if ((!(dct_sel_base_addr >> 16) ||
1263 dct_sel_base_addr < dhar_base(pvt)) &&
1265 (sys_addr >= BIT_64(32)))
1266 chan_off = hole_off;
1268 chan_off = dct_sel_base_off;
1272 * we have a valid hole &&
1277 * remove dram base to normalize to DCT address
1279 if (hole_valid && (sys_addr >= BIT_64(32)))
1280 chan_off = hole_off;
1282 chan_off = dram_base;
1285 return (sys_addr & GENMASK(6, 47)) - (chan_off & GENMASK(23, 47));
1288 /* Hack for the time being - Can we get this from BIOS?? */
1289 #define CH0SPARE_RANK 0
1290 #define CH1SPARE_RANK 1
1293 * Check if the csrow passed in is marked as SPARED; if so, return the new
1296 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1301 /* Depending on channel, isolate respective SPARING info */
1303 swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
1304 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
1305 if (swap_done && (csrow == bad_dram_cs))
1306 csrow = CH1SPARE_RANK;
1308 swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
1309 bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
1310 if (swap_done && (csrow == bad_dram_cs))
1311 csrow = CH0SPARE_RANK;
1317 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1318 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
1321 * -EINVAL: NOT FOUND
1322 * 0..csrow = Chip-Select Row
1324 static int f10_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
1326 struct mem_ctl_info *mci;
1327 struct amd64_pvt *pvt;
1328 u64 cs_base, cs_mask;
1329 int cs_found = -EINVAL;
1336 pvt = mci->pvt_info;
1338 debugf1("input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1340 for_each_chip_select(csrow, dct, pvt) {
1341 if (!csrow_enabled(csrow, dct, pvt))
1344 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1346 debugf1(" CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1347 csrow, cs_base, cs_mask);
1351 debugf1(" (InputAddr & ~CSMask)=0x%llx "
1352 "(CSBase & ~CSMask)=0x%llx\n",
1353 (in_addr & cs_mask), (cs_base & cs_mask));
1355 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1356 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1358 debugf1(" MATCH csrow=%d\n", cs_found);
1365 /* For a given @dram_range, check if @sys_addr falls within it. */
1366 static int f10_match_to_this_node(struct amd64_pvt *pvt, int range,
1367 u64 sys_addr, int *nid, int *chan_sel)
1369 int cs_found = -EINVAL;
1371 u32 tmp, dct_sel_base;
1373 bool high_range = false;
1375 u8 node_id = dram_dst_node(pvt, range);
1376 u8 intlv_en = dram_intlv_en(pvt, range);
1377 u32 intlv_sel = dram_intlv_sel(pvt, range);
1379 debugf1("(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1380 range, sys_addr, get_dram_limit(pvt, range));
1383 (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1386 dct_sel_base = dct_sel_baseaddr(pvt);
1389 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1390 * select between DCT0 and DCT1.
1392 if (dct_high_range_enabled(pvt) &&
1393 !dct_ganging_enabled(pvt) &&
1394 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1397 channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);
1399 chan_addr = f10_get_norm_dct_addr(pvt, range, sys_addr,
1400 high_range, dct_sel_base);
1402 /* remove Node ID (in case of memory interleaving) */
1403 tmp = chan_addr & 0xFC0;
1405 chan_addr = ((chan_addr >> hweight8(intlv_en)) & 0xFFFFFFFFF000ULL) | tmp;
1407 /* remove channel interleave and hash */
1408 if (dct_interleave_enabled(pvt) &&
1409 !dct_high_range_enabled(pvt) &&
1410 !dct_ganging_enabled(pvt)) {
1411 if (dct_sel_interleave_addr(pvt) != 1)
1412 chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
1414 tmp = chan_addr & 0xFC0;
1415 chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)
1420 debugf1(" (ChannelAddrLong=0x%llx)\n", chan_addr);
1422 cs_found = f10_lookup_addr_in_dct(chan_addr, node_id, channel);
1424 if (cs_found >= 0) {
1426 *chan_sel = channel;
1431 static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1432 int *node, int *chan_sel)
1434 int range, cs_found = -EINVAL;
1436 for (range = 0; range < DRAM_RANGES; range++) {
1438 if (!dram_rw(pvt, range))
1441 if ((get_dram_base(pvt, range) <= sys_addr) &&
1442 (get_dram_limit(pvt, range) >= sys_addr)) {
1444 cs_found = f10_match_to_this_node(pvt, range,
1455 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1456 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1458 * The @sys_addr is usually an error address received from the hardware
1461 static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1462 struct err_regs *err_info,
1465 struct amd64_pvt *pvt = mci->pvt_info;
1467 int nid, csrow, chan = 0;
1470 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1473 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1477 error_address_to_page_and_offset(sys_addr, &page, &offset);
1479 syndrome = extract_syndrome(err_info);
1482 * We need the syndromes for channel detection only when we're
1483 * ganged. Otherwise @chan should already contain the channel at
1486 if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
1487 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1490 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1494 * Channel unknown, report all channels on this CSROW as failed.
1496 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1497 edac_mc_handle_ce(mci, page, offset, syndrome,
1498 csrow, chan, EDAC_MOD_STR);
1502 * debug routine to display the memory sizes of all logical DIMMs and their
1505 static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1507 int dimm, size0, size1, factor = 0;
1511 if (boot_cpu_data.x86 == 0xf) {
1512 if (pvt->dclr0 & F10_WIDTH_128)
1515 /* K8 families < revF not supported yet */
1516 if (pvt->ext_model < K8_REV_F)
1522 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1 : pvt->dbam0;
1523 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->csels[1].csbases
1524 : pvt->csels[0].csbases;
1526 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n", ctrl, dbam);
1528 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1530 /* Dump memory sizes for DIMM and its CSROWs */
1531 for (dimm = 0; dimm < 4; dimm++) {
1534 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
1535 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1538 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
1539 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1541 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
1542 dimm * 2, size0 << factor,
1543 dimm * 2 + 1, size1 << factor);
1547 static struct amd64_family_type amd64_family_types[] = {
1550 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1551 .f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1553 .early_channel_count = k8_early_channel_count,
1554 .get_error_address = k8_get_error_address,
1555 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1556 .dbam_to_cs = k8_dbam_to_chip_select,
1557 .read_dct_pci_cfg = k8_read_dct_pci_cfg,
1562 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1563 .f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1565 .early_channel_count = f10_early_channel_count,
1566 .get_error_address = f10_get_error_address,
1567 .read_dram_ctl_register = f10_read_dram_ctl_register,
1568 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1569 .dbam_to_cs = f10_dbam_to_chip_select,
1570 .read_dct_pci_cfg = f10_read_dct_pci_cfg,
1576 .read_dct_pci_cfg = f15_read_dct_pci_cfg,
1581 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1582 unsigned int device,
1583 struct pci_dev *related)
1585 struct pci_dev *dev = NULL;
1587 dev = pci_get_device(vendor, device, dev);
1589 if ((dev->bus->number == related->bus->number) &&
1590 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1592 dev = pci_get_device(vendor, device, dev);
1599 * These are tables of eigenvectors (one per line) which can be used for the
1600 * construction of the syndrome tables. The modified syndrome search algorithm
1601 * uses those to find the symbol in error and thus the DIMM.
1603 * Algorithm courtesy of Ross LaFetra from AMD.
1605 static u16 x4_vectors[] = {
1606 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1607 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1608 0x0001, 0x0002, 0x0004, 0x0008,
1609 0x1013, 0x3032, 0x4044, 0x8088,
1610 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1611 0x4857, 0xc4fe, 0x13cc, 0x3288,
1612 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1613 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1614 0x15c1, 0x2a42, 0x89ac, 0x4758,
1615 0x2b03, 0x1602, 0x4f0c, 0xca08,
1616 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1617 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1618 0x2b87, 0x164e, 0x642c, 0xdc18,
1619 0x40b9, 0x80de, 0x1094, 0x20e8,
1620 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1621 0x11c1, 0x2242, 0x84ac, 0x4c58,
1622 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1623 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1624 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1625 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1626 0x16b3, 0x3d62, 0x4f34, 0x8518,
1627 0x1e2f, 0x391a, 0x5cac, 0xf858,
1628 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1629 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1630 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1631 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1632 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1633 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1634 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1635 0x185d, 0x2ca6, 0x7914, 0x9e28,
1636 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1637 0x4199, 0x82ee, 0x19f4, 0x2e58,
1638 0x4807, 0xc40e, 0x130c, 0x3208,
1639 0x1905, 0x2e0a, 0x5804, 0xac08,
1640 0x213f, 0x132a, 0xadfc, 0x5ba8,
1641 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1644 static u16 x8_vectors[] = {
1645 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
1646 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
1647 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1648 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1649 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1650 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1651 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1652 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1653 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1654 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1655 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1656 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1657 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1658 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1659 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1660 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1661 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1662 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1663 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1666 static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
1669 unsigned int i, err_sym;
1671 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1673 int v_idx = err_sym * v_dim;
1674 int v_end = (err_sym + 1) * v_dim;
1676 /* walk over all 16 bits of the syndrome */
1677 for (i = 1; i < (1U << 16); i <<= 1) {
1679 /* if bit is set in that eigenvector... */
1680 if (v_idx < v_end && vectors[v_idx] & i) {
1681 u16 ev_comp = vectors[v_idx++];
1683 /* ... and bit set in the modified syndrome, */
1693 /* can't get to zero, move to next symbol */
1698 debugf0("syndrome(%x) not found\n", syndrome);
1702 static int map_err_sym_to_channel(int err_sym, int sym_size)
1715 return err_sym >> 4;
1721 /* imaginary bits not in a DIMM */
1723 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1735 return err_sym >> 3;
1741 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1743 struct amd64_pvt *pvt = mci->pvt_info;
1746 if (pvt->syn_type == 8)
1747 err_sym = decode_syndrome(syndrome, x8_vectors,
1748 ARRAY_SIZE(x8_vectors),
1750 else if (pvt->syn_type == 4)
1751 err_sym = decode_syndrome(syndrome, x4_vectors,
1752 ARRAY_SIZE(x4_vectors),
1755 amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);
1759 return map_err_sym_to_channel(err_sym, pvt->syn_type);
1763 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
1764 * ADDRESS and process.
1766 static void amd64_handle_ce(struct mem_ctl_info *mci,
1767 struct err_regs *info)
1769 struct amd64_pvt *pvt = mci->pvt_info;
1772 /* Ensure that the Error Address is VALID */
1773 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
1774 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1775 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1779 sys_addr = pvt->ops->get_error_address(mci, info);
1781 amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
1783 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
1786 /* Handle any Uncorrectable Errors (UEs) */
1787 static void amd64_handle_ue(struct mem_ctl_info *mci,
1788 struct err_regs *info)
1790 struct amd64_pvt *pvt = mci->pvt_info;
1791 struct mem_ctl_info *log_mci, *src_mci = NULL;
1798 if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
1799 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1800 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1804 sys_addr = pvt->ops->get_error_address(mci, info);
1807 * Find out which node the error address belongs to. This may be
1808 * different from the node that detected the error.
1810 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1812 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1813 (unsigned long)sys_addr);
1814 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1820 csrow = sys_addr_to_csrow(log_mci, sys_addr);
1822 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1823 (unsigned long)sys_addr);
1824 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
1826 error_address_to_page_and_offset(sys_addr, &page, &offset);
1827 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1831 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1832 struct err_regs *info)
1834 u16 ec = EC(info->nbsl);
1835 u8 xec = XEC(info->nbsl, 0x1f);
1836 int ecc_type = (info->nbsh >> 13) & 0x3;
1838 /* Bail out early if this was an 'observed' error */
1839 if (PP(ec) == K8_NBSL_PP_OBS)
1842 /* Do only ECC errors */
1843 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1847 amd64_handle_ce(mci, info);
1848 else if (ecc_type == 1)
1849 amd64_handle_ue(mci, info);
1852 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1854 struct mem_ctl_info *mci = mcis[node_id];
1855 struct err_regs regs;
1857 regs.nbsl = (u32) m->status;
1858 regs.nbsh = (u32)(m->status >> 32);
1859 regs.nbeal = (u32) m->addr;
1860 regs.nbeah = (u32)(m->addr >> 32);
1863 __amd64_decode_bus_error(mci, &regs);
1866 * Check the UE bit of the NB status high register; if set, generate some
1867 * logs. If NOT a GART error, then process the event as a NO-INFO event.
1868 * If it was a GART error, skip that process.
1870 * FIXME: this should go somewhere else, if at all.
1872 if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
1873 edac_mc_handle_ue_no_info(mci, "UE bit is set");
1878 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
1879 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
1881 static int reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 f1_id, u16 f3_id)
1883 /* Reserve the ADDRESS MAP Device */
1884 pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
1886 amd64_err("error address map device not found: "
1887 "vendor %x device 0x%x (broken BIOS?)\n",
1888 PCI_VENDOR_ID_AMD, f1_id);
1892 /* Reserve the MISC Device */
1893 pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
1895 pci_dev_put(pvt->F1);
1898 amd64_err("error F3 device not found: "
1899 "vendor %x device 0x%x (broken BIOS?)\n",
1900 PCI_VENDOR_ID_AMD, f3_id);
1904 debugf1("F1: %s\n", pci_name(pvt->F1));
1905 debugf1("F2: %s\n", pci_name(pvt->F2));
1906 debugf1("F3: %s\n", pci_name(pvt->F3));
1911 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
1913 pci_dev_put(pvt->F1);
1914 pci_dev_put(pvt->F3);
1918 * Retrieve the hardware registers of the memory controller (this includes the
1919 * 'Address Map' and 'Misc' device regs)
1921 static void read_mc_regs(struct amd64_pvt *pvt)
1928 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
1929 * those are Read-As-Zero
1931 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
1932 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
1934 /* check first whether TOP_MEM2 is enabled */
1935 rdmsrl(MSR_K8_SYSCFG, msr_val);
1936 if (msr_val & (1U << 21)) {
1937 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
1938 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
1940 debugf0(" TOP_MEM2 disabled.\n");
1942 amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);
1944 if (pvt->ops->read_dram_ctl_register)
1945 pvt->ops->read_dram_ctl_register(pvt);
1947 for (range = 0; range < DRAM_RANGES; range++) {
1950 /* read settings for this DRAM range */
1951 read_dram_base_limit_regs(pvt, range);
1953 rw = dram_rw(pvt, range);
1957 debugf1(" DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
1959 get_dram_base(pvt, range),
1960 get_dram_limit(pvt, range));
1962 debugf1(" IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
1963 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
1964 (rw & 0x1) ? "R" : "-",
1965 (rw & 0x2) ? "W" : "-",
1966 dram_intlv_sel(pvt, range),
1967 dram_dst_node(pvt, range));
1970 read_dct_base_mask(pvt);
1972 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
1973 amd64_read_dbam_reg(pvt);
1975 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
1977 amd64_read_dct_pci_cfg(pvt, F10_DCLR_0, &pvt->dclr0);
1978 amd64_read_dct_pci_cfg(pvt, F10_DCHR_0, &pvt->dchr0);
1980 if (!dct_ganging_enabled(pvt)) {
1981 amd64_read_dct_pci_cfg(pvt, F10_DCLR_1, &pvt->dclr1);
1982 amd64_read_dct_pci_cfg(pvt, F10_DCHR_1, &pvt->dchr1);
1985 if (boot_cpu_data.x86 >= 0x10)
1986 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
1988 if (boot_cpu_data.x86 == 0x10 &&
1989 boot_cpu_data.x86_model > 7 &&
1990 /* F3x180[EccSymbolSize]=1 => x8 symbols */
1996 dump_misc_regs(pvt);
2000 * NOTE: CPU Revision Dependent code
2003 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2004 * k8 private pointer to -->
2005 * DRAM Bank Address mapping register
2007 * DCL register where dual_channel_active is
2009 * The DBAM register consists of 4 sets of 4 bits each definitions:
2012 * 0-3 CSROWs 0 and 1
2013 * 4-7 CSROWs 2 and 3
2014 * 8-11 CSROWs 4 and 5
2015 * 12-15 CSROWs 6 and 7
2017 * Values range from 0 to 15.
2018 * The meaning of the values depends on CPU revision and dual-channel state;
2019 * see the relevant BKDG for more info.
2021 * The memory controller provides a total of only 8 CSROWs in its current
2022 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2023 * single channel mode or two (2) DIMMs in dual channel mode.
2025 * The following code logic collapses the various tables for CSROW based on CPU
2029 * The number of PAGE_SIZE pages on the specified CSROW number it
2033 static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2035 u32 cs_mode, nr_pages;
2038 * The math on this doesn't look right on the surface because x/2*4 can
2039 * be simplified to x*2 but this expression makes use of the fact that
2040 * it is integer math where 1/2=0. This intermediate value becomes the
2041 * number of bits to shift the DBAM register to extract the proper CSROW
2044 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2046 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
2049 * If dual channel then double the memory size of single channel.
2050 * Channel count is 1 or 2
2052 nr_pages <<= (pvt->channel_count - 1);
2054 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2055 debugf0(" nr_pages= %u channel-count = %d\n",
2056 nr_pages, pvt->channel_count);
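/*
 * Editor's trace of the shift above: for csrow_nr == 5, integer math gives
 * (5 / 2) * 4 == 8, so bits [11:8] of the DBAM register -- the field shared
 * by csrows 4 and 5 per the layout table above -- are extracted as cs_mode.
 */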
2062 * Initialize the array of csrow attribute instances, based on the values
2063 * from pci config hardware registers.
2065 static int init_csrows(struct mem_ctl_info *mci)
2067 struct csrow_info *csrow;
2068 struct amd64_pvt *pvt = mci->pvt_info;
2069 u64 input_addr_min, input_addr_max, sys_addr, base, mask;
2073 amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &val);
2076 pvt->ctl_error_info.nbcfg = val;
2078 debugf0("node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2079 pvt->mc_node_id, val,
2080 !!(val & K8_NBCFG_CHIPKILL), !!(val & K8_NBCFG_ECC_ENABLE));
2082 for_each_chip_select(i, 0, pvt) {
2083 csrow = &mci->csrows[i];
2085 if (!csrow_enabled(i, 0, pvt)) {
2086 debugf1("----CSROW %d EMPTY for node %d\n", i,
2091 debugf1("----CSROW %d VALID for MC node %d\n",
2092 i, pvt->mc_node_id);
2095 csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
2096 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2097 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2098 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2099 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2100 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2102 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2103 csrow->page_mask = ~mask;
2104 /* 8 bytes of resolution */
2106 csrow->mtype = amd64_determine_memory_type(pvt, i);
2108 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2109 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
2110 (unsigned long)input_addr_min,
2111 (unsigned long)input_addr_max);
2112 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2113 (unsigned long)sys_addr, csrow->page_mask);
2114 debugf1(" nr_pages: %u first_page: 0x%lx "
2115 "last_page: 0x%lx\n",
2116 (unsigned)csrow->nr_pages,
2117 csrow->first_page, csrow->last_page);
2120 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2122 if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
2124 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
2125 EDAC_S4ECD4ED : EDAC_SECDED;
2127 csrow->edac_mode = EDAC_NONE;
2133 /* get all cores on this DCT */
2134 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2138 for_each_online_cpu(cpu)
2139 if (amd_get_nb_id(cpu) == nid)
2140 cpumask_set_cpu(cpu, mask);
2143 /* check MCG_CTL on all the cpus on this node */
2144 static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2150 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2151 amd64_warn("%s: Error allocating mask\n", __func__);
2155 get_cpus_on_this_dct_cpumask(mask, nid);
2157 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2159 for_each_cpu(cpu, mask) {
2160 struct msr *reg = per_cpu_ptr(msrs, cpu);
2161 nbe = reg->l & K8_MSR_MCGCTL_NBE;
2163 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2165 (nbe ? "enabled" : "disabled"));
2173 free_cpumask_var(mask);
2177 static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
2179 cpumask_var_t cmask;
2182 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2183 amd64_warn("%s: error allocating mask\n", __func__);
2187 get_cpus_on_this_dct_cpumask(cmask, nid);
2189 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2191 for_each_cpu(cpu, cmask) {
2193 struct msr *reg = per_cpu_ptr(msrs, cpu);
2196 if (reg->l & K8_MSR_MCGCTL_NBE)
2197 s->flags.nb_mce_enable = 1;
2199 reg->l |= K8_MSR_MCGCTL_NBE;
2202 * Turn off NB MCE reporting only when it was off before
2204 if (!s->flags.nb_mce_enable)
2205 reg->l &= ~K8_MSR_MCGCTL_NBE;
2208 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2210 free_cpumask_var(cmask);
2215 static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
2219 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2221 if (toggle_ecc_err_reporting(s, nid, ON)) {
2222 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
2226 amd64_read_pci_cfg(F3, K8_NBCTL, &value);
2228 /* turn on UECCEn and CECCEn bits */
2229 s->old_nbctl = value & mask;
2230 s->nbctl_valid = true;
2233 amd64_write_pci_cfg(F3, K8_NBCTL, value);
2235 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2237 debugf0("1: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2239 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
2241 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2242 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
2244 s->flags.nb_ecc_prev = 0;
2246 /* Attempt to turn on DRAM ECC Enable */
2247 value |= K8_NBCFG_ECC_ENABLE;
2248 amd64_write_pci_cfg(F3, K8_NBCFG, value);
2250 amd64_read_pci_cfg(F3, K8_NBCFG, &value);
2252 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2253 amd64_warn("Hardware rejected DRAM ECC enable,"
2254 "check memory DIMM configuration.\n");
2257 amd64_info("Hardware accepted DRAM ECC Enable\n");
2260 s->flags.nb_ecc_prev = 1;
2263 debugf0("2: node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2265 !!(value & K8_NBCFG_CHIPKILL), !!(value & K8_NBCFG_ECC_ENABLE));
static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
					struct pci_dev *F3)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!s->nbctl_valid)
		return;

	amd64_read_pci_cfg(F3, K8_NBCTL, &value);
	value &= ~mask;
	value |= s->old_nbctl;

	amd64_write_pci_cfg(F3, K8_NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting we force-enabled */
	if (!s->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(F3, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		amd64_write_pci_cfg(F3, K8_NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (toggle_ecc_err_reporting(s, nid, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before
 * taking over the processing of ECC errors. A command line
 * option allows forcing hardware ECC on later, in
 * enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static bool ecc_enabled(struct pci_dev *F3, u8 nid)
{
	u32 value;
	u8 ecc_en = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(F3, K8_NBCFG, &value);

	ecc_en = !!(value & K8_NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_en ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(nid);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, set MSR "
			     "0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, nid);

	if (!ecc_en || !nb_mce_en) {
		amd64_notice("%s", ecc_msg);
		return false;
	}
	return true;
}
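/*
 * Both conditions must hold before the driver takes over: DRAM ECC checking
 * in NBCFG and NB MCE reporting in each core's MCG_CTL. With either one
 * missing, no ECC error would ever reach amd64_decode_bus_error().
 */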
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };

/* Set the sysfs attributes: debug attrs always, injection attrs on F10h+ */
static void set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	if (boot_cpu_data.x86 >= 0x10)
		for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
			sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
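/*
 * With the two scrub-rate hooks wired up, the EDAC core exposes the DRAM
 * scrubbing bandwidth (in bytes/sec) through sysfs, typically as
 * /sys/devices/system/edac/mc/mc<N>/sdram_scrub_rate.
 */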
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type		= &amd64_family_types[K8_CPUS];
		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		fam_type		= &amd64_family_types[F10_CPUS];
		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= F10_MIN_SCRUB_RATE_BITS;
		break;
	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);
	return fam_type;
}
static int amd64_init_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	struct mem_ctl_info *mci = NULL;
	int err = 0, ret;
	u8 nid = get_node_id(F2);

	ret = -ENOMEM;
	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_ret;

	pvt->mc_node_id	= nid;
	pvt->F2		= F2;

	ret = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	ret = -ENODEV;
	err = reserve_mc_sibling_devs(pvt, fam_type->f1_id, fam_type->f3_id);
	if (err)
		goto err_free;

	read_mc_regs(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure.
	 */
	ret = -EINVAL;
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_siblings;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
	if (!mci)
		goto err_siblings;

	mci->pvt_info = pvt;
	mci->dev = &pvt->F2->dev;

	setup_mci_misc_attrs(mci);

	if (init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	set_mc_sysfs_attrs(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_add_mc;
	}

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	mcis[nid] = mci;

	atomic_inc(&drv_instances);

	return 0;

err_add_mc:
	edac_mc_free(mci);
err_siblings:
	free_mc_sibling_devs(pvt);
err_free:
	kfree(pvt);
err_ret:
	return ret;
}
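/*
 * The error labels above unwind in reverse order of setup: the mci is freed
 * before the reserved F1/F3 sibling devices are released, and the pvt
 * allocation goes last. amd64_remove_one_instance() walks the same steps for
 * a fully probed instance.
 */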
static int __devinit amd64_probe_one_instance(struct pci_dev *pdev,
					      const struct pci_device_id *mc_type)
{
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s;
	int ret = 0;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = -ENOMEM;
	s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
	if (!s)
		goto err_out;

	ecc_stngs[nid] = s;

	if (!ecc_enabled(F3, nid)) {
		ret = -ENODEV;

		if (!ecc_enable_override)
			goto err_enable;

		amd64_warn("Forcing ECC on!\n");

		if (!enable_ecc_error_reporting(s, nid, F3))
			goto err_enable;
	}

	ret = amd64_init_one_instance(pdev);
	if (ret < 0) {
		amd64_err("Error probing instance: %d\n", nid);
		restore_ecc_error_reporting(s, nid, F3);
	}

	return ret;

err_enable:
	kfree(s);
	ecc_stngs[nid] = NULL;
err_out:
	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u8 nid = get_node_id(pdev);
	struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
	struct ecc_settings *s = ecc_stngs[nid];

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	restore_ecc_error_reporting(s, nid, F3);

	free_mc_sibling_devs(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	kfree(ecc_stngs[nid]);
	ecc_stngs[nid] = NULL;

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[nid] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver handles a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
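/*
 * MODULE_DEVICE_TABLE() exports the IDs above as module aliases, so udev can
 * autoload this driver when one of the listed memory controller functions is
 * discovered; "modinfo" on the built module shows the resulting aliases.
 */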
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_probe_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (!mci)
		return;

	pvt = mci->pvt_info;
	amd64_ctl_pci =
		edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

	if (!amd64_ctl_pci) {
		pr_warning("%s(): Unable to create PCI control\n",
			   __func__);

		pr_warning("%s(): PCI error report via EDAC not set\n",
			   __func__);
	}
}
static int __init amd64_edac_init(void)
{
	int err = -ENODEV;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	mcis	  = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	ecc_stngs = kzalloc(amd_nb_num() * sizeof(ecc_stngs[0]), GFP_KERNEL);
	if (!(mcis && ecc_stngs))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	err = -ENODEV;
	if (!atomic_read(&drv_instances))
		goto err_no_instances;

	setup_pci_device();
	return 0;

err_no_instances:
	pci_unregister_driver(&amd64_pci_driver);
err_pci:
	msrs_free(msrs);
	msrs = NULL;
err_free:
	kfree(mcis);
	mcis = NULL;
	kfree(ecc_stngs);
	ecc_stngs = NULL;
err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	kfree(ecc_stngs);
	ecc_stngs = NULL;

	kfree(mcis);
	mcis = NULL;

	msrs_free(msrs);
	msrs = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
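/*
 * Example invocation (parameter names as defined in this file; the module
 * name "amd64_edac_mod" is the one used when built as a module):
 *
 *	modprobe amd64_edac_mod report_gart_errors=1 edac_op_state=0
 *
 * enables decoding of GART TLB errors and selects polled error reporting.
 */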