#include "amd64_edac.h"
#include <asm/amd_nb.h>

static struct edac_pci_ctl_info *amd64_ctl_pci;

static int report_gart_errors;
module_param(report_gart_errors, int, 0644);

 * Set by command line parameter. If BIOS has enabled the ECC, this override is
 * cleared to prevent this driver from re-enabling the hardware.
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);

static struct msr __percpu *msrs;

/* Per-node driver instances */
static struct mem_ctl_info **mcis;
static struct amd64_pvt **pvts;

 * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
static int ddr2_dbam_revCG[] = {
static int ddr2_dbam_revD[] = {
static int ddr2_dbam[] = { [0] = 128,
static int ddr3_dbam[] = { [0] = -1,

 * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
 * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
 * FIXME: Produce a better mapping/linearisation.
struct scrubrate scrubrates[] = {
	{ 0x01, 1600000000UL},
	{ 0x00, 0UL},		/* scrubbing off */
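/*
 * Editor's note (illustrative, derived from the table above and from the
 * F3x58 reference further down): the first column is the 5-bit scrubval
 * encoding written to the low bits of the scrub rate control register, the
 * second is the approximate scrub bandwidth in bytes/sec it stands for,
 * e.g. scrubval 0x01 selects ~1.6 GB/s and scrubval 0x00 disables scrubbing.
 */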
 * Memory scrubber control interface. For K8, memory scrubbing is handled by
 * hardware and can involve L2 cache, dcache as well as the main memory. With
 * F10, this is extended to L3 cache scrubbing on CPU models sporting that
 * This causes the "units" for the scrubbing speed to vary from 64-byte blocks
 * (DRAM) over to cache lines. This is nasty, so we will use bandwidth in
 * bytes/sec for the setting.
 * Currently, we only do DRAM scrubbing. If the scrubbing is done in software on
 * other archs, we might not have access to the caches directly.

 * Scan the scrub rate mapping table for a close or matching bandwidth value to
 * issue. If the requested rate is too big, then use the last maximum value found.
static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate)
	 * map the configured rate (new_bw) to a value specific to the AMD64
	 * memory controller and apply to register. Search for the first
	 * bandwidth entry that is greater than or equal to the setting requested
	 * and program that. If at last entry, turn off DRAM scrubbing.
	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		 * skip scrub rates which aren't recommended
		 * (see F10 BKDG, F3x58)
		if (scrubrates[i].scrubval < min_rate)

		if (scrubrates[i].bandwidth <= new_bw)

	 * if no suitable bandwidth found, turn off DRAM scrubbing
	 * entirely by falling back to the last element in the
	scrubval = scrubrates[i].scrubval;

	amd64_info("Setting scrub rate bandwidth: %u\n",
		   scrubrates[i].bandwidth);

	amd64_info("Turning scrubbing off.\n");

	pci_write_bits32(ctl, K8_SCRCTRL, scrubval, 0x001F);
static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
	struct amd64_pvt *pvt = mci->pvt_info;

	return __amd64_set_scrub_rate(pvt->F3, bw, pvt->min_scrubrate);

static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
	struct amd64_pvt *pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->F3, K8_SCRCTRL, &scrubval);

	scrubval = scrubval & 0x001F;

	amd64_debug("pci-read, sdram scrub control value: %d\n", scrubval);

	for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
		if (scrubrates[i].scrubval == scrubval) {
			*bw = scrubrates[i].bandwidth;

/* Map from a CSROW entry to the mask entry that operates on it */
static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)

/* return the 'base' address of the i'th CS entry of the 'dct' DRAM controller */
static u32 amd64_get_dct_base(struct amd64_pvt *pvt, int dct, int csrow)
		return pvt->dcsb0[csrow];
		return pvt->dcsb1[csrow];

 * Return the 'mask' address of the i'th CS entry. This function is needed because
 * the number of DCSM registers on Rev E and prior vs Rev F and later is
static u32 amd64_get_dct_mask(struct amd64_pvt *pvt, int dct, int csrow)
		return pvt->dcsm0[amd64_map_to_dcs_mask(pvt, csrow)];
		return pvt->dcsm1[amd64_map_to_dcs_mask(pvt, csrow)];

 * In *base and *limit, pass back the full 40-bit base and limit physical
 * addresses for the node given by node_id. This information is obtained from
 * DRAM Base (section 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers. The
 * base and limit addresses are of type SysAddr, as defined at the start of
 * section 3.4.4 (p. 70). They are the lowest and highest physical addresses
 * in the address range they represent.
static void amd64_get_base_and_limit(struct amd64_pvt *pvt, int node_id,
				     u64 *base, u64 *limit)
	*base = pvt->dram_base[node_id];
	*limit = pvt->dram_limit[node_id];

 * Return 1 if the SysAddr given by sys_addr matches the base/limit associated
static int amd64_base_limit_match(struct amd64_pvt *pvt,
				  u64 sys_addr, int node_id)
	u64 base, limit, addr;

	amd64_get_base_and_limit(pvt, node_id, &base, &limit);

	/* The K8 treats this as a 40-bit value. However, bits 63-40 will be
	 * all ones if the most significant implemented address bit is 1.
	 * Here we discard bits 63-40. See section 3.4.2 of AMD publication
	 * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1
	 * Application Programming.
	addr = sys_addr & 0x000000ffffffffffull;

	return (addr >= base) && (addr <= limit);
 * Attempt to map a SysAddr to a node. On success, return a pointer to the
 * mem_ctl_info structure for the node that the SysAddr maps to.
 * On failure, return NULL.
static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
	struct amd64_pvt *pvt;

	 * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
	 * 3.4.4.2) registers to map the SysAddr to a node ID.

	 * The value of this field should be the same for all DRAM Base
	 * registers. Therefore we arbitrarily choose to read it from the
	 * register for node 0.
	intlv_en = pvt->dram_IntlvEn[0];

	for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) {
		if (amd64_base_limit_match(pvt, sys_addr, node_id))

	if (unlikely((intlv_en != 0x01) &&
		     (intlv_en != 0x03) &&
		     (intlv_en != 0x07))) {
		amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);

	bits = (((u32) sys_addr) >> 12) & intlv_en;

	for (node_id = 0; ; ) {
		if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits)
			break;	/* intlv_sel field matches */

		if (++node_id >= DRAM_REG_COUNT)

	/* sanity test for sys_addr */
	if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) {
		amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
			   "range for node %d with node interleaving enabled.\n",
			   __func__, sys_addr, node_id);

	return edac_mc_find(node_id);

	debugf2("sys_addr 0x%lx doesn't match any node\n",
		(unsigned long)sys_addr);
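/*
 * Worked example (editor's note, illustrative): with two-way node
 * interleaving (intlv_en == 0x01), only SysAddr bit 12 selects the node,
 * so a SysAddr of 0x1000 yields bits == 0x1 above and matches the node
 * whose DRAM IntlvSel field has bit 0 set.
 */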
 * Extract the DRAM CS base address from selected csrow register.
static u64 base_from_dct_base(struct amd64_pvt *pvt, int csrow)
	return ((u64) (amd64_get_dct_base(pvt, 0, csrow) & pvt->dcsb_base)) <<
 * Extract the mask from the dcsm0[csrow] entry in a CPU revision-specific way.
static u64 mask_from_dct_mask(struct amd64_pvt *pvt, int csrow)
	u64 dcsm_bits, other_bits;

	/* Extract bits from DRAM CS Mask. */
	dcsm_bits = amd64_get_dct_mask(pvt, 0, csrow) & pvt->dcsm_mask;

	other_bits = pvt->dcsm_mask;
	other_bits = ~(other_bits << pvt->dcs_shift);

	 * The extracted bits from DCSM belong in the spaces represented by
	 * the cleared bits in other_bits.
	mask = (dcsm_bits << pvt->dcs_shift) | other_bits;

 * @input_addr is an InputAddr associated with the node given by mci. Return the
 * csrow that input_addr maps to, or -1 on failure (no csrow claims input_addr).
static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
	struct amd64_pvt *pvt;

	 * Here we use the DRAM CS Base and DRAM CS Mask registers. For each CS
	 * base/mask register pair, test the condition shown near the start of
	 * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E).
	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		/* This DRAM chip select is disabled on this node */
		if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0)

		base = base_from_dct_base(pvt, csrow);
		mask = ~mask_from_dct_mask(pvt, csrow);

		if ((input_addr & mask) == (base & mask)) {
			debugf2("InputAddr 0x%lx matches csrow %d (node %d)\n",
				(unsigned long)input_addr, csrow,

	debugf2("no matching csrow for InputAddr 0x%lx (MC node %d)\n",
		(unsigned long)input_addr, pvt->mc_node_id);
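/*
 * Editor's note: the test above compares InputAddr and base only on the bit
 * positions left set in ~mask; the positions covered by the DCS mask are
 * "don't care" bits that address locations within the chip select row.
 */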
 * Return the base value defined by the DRAM Base register for the node
 * represented by mci. This function returns the full 40-bit value despite the
 * fact that the register only stores bits 39-24 of the value. See section
 * 3.4.4.1 (BKDG #26094, K8, revA-E)
static inline u64 get_dram_base(struct mem_ctl_info *mci)
	struct amd64_pvt *pvt = mci->pvt_info;

	return pvt->dram_base[pvt->mc_node_id];

 * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
 * for the node represented by mci. Info is passed back in *hole_base,
 * *hole_offset, and *hole_size. Function returns 0 if info is valid or 1 if
 * info is invalid. Info may be invalid for either of the following reasons:
 *
 * - The revision of the node is not E or greater. In this case, the DRAM Hole
 *   Address Register does not exist.
 *
 * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
 *   indicating that its contents are not valid.
 *
 * The values passed back in *hole_base, *hole_offset, and *hole_size are
 * complete 32-bit values despite the fact that the bitfields in the DHAR
 * only represent bits 31-24 of the base and offset values.
int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
			     u64 *hole_offset, u64 *hole_size)
	struct amd64_pvt *pvt = mci->pvt_info;

	/* only revE and later have the DRAM Hole Address Register */
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
		debugf1("  revision %d for node %d does not support DHAR\n",
			pvt->ext_model, pvt->mc_node_id);

	/* only valid for Fam10h */
	if (boot_cpu_data.x86 == 0x10 &&
	    (pvt->dhar & F10_DRAM_MEM_HOIST_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this system\n");

	if ((pvt->dhar & DHAR_VALID) == 0) {
		debugf1("  Dram Memory Hoisting is DISABLED on this node %d\n",

	/* This node has Memory Hoisting */

	/* +------------------+--------------------+--------------------+-----
	 * | memory           | DRAM hole          | relocated          |
	 * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
	 * |                  |                    | [0x100000000,      |
	 * |                  |                    |  (0x100000000 +    |
	 * |                  |                    |  (0xffffffff - x))]|
	 * +------------------+--------------------+--------------------+-----
	 *
	 * Above is a diagram of physical memory showing the DRAM hole and the
	 * relocated addresses from the DRAM hole. As shown, the DRAM hole
	 * starts at address x (the base address) and extends through address
	 * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
	 * addresses in the hole so that they start at 0x100000000.

	base = dhar_base(pvt->dhar);

	*hole_size = (0x1ull << 32) - base;

	if (boot_cpu_data.x86 > 0xf)
		*hole_offset = f10_dhar_offset(pvt->dhar);
		*hole_offset = k8_dhar_offset(pvt->dhar);

	debugf1("  DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
		pvt->mc_node_id, (unsigned long)*hole_base,
		(unsigned long)*hole_offset, (unsigned long)*hole_size);

EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
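/*
 * Worked example (editor's note, illustrative): with a DHAR base field
 * decoding to 0xC0000000, the function above reports hole_base = 0xC0000000
 * and hole_size = (1ULL << 32) - 0xC0000000 = 0x40000000, i.e. a 1 GB hole
 * spanning [0xC0000000, 0xffffffff].
 */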
 * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
 * assumed that sys_addr maps to the node given by mci.
 *
 * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
 * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate a
 * SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is enabled,
 * then it is also involved in translating a SysAddr to a DramAddr. Sections
 * 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for memory hoisting.
 * These parts of the documentation are unclear. I interpret them as follows:
 *
 * When node n receives a SysAddr, it processes the SysAddr as follows:
 *
 * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and DRAM
 *    Limit registers for node n. If the SysAddr is not within the range
 *    specified by the base and limit values, then node n ignores the SysAddr
 *    (since it does not map to node n). Otherwise continue to step 2 below.
 *
 * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
 *    disabled so skip to step 3 below. Otherwise see if the SysAddr is within
 *    the range of relocated addresses (starting at 0x100000000) from the DRAM
 *    hole. If not, skip to step 3 below. Else get the value of the
 *    DramHoleOffset field from the DHAR. To obtain the DramAddr, subtract the
 *    offset defined by this value from the SysAddr.
 *
 * 3. Obtain the base address for node n from the DRAMBase field of the DRAM
 *    Base register for node n. To obtain the DramAddr, subtract the base
 *    address from the SysAddr, as shown near the start of section 3.4.4 (p. 70).
static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
	u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;

	dram_base = get_dram_base(mci);

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,

	if ((sys_addr >= (1ull << 32)) &&
	    (sys_addr < ((1ull << 32) + hole_size))) {
		/* use DHAR to translate SysAddr to DramAddr */
		dram_addr = sys_addr - hole_offset;

		debugf2("using DHAR to translate SysAddr 0x%lx to "
			(unsigned long)sys_addr,
			(unsigned long)dram_addr);

	 * Translate the SysAddr to a DramAddr as shown near the start of
	 * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the k8
	 * only deals with 40-bit values. Therefore we discard bits 63-40 of
	 * sys_addr below. If bit 39 of sys_addr is 1 then the bits we
	 * discard are all 1s. Otherwise the bits we discard are all 0s. See
	 * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
	 * Programmer's Manual Volume 1 Application Programming.
	dram_addr = (sys_addr & 0xffffffffffull) - dram_base;

	debugf2("using DRAM Base register to translate SysAddr 0x%lx to "
		"DramAddr 0x%lx\n", (unsigned long)sys_addr,
		(unsigned long)dram_addr);
 * @intlv_en is the value of the IntlvEn field from a DRAM Base register
 * (section 3.4.4.1). Return the number of bits from a SysAddr that are used
 * for node interleaving.
static int num_node_interleave_bits(unsigned intlv_en)
	static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };

	BUG_ON(intlv_en > 7);
	n = intlv_shift_table[intlv_en];

/* Translate the DramAddr given by @dram_addr to an InputAddr. */
static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
	struct amd64_pvt *pvt;

	 * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
	 * concerning translating a DramAddr to an InputAddr.
	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);
	input_addr = ((dram_addr >> intlv_shift) & 0xffffff000ull) +

	debugf2("  Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
		intlv_shift, (unsigned long)dram_addr,
		(unsigned long)input_addr);

 * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
 * assumed that @sys_addr maps to the node given by mci.
static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
		dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));

	debugf2("SysAddr 0x%lx translates to InputAddr 0x%lx\n",
		(unsigned long)sys_addr, (unsigned long)input_addr);

 * @input_addr is an InputAddr associated with the node represented by mci.
 * Translate @input_addr to a DramAddr and return the result.
static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
	struct amd64_pvt *pvt;
	int node_id, intlv_shift;

	 * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E),
	 * the BKDG shows how to translate a DramAddr to an InputAddr. Here we
	 * reverse this procedure. When translating from a DramAddr to an
	 * InputAddr, the bits used for node interleaving are discarded. Here
	 * we recover these bits from the IntlvSel field of the DRAM Limit
	 * register (section 3.4.4.2) for the node that input_addr is
	 * associated with.

	node_id = pvt->mc_node_id;
	BUG_ON((node_id < 0) || (node_id > 7));

	intlv_shift = num_node_interleave_bits(pvt->dram_IntlvEn[0]);

	if (intlv_shift == 0) {
		debugf1("    InputAddr 0x%lx translates to DramAddr of "
			"same value\n", (unsigned long)input_addr);

	bits = ((input_addr & 0xffffff000ull) << intlv_shift) +
	       (input_addr & 0xfff);

	intlv_sel = pvt->dram_IntlvSel[node_id] & ((1 << intlv_shift) - 1);
	dram_addr = bits + (intlv_sel << 12);

	debugf1("InputAddr 0x%lx translates to DramAddr 0x%lx "
		"(%d node interleave bits)\n", (unsigned long)input_addr,
		(unsigned long)dram_addr, intlv_shift);
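/*
 * Worked example (editor's note, illustrative): with one interleave bit
 * (intlv_shift == 1) and an IntlvSel value whose low bit is set, an
 * InputAddr of 0x2000 gives bits == 0x4000 above, and the result is
 * dram_addr = 0x4000 + (0x1 << 12) = 0x5000.
 */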
 * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
 * @dram_addr to a SysAddr.
static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
	struct amd64_pvt *pvt = mci->pvt_info;
	u64 hole_base, hole_offset, hole_size, base, limit, sys_addr;

	ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,

	if ((dram_addr >= hole_base) &&
	    (dram_addr < (hole_base + hole_size))) {
		sys_addr = dram_addr + hole_offset;

		debugf1("using DHAR to translate DramAddr 0x%lx to "
			"SysAddr 0x%lx\n", (unsigned long)dram_addr,
			(unsigned long)sys_addr);

	amd64_get_base_and_limit(pvt, pvt->mc_node_id, &base, &limit);
	sys_addr = dram_addr + base;

	 * The sys_addr we have computed up to this point is a 40-bit value
	 * because the k8 deals with 40-bit values. However, the value we are
	 * supposed to return is a full 64-bit physical address. The AMD
	 * x86-64 architecture specifies that the most significant implemented
	 * address bit through bit 63 of a physical address must be either all
	 * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
	 * 64-bit value below. See section 3.4.2 of AMD publication 24592:
	 * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);

	debugf1("  Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
		pvt->mc_node_id, (unsigned long)dram_addr,
		(unsigned long)sys_addr);
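/*
 * Example (editor's note, illustrative): a 40-bit sys_addr of
 * 0x8000000000 has bit 39 set, so the sign extension above widens it to
 * 0xffffff8000000000; an address with bit 39 clear is returned unchanged.
 */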
 * @input_addr is an InputAddr associated with the node given by mci. Translate
 * @input_addr to a SysAddr.
static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
	return dram_addr_to_sys_addr(mci,
				     input_addr_to_dram_addr(mci, input_addr));

 * Find the minimum and maximum InputAddr values that map to the given @csrow.
 * Pass back these values in *input_addr_min and *input_addr_max.
static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
			      u64 *input_addr_min, u64 *input_addr_max)
	struct amd64_pvt *pvt;

	BUG_ON((csrow < 0) || (csrow >= pvt->cs_count));

	base = base_from_dct_base(pvt, csrow);
	mask = mask_from_dct_mask(pvt, csrow);

	*input_addr_min = base & ~mask;
	*input_addr_max = base | mask | pvt->dcs_mask_notused;

/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
						    u32 *page, u32 *offset)
	*page = (u32) (error_address >> PAGE_SHIFT);
	*offset = ((u32) error_address) & ~PAGE_MASK;

 * @sys_addr is an error address (a SysAddr) extracted from the MCA NB Address
 * Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6) registers
 * of a node that detected an ECC memory error. mci represents the node that
 * the error address maps to (possibly different from the node that detected
 * the error). Return the number of the csrow that sys_addr maps to, or -1 on
static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
	csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));

	amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
		     "address 0x%lx\n", (unsigned long)sys_addr);

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
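/*
 * Editor's note on the bit fiddling below: the 16-bit syndrome is assembled
 * from two MCA registers - bits [7:0] are taken from NBSH[22:15] and bits
 * [15:8] from NBSL[31:24] (the '>> 16 & 0xff00' keeps them in the high byte).
 */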
static u16 extract_syndrome(struct err_regs *err)
	return ((err->nbsh >> 15) & 0xff) | ((err->nbsl >> 16) & 0xff00);
 * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all the DIMMs
static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
	enum edac_type edac_cap = EDAC_FLAG_NONE;

	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)

	if (pvt->dclr0 & BIT(bit))
		edac_cap = EDAC_FLAG_SECDED;
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);

static void amd64_dump_dramcfg_low(u32 dclr, int chan)
	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);

	debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
		(dclr & BIT(16)) ? "un" : "",
		(dclr & BIT(19)) ? "yes" : "no");

	debugf1(" PAR/ERR parity: %s\n",
		(dclr & BIT(8)) ? "enabled" : "disabled");

	debugf1(" DCT 128bit mode width: %s\n",
		(dclr & BIT(11)) ? "128b" : "64b");

	debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
		(dclr & BIT(12)) ? "yes" : "no",
		(dclr & BIT(13)) ? "yes" : "no",
		(dclr & BIT(14)) ? "yes" : "no",
		(dclr & BIT(15)) ? "yes" : "no");

/* Display and decode various NB registers for debug purposes. */
static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);

	debugf1("  NB two channel DRAM capable: %s\n",
		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");

	debugf1("  ECC capable: %s, ChipKill ECC capable: %s\n",
		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");

	amd64_dump_dramcfg_low(pvt->dclr0, 0);

	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);

	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
		dhar_base(pvt->dhar),
		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
					   : f10_dhar_offset(pvt->dhar));

	debugf1("  DramHoleValid: %s\n",
		(pvt->dhar & DHAR_VALID) ? "yes" : "no");

	/* everything below this point is Fam10h and above */
	if (boot_cpu_data.x86 == 0xf) {
		amd64_debug_display_dimm_sizes(0, pvt);

	amd64_info("using %s syndromes.\n", ((pvt->syn_type == 8) ? "x8" : "x4"));

	/* Only if NOT ganged does dclr1 have valid info */
	if (!dct_ganging_enabled(pvt))
		amd64_dump_dramcfg_low(pvt->dclr1, 1);

	 * Determine if ganged and then dump memory sizes for first controller,
	 * and if NOT ganged dump info for 2nd controller.
	ganged = dct_ganging_enabled(pvt);

	amd64_debug_display_dimm_sizes(0, pvt);

	amd64_debug_display_dimm_sizes(1, pvt);

/* Read in both DBAM registers */
static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
	amd64_read_pci_cfg(pvt->F2, DBAM0, &pvt->dbam0);

	if (boot_cpu_data.x86 >= 0x10)
		amd64_read_pci_cfg(pvt->F2, DBAM1, &pvt->dbam1);
 * NOTE: CPU Revision Dependent code: Rev E and Rev F
 *
 * Set the DCSB and DCSM mask values depending on the CPU revision value. Also
 * set the shift factor for the DCSB and DCSM values.
 *
 * ->dcs_mask_notused, RevE:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of section
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [35:25] and [19:13]. The value REV_E_DCS_NOTUSED_BITS
 * represents bits [24:20] and [12:0], which are all bits in the above-mentioned
 *
 * ->dcs_mask_notused, RevF and later:
 *
 * To find the max InputAddr for the csrow, start with the base address and set
 * all bits that are "don't care" bits in the test at the start of NPT section
 *
 * The "don't care" bits are all set bits in the mask and all bits in the gaps
 * between bit ranges [36:27] and [21:13].
 *
 * The value REV_F_F1Xh_DCS_NOTUSED_BITS represents bits [26:22] and [12:0],
 * which are all bits in the above-mentioned gaps.
static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
		pvt->dcsb_base		= REV_E_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_E_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_E_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_E_DCS_SHIFT;

		pvt->dcsb_base		= REV_F_F1Xh_DCSB_BASE_BITS;
		pvt->dcsm_mask		= REV_F_F1Xh_DCSM_MASK_BITS;
		pvt->dcs_mask_notused	= REV_F_F1Xh_DCS_NOTUSED_BITS;
		pvt->dcs_shift		= REV_F_F1Xh_DCS_SHIFT;

 * Function 2 Offset F10_DCSB0; read in the DCS Base and DCS Mask hw registers
static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
	amd64_set_dct_base_and_mask(pvt);

	for (cs = 0; cs < pvt->cs_count; cs++) {
		reg = K8_DCSB0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsb0[cs]))
			debugf0("  DCSB0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsb0[cs], reg);

		/* If DCTs are NOT ganged, then read in DCT1's base */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSB1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->F2, reg,
				debugf0("  DCSB1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsb1[cs], reg);

	for (cs = 0; cs < pvt->num_dcsm; cs++) {
		reg = K8_DCSM0 + (cs * 4);
		if (!amd64_read_pci_cfg(pvt->F2, reg, &pvt->dcsm0[cs]))
			debugf0("  DCSM0[%d]=0x%08x reg: F2x%x\n",
				cs, pvt->dcsm0[cs], reg);

		/* If DCTs are NOT ganged, then read in DCT1's mask */
		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
			reg = F10_DCSM1 + (cs * 4);
			if (!amd64_read_pci_cfg(pvt->F2, reg,
				debugf0("  DCSM1[%d]=0x%08x reg: F2x%x\n",
					cs, pvt->dcsm1[cs], reg);
static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
		if (pvt->dchr0 & DDR3_MODE)
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;

	amd64_info("CS%d: %s\n", cs, edac_mem_types[type]);

 * Read the DRAM Configuration Low register. It differs between CG, D & E revs
 * and the later RevF memory controllers (DDR vs DDR2)
 *
 *	number of memory channels in operation
 *
 *	contents of the DCL0_LOW register
static int k8_early_channel_count(struct amd64_pvt *pvt)
	err = amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);

	if (pvt->ext_model >= K8_REV_F)
		/* RevF (NPT) and later */
		flag = pvt->dclr0 & F10_WIDTH_128;
		/* RevE and earlier */
		flag = pvt->dclr0 & REVE_WIDTH_128;

	return (flag) ? 2 : 1;

/* extract the ERROR ADDRESS for the K8 CPUs */
static u64 k8_get_error_address(struct mem_ctl_info *mci,
				struct err_regs *info)
	return (((u64) (info->nbeah & 0xff)) << 32) +
	       (info->nbeal & ~0x03);
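/*
 * Editor's note: the 40-bit K8 error address is built from NBEAH[7:0] as
 * address bits [39:32] plus NBEAL with its bottom two bits masked off, so
 * the address reported here has 4-byte granularity.
 */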
 * Read the Base and Limit registers for K8 based Memory controllers; extract
 * fields from the 'raw' reg into separate data fields
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN
static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
	u32 off = dram << 3;	/* 8 bytes between DRAM entries */

	amd64_read_pci_cfg(pvt->F1, K8_DRAM_BASE_LOW + off, &low);

	/* Extract parts into separate data entries */
	pvt->dram_base[dram]	= ((u64) low & 0xFFFF0000) << 8;
	pvt->dram_IntlvEn[dram]	= (low >> 8) & 0x7;
	pvt->dram_rw_en[dram]	= (low & 0x3);

	amd64_read_pci_cfg(pvt->F1, K8_DRAM_LIMIT_LOW + off, &low);

	 * Extract parts into separate data entries. Limit is the HIGHEST memory
	 * location of the region, so lower 24 bits need to be all ones
	pvt->dram_limit[dram]	= (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF;
	pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7;
	pvt->dram_DstNode[dram]	= (low & 0x7);
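/*
 * Worked example (editor's note, illustrative): a DRAM Base low register of
 * 0x00400003 decodes to base = 0x40000000 (bits [31:16] shifted up to
 * address bits [39:24]), IntlvEn = 0 and rw_en = 0x3 (reads and writes
 * both enabled).
 */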
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				    struct err_regs *err_info, u64 sys_addr)
	struct mem_ctl_info *src_mci;

	syndrome = extract_syndrome(err_info);

	/* CHIPKILL enabled */
	if (err_info->nbcfg & K8_NBCFG_CHIPKILL) {
		channel = get_channel_from_ecc_syndrome(mci, syndrome);

			 * Syndrome didn't map, so we don't know which of the
			 * 2 DIMMs is in error. So we need to ID 'both' of them
			amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
				      "error reporting race\n", syndrome);
			edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);

		 * non-chipkill ecc mode
		 *
		 * The k8 documentation is unclear about how to determine the
		 * channel number when using non-chipkill memory. This method
		 * was obtained from email communication with someone at AMD.
		 * (Wish the email was placed in this comment - norsk)
		channel = ((sys_addr & BIT(3)) != 0);

	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
		amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);

	/* Now map the sys_addr to a CSROW */
	csrow = sys_addr_to_csrow(src_mci, sys_addr);
		edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
			  channel, EDAC_MOD_STR);

static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
	if (pvt->ext_model >= K8_REV_F)
		dbam_map = ddr2_dbam;
	else if (pvt->ext_model >= K8_REV_D)
		dbam_map = ddr2_dbam_revD;
		dbam_map = ddr2_dbam_revCG;

	return dbam_map[cs_mode];
 * Get the number of DCT channels in use.
 *
 *	number of Memory Channels in operation
 *
 *	contents of the DCL0_LOW register
static int f10_early_channel_count(struct amd64_pvt *pvt)
	int dbams[] = { DBAM0, DBAM1 };
	int i, j, channels = 0;

	/* If we are in 128 bit mode, then we are using 2 channels */
	if (pvt->dclr0 & F10_WIDTH_128) {

	 * Need to check if in unganged mode: in that case, there are 2 channels,
	 * but they are not in 128 bit mode and thus the above 'dclr0' status
	 *
	 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
	 * their CSEnable bit on. If so, then SINGLE DIMM case.
	debugf0("Data width is not 128 bits - need more decoding\n");

	 * Check DRAM Bank Address Mapping values for each DIMM to see if there
	 * is more than just one DIMM present in unganged mode. Need to check
	 * both controllers since DIMMs can be placed in either one.
	for (i = 0; i < ARRAY_SIZE(dbams); i++) {
		if (amd64_read_pci_cfg(pvt->F2, dbams[i], &dbam))

		for (j = 0; j < 4; j++) {
			if (DBAM_DIMM(j, dbam) > 0) {

	amd64_info("MCT channel count: %d\n", channels);

static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
	if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
		dbam_map = ddr3_dbam;
		dbam_map = ddr2_dbam;

	return dbam_map[cs_mode];

static u64 f10_get_error_address(struct mem_ctl_info *mci,
				 struct err_regs *info)
	return (((u64) (info->nbeah & 0xffff)) << 32) +
	       (info->nbeal & ~0x01);

 * Read the Base and Limit registers for F10 based Memory controllers. Extract
 * fields from the 'raw' reg into separate data fields.
 *
 * Isolates: BASE, LIMIT, IntlvEn, IntlvSel, RW_EN.
static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
	u32 high_offset, low_offset, high_base, low_base, high_limit, low_limit;

	low_offset = K8_DRAM_BASE_LOW + (dram << 3);
	high_offset = F10_DRAM_BASE_HIGH + (dram << 3);

	/* read the 'raw' DRAM BASE Address register */
	amd64_read_pci_cfg(pvt->F1, low_offset, &low_base);
	amd64_read_pci_cfg(pvt->F1, high_offset, &high_base);

	/* Extract parts into separate data entries */
	pvt->dram_rw_en[dram] = (low_base & 0x3);

	if (pvt->dram_rw_en[dram] == 0)

	pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7;

	pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) |
			       (((u64)low_base & 0xFFFF0000) << 8);

	low_offset = K8_DRAM_LIMIT_LOW + (dram << 3);
	high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);

	/* read the 'raw' LIMIT registers */
	amd64_read_pci_cfg(pvt->F1, low_offset, &low_limit);
	amd64_read_pci_cfg(pvt->F1, high_offset, &high_limit);

	pvt->dram_DstNode[dram] = (low_limit & 0x7);
	pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;

	 * Extract address values and form a LIMIT address. Limit is the HIGHEST
	 * memory location of the region, so low 24 bits need to be all ones.
	pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) |
				(((u64) low_limit & 0xFFFF0000) << 8) |
static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
	if (!amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_LOW,
				&pvt->dram_ctl_select_low)) {
		debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
			"High range addresses at: 0x%x\n",
			pvt->dram_ctl_select_low,
			dct_sel_baseaddr(pvt));

		debugf0("  DCT mode: %s, All DCTs on: %s\n",
			(dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
			(dct_dram_enabled(pvt) ? "yes" : "no"));

		if (!dct_ganging_enabled(pvt))
			debugf0("  Address range split per DCT: %s\n",
				(dct_high_range_enabled(pvt) ? "yes" : "no"));

		debugf0("  DCT data interleave for ECC: %s, "
			"DRAM cleared since last warm reset: %s\n",
			(dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
			(dct_memory_cleared(pvt) ? "yes" : "no"));

		debugf0("  DCT channel interleave: %s, "
			"DCT interleave bits selector: 0x%x\n",
			(dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
			dct_sel_interleave_addr(pvt));

	amd64_read_pci_cfg(pvt->F2, F10_DCTL_SEL_HIGH,
			   &pvt->dram_ctl_select_high);

 * Determine channel based on the interleaving mode: F10h BKDG, 2.8.9 Memory
 * Interleaving Modes.
static u32 f10_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
				 int hi_range_sel, u32 intlv_en)
	u32 cs, temp, dct_sel_high = (pvt->dram_ctl_select_low >> 1) & 1;

	if (dct_ganging_enabled(pvt))
	else if (hi_range_sel)
	else if (dct_interleave_enabled(pvt)) {
		 * see F2x110[DctSelIntLvAddr] - channel interleave mode
		if (dct_sel_interleave_addr(pvt) == 0)
			cs = sys_addr >> 6 & 1;
		else if ((dct_sel_interleave_addr(pvt) >> 1) & 1) {
			temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) % 2;

			if (dct_sel_interleave_addr(pvt) & 1)
				cs = (sys_addr >> 9 & 1) ^ temp;
				cs = (sys_addr >> 6 & 1) ^ temp;
		} else if (intlv_en & 4)
			cs = sys_addr >> 15 & 1;
		else if (intlv_en & 2)
			cs = sys_addr >> 14 & 1;
		else if (intlv_en & 1)
			cs = sys_addr >> 13 & 1;
			cs = sys_addr >> 12 & 1;
	} else if (dct_high_range_enabled(pvt) && !dct_ganging_enabled(pvt))
		cs = ~dct_sel_high & 1;
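/*
 * Example (editor's note, illustrative): in channel-interleaved mode with
 * DctSelIntLvAddr == 0, the code above picks the channel from SysAddr
 * bit 6, i.e. consecutive 64-byte cache lines alternate between DCT0
 * and DCT1.
 */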
static inline u32 f10_map_intlv_en_to_shift(u32 intlv_en)
	else if (intlv_en == 3)
	else if (intlv_en == 7)

/* See F10h BKDG, 2.8.10.2 DctSelBaseOffset Programming */
static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
					   u32 dct_sel_base_addr,
					   u64 dct_sel_base_off,
					   u32 hole_valid, u32 hole_off,
		if (!(dct_sel_base_addr & 0xFFFF0000) &&
		    hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
			chan_off = dct_sel_base_off;

		if (hole_valid && (sys_addr >= 0x100000000ULL))
			chan_off = hole_off << 16;
			chan_off = dram_base & 0xFFFFF8000000ULL;

	return (sys_addr & 0x0000FFFFFFFFFFC0ULL) -
	       (chan_off & 0x0000FFFFFF800000ULL);

/* Hack for the time being - Can we get this from BIOS?? */
#define CH0SPARE_RANK	0
#define CH1SPARE_RANK	1

 * Checks if the csrow passed in is marked as SPARED; if so, returns the new
static inline int f10_process_possible_spare(int csrow,
					     u32 cs, struct amd64_pvt *pvt)
	/* Depending on channel, isolate respective SPARING info */
		swap_done = F10_ONLINE_SPARE_SWAPDONE1(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS1(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH1SPARE_RANK;
		swap_done = F10_ONLINE_SPARE_SWAPDONE0(pvt->online_spare);
		bad_dram_cs = F10_ONLINE_SPARE_BADDRAM_CS0(pvt->online_spare);
		if (swap_done && (csrow == bad_dram_cs))
			csrow = CH0SPARE_RANK;

 * Iterate over the DRAM DCT "base" and "mask" registers looking for a
 * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'
 *
 *	-EINVAL: NOT FOUND
 *	0..csrow = Chip-Select Row
static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs)
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;
	u32 cs_base, cs_mask;
	int cs_found = -EINVAL;

	pvt = mci->pvt_info;

	debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs);

	for (csrow = 0; csrow < pvt->cs_count; csrow++) {

		cs_base = amd64_get_dct_base(pvt, cs, csrow);
		if (!(cs_base & K8_DCSB_CS_ENABLE))

		 * We have an ENABLED CSROW; isolate just the MASK bits of the
		 * target: [28:19] and [13:5], which map to [36:27] and [21:13]
		 * of the actual address.
		cs_base &= REV_F_F1Xh_DCSB_BASE_BITS;

		 * Get the DCT Mask, and ENABLE the reserved bits: [18:16] and
		 * [4:0] to become ON. Then mask off bits [28:0] ([36:8])
		cs_mask = amd64_get_dct_mask(pvt, cs, csrow);

		debugf1("    CSROW=%d CSBase=0x%x RAW CSMask=0x%x\n",
			csrow, cs_base, cs_mask);

		cs_mask = (cs_mask | 0x0007C01F) & 0x1FFFFFFF;

		debugf1("    Final CSMask=0x%x\n", cs_mask);
		debugf1("    (InputAddr & ~CSMask)=0x%x "
			"(CSBase & ~CSMask)=0x%x\n",
			(in_addr & ~cs_mask), (cs_base & ~cs_mask));

		if ((in_addr & ~cs_mask) == (cs_base & ~cs_mask)) {
			cs_found = f10_process_possible_spare(csrow, cs, pvt);

			debugf1(" MATCH csrow=%d\n", cs_found);
/* For a given @dram_range, check if @sys_addr falls within it. */
static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range,
				  u64 sys_addr, int *nid, int *chan_sel)
	int node_id, cs_found = -EINVAL, high_range = 0;
	u32 intlv_en, intlv_sel, intlv_shift, hole_off;
	u32 hole_valid, tmp, dct_sel_base, channel;
	u64 dram_base, chan_addr, dct_sel_base_off;

	dram_base = pvt->dram_base[dram_range];
	intlv_en = pvt->dram_IntlvEn[dram_range];

	node_id = pvt->dram_DstNode[dram_range];
	intlv_sel = pvt->dram_IntlvSel[dram_range];

	debugf1("(dram=%d) Base=0x%llx SystemAddr= 0x%llx Limit=0x%llx\n",
		dram_range, dram_base, sys_addr, pvt->dram_limit[dram_range]);

	 * This assumes that one node's DHAR is the same as all the other
	hole_off = (pvt->dhar & 0x0000FF80);
	hole_valid = (pvt->dhar & 0x1);
	dct_sel_base_off = (pvt->dram_ctl_select_high & 0xFFFFFC00) << 16;

	debugf1("   HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n",
		hole_off, hole_valid, intlv_sel);

	    (intlv_sel != ((sys_addr >> 12) & intlv_en)))

	dct_sel_base = dct_sel_baseaddr(pvt);

	 * check whether addresses >= DctSelBaseAddr[47:27] are to be used to
	 * select between DCT0 and DCT1.
	if (dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt) &&
	    ((sys_addr >> 27) >= (dct_sel_base >> 11)))

	channel = f10_determine_channel(pvt, sys_addr, high_range, intlv_en);

	chan_addr = f10_get_base_addr_offset(sys_addr, high_range, dct_sel_base,
					     dct_sel_base_off, hole_valid,
					     hole_off, dram_base);

	intlv_shift = f10_map_intlv_en_to_shift(intlv_en);

	/* remove Node ID (in case of memory interleaving) */
	tmp = chan_addr & 0xFC0;

	chan_addr = ((chan_addr >> intlv_shift) & 0xFFFFFFFFF000ULL) | tmp;

	/* remove channel interleave and hash */
	if (dct_interleave_enabled(pvt) &&
	    !dct_high_range_enabled(pvt) &&
	    !dct_ganging_enabled(pvt)) {
		if (dct_sel_interleave_addr(pvt) != 1)
			chan_addr = (chan_addr >> 1) & 0xFFFFFFFFFFFFFFC0ULL;
			tmp = chan_addr & 0xFC0;
			chan_addr = ((chan_addr & 0xFFFFFFFFFFFFC000ULL) >> 1)

	debugf1("   (ChannelAddrLong=0x%llx) >> 8 becomes InputAddr=0x%x\n",
		chan_addr, (u32)(chan_addr >> 8));

	cs_found = f10_lookup_addr_in_dct(chan_addr >> 8, node_id, channel);

	if (cs_found >= 0) {
		*chan_sel = channel;

static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
				       int *node, int *chan_sel)
	int dram_range, cs_found = -EINVAL;
	u64 dram_base, dram_limit;

	for (dram_range = 0; dram_range < DRAM_REG_COUNT; dram_range++) {

		if (!pvt->dram_rw_en[dram_range])

		dram_base = pvt->dram_base[dram_range];
		dram_limit = pvt->dram_limit[dram_range];

		if ((dram_base <= sys_addr) && (sys_addr <= dram_limit)) {

			cs_found = f10_match_to_this_node(pvt, dram_range,
 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
 *
 * The @sys_addr is usually an error address received from the hardware
static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
				     struct err_regs *err_info,
	struct amd64_pvt *pvt = mci->pvt_info;

	int nid, csrow, chan = 0;

	csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);

		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);

	error_address_to_page_and_offset(sys_addr, &page, &offset);

	syndrome = extract_syndrome(err_info);

	 * We need the syndromes for channel detection only when we're
	 * ganged. Otherwise @chan should already contain the channel at
	if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
		chan = get_channel_from_ecc_syndrome(mci, syndrome);

		edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,

	 * Channel unknown, report all channels on this CSROW as failed.
	for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
		edac_mc_handle_ce(mci, page, offset, syndrome,
				  csrow, chan, EDAC_MOD_STR);

 * debug routine to display the memory sizes of all logical DIMMs and their
static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
	int dimm, size0, size1, factor = 0;

	if (boot_cpu_data.x86 == 0xf) {
		if (pvt->dclr0 & F10_WIDTH_128)

		/* K8 families < revF not supported yet */
		if (pvt->ext_model < K8_REV_F)

	debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
		ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);

	dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
	dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;

	edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);

	/* Dump memory sizes for DIMM and its CSROWs */
	for (dimm = 0; dimm < 4; dimm++) {

		if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
			size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
			size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));

		amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
			   dimm * 2, size0 << factor,
			   dimm * 2 + 1, size1 << factor);
static struct amd64_family_type amd64_family_types[] = {
		.f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
		.f3_id = PCI_DEVICE_ID_AMD_K8_NB_MISC,
			.early_channel_count	= k8_early_channel_count,
			.get_error_address	= k8_get_error_address,
			.read_dram_base_limit	= k8_read_dram_base_limit,
			.map_sysaddr_to_csrow	= k8_map_sysaddr_to_csrow,
			.dbam_to_cs		= k8_dbam_to_chip_select,

		.f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
		.f3_id = PCI_DEVICE_ID_AMD_10H_NB_MISC,
			.early_channel_count	= f10_early_channel_count,
			.get_error_address	= f10_get_error_address,
			.read_dram_base_limit	= f10_read_dram_base_limit,
			.read_dram_ctl_register	= f10_read_dram_ctl_register,
			.map_sysaddr_to_csrow	= f10_map_sysaddr_to_csrow,
			.dbam_to_cs		= f10_dbam_to_chip_select,

static struct pci_dev *pci_get_related_function(unsigned int vendor,
						unsigned int device,
						struct pci_dev *related)
	struct pci_dev *dev = NULL;

	dev = pci_get_device(vendor, device, dev);
		if ((dev->bus->number == related->bus->number) &&
		    (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
		dev = pci_get_device(vendor, device, dev);
 * These are tables of eigenvectors (one per line) which can be used for the
 * construction of the syndrome tables. The modified syndrome search algorithm
 * uses those to find the symbol in error and thus the DIMM.
 *
 * Algorithm courtesy of Ross LaFetra from AMD.
static u16 x4_vectors[] = {
	0x2f57, 0x1afe, 0x66cc, 0xdd88,
	0x11eb, 0x3396, 0x7f4c, 0xeac8,
	0x0001, 0x0002, 0x0004, 0x0008,
	0x1013, 0x3032, 0x4044, 0x8088,
	0x106b, 0x30d6, 0x70fc, 0xe0a8,
	0x4857, 0xc4fe, 0x13cc, 0x3288,
	0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
	0x1f39, 0x251e, 0xbd6c, 0x6bd8,
	0x15c1, 0x2a42, 0x89ac, 0x4758,
	0x2b03, 0x1602, 0x4f0c, 0xca08,
	0x1f07, 0x3a0e, 0x6b04, 0xbd08,
	0x8ba7, 0x465e, 0x244c, 0x1cc8,
	0x2b87, 0x164e, 0x642c, 0xdc18,
	0x40b9, 0x80de, 0x1094, 0x20e8,
	0x27db, 0x1eb6, 0x9dac, 0x7b58,
	0x11c1, 0x2242, 0x84ac, 0x4c58,
	0x1be5, 0x2d7a, 0x5e34, 0xa718,
	0x4b39, 0x8d1e, 0x14b4, 0x28d8,
	0x4c97, 0xc87e, 0x11fc, 0x33a8,
	0x8e97, 0x497e, 0x2ffc, 0x1aa8,
	0x16b3, 0x3d62, 0x4f34, 0x8518,
	0x1e2f, 0x391a, 0x5cac, 0xf858,
	0x1d9f, 0x3b7a, 0x572c, 0xfe18,
	0x15f5, 0x2a5a, 0x5264, 0xa3b8,
	0x1dbb, 0x3b66, 0x715c, 0xe3f8,
	0x4397, 0xc27e, 0x17fc, 0x3ea8,
	0x1617, 0x3d3e, 0x6464, 0xb8b8,
	0x23ff, 0x12aa, 0xab6c, 0x56d8,
	0x2dfb, 0x1ba6, 0x913c, 0x7328,
	0x185d, 0x2ca6, 0x7914, 0x9e28,
	0x171b, 0x3e36, 0x7d7c, 0xebe8,
	0x4199, 0x82ee, 0x19f4, 0x2e58,
	0x4807, 0xc40e, 0x130c, 0x3208,
	0x1905, 0x2e0a, 0x5804, 0xac08,
	0x213f, 0x132a, 0xadfc, 0x5ba8,
	0x19a9, 0x2efe, 0xb5cc, 0x6f88,

static u16 x8_vectors[] = {
	0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
	0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
	0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
	0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
	0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
	0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
	0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
	0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
	0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
	0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
	0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
	0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
	0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
	0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
	0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
	0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
	0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
	0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
	0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
	unsigned int i, err_sym;

	for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
		int v_idx = err_sym * v_dim;
		int v_end = (err_sym + 1) * v_dim;

		/* walk over all 16 bits of the syndrome */
		for (i = 1; i < (1U << 16); i <<= 1) {

			/* if bit is set in that eigenvector... */
			if (v_idx < v_end && vectors[v_idx] & i) {
				u16 ev_comp = vectors[v_idx++];

				/* ... and bit set in the modified syndrome, */

		/* can't get to zero, move to next symbol */

	debugf0("syndrome(%x) not found\n", syndrome);

static int map_err_sym_to_channel(int err_sym, int sym_size)
		return err_sym >> 4;

		/* imaginary bits not in a DIMM */
		WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",

		return err_sym >> 3;

static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
	struct amd64_pvt *pvt = mci->pvt_info;

	if (pvt->syn_type == 8)
		err_sym = decode_syndrome(syndrome, x8_vectors,
					  ARRAY_SIZE(x8_vectors),
	else if (pvt->syn_type == 4)
		err_sym = decode_syndrome(syndrome, x4_vectors,
					  ARRAY_SIZE(x4_vectors),
		amd64_warn("Illegal syndrome type: %u\n", pvt->syn_type);

	return map_err_sym_to_channel(err_sym, pvt->syn_type);

 * Handle any Correctable Errors (CEs) that have occurred. Check for valid ERROR
 * ADDRESS and process.
static void amd64_handle_ce(struct mem_ctl_info *mci,
			    struct err_regs *info)
	struct amd64_pvt *pvt = mci->pvt_info;

	/* Ensure that the Error Address is VALID */
	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);

	sys_addr = pvt->ops->get_error_address(mci, info);

	amd64_mc_err(mci, "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);

	pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
/* Handle any Uncorrectable Errors (UEs) */
static void amd64_handle_ue(struct mem_ctl_info *mci,
			    struct err_regs *info)
	struct amd64_pvt *pvt = mci->pvt_info;
	struct mem_ctl_info *log_mci, *src_mci = NULL;

	if (!(info->nbsh & K8_NBSH_VALID_ERROR_ADDR)) {
		amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);

	sys_addr = pvt->ops->get_error_address(mci, info);

	 * Find out which node the error address belongs to. This may be
	 * different from the node that detected the error.
	src_mci = find_mc_by_sys_addr(mci, sys_addr);
		amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);

	csrow = sys_addr_to_csrow(log_mci, sys_addr);
		amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
			     (unsigned long)sys_addr);
		edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);

		error_address_to_page_and_offset(sys_addr, &page, &offset);
		edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1941 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
1945 static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
1946 struct err_regs *info)
1948 u32 ec = ERROR_CODE(info->nbsl);
1949 u32 xec = EXT_ERROR_CODE(info->nbsl);
1950 int ecc_type = (info->nbsh >> 13) & 0x3;
1952 /* Bail early out if this was an 'observed' error */
1953 if (PP(ec) == K8_NBSL_PP_OBS)
1956 /* Do only ECC errors */
1957 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
1961 amd64_handle_ce(mci, info);
1962 else if (ecc_type == 1)
1963 amd64_handle_ue(mci, info);
1966 void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg)
1968 struct mem_ctl_info *mci = mcis[node_id];
1969 struct err_regs regs;
1971 regs.nbsl = (u32) m->status;
1972 regs.nbsh = (u32)(m->status >> 32);
1973 regs.nbeal = (u32) m->addr;
1974 regs.nbeah = (u32)(m->addr >> 32);
1977 __amd64_decode_bus_error(mci, ®s);
1980 * Check the UE bit of the NB status high register, if set generate some
1981 * logs. If NOT a GART error, then process the event as a NO-INFO event.
1982 * If it was a GART error, skip that process.
1984 * FIXME: this should go somewhere else, if at all.
1986 if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors)
1987 edac_mc_handle_ue_no_info(mci, "UE bit is set");
 * Use pvt->F2 which contains the F2 CPU PCI device to get the related
 * F1 (AddrMap) and F3 (Misc) devices. Return negative value on error.
static int amd64_reserve_mc_sibling_devices(struct amd64_pvt *pvt, u16 f1_id,
	/* Reserve the ADDRESS MAP Device */
	pvt->F1 = pci_get_related_function(pvt->F2->vendor, f1_id, pvt->F2);
		amd64_err("error address map device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f1_id);

	/* Reserve the MISC Device */
	pvt->F3 = pci_get_related_function(pvt->F2->vendor, f3_id, pvt->F2);
		pci_dev_put(pvt->F1);

		amd64_err("error F3 device not found: "
			  "vendor %x device 0x%x (broken BIOS?)\n",
			  PCI_VENDOR_ID_AMD, f3_id);

	debugf1("F1: %s\n", pci_name(pvt->F1));
	debugf1("F2: %s\n", pci_name(pvt->F2));
	debugf1("F3: %s\n", pci_name(pvt->F3));

static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
	pci_dev_put(pvt->F1);
	pci_dev_put(pvt->F3);
 * Retrieve the hardware registers of the memory controller (this includes the
 * 'Address Map' and 'Misc' device regs)
static void amd64_read_mc_registers(struct amd64_pvt *pvt)
	 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
	 * those are Read-As-Zero
	rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
	debugf0("  TOP_MEM:  0x%016llx\n", pvt->top_mem);

	/* check first whether TOP_MEM2 is enabled */
	rdmsrl(MSR_K8_SYSCFG, msr_val);
	if (msr_val & (1U << 21)) {
		rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
		debugf0("  TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
		debugf0("  TOP_MEM2 disabled.\n");

	amd64_read_pci_cfg(pvt->F3, K8_NBCAP, &pvt->nbcap);

	if (pvt->ops->read_dram_ctl_register)
		pvt->ops->read_dram_ctl_register(pvt);

	for (dram = 0; dram < DRAM_REG_COUNT; dram++) {
		 * Call CPU specific READ function to get the DRAM Base and
		 * Limit values from the DCT.
		pvt->ops->read_dram_base_limit(pvt, dram);

		 * Only print out debug info on rows with both R and W Enabled.
		 * Normal processing, compiler should optimize this whole 'if'
		 * debug output block away.
		if (pvt->dram_rw_en[dram] != 0) {
			debugf1("  DRAM-BASE[%d]: 0x%016llx "
				"DRAM-LIMIT:  0x%016llx\n",
				pvt->dram_base[dram],
				pvt->dram_limit[dram]);

			debugf1("        IntlvEn=%s %s %s "
				"IntlvSel=%d DstNode=%d\n",
				pvt->dram_IntlvEn[dram] ?
					"Enabled" : "Disabled",
				(pvt->dram_rw_en[dram] & 0x2) ? "W" : "!W",
				(pvt->dram_rw_en[dram] & 0x1) ? "R" : "!R",
				pvt->dram_IntlvSel[dram],
				pvt->dram_DstNode[dram]);

	amd64_read_dct_base_mask(pvt);

	amd64_read_pci_cfg(pvt->F1, K8_DHAR, &pvt->dhar);
	amd64_read_dbam_reg(pvt);

	amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);

	amd64_read_pci_cfg(pvt->F2, F10_DCLR_0, &pvt->dclr0);
	amd64_read_pci_cfg(pvt->F2, F10_DCHR_0, &pvt->dchr0);

	if (boot_cpu_data.x86 >= 0x10) {
		if (!dct_ganging_enabled(pvt)) {
			amd64_read_pci_cfg(pvt->F2, F10_DCLR_1, &pvt->dclr1);
			amd64_read_pci_cfg(pvt->F2, F10_DCHR_1, &pvt->dchr1);
		amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);

	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model > 7 &&
	    /* F3x180[EccSymbolSize]=1 => x8 symbols */

	amd64_dump_misc_regs(pvt);
2122 * NOTE: CPU Revision Dependent code
2125 * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1)
2126 * k8 private pointer to -->
2127 * DRAM Bank Address mapping register
2129 * DCL register where dual_channel_active is
2131 * The DBAM register consists of 4 sets of 4 bits each definitions:
2134 * 0-3 CSROWs 0 and 1
2135 * 4-7 CSROWs 2 and 3
2136 * 8-11 CSROWs 4 and 5
2137 * 12-15 CSROWs 6 and 7
2139 * Values range from: 0 to 15
2140 * The meaning of the values depends on CPU revision and dual-channel state,
2141 * see relevant BKDG more info.
2143 * The memory controller provides for total of only 8 CSROWs in its current
2144 * architecture. Each "pair" of CSROWs normally represents just one DIMM in
2145 * single channel or two (2) DIMMs in dual channel mode.
2147 * The following code logic collapses the various tables for CSROW based on CPU
2151 * The number of PAGE_SIZE pages on the specified CSROW number it
static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
{
	u32 cs_mode, nr_pages;

	/*
	 * The math on this doesn't look right on the surface because x/2*4 can
	 * be simplified to x*2 but this expression makes use of the fact that
	 * it is integral math where 1/2=0. This intermediate value becomes the
	 * number of bits to shift the DBAM register to extract the proper
	 * CSROW field.
	 */
	cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;

	nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);

	/*
	 * If dual channel then double the memory size of single channel.
	 * Channel count is 1 or 2
	 */
	nr_pages <<= (pvt->channel_count - 1);

	debugf0("  (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
	debugf0("    nr_pages= %u  channel-count = %d\n",
		nr_pages, pvt->channel_count);

	return nr_pages;
}
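/*
 * Illustrative sketch, not part of the driver: the DBAM field extraction
 * above as a standalone helper (hypothetical name). Worked example: for
 * csrow_nr = 5, (5 / 2) * 4 = 8 in integer math, so bits 8-11 of DBAM are
 * selected -- the field covering CSROWs 4 and 5 in the table further up.
 */
static inline u32 __maybe_unused dbam_field_for_csrow(u32 dbam, int csrow_nr)
{
	int shift = (csrow_nr / 2) * 4;		/* 4 bits per CSROW pair */

	return (dbam >> shift) & 0xF;		/* field values: 0 to 15 */
}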
/*
 * Initialize the array of csrow attribute instances, based on the values
 * from pci config hardware registers.
 */
static int amd64_init_csrows(struct mem_ctl_info *mci)
{
	struct csrow_info *csrow;
	struct amd64_pvt *pvt;
	u64 input_addr_min, input_addr_max, sys_addr;
	int i, empty = 1;

	pvt = mci->pvt_info;

	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &pvt->nbcfg);

	debugf0("NBCFG= 0x%x  CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
		(pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled"
		);

	for (i = 0; i < pvt->cs_count; i++) {
		csrow = &mci->csrows[i];

		if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) {
			debugf1("----CSROW %d EMPTY for node %d\n", i,
				pvt->mc_node_id);
			continue;
		}

		debugf1("----CSROW %d VALID for MC node %d\n",
			i, pvt->mc_node_id);

		empty = 0;
		csrow->nr_pages = amd64_csrow_nr_pages(i, pvt);
		find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
		csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
		sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
		csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
		csrow->page_mask = ~mask_from_dct_mask(pvt, i);
		/* 8 bytes of resolution */
		csrow->grain = 8;

		csrow->mtype = amd64_determine_memory_type(pvt, i);

		debugf1("  for MC node %d csrow %d:\n", pvt->mc_node_id, i);
		debugf1("    input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
			(unsigned long)input_addr_min,
			(unsigned long)input_addr_max);
		debugf1("    sys_addr: 0x%lx  page_mask: 0x%lx\n",
			(unsigned long)sys_addr, csrow->page_mask);
		debugf1("    nr_pages: %u  first_page: 0x%lx "
			"last_page: 0x%lx\n",
			(unsigned)csrow->nr_pages,
			csrow->first_page, csrow->last_page);
		/*
		 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
		 */
		if (pvt->nbcfg & K8_NBCFG_ECC_ENABLE)
			csrow->edac_mode =
			    (pvt->nbcfg & K8_NBCFG_CHIPKILL) ?
			    EDAC_S4ECD4ED : EDAC_SECDED;
		else
			csrow->edac_mode = EDAC_NONE;
	}

	return empty;
}
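/*
 * Illustrative sketch, not used by the driver: the NBCFG -> EDAC mode
 * decision above as a pure helper (hypothetical name). ECC disabled yields
 * EDAC_NONE; ECC enabled yields S4ECD4ED (chipkill, x4 symbols) when the
 * ChipKill capability bit is also set, plain SECDED otherwise.
 */
static inline int __maybe_unused nbcfg_to_edac_mode(u32 nbcfg)
{
	if (!(nbcfg & K8_NBCFG_ECC_ENABLE))
		return EDAC_NONE;

	return (nbcfg & K8_NBCFG_CHIPKILL) ? EDAC_S4ECD4ED : EDAC_SECDED;
}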
/* get all cores on this DCT */
static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (amd_get_nb_id(cpu) == nid)
			cpumask_set_cpu(cpu, mask);
}
/* check MCG_CTL on all the cpus on this node */
static bool amd64_nb_mce_bank_enabled_on_node(int nid)
{
	cpumask_var_t mask;
	int cpu, nbe;
	bool ret = false;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		amd64_warn("%s: Error allocating mask\n", __func__);
		return false;
	}

	get_cpus_on_this_dct_cpumask(mask, nid);

	rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, mask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		nbe = reg->l & K8_MSR_MCGCTL_NBE;

		debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
			cpu, reg->q,
			(nbe ? "enabled" : "disabled"));

		if (!nbe)
			goto out;
	}
	ret = true;

out:
	free_cpumask_var(mask);
	return ret;
}
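/*
 * Illustrative sketch, not part of the driver: the same NBE check for a
 * single CPU using rdmsrl_on_cpu() instead of the batched rdmsr_on_cpus()
 * (hypothetical helper name; K8_MSR_MCGCTL_NBE is MCG_CTL[4], the NB MCE
 * bank enable).
 */
static bool __maybe_unused nb_mce_bank_enabled_on_cpu(int cpu)
{
	u64 val;

	if (rdmsrl_on_cpu(cpu, MSR_IA32_MCG_CTL, &val))
		return false;	/* MSR read failed */

	return !!(val & K8_MSR_MCGCTL_NBE);
}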
static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
{
	cpumask_var_t cmask;
	int cpu;

	if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
		amd64_warn("%s: error allocating mask\n", __func__);
		return -ENOMEM;
	}

	get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);

	rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	for_each_cpu(cpu, cmask) {

		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (on) {
			if (reg->l & K8_MSR_MCGCTL_NBE)
				pvt->flags.nb_mce_enable = 1;

			reg->l |= K8_MSR_MCGCTL_NBE;
		} else {
			/*
			 * Turn off NB MCE reporting only when it was off before
			 */
			if (!pvt->flags.nb_mce_enable)
				reg->l &= ~K8_MSR_MCGCTL_NBE;
		}
	}
	wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);

	free_cpumask_var(cmask);

	return 0;
}
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);

	/* turn on UECCEn and CECCEn bits */
	pvt->old_nbctl = value & mask;
	pvt->nbctl_mcgctl_saved = 1;

	value |= mask;
	pci_write_config_dword(pvt->F3, K8_NBCTL, value);

	if (amd64_toggle_ecc_err_reporting(pvt, ON))
		amd64_warn("Error enabling ECC reporting over MCGCTL!\n");

	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);

	debugf0("NBCFG(1)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	if (!(value & K8_NBCFG_ECC_ENABLE)) {
		amd64_warn("DRAM ECC disabled on this node, enabling...\n");

		pvt->flags.nb_ecc_prev = 0;

		/* Attempt to turn on DRAM ECC Enable */
		value |= K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->F3, K8_NBCFG, value);

		amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);

		if (!(value & K8_NBCFG_ECC_ENABLE))
			amd64_warn("Hardware rejected DRAM ECC enable, "
				   "check memory DIMM configuration.\n");
		else
			amd64_info("Hardware accepted DRAM ECC Enable\n");
	} else {
		pvt->flags.nb_ecc_prev = 1;
	}

	debugf0("NBCFG(2)= 0x%x  CHIPKILL= %s ECC_ENABLE= %s\n", value,
		(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
		(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");

	pvt->ctl_error_info.nbcfg = value;
}
static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
{
	u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;

	if (!pvt->nbctl_mcgctl_saved)
		return;

	amd64_read_pci_cfg(pvt->F3, K8_NBCTL, &value);
	value &= ~mask;
	value |= pvt->old_nbctl;

	pci_write_config_dword(pvt->F3, K8_NBCTL, value);

	/* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
	if (!pvt->flags.nb_ecc_prev) {
		amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);
		value &= ~K8_NBCFG_ECC_ENABLE;
		pci_write_config_dword(pvt->F3, K8_NBCFG, value);
	}

	/* restore the NB Enable MCGCTL bit */
	if (amd64_toggle_ecc_err_reporting(pvt, OFF))
		amd64_warn("Error restoring NB MCGCTL settings!\n");
}
/*
 * EDAC requires that the BIOS have ECC enabled before taking over the
 * processing of ECC errors. This is because the BIOS can properly initialize
 * the memory system completely. A command line option allows force-enabling
 * hardware ECC later in amd64_enable_ecc_error_reporting().
 */
static const char *ecc_msg =
	"ECC disabled in the BIOS or no ECC capability, module will not load.\n"
	" Either enable ECC checking or force module loading by setting "
	"'ecc_enable_override'.\n"
	" (Note that use of the override may cause unknown side effects.)\n";
static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
{
	u32 value;
	u8 ecc_enabled = 0;
	bool nb_mce_en = false;

	amd64_read_pci_cfg(pvt->F3, K8_NBCFG, &value);

	ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
	amd64_info("DRAM ECC %s.\n", (ecc_enabled ? "enabled" : "disabled"));

	nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
	if (!nb_mce_en)
		amd64_notice("NB MCE bank disabled, "
			     "set MSR 0x%08x[4] on node %d to enable.\n",
			     MSR_IA32_MCG_CTL, pvt->mc_node_id);

	if (!ecc_enabled || !nb_mce_en) {
		if (!ecc_enable_override) {
			amd64_notice("%s", ecc_msg);
			return -ENODEV;
		}

		amd64_warn("Forcing ECC on!\n");
	}

	return 0;
}
struct mcidev_sysfs_attribute sysfs_attrs[ARRAY_SIZE(amd64_dbg_attrs) +
					  ARRAY_SIZE(amd64_inj_attrs) +
					  1];

struct mcidev_sysfs_attribute terminator = { .attr = { .name = NULL } };
static void amd64_set_mc_sysfs_attributes(struct mem_ctl_info *mci)
{
	unsigned int i = 0, j = 0;

	for (; i < ARRAY_SIZE(amd64_dbg_attrs); i++)
		sysfs_attrs[i] = amd64_dbg_attrs[i];

	for (j = 0; j < ARRAY_SIZE(amd64_inj_attrs); j++, i++)
		sysfs_attrs[i] = amd64_inj_attrs[j];

	sysfs_attrs[i] = terminator;

	mci->mc_driver_sysfs_attributes = sysfs_attrs;
}
static void amd64_setup_mci_misc_attributes(struct mem_ctl_info *mci)
{
	struct amd64_pvt *pvt = mci->pvt_info;

	mci->mtype_cap		= MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
	mci->edac_ctl_cap	= EDAC_FLAG_NONE;

	if (pvt->nbcap & K8_NBCAP_SECDED)
		mci->edac_ctl_cap |= EDAC_FLAG_SECDED;

	if (pvt->nbcap & K8_NBCAP_CHIPKILL)
		mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;

	mci->edac_cap		= amd64_determine_edac_cap(pvt);
	mci->mod_name		= EDAC_MOD_STR;
	mci->mod_ver		= EDAC_AMD64_VERSION;
	mci->ctl_name		= pvt->ctl_name;
	mci->dev_name		= pci_name(pvt->F2);
	mci->ctl_page_to_phys	= NULL;

	/* memory scrubber interface */
	mci->set_sdram_scrub_rate = amd64_set_scrub_rate;
	mci->get_sdram_scrub_rate = amd64_get_scrub_rate;
}
/*
 * returns a pointer to the family descriptor on success, NULL otherwise.
 */
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
	u8 fam = boot_cpu_data.x86;
	struct amd64_family_type *fam_type = NULL;

	switch (fam) {
	case 0xf:
		fam_type		= &amd64_family_types[K8_CPUS];
		pvt->ops		= &amd64_family_types[K8_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= K8_MIN_SCRUB_RATE_BITS;
		break;
	case 0x10:
		fam_type		= &amd64_family_types[F10_CPUS];
		pvt->ops		= &amd64_family_types[F10_CPUS].ops;
		pvt->ctl_name		= fam_type->ctl_name;
		pvt->min_scrubrate	= F10_MIN_SCRUB_RATE_BITS;
		break;

	default:
		amd64_err("Unsupported family!\n");
		return NULL;
	}

	pvt->ext_model = boot_cpu_data.x86_model >> 4;

	amd64_info("%s %sdetected (node %d).\n", pvt->ctl_name,
		   (fam == 0xf ?
			(pvt->ext_model >= K8_REV_F ? "revF or later "
						    : "revE or earlier ")
			: ""), pvt->mc_node_id);

	return fam_type;
}
static int amd64_probe_one_instance(struct pci_dev *F2)
{
	struct amd64_pvt *pvt = NULL;
	struct amd64_family_type *fam_type = NULL;
	int err = -ENOMEM;

	pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
	if (!pvt)
		goto err_exit;

	pvt->mc_node_id = get_node_id(F2);
	pvt->F2 = F2;

	err = -EINVAL;
	fam_type = amd64_per_family_init(pvt);
	if (!fam_type)
		goto err_free;

	err = amd64_reserve_mc_sibling_devices(pvt, fam_type->f1_id,
					       fam_type->f3_id);
	if (err)
		goto err_free;

	err = amd64_check_ecc_enabled(pvt);
	if (err)
		goto err_put;

	/*
	 * Save the pointer to the private data for use in 2nd initialization
	 * stage
	 */
	pvts[pvt->mc_node_id] = pvt;

	return 0;

err_put:
	amd64_free_mc_sibling_devices(pvt);

err_free:
	kfree(pvt);

err_exit:
	return err;
}
/*
 * This is the finishing stage of the init code. Needs to be performed after all
 * MCs' hardware has been prepped for accessing extended config space.
 */
static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
{
	int node_id = pvt->mc_node_id;
	struct mem_ctl_info *mci;
	int ret = -ENODEV;

	amd64_read_mc_registers(pvt);

	/*
	 * We need to determine how many memory channels there are. Then use
	 * that information for calculating the size of the dynamic instance
	 * tables in the 'mci' structure
	 */
	pvt->channel_count = pvt->ops->early_channel_count(pvt);
	if (pvt->channel_count < 0)
		goto err_exit;

	ret = -ENOMEM;
	mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id);
	if (!mci)
		goto err_exit;

	mci->pvt_info = pvt;

	mci->dev = &pvt->F2->dev;
	amd64_setup_mci_misc_attributes(mci);

	if (amd64_init_csrows(mci))
		mci->edac_cap = EDAC_FLAG_NONE;

	amd64_enable_ecc_error_reporting(mci);
	amd64_set_mc_sysfs_attributes(mci);

	ret = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		debugf1("failed edac_mc_add_mc()\n");
		goto err_exit;
	}

	mcis[node_id] = mci;
	pvts[node_id] = NULL;

	/* register stuff with EDAC MCE */
	if (report_gart_errors)
		amd_report_gart_errors(true);

	amd_register_ecc_decoder(amd64_decode_bus_error);

	return 0;

err_exit:
	debugf0("failure to init 2nd stage: ret=%d\n", ret);

	amd64_restore_ecc_error_reporting(pvt);

	amd64_free_mc_sibling_devices(pvt);

	kfree(pvts[pvt->mc_node_id]);
	pvts[node_id] = NULL;

	return ret;
}
static int __devinit amd64_init_one_instance(struct pci_dev *pdev,
					     const struct pci_device_id *mc_type)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		debugf0("ret=%d\n", ret);
		return -EIO;
	}

	ret = amd64_probe_one_instance(pdev);
	if (ret < 0)
		amd64_err("Error probing instance: %d\n", get_node_id(pdev));

	return ret;
}
static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	/* Remove from EDAC CORE tracking list */
	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	pvt = mci->pvt_info;

	amd64_restore_ecc_error_reporting(pvt);

	amd64_free_mc_sibling_devices(pvt);

	/* unregister from EDAC MCE */
	amd_report_gart_errors(false);
	amd_unregister_ecc_decoder(amd64_decode_bus_error);

	/* Free the EDAC CORE resources */
	mci->pvt_info = NULL;
	mcis[pvt->mc_node_id] = NULL;

	kfree(pvt);
	edac_mc_free(mci);
}
/*
 * This table is part of the interface for loading drivers for PCI devices. The
 * PCI core identifies what devices are on a system during boot, and then
 * queries this table to see if this driver is for a given device found.
 */
static const struct pci_device_id amd64_pci_table[] __devinitdata = {
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{
		.vendor		= PCI_VENDOR_ID_AMD,
		.device		= PCI_DEVICE_ID_AMD_10H_NB_DRAM,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= 0,
		.class_mask	= 0,
	},
	{0, }
};
MODULE_DEVICE_TABLE(pci, amd64_pci_table);
static struct pci_driver amd64_pci_driver = {
	.name		= EDAC_MOD_STR,
	.probe		= amd64_init_one_instance,
	.remove		= __devexit_p(amd64_remove_one_instance),
	.id_table	= amd64_pci_table,
};
static void amd64_setup_pci_device(void)
{
	struct mem_ctl_info *mci;
	struct amd64_pvt *pvt;

	if (amd64_ctl_pci)
		return;

	mci = mcis[0];
	if (mci) {

		pvt = mci->pvt_info;
		amd64_ctl_pci =
			edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);

		if (!amd64_ctl_pci) {
			pr_warning("%s(): Unable to create PCI control\n",
				   __func__);

			pr_warning("%s(): PCI error report via EDAC not set\n",
				   __func__);
		}
	}
}
static int __init amd64_edac_init(void)
{
	int nb, err = -ENODEV;
	bool load_ok = false;

	edac_printk(KERN_INFO, EDAC_MOD_STR, EDAC_AMD64_VERSION "\n");

	opstate_init();

	if (amd_cache_northbridges() < 0)
		goto err_ret;

	err = -ENOMEM;
	pvts = kzalloc(amd_nb_num() * sizeof(pvts[0]), GFP_KERNEL);
	mcis = kzalloc(amd_nb_num() * sizeof(mcis[0]), GFP_KERNEL);
	if (!(pvts && mcis))
		goto err_free;

	msrs = msrs_alloc();
	if (!msrs)
		goto err_free;

	err = pci_register_driver(&amd64_pci_driver);
	if (err)
		goto err_pci;

	/*
	 * At this point, the array 'pvts[]' contains pointers to alloc'd
	 * amd64_pvt structs. These will be used in the 2nd stage init function
	 * to finish initialization of the MC instances.
	 */
	err = -ENODEV;
	for (nb = 0; nb < amd_nb_num(); nb++) {
		if (!pvts[nb])
			continue;

		err = amd64_init_2nd_stage(pvts[nb]);
		if (err)
			goto err_2nd_stage;

		load_ok = true;
	}

	if (load_ok) {
		amd64_setup_pci_device();
		return 0;
	}

err_2nd_stage:
	pci_unregister_driver(&amd64_pci_driver);

err_pci:
	msrs_free(msrs);
	msrs = NULL;

err_free:
	kfree(pvts);
	pvts = NULL;

	kfree(mcis);
	mcis = NULL;

err_ret:
	return err;
}
static void __exit amd64_edac_exit(void)
{
	if (amd64_ctl_pci)
		edac_pci_release_generic_ctl(amd64_ctl_pci);

	pci_unregister_driver(&amd64_pci_driver);

	msrs_free(msrs);
	msrs = NULL;

	kfree(pvts);
	pvts = NULL;

	kfree(mcis);
	mcis = NULL;
}
module_init(amd64_edac_init);
module_exit(amd64_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
	      "Dave Peterson, Thayne Harbaugh");
MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
		   EDAC_AMD64_VERSION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");