/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 *	http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>

#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>
/* Use the architecture-specific atomic scrub when the arch provides one. */
#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif
int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

static int edac_report = EDAC_REPORTING_ENABLED;

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);

/*
 * Used to lock EDAC MC to just one module, avoiding two drivers (e.g.
 * apei/ghes and i7core_edac) being used at the same time.
 */
static const char *edac_mc_owner;
static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
{
	return container_of(e, struct mem_ctl_info, error_desc);
}
int edac_get_report_status(void)
{
	return edac_report;
}
EXPORT_SYMBOL_GPL(edac_get_report_status);
void edac_set_report_status(int new)
{
	if (new == EDAC_REPORTING_ENABLED ||
	    new == EDAC_REPORTING_DISABLED ||
	    new == EDAC_REPORTING_FORCE)
		edac_report = new;
}
EXPORT_SYMBOL_GPL(edac_set_report_status);
static int edac_report_set(const char *str, const struct kernel_param *kp)
{
	if (!str)
		return -EINVAL;

	if (!strncmp(str, "on", 2))
		edac_report = EDAC_REPORTING_ENABLED;
	else if (!strncmp(str, "off", 3))
		edac_report = EDAC_REPORTING_DISABLED;
	else if (!strncmp(str, "force", 5))
		edac_report = EDAC_REPORTING_FORCE;

	return 0;
}

static int edac_report_get(char *buffer, const struct kernel_param *kp)
{
	int ret = 0;

	switch (edac_report) {
	case EDAC_REPORTING_ENABLED:
		ret = sprintf(buffer, "on");
		break;
	case EDAC_REPORTING_DISABLED:
		ret = sprintf(buffer, "off");
		break;
	case EDAC_REPORTING_FORCE:
		ret = sprintf(buffer, "force");
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct kernel_param_ops edac_report_ops = {
	.set = edac_report_set,
	.get = edac_report_get,
};

module_param_cb(edac_report, &edac_report_ops, &edac_report, 0644);
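/*
 * Added note (not in the original sources): with the usual EDAC build
 * layout this file is linked into edac_core, so the parameter above is
 * expected to be visible as /sys/module/edac_core/parameters/edac_report
 * and to accept the "on", "off" and "force" strings parsed by
 * edac_report_set().
 */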
unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				     unsigned int len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = snprintf(p, len, "%s %d ",
			     edac_layer_name[mci->layers[i].type],
			     dimm->location[i]);
		p += n;
		len -= n;
		count += n;
		if (!len)
			break;
	}

	return count;
}
#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, "channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, "  channel = %p\n", chan);
	edac_dbg(4, "  channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, "  channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
	char location[80];

	if (!dimm->nr_pages)
		return;

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->csbased ? "rank" : "dimm",
		 dimm->idx, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, "  dimm = %p\n", dimm);
	edac_dbg(4, "  dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, "  dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, "  dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, "  csrow = %p\n", csrow);
	edac_dbg(4, "  csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, "  csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, "  csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, "  csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, "  csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, "  csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif				/* CONFIG_EDAC_DEBUG */
const char * const edac_mem_types[] = {
	[MEM_EMPTY]	= "Empty",
	[MEM_RESERVED]	= "Reserved",
	[MEM_UNKNOWN]	= "Unknown",
	[MEM_FPM]	= "FPM",
	[MEM_EDO]	= "EDO",
	[MEM_BEDO]	= "BEDO",
	[MEM_SDR]	= "Unbuffered-SDR",
	[MEM_RDR]	= "Registered-SDR",
	[MEM_DDR]	= "Unbuffered-DDR",
	[MEM_RDDR]	= "Registered-DDR",
	[MEM_RMBS]	= "RMBS",
	[MEM_DDR2]	= "Unbuffered-DDR2",
	[MEM_FB_DDR2]	= "FullyBuffered-DDR2",
	[MEM_RDDR2]	= "Registered-DDR2",
	[MEM_XDR]	= "XDR",
	[MEM_DDR3]	= "Unbuffered-DDR3",
	[MEM_RDDR3]	= "Registered-DDR3",
	[MEM_LRDDR3]	= "Load-Reduced-DDR3-RAM",
	[MEM_DDR4]	= "Unbuffered-DDR4",
	[MEM_RDDR4]	= "Registered-DDR4",
	[MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
	[MEM_NVDIMM]	= "Non-volatile-RAM",
};
EXPORT_SYMBOL_GPL(edac_mem_types);
/*
 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
 * @p:		pointer to a pointer with the memory offset to be used. At
 *		return, this will be incremented to point to the next offset
 * @size:	Size of the data structure to be reserved
 * @n_elems:	Number of elements that should be reserved
 *
 * If 'size' is a constant, the compiler will optimize this whole function
 * down to either a no-op or the addition of a constant to the value of '*p'.
 *
 * The 'p' pointer is absolutely needed to keep the proper advancing
 * further in memory to the proper offsets when allocating the struct along
 * with its embedded structs, as edac_device_alloc_ctl_info() does it
 * above, for example.
 *
 * At return, the pointer 'p' will be incremented to be used on a next call
 * to this function.
 */
void *edac_align_ptr(void **p, unsigned int size, int n_elems)
{
	unsigned int align, r;
	void *ptr = *p;

	*p += size * n_elems;

	/*
	 * 'p' can possibly be an unaligned item X such that sizeof(X) is
	 * 'size'.  Adjust 'p' so that its alignment is at least as
	 * stringent as what the compiler would provide for X and return
	 * the aligned result.
	 * Here we assume that the alignment of a "long long" is the most
	 * stringent alignment that the compiler will ever provide by default.
	 * As far as I know, this is a reasonable assumption.
	 */
	if (size > sizeof(long))
		align = sizeof(long long);
	else if (size > sizeof(int))
		align = sizeof(long);
	else if (size > sizeof(short))
		align = sizeof(int);
	else if (size > sizeof(char))
		align = sizeof(short);
	else
		return ptr;

	/* Align the offset value itself, not the address of the cursor. */
	r = (unsigned long)ptr % align;

	if (r == 0)
		return ptr;

	*p += align - r;

	return (void *)(((unsigned long)ptr) + align - r);
}
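/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the two-pass "single-shot allocation" pattern edac_align_ptr() supports.
 * Pass 1 accumulates aligned offsets relative to NULL to size the buffer;
 * pass 2 rebases the offsets into the memory actually allocated, exactly
 * as edac_mc_alloc() does below. The struct names here are hypothetical.
 */
#if 0
struct example_outer {
	int n;
};

static struct example_outer *example_single_shot_alloc(int n_elems)
{
	void *ptr = NULL, *buf;
	struct example_outer *outer;
	u32 *counts;

	/* Pass 1: accumulate aligned offsets starting from NULL. */
	outer = edac_align_ptr(&ptr, sizeof(*outer), 1);
	counts = edac_align_ptr(&ptr, sizeof(*counts), n_elems);

	buf = kzalloc((unsigned long)ptr, GFP_KERNEL);	/* total size */
	if (!buf)
		return NULL;

	/* Pass 2: turn the offsets into real pointers inside 'buf'. */
	outer = (struct example_outer *)((char *)buf + (unsigned long)outer);
	counts = (u32 *)((char *)buf + (unsigned long)counts);
	outer->n = n_elems;

	return outer;
}
#endif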
static void _edac_mc_free(struct mem_ctl_info *mci)
{
	put_device(&mci->dev);
}
static void mci_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct csrow_info *csr;
	int i, chn, row;

	if (mci->dimms) {
		for (i = 0; i < mci->tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}

	if (mci->csrows) {
		for (row = 0; row < mci->nr_csrows; row++) {
			csr = mci->csrows[row];
			if (!csr)
				continue;

			if (csr->channels) {
				for (chn = 0; chn < mci->num_cschannel; chn++)
					kfree(csr->channels[chn]);
				kfree(csr->channels);
			}
			kfree(csr);
		}
		kfree(mci->csrows);
	}
	kfree(mci);
}
static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
	unsigned int tot_channels = mci->num_cschannel;
	unsigned int tot_csrows = mci->nr_csrows;
	unsigned int row, chn;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		return -ENOMEM;

	for (row = 0; row < tot_csrows; row++) {
		struct csrow_info *csr;

		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			return -ENOMEM;

		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			return -ENOMEM;

		for (chn = 0; chn < tot_channels; chn++) {
			struct rank_info *chan;

			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				return -ENOMEM;

			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	return 0;
}
static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
{
	unsigned int pos[EDAC_MAX_LAYERS];
	unsigned int row, chn, idx;
	int layer;
	char *p;

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		return -ENOMEM;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (idx = 0; idx < mci->tot_dimms; idx++) {
		struct dimm_info *dimm;
		struct rank_info *chan;
		int n, len;

		chan = mci->csrows[row]->channels[chn];

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			return -ENOMEM;
		mci->dimms[idx] = dimm;
		dimm->mci = mci;
		dimm->idx = idx;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = snprintf(p, len, "mc#%u", mci->mc_idx);
		p += n;
		len -= n;
		for (layer = 0; layer < mci->n_layers; layer++) {
			n = snprintf(p, len, "%s#%u",
				     edac_layer_name[mci->layers[layer].type],
				     pos[layer]);
			p += n;
			len -= n;
			dimm->location[layer] = pos[layer];

			if (len <= 0)
				break;
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (mci->layers[0].is_virt_csrow) {
			chn++;
			if (chn == mci->num_cschannel) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == mci->nr_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location */
		for (layer = mci->n_layers - 1; layer >= 0; layer--) {
			pos[layer]++;
			if (pos[layer] < mci->layers[layer].size)
				break;
			pos[layer] = 0;
		}
	}

	return 0;
}
struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
				   unsigned int n_layers,
				   struct edac_mc_layer *layers,
				   unsigned int sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
	unsigned int idx, size, tot_dimms = 1, count = 1;
	unsigned int tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
	void *pvt, *ptr = NULL;
	int i;
	bool per_rank = false;

	if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
		return NULL;

	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (idx = 0; idx < n_layers; idx++) {
		tot_dimms *= layers[idx].size;

		if (layers[idx].is_virt_csrow)
			tot_csrows *= layers[idx].size;
		else
			tot_channels *= layers[idx].size;

		if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	/* Figure out the offsets of the various items from the start of an mc
	 * structure.  We want the alignment of each item to be at least as
	 * stringent as what the compiler would provide if we could simply
	 * hardcode everything into a single struct.
	 */
	mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
	layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
	for (i = 0; i < n_layers; i++) {
		count *= layers[i].size;
		edac_dbg(4, "errcount layer %d size %d\n", i, count);
		ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
		tot_errcount += 2 * count;
	}

	edac_dbg(4, "allocating %d error counters\n", tot_errcount);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
		 size,
		 tot_dimms,
		 per_rank ? "ranks" : "dimms",
		 tot_csrows * tot_channels);

	mci = kzalloc(size, GFP_KERNEL);
	if (mci == NULL)
		return NULL;

	mci->dev.release = mci_release;
	device_initialize(&mci->dev);

	/* Adjust pointers so they point within the memory we just allocated
	 * rather than an imaginary chunk of memory located at address 0.
	 */
	layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
	for (i = 0; i < n_layers; i++) {
		mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
		mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
	}
	pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->pvt_info = pvt;
	mci->n_layers = n_layers;
	mci->layers = layer;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	if (edac_mc_alloc_csrows(mci))
		goto error;

	if (edac_mc_alloc_dimms(mci))
		goto error;

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);
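/*
 * Illustrative sketch (added commentary, not in the original file): how a
 * memory-controller driver typically describes its topology to
 * edac_mc_alloc(). The two-layer csrow/channel shape and the private-data
 * struct below are hypothetical.
 */
#if 0
struct example_pvt {
	void __iomem *base;
};

static struct mem_ctl_info *example_mc_alloc(void)
{
	struct edac_mc_layer layers[2];

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 4;			/* four chip-select rows */
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 2;			/* two channels */
	layers[1].is_virt_csrow = false;

	return edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			     sizeof(struct example_pvt));
}
#endif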
void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
bool edac_has_mcs(void)
{
	bool ret;

	mutex_lock(&mem_ctls_mutex);

	ret = list_empty(&mc_devices);

	mutex_unlock(&mem_ctls_mutex);

	return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);
/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}

/**
 * find_mci_by_dev
 *
 *	scan list of controllers looking for the one that manages
 *	the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *ret;

	edac_dbg(3, "\n");

	mutex_lock(&mem_ctls_mutex);
	ret = __find_mci_by_dev(dev);
	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);
/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}
/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			edac_mod_work(&mci->work, value);
	}
	mutex_unlock(&mem_ctls_mutex);
}
/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);
		if (mci->mc_idx == idx)
			goto unlock;
	}

	mci = NULL;
unlock:
	mutex_unlock(&mem_ctls_mutex);
	return mci;
}
EXPORT_SYMBOL(edac_mc_find);
const char *edac_get_owner(void)
{
	return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);
/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		struct dimm_info *dimm;
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}

		mci_for_each_dimm(mci, dimm)
			edac_mc_dump_dimm(dimm);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = edac_get_sysfs_subsys();

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		mci->mod_name, mci->ctl_name, mci->dev_name,
		edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);
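/*
 * Illustrative sketch (added commentary, not in the original file): the
 * usual registration flow in a driver's probe routine. example_mc_alloc()
 * is the hypothetical helper sketched after edac_mc_alloc() above; the
 * names and values here are made up.
 */
#if 0
static int example_probe(struct device *dev)
{
	struct mem_ctl_info *mci = example_mc_alloc();

	if (!mci)
		return -ENOMEM;

	mci->pdev = dev;		/* key used by find_mci_by_dev() */
	mci->mod_name = "example_edac";
	mci->ctl_name = "example_ctl";
	mci->dev_name = dev_name(dev);
	/* fill dimm sizes/types, mtype_cap, edac_ctl_cap, grain, ... here */

	if (edac_mc_add_mc(mci)) {	/* thin wrapper for the _with_groups variant */
		edac_mc_free(mci);
		return -ENODEV;
	}

	return 0;
}
#endif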
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}
/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
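/*
 * Added note (not in the original file): when page_mask is zero, the
 * masked comparison above is trivially true and only the
 * first_page/last_page range check decides the match. A nonzero
 * page_mask additionally requires the masked address bits of 'page' to
 * equal those of first_page, which lets drivers express csrow
 * interleaving within a shared address range.
 */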
const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);
static void edac_inc_ce_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ce_mc += count;

	if (!enable_per_layer_report) {
		mci->ce_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ce_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}

static void edac_inc_ue_error(struct mem_ctl_info *mci,
			      bool enable_per_layer_report,
			      const int pos[EDAC_MAX_LAYERS],
			      const u16 count)
{
	int i, index = 0;

	mci->ue_mc += count;

	if (!enable_per_layer_report) {
		mci->ue_noinfo_count += count;
		return;
	}

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			break;
		index += pos[i];
		mci->ue_per_layer[i][index] += count;

		if (i < mci->n_layers - 1)
			index *= mci->layers[i + 1].size;
	}
}
static void edac_ce_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  long grain)
{
	unsigned long remapped_page;
	char *msg_aux = "";

	if (*msg)
		msg_aux = " ";

	if (edac_mc_get_log_ce()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s%son %s (%s %s - %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d CE %s%son %s (%s %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail);
	}
	edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count);

	if (mci->scrub_mode == SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, page_frame_number) :
			page_frame_number;

		edac_mc_scrub_block(remapped_page,
				    offset_in_page, grain);
	}
}
static void edac_ue_error(struct mem_ctl_info *mci,
			  const u16 error_count,
			  const int pos[EDAC_MAX_LAYERS],
			  const char *msg,
			  const char *location,
			  const char *label,
			  const char *detail,
			  const char *other_detail,
			  const bool enable_per_layer_report)
{
	char *msg_aux = "";

	if (*msg)
		msg_aux = " ";

	if (edac_mc_get_log_ue()) {
		if (other_detail && *other_detail)
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s%son %s (%s %s - %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail, other_detail);
		else
			edac_mc_printk(mci, KERN_WARNING,
				       "%d UE %s%son %s (%s %s)\n",
				       error_count, msg, msg_aux, label,
				       location, detail);
	}

	if (edac_mc_get_panic_on_ue()) {
		if (other_detail && *other_detail)
			panic("UE %s%son %s (%s%s - %s)\n",
			      msg, msg_aux, label, location, detail, other_detail);
		else
			panic("UE %s%son %s (%s%s)\n",
			      msg, msg_aux, label, location, detail);
	}

	edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count);
}
static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	enum hw_event_mc_err_type type = e->type;
	u16 count = e->error_count;

	if (row < 0)
		return;

	edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);

	if (type == HW_EVENT_ERR_CORRECTED) {
		mci->csrows[row]->ce_count += count;
		if (chan >= 0)
			mci->csrows[row]->channels[chan]->ce_count += count;
	} else {
		mci->csrows[row]->ue_count += count;
	}
}
void edac_raw_mc_handle_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	char detail[80];
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	u8 grain_bits;

	/* Sanity-check driver-supplied grain value. */
	if (WARN_ON_ONCE(!e->grain))
		e->grain = 1;

	grain_bits = fls_long(e->grain - 1);

	/* Report the error via the trace interface */
	if (IS_ENABLED(CONFIG_RAS))
		trace_mc_event(e->type, e->msg, e->label, e->error_count,
			       mci->mc_idx, e->top_layer, e->mid_layer,
			       e->low_layer,
			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
			       grain_bits, e->syndrome, e->other_detail);

	/* Memory type dependent details about the error */
	if (e->type == HW_EVENT_ERR_CORRECTED) {
		snprintf(detail, sizeof(detail),
			 "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx",
			 e->page_frame_number, e->offset_in_page,
			 e->grain, e->syndrome);
		edac_ce_error(mci, e->error_count, pos, e->msg, e->location, e->label,
			      detail, e->other_detail, e->enable_per_layer_report,
			      e->page_frame_number, e->offset_in_page, e->grain);
	} else {
		snprintf(detail, sizeof(detail),
			 "page:0x%lx offset:0x%lx grain:%ld",
			 e->page_frame_number, e->offset_in_page, e->grain);

		edac_ue_error(mci, e->error_count, pos, e->msg, e->location, e->label,
			      detail, e->other_detail, e->enable_per_layer_report);
	}
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);
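/*
 * Worked example (added commentary, not in the original file): for a
 * grain of 8 bytes, fls_long(8 - 1) = fls_long(0b111) = 3, so the trace
 * reports grain_bits = 3, i.e. the error address is accurate to 2^3
 * bytes. A grain that is not a power of two is rounded up: grain 12
 * gives fls_long(11) = 4, i.e. 16 bytes.
 */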
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	struct dimm_info *dimm;
	char *p;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	struct edac_raw_error_desc *e = &mci->error_desc;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = error_count;
	e->type = type;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	e->msg = msg;
	e->other_detail = other_detail;

	/*
	 * Check if the event report is consistent and if the memory
	 * location is known. If it is known, enable_per_layer_report will be
	 * true, the DIMM(s) label info will be filled and the per-layer
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing to the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			e->enable_per_layer_report = true;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';

	mci_for_each_dimm(mci, dimm) {
		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole
		 * channel/memory controller/... may be affected.
		 * Also, don't show errors for empty DIMM slots.
		 */
		if (!dimm->nr_pages)
			continue;

		n_labels++;
		if (n_labels > EDAC_MAX_LABELS) {
			e->enable_per_layer_report = false;
			break;
		}
		if (p != e->label) {
			strcpy(p, OTHER_LABEL);
			p += strlen(OTHER_LABEL);
		}
		strcpy(p, dimm->label);
		p += strlen(p);

		/*
		 * get csrow/channel of the DIMM, in order to allow
		 * incrementing the compat API counters
		 */
		edac_dbg(4, "%s csrows map: (%d,%d)\n",
			 mci->csbased ? "rank" : "dimm",
			 dimm->csrow, dimm->cschannel);
		if (row == -1)
			row = dimm->csrow;
		else if (row >= 0 && row != dimm->csrow)
			row = -2;

		if (chan == -1)
			chan = dimm->cschannel;
		else if (chan >= 0 && chan != dimm->cschannel)
			chan = -2;
	}

	if (!e->enable_per_layer_report)
		strcpy(e->label, "any memory");
	else if (!*e->label)
		strcpy(e->label, "unknown memory");

	edac_inc_csrow(e, row, chan);

	/* Fill the RAM location data */
	p = e->location;

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += sprintf(p, "%s:%d ",
			     edac_layer_name[mci->layers[i].type],
			     pos[i]);
	}
	if (p > e->location)
		*(p - 1) = '\0';

	edac_raw_mc_handle_error(e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);
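/*
 * Illustrative sketch (added commentary, not in the original file): how a
 * driver's interrupt handler or polling callback typically reports a
 * decoded corrected error. The decoded values here are made up; only the
 * call signature matches edac_mc_handle_error() above.
 */
#if 0
static void example_report_ce(struct mem_ctl_info *mci)
{
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     1,			/* error count */
			     0x12345,		/* page frame number */
			     0x40,		/* offset in page */
			     0xbeef,		/* syndrome */
			     2, 1, -1,		/* top/mid layers known, low unknown */
			     "single-bit ECC", "");
}
#endif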