Diffstat (limited to 'drivers/edac')
 drivers/edac/altera_edac.c       |   1
 drivers/edac/amd64_edac.c        | 112
 drivers/edac/amd64_edac.h        |   9
 drivers/edac/edac_device_sysfs.c |   1
 drivers/edac/edac_mc_sysfs.c     |   3
 drivers/edac/edac_pci_sysfs.c    |   2
 drivers/edac/ghes_edac.c         | 100
 drivers/edac/i10nm_base.c        |   2
 drivers/edac/skx_base.c          |  20
 drivers/edac/skx_common.c        |   8
 drivers/edac/skx_common.h        |   2
 drivers/edac/synopsys_edac.c     |  22
 12 files changed, 206 insertions(+), 76 deletions(-)
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 2d12b94eccda..9e9d15f36be0 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -561,6 +561,7 @@ static const struct regmap_config s10_sdram_regmap_cfg = {
.reg_write = s10_protected_reg_write,
.use_single_read = true,
.use_single_write = true,
+ .fast_io = true,
};
/************** </Stratix10 EDAC Memory Controller Functions> ***********/
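
For context, .fast_io switches regmap's internal locking from a mutex to a spinlock, which is what lets the protected read/write callbacks above be used from atomic context. A minimal sketch of such a configuration, with hypothetical callback names standing in for the Stratix10 accessors:

#include <linux/regmap.h>

/* Hypothetical accessors; they only illustrate the expected signatures. */
static int example_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	*val = 0;
	return 0;
}

static int example_reg_write(void *context, unsigned int reg, unsigned int val)
{
	return 0;
}

static const struct regmap_config example_regmap_cfg = {
	.name             = "example_sdram",
	.reg_bits         = 32,
	.val_bits         = 32,
	.reg_read         = example_reg_read,
	.reg_write        = example_reg_write,
	.use_single_read  = true,
	.use_single_write = true,
	.fast_io          = true,	/* take a spinlock, not a mutex, around I/O */
};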
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 608fdab566b3..88a643aabcb0 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -265,6 +265,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ else
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
break;
case 0x17:
@@ -2266,6 +2268,33 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M60H_CPUS] = {
+ .ctl_name = "F17h_M60h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
+ [F17_M70H_CPUS] = {
+ .ctl_name = "F17h_M70h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
+ [F19_CPUS] = {
+ .ctl_name = "F19h",
+ .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -2837,6 +2866,50 @@ static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
return nr_pages;
}
+static int init_csrows_df(struct mem_ctl_info *mci)
+{
+ struct amd64_pvt *pvt = mci->pvt_info;
+ enum edac_type edac_mode = EDAC_NONE;
+ enum dev_type dev_type = DEV_UNKNOWN;
+ struct dimm_info *dimm;
+ int empty = 1;
+ u8 umc, cs;
+
+ if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
+ edac_mode = EDAC_S16ECD16ED;
+ dev_type = DEV_X16;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
+ edac_mode = EDAC_S8ECD8ED;
+ dev_type = DEV_X8;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
+ edac_mode = EDAC_S4ECD4ED;
+ dev_type = DEV_X4;
+ } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
+ edac_mode = EDAC_SECDED;
+ }
+
+ for_each_umc(umc) {
+ for_each_chip_select(cs, umc, pvt) {
+ if (!csrow_enabled(cs, umc, pvt))
+ continue;
+
+ empty = 0;
+ dimm = mci->csrows[cs]->channels[umc]->dimm;
+
+ edac_dbg(1, "MC node: %d, csrow: %d\n",
+ pvt->mc_node_id, cs);
+
+ dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
+ dimm->mtype = pvt->dram_type;
+ dimm->edac_mode = edac_mode;
+ dimm->dtype = dev_type;
+ dimm->grain = 64;
+ }
+ }
+
+ return empty;
+}
+
/*
* Initialize the array of csrow attribute instances, based on the values
* from pci config hardware registers.
@@ -2851,15 +2924,16 @@ static int init_csrows(struct mem_ctl_info *mci)
int nr_pages = 0;
u32 val;
- if (!pvt->umc) {
- amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
+ if (pvt->umc)
+ return init_csrows_df(mci);
- pvt->nbcfg = val;
+ amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
- edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
- pvt->mc_node_id, val,
- !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
- }
+ pvt->nbcfg = val;
+
+ edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
+ pvt->mc_node_id, val,
+ !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
/*
* We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
@@ -2896,13 +2970,7 @@ static int init_csrows(struct mem_ctl_info *mci)
edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
/* Determine DIMM ECC mode: */
- if (pvt->umc) {
- if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED)
- edac_mode = EDAC_S4ECD4ED;
- else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED)
- edac_mode = EDAC_SECDED;
-
- } else if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
+ if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
? EDAC_S4ECD4ED
: EDAC_SECDED;
@@ -2912,6 +2980,7 @@ static int init_csrows(struct mem_ctl_info *mci)
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
+ dimm->grain = 64;
}
}
@@ -3264,6 +3333,14 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
fam_type = &family_types[F17_M30H_CPUS];
pvt->ops = &family_types[F17_M30H_CPUS].ops;
break;
+ } else if (pvt->model >= 0x60 && pvt->model <= 0x6f) {
+ fam_type = &family_types[F17_M60H_CPUS];
+ pvt->ops = &family_types[F17_M60H_CPUS].ops;
+ break;
+ } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
+ fam_type = &family_types[F17_M70H_CPUS];
+ pvt->ops = &family_types[F17_M70H_CPUS].ops;
+ break;
}
/* fall through */
case 0x18:
@@ -3274,6 +3351,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
family_types[F17_CPUS].ctl_name = "F18h";
break;
+ case 0x19:
+ fam_type = &family_types[F19_CPUS];
+ pvt->ops = &family_types[F19_CPUS].ops;
+ family_types[F19_CPUS].ctl_name = "F19h";
+ break;
+
default:
amd64_err("Unsupported family!\n");
return NULL;
@@ -3533,6 +3616,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
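
The amd64_edac additions above plug new CPU models into the driver's existing table-driven dispatch: per_family_init() selects a family_types[] entry by enum index, and the rest of the driver calls through its ops. A stand-alone sketch of that pattern (generic names, not the driver's own):

#include <stdio.h>

enum family { FAM_A, FAM_B, NUM_FAMS };

struct fam_ops {
	int (*channel_count)(void);
};

struct fam_desc {
	const char *ctl_name;
	struct fam_ops ops;
};

static int two_channels(void) { return 2; }

/* Designated initializers keep the enum and the table in sync, which is
 * how the F17h_M60h/M70h and F19h entries are added to family_types[]. */
static struct fam_desc fam_table[] = {
	[FAM_A] = { .ctl_name = "FamA", .ops = { .channel_count = two_channels } },
	[FAM_B] = { .ctl_name = "FamB", .ops = { .channel_count = two_channels } },
};

int main(void)
{
	enum family f = FAM_B;	/* what per_family_init() derives from the CPU model */

	printf("%s: %d channels\n", fam_table[f].ctl_name,
	       fam_table[f].ops.channel_count());
	return 0;
}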
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 4dce6a2ac75f..2cb7431173be 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -120,6 +120,12 @@
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
+#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
+#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
+#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
+#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
/*
* Function 1 - Address Map
@@ -286,6 +292,9 @@ enum amd_families {
F17_CPUS,
F17_M10H_CPUS,
F17_M30H_CPUS,
+ F17_M60H_CPUS,
+ F17_M70H_CPUS,
+ F19_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 0e7ea3591b78..5e7593753799 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -275,6 +275,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
/* Error exit stack */
err_kobj_reg:
+ kobject_put(&edac_dev->kobj);
module_put(edac_dev->owner);
err_out:
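
The one-line fix above (and the edac_pci_sysfs change further down) applies the standard kobject rule: once a kobject has been initialized, it must be released with kobject_put() so the ktype's ->release() callback runs and frees it; a plain kfree(), or simply bailing out, either bypasses the release path or leaks the object. A minimal sketch of the idiom, with made-up names:

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_obj {
	struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct example_obj, kobj));
}

static struct kobj_type example_ktype = {
	.release = example_release,
};

static int example_register(struct kobject *parent)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int err;

	if (!obj)
		return -ENOMEM;

	err = kobject_init_and_add(&obj->kobj, &example_ktype, parent, "example");
	if (err) {
		/* Never kfree() here: drop the reference so ->release() runs. */
		kobject_put(&obj->kobj);
		return err;
	}
	return 0;
}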
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 4386ea4b9b5a..9f589acd0753 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -449,8 +449,7 @@ error:
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
-
- device_del(&mci->csrows[i]->dev);
+ device_unregister(&mci->csrows[i]->dev);
}
return err;
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 72c9eb9fdffb..53042af7262e 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -386,7 +386,7 @@ static int edac_pci_main_kobj_setup(void)
/* Error unwind statck */
kobject_init_and_add_fail:
- kfree(edac_pci_top_main_kobj);
+ kobject_put(edac_pci_top_main_kobj);
kzalloc_fail:
module_put(THIS_MODULE);
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 2059e43ccc01..60d1d93bd195 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -26,9 +26,18 @@ struct ghes_edac_pvt {
char msg[80];
};
-static atomic_t ghes_init = ATOMIC_INIT(0);
+static refcount_t ghes_refcount = REFCOUNT_INIT(0);
+
+/*
+ * Access to ghes_pvt must be protected by ghes_lock. The spinlock
+ * also provides the necessary (implicit) memory barrier for the SMP
+ * case to make the pointer visible on another CPU.
+ */
static struct ghes_edac_pvt *ghes_pvt;
+/* GHES registration mutex */
+static DEFINE_MUTEX(ghes_reg_mutex);
+
/*
* Sync with other, potentially concurrent callers of
* ghes_edac_report_mem_error(). We don't know what the
@@ -79,9 +88,8 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++;
}
-static int get_dimm_smbios_index(u16 handle)
+static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
{
- struct mem_ctl_info *mci = ghes_pvt->mci;
int i;
for (i = 0; i < mci->tot_dimms; i++) {
@@ -198,14 +206,11 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
enum hw_event_mc_err_type type;
struct edac_raw_error_desc *e;
struct mem_ctl_info *mci;
- struct ghes_edac_pvt *pvt = ghes_pvt;
+ struct ghes_edac_pvt *pvt;
unsigned long flags;
char *p;
u8 grain_bits;
- if (!pvt)
- return;
-
/*
* We can do the locking below because GHES defers error processing
* from NMI to IRQ context. Whenever that changes, we'd at least
@@ -216,12 +221,17 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
spin_lock_irqsave(&ghes_lock, flags);
+ pvt = ghes_pvt;
+ if (!pvt)
+ goto unlock;
+
mci = pvt->mci;
e = &mci->error_desc;
/* Cleans the error report buffer */
memset(e, 0, sizeof (*e));
e->error_count = 1;
+ e->grain = 1;
strcpy(e->label, "unknown label");
e->msg = pvt->msg;
e->other_detail = pvt->other_detail;
@@ -317,7 +327,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
/* Error grain */
if (mem_err->validation_bits & CPER_MEM_VALID_PA_MASK)
- e->grain = ~(mem_err->physical_addr_mask & ~PAGE_MASK);
+ e->grain = ~mem_err->physical_addr_mask + 1;
/* Memory error location, mapped on e->location */
p = e->location;
@@ -348,7 +358,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
- index = get_dimm_smbios_index(mem_err->mem_dev_handle);
+ index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
if (index >= 0) {
e->top_layer = index;
e->enable_per_layer_report = true;
@@ -433,8 +443,13 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
if (p > pvt->other_detail)
*(p - 1) = '\0';
+ /* Sanity-check driver-supplied grain value. */
+ if (WARN_ON_ONCE(!e->grain))
+ e->grain = 1;
+
+ grain_bits = fls_long(e->grain - 1);
+
/* Generate the trace event */
- grain_bits = fls_long(e->grain);
snprintf(pvt->detail_location, sizeof(pvt->detail_location),
"APEI location: %s %s", e->location, e->other_detail);
trace_mc_event(type, e->msg, e->label, e->error_count,
@@ -443,6 +458,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
grain_bits, e->syndrome, pvt->detail_location);
edac_raw_mc_handle_error(type, mci, e);
+
+unlock:
spin_unlock_irqrestore(&ghes_lock, flags);
}
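
Two hunks above change how the error grain is derived from the CPER record and how it is folded into grain_bits for the trace event. A short worked example, with an assumed address mask, shows the difference:

/*
 * Assume physical_addr_mask = 0xffffffffffffffc0, i.e. the reported
 * address is accurate down to a 64-byte cache line (4K PAGE_MASK):
 *
 *   old: grain = ~(mask & ~PAGE_MASK) = ~0xfc0 = 0xfffffffffffff03f  (not a size)
 *   new: grain = ~mask + 1            = 0x40                         (region size)
 *
 * and for the trace event, with grain = 0x40:
 *
 *   old: grain_bits = fls_long(0x40)     = 7   (one too many)
 *   new: grain_bits = fls_long(0x40 - 1) = 6   (log2 of the grain)
 */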
@@ -457,10 +474,12 @@ static struct acpi_platform_list plat_list[] = {
int ghes_edac_register(struct ghes *ghes, struct device *dev)
{
bool fake = false;
- int rc, num_dimm = 0;
+ int rc = 0, num_dimm = 0;
struct mem_ctl_info *mci;
+ struct ghes_edac_pvt *pvt;
struct edac_mc_layer layers[1];
struct ghes_edac_dimm_fill dimm_fill;
+ unsigned long flags;
int idx = -1;
if (IS_ENABLED(CONFIG_X86)) {
@@ -472,11 +491,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
idx = 0;
}
+ /* finish another registration/unregistration instance first */
+ mutex_lock(&ghes_reg_mutex);
+
/*
* We have only one logical memory controller to which all DIMMs belong.
*/
- if (atomic_inc_return(&ghes_init) > 1)
- return 0;
+ if (refcount_inc_not_zero(&ghes_refcount))
+ goto unlock;
/* Get the number of DIMMs */
dmi_walk(ghes_edac_count_dimms, &num_dimm);
@@ -494,12 +516,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
if (!mci) {
pr_info("Can't allocate memory for EDAC data\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto unlock;
}
- ghes_pvt = mci->pvt_info;
- ghes_pvt->ghes = ghes;
- ghes_pvt->mci = mci;
+ pvt = mci->pvt_info;
+ pvt->ghes = ghes;
+ pvt->mci = mci;
mci->pdev = dev;
mci->mtype_cap = MEM_FLAG_EMPTY;
@@ -541,23 +564,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
if (rc < 0) {
pr_info("Can't register at EDAC core\n");
edac_mc_free(mci);
- return -ENODEV;
+ rc = -ENODEV;
+ goto unlock;
}
- return 0;
+
+ spin_lock_irqsave(&ghes_lock, flags);
+ ghes_pvt = pvt;
+ spin_unlock_irqrestore(&ghes_lock, flags);
+
+ /* only set on success */
+ refcount_set(&ghes_refcount, 1);
+
+unlock:
+ mutex_unlock(&ghes_reg_mutex);
+
+ return rc;
}
void ghes_edac_unregister(struct ghes *ghes)
{
struct mem_ctl_info *mci;
+ unsigned long flags;
- if (!ghes_pvt)
- return;
+ mutex_lock(&ghes_reg_mutex);
- if (atomic_dec_return(&ghes_init))
- return;
+ if (!refcount_dec_and_test(&ghes_refcount))
+ goto unlock;
- mci = ghes_pvt->mci;
+ /*
+ * Wait for the irq handler being finished.
+ */
+ spin_lock_irqsave(&ghes_lock, flags);
+ mci = ghes_pvt ? ghes_pvt->mci : NULL;
ghes_pvt = NULL;
- edac_mc_del_mc(mci->pdev);
- edac_mc_free(mci);
+ spin_unlock_irqrestore(&ghes_lock, flags);
+
+ if (!mci)
+ goto unlock;
+
+ mci = edac_mc_del_mc(mci->pdev);
+ if (mci)
+ edac_mc_free(mci);
+
+unlock:
+ mutex_unlock(&ghes_reg_mutex);
}
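
The ghes_edac changes above replace a bare atomic counter with a registration scheme built from three pieces: a mutex that serializes register/unregister, a refcount so only the first registration and the last unregistration do real work, and the existing ghes_lock spinlock to publish and tear down the ghes_pvt pointer that the deferred error handler dereferences in IRQ context. A stripped-down sketch of the same pattern, with illustrative names:

#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_ctx { int dummy; };

static DEFINE_MUTEX(reg_mutex);		/* serializes register/unregister         */
static DEFINE_SPINLOCK(ctx_lock);	/* protects readers of the shared pointer */
static refcount_t users = REFCOUNT_INIT(0);
static struct example_ctx *shared_ctx;

/* Reader side, analogous to ghes_edac_report_mem_error() in IRQ context. */
static void example_report(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx_lock, flags);
	if (shared_ctx) {
		/* consume *shared_ctx under the lock */
	}
	spin_unlock_irqrestore(&ctx_lock, flags);
}

static int example_register(void)
{
	struct example_ctx *ctx;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&reg_mutex);
	if (refcount_inc_not_zero(&users))
		goto unlock;			/* already set up by an earlier caller */

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto unlock;
	}

	spin_lock_irqsave(&ctx_lock, flags);
	shared_ctx = ctx;			/* publish under the spinlock */
	spin_unlock_irqrestore(&ctx_lock, flags);

	refcount_set(&users, 1);		/* only on success */
unlock:
	mutex_unlock(&reg_mutex);
	return rc;
}

static void example_unregister(void)
{
	struct example_ctx *ctx;
	unsigned long flags;

	mutex_lock(&reg_mutex);
	if (!refcount_dec_and_test(&users))
		goto unlock;

	spin_lock_irqsave(&ctx_lock, flags);	/* waits out a running reader */
	ctx = shared_ctx;
	shared_ctx = NULL;
	spin_unlock_irqrestore(&ctx_lock, flags);

	kfree(ctx);				/* no new reader can reach it now */
unlock:
	mutex_unlock(&reg_mutex);
}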
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 6f06aec4877c..217ba0f7fd7a 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -160,7 +160,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
mtr, mcddrtcfg, imc->mc, i, j);
if (IS_DIMM_PRESENT(mtr))
- ndimms += skx_get_dimm_info(mtr, 0, dimm,
+ ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
imc, i, j);
else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index a5c8fa3a249a..1c27e1856f0a 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -151,27 +151,23 @@ static const struct x86_cpu_id skx_cpuids[] = {
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
-#define SKX_GET_MTMTR(dev, reg) \
- pci_read_config_dword((dev), 0x87c, &(reg))
-
-static bool skx_check_ecc(struct pci_dev *pdev)
+static bool skx_check_ecc(u32 mcmtr)
{
- u32 mtmtr;
-
- SKX_GET_MTMTR(pdev, mtmtr);
-
- return !!GET_BITFIELD(mtmtr, 2, 2);
+ return !!GET_BITFIELD(mcmtr, 2, 2);
}
static int skx_get_dimm_config(struct mem_ctl_info *mci)
{
struct skx_pvt *pvt = mci->pvt_info;
+ u32 mtr, mcmtr, amap, mcddrtcfg;
struct skx_imc *imc = pvt->imc;
- u32 mtr, amap, mcddrtcfg;
struct dimm_info *dimm;
int i, j;
int ndimms;
+ /* Only the mcmtr on the first channel is effective */
+ pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
+
for (i = 0; i < SKX_NUM_CHANNELS; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
@@ -182,14 +178,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci)
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
if (IS_DIMM_PRESENT(mtr)) {
- ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j);
+ ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
} else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
EDAC_MOD_STR);
nvdimm_count++;
}
}
- if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) {
+ if (ndimms && !skx_check_ecc(mcmtr)) {
skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
return -ENODEV;
}
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index b0dddcfa9baa..b8b7405a5ac4 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -235,7 +235,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
if (!pdev) {
- skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
+ edac_dbg(2, "Can't get tolm/tohm\n");
return -ENODEV;
}
@@ -283,7 +283,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno)
{
int banks = 16, ranks, rows, cols, npages;
@@ -303,8 +303,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
- imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0);
- imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9);
+ imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0);
+ imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9);
imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0);
imc->chan[chan].dimms[dimmno].rowbits = rows;
imc->chan[chan].dimms[dimmno].colbits = cols;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index d18fa98669af..3207606ea10b 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -126,7 +126,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type,
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
-int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
+int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno);
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 2d263382d797..880ffd833718 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
pinf = &p->ceinfo;
if (!priv->p_data->quirks) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "Bit Position: %d Data: 0x%08x\n",
+ "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank, pinf->col,
pinf->bitpos, pinf->data);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "BankGroup Number %d Block Number %d ",
- pinf->bankgrpnr, pinf->blknr);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "Bit Position: %d Data: 0x%08x\n",
+ "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr,
pinf->bitpos, pinf->data);
}
@@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
"UE", pinf->row, pinf->bank, pinf->col);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "UE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "BankGroup Number %d Block Number %d",
+ "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
+ "UE", pinf->row, pinf->bank, pinf->col,
pinf->bankgrpnr, pinf->blknr);
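
The synopsys_edac consolidation above fixes message truncation: snprintf() always writes from the start of the buffer, so the back-to-back calls in the old code overwrote one another and only the last fragment of priv->message survived. Building the message with a single format string, as the patch does, avoids that; appending with an explicit offset would also work, roughly as in this sketch (buffer and field names are illustrative):

#include <linux/kernel.h>	/* scnprintf() */

static void example_format(char *msg, size_t size,
			   int row, int bank, int col, int bitpos, u32 data)
{
	int pos = 0;

	/* scnprintf() returns the number of characters actually written,
	 * so successive calls append instead of overwriting. */
	pos += scnprintf(msg + pos, size - pos,
			 "DDR ECC error type:CE Row %d Bank %d Col %d ",
			 row, bank, col);
	pos += scnprintf(msg + pos, size - pos,
			 "Bit Position: %d Data: 0x%08x", bitpos, data);
}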
}