Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/Kconfig         |   1
-rw-r--r--  drivers/edac/amd64_edac.c    | 298
-rw-r--r--  drivers/edac/i10nm_base.c    |   1
-rw-r--r--  drivers/edac/igen6_edac.c    |   6
-rw-r--r--  drivers/edac/mce_amd.c       |   4
-rw-r--r--  drivers/edac/synopsys_edac.c |  54
-rw-r--r--  drivers/edac/versal_edac.c   | 202
7 files changed, 228 insertions(+), 338 deletions(-)
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 5a7f3fabee..16c8de5050 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -78,6 +78,7 @@ config EDAC_GHES
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64)"
depends on AMD_NB && EDAC_DECODE_MCE
+ imply AMD_ATL
help
Support for error detection and correction of DRAM ECC errors on
the AMD64 families (>= K8) of memory controllers.
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 537b9987a4..a17f3c0cdf 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/ras.h>
#include "amd64_edac.h"
#include <asm/amd_nb.h>
@@ -80,7 +81,7 @@ int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error reading F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
@@ -93,7 +94,7 @@ int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
amd64_warn("%s: error writing to F%dx%03x.\n",
func, PCI_FUNC(pdev->devfn), offset);
- return err;
+ return pcibios_err_to_errno(err);
}
/*
@@ -1024,8 +1025,10 @@ static int gpu_get_node_map(struct amd64_pvt *pvt)
}
ret = pci_read_config_dword(pdev, REG_LOCAL_NODE_TYPE_MAP, &tmp);
- if (ret)
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
goto out;
+ }
gpu_node_map.node_count = FIELD_GET(LNTM_NODE_COUNT, tmp);
gpu_node_map.base_node_id = FIELD_GET(LNTM_BASE_NODE_ID, tmp);
@@ -1051,281 +1054,6 @@ static int fixup_node_id(int node_id, struct mce *m)
return nid - gpu_node_map.base_node_id + 1;
}
-/* Protect the PCI config register pairs used for DF indirect access. */
-static DEFINE_MUTEX(df_indirect_mutex);
-
-/*
- * Data Fabric Indirect Access uses FICAA/FICAD.
- *
- * Fabric Indirect Configuration Access Address (FICAA): Constructed based
- * on the device's Instance Id and the PCI function and register offset of
- * the desired register.
- *
- * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
- * and FICAD HI registers but so far we only need the LO register.
- *
- * Use Instance Id 0xFF to indicate a broadcast read.
- */
-#define DF_BROADCAST 0xFF
-static int __df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
-{
- struct pci_dev *F4;
- u32 ficaa;
- int err = -ENODEV;
-
- if (node >= amd_nb_num())
- goto out;
-
- F4 = node_to_amd_nb(node)->link;
- if (!F4)
- goto out;
-
- ficaa = (instance_id == DF_BROADCAST) ? 0 : 1;
- ficaa |= reg & 0x3FC;
- ficaa |= (func & 0x7) << 11;
- ficaa |= instance_id << 16;
-
- mutex_lock(&df_indirect_mutex);
-
- err = pci_write_config_dword(F4, 0x5C, ficaa);
- if (err) {
- pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
- goto out_unlock;
- }
-
- err = pci_read_config_dword(F4, 0x98, lo);
- if (err)
- pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);
-
-out_unlock:
- mutex_unlock(&df_indirect_mutex);
-
-out:
- return err;
-}
-
-static int df_indirect_read_instance(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
-{
- return __df_indirect_read(node, func, reg, instance_id, lo);
-}
-
-static int df_indirect_read_broadcast(u16 node, u8 func, u16 reg, u32 *lo)
-{
- return __df_indirect_read(node, func, reg, DF_BROADCAST, lo);
-}
-
-struct addr_ctx {
- u64 ret_addr;
- u32 tmp;
- u16 nid;
- u8 inst_id;
-};
-
-static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr)
-{
- u64 dram_base_addr, dram_limit_addr, dram_hole_base;
-
- u8 die_id_shift, die_id_mask, socket_id_shift, socket_id_mask;
- u8 intlv_num_dies, intlv_num_chan, intlv_num_sockets;
- u8 intlv_addr_sel, intlv_addr_bit;
- u8 num_intlv_bits, hashed_bit;
- u8 lgcy_mmio_hole_en, base = 0;
- u8 cs_mask, cs_id = 0;
- bool hash_enabled = false;
-
- struct addr_ctx ctx;
-
- memset(&ctx, 0, sizeof(ctx));
-
- /* Start from the normalized address */
- ctx.ret_addr = norm_addr;
-
- ctx.nid = nid;
- ctx.inst_id = umc;
-
- /* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
- if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
- goto out_err;
-
- /* Remove HiAddrOffset from normalized address, if enabled: */
- if (ctx.tmp & BIT(0)) {
- u64 hi_addr_offset = (ctx.tmp & GENMASK_ULL(31, 20)) << 8;
-
- if (norm_addr >= hi_addr_offset) {
- ctx.ret_addr -= hi_addr_offset;
- base = 1;
- }
- }
-
- /* Read D18F0x110 (DramBaseAddress). */
- if (df_indirect_read_instance(nid, 0, 0x110 + (8 * base), umc, &ctx.tmp))
- goto out_err;
-
- /* Check if address range is valid. */
- if (!(ctx.tmp & BIT(0))) {
- pr_err("%s: Invalid DramBaseAddress range: 0x%x.\n",
- __func__, ctx.tmp);
- goto out_err;
- }
-
- lgcy_mmio_hole_en = ctx.tmp & BIT(1);
- intlv_num_chan = (ctx.tmp >> 4) & 0xF;
- intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
- dram_base_addr = (ctx.tmp & GENMASK_ULL(31, 12)) << 16;
-
- /* {0, 1, 2, 3} map to address bits {8, 9, 10, 11} respectively */
- if (intlv_addr_sel > 3) {
- pr_err("%s: Invalid interleave address select %d.\n",
- __func__, intlv_addr_sel);
- goto out_err;
- }
-
- /* Read D18F0x114 (DramLimitAddress). */
- if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
- goto out_err;
-
- intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
- intlv_num_dies = (ctx.tmp >> 10) & 0x3;
- dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
-
- intlv_addr_bit = intlv_addr_sel + 8;
-
- /* Re-use intlv_num_chan by setting it equal to log2(#channels) */
- switch (intlv_num_chan) {
- case 0: intlv_num_chan = 0; break;
- case 1: intlv_num_chan = 1; break;
- case 3: intlv_num_chan = 2; break;
- case 5: intlv_num_chan = 3; break;
- case 7: intlv_num_chan = 4; break;
-
- case 8: intlv_num_chan = 1;
- hash_enabled = true;
- break;
- default:
- pr_err("%s: Invalid number of interleaved channels %d.\n",
- __func__, intlv_num_chan);
- goto out_err;
- }
-
- num_intlv_bits = intlv_num_chan;
-
- if (intlv_num_dies > 2) {
- pr_err("%s: Invalid number of interleaved nodes/dies %d.\n",
- __func__, intlv_num_dies);
- goto out_err;
- }
-
- num_intlv_bits += intlv_num_dies;
-
- /* Add a bit if sockets are interleaved. */
- num_intlv_bits += intlv_num_sockets;
-
- /* Assert num_intlv_bits <= 4 */
- if (num_intlv_bits > 4) {
- pr_err("%s: Invalid interleave bits %d.\n",
- __func__, num_intlv_bits);
- goto out_err;
- }
-
- if (num_intlv_bits > 0) {
- u64 temp_addr_x, temp_addr_i, temp_addr_y;
- u8 die_id_bit, sock_id_bit, cs_fabric_id;
-
- /*
- * Read FabricBlockInstanceInformation3_CS[BlockFabricID].
- * This is the fabric id for this coherent slave. Use
- * umc/channel# as instance id of the coherent slave
- * for FICAA.
- */
- if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
- goto out_err;
-
- cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
- die_id_bit = 0;
-
- /* If interleaved over more than 1 channel: */
- if (intlv_num_chan) {
- die_id_bit = intlv_num_chan;
- cs_mask = (1 << die_id_bit) - 1;
- cs_id = cs_fabric_id & cs_mask;
- }
-
- sock_id_bit = die_id_bit;
-
- /* Read D18F1x208 (SystemFabricIdMask). */
- if (intlv_num_dies || intlv_num_sockets)
- if (df_indirect_read_broadcast(nid, 1, 0x208, &ctx.tmp))
- goto out_err;
-
- /* If interleaved over more than 1 die. */
- if (intlv_num_dies) {
- sock_id_bit = die_id_bit + intlv_num_dies;
- die_id_shift = (ctx.tmp >> 24) & 0xF;
- die_id_mask = (ctx.tmp >> 8) & 0xFF;
-
- cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
- }
-
- /* If interleaved over more than 1 socket. */
- if (intlv_num_sockets) {
- socket_id_shift = (ctx.tmp >> 28) & 0xF;
- socket_id_mask = (ctx.tmp >> 16) & 0xFF;
-
- cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit;
- }
-
- /*
- * The pre-interleaved address consists of XXXXXXIIIYYYYY
- * where III is the ID for this CS, and XXXXXXYYYYY are the
- * address bits from the post-interleaved address.
- * "num_intlv_bits" has been calculated to tell us how many "I"
- * bits there are. "intlv_addr_bit" tells us how many "Y" bits
- * there are (where "I" starts).
- */
- temp_addr_y = ctx.ret_addr & GENMASK_ULL(intlv_addr_bit - 1, 0);
- temp_addr_i = (cs_id << intlv_addr_bit);
- temp_addr_x = (ctx.ret_addr & GENMASK_ULL(63, intlv_addr_bit)) << num_intlv_bits;
- ctx.ret_addr = temp_addr_x | temp_addr_i | temp_addr_y;
- }
-
- /* Add dram base address */
- ctx.ret_addr += dram_base_addr;
-
- /* If legacy MMIO hole enabled */
- if (lgcy_mmio_hole_en) {
- if (df_indirect_read_broadcast(nid, 0, 0x104, &ctx.tmp))
- goto out_err;
-
- dram_hole_base = ctx.tmp & GENMASK(31, 24);
- if (ctx.ret_addr >= dram_hole_base)
- ctx.ret_addr += (BIT_ULL(32) - dram_hole_base);
- }
-
- if (hash_enabled) {
- /* Save some parentheses and grab ls-bit at the end. */
- hashed_bit = (ctx.ret_addr >> 12) ^
- (ctx.ret_addr >> 18) ^
- (ctx.ret_addr >> 21) ^
- (ctx.ret_addr >> 30) ^
- cs_id;
-
- hashed_bit &= BIT(0);
-
- if (hashed_bit != ((ctx.ret_addr >> intlv_addr_bit) & BIT(0)))
- ctx.ret_addr ^= BIT(intlv_addr_bit);
- }
-
- /* Is calculated system address is above DRAM limit address? */
- if (ctx.ret_addr > dram_limit_addr)
- goto out_err;
-
- *sys_addr = ctx.ret_addr;
- return 0;
-
-out_err:
- return -EINVAL;
-}
-
static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
/*
@@ -1915,7 +1643,7 @@ ddr3:
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
- u16 mce_nid = topology_die_id(m->extcpu);
+ u16 mce_nid = topology_amd_node_id(m->extcpu);
struct mem_ctl_info *mci;
u8 start_bit = 1;
u8 end_bit = 47;
@@ -3073,9 +2801,10 @@ static void decode_umc_error(int node_id, struct mce *m)
{
u8 ecc_type = (m->status >> 45) & 0x3;
struct mem_ctl_info *mci;
+ unsigned long sys_addr;
struct amd64_pvt *pvt;
+ struct atl_err a_err;
struct err_info err;
- u64 sys_addr;
node_id = fixup_node_id(node_id, m);
@@ -3106,7 +2835,12 @@ static void decode_umc_error(int node_id, struct mce *m)
pvt->ops->get_err_info(m, &err);
- if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
+ a_err.addr = m->addr;
+ a_err.ipid = m->ipid;
+ a_err.cpu = m->extcpu;
+
+ sys_addr = amd_convert_umc_mca_addr_to_sys_addr(&a_err);
+ if (IS_ERR_VALUE(sys_addr)) {
err.err_code = ERR_NORM_ADDR;
goto log_error;
}
@@ -3446,7 +3180,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
int cpu;
for_each_online_cpu(cpu)
- if (topology_die_id(cpu) == nid)
+ if (topology_amd_node_id(cpu) == nid)
cpumask_set_cpu(cpu, mask);
}
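
The amd64_edac.c hunks above do two things: raw PCIBIOS_* status codes from the PCI config accessors are now converted with pcibios_err_to_errno(), and the driver's private Data Fabric normalized-to-system address translation is dropped in favor of amd_convert_umc_mca_addr_to_sys_addr() from the AMD address translation library (hence the "imply AMD_ATL" in Kconfig). A minimal sketch of both calling conventions follows; the example_* function names are illustrative and not taken from the patch:

#include <linux/err.h>
#include <linux/pci.h>
#include <linux/ras.h>

/* Convert a PCIBIOS_* status from a config accessor into a -errno value. */
static int example_read_cfg(struct pci_dev *pdev, int offset, u32 *val)
{
	int err = pci_read_config_dword(pdev, offset, val);

	return err ? pcibios_err_to_errno(err) : 0;
}

/* Translate a UMC-reported MCA address via the ATL library and check it. */
static int example_umc_to_sys_addr(u64 addr, u64 ipid, u32 cpu, u64 *sys_addr)
{
	struct atl_err a_err = {
		.addr = addr,
		.ipid = ipid,
		.cpu  = cpu,
	};
	unsigned long ret = amd_convert_umc_mca_addr_to_sys_addr(&a_err);

	/* The helper encodes failure as an error-pointer value. */
	if (IS_ERR_VALUE(ret))
		return -EINVAL;

	*sys_addr = ret;
	return 0;
}

The same pcibios_err_to_errno() conversion is applied in the igen6_edac.c hunks further down.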
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 2b83d6de93..3fd22a1eb1 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -951,6 +951,7 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 2b0ecdeba5..dbe9fe5f2c 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -238,6 +238,7 @@ static struct work_struct ecclog_work;
#define DID_ADL_N_SKU9 0x4678
#define DID_ADL_N_SKU10 0x4679
#define DID_ADL_N_SKU11 0x467c
+#define DID_ADL_N_SKU12 0x4632
/* Compute die IDs for Raptor Lake-P with IBECC */
#define DID_RPL_P_SKU1 0xa706
@@ -583,6 +584,7 @@ static const struct pci_device_id igen6_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU9), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU10), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_ADL_N_SKU11), (kernel_ulong_t)&adl_n_cfg },
+ { PCI_VDEVICE(INTEL, DID_ADL_N_SKU12), (kernel_ulong_t)&adl_n_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU1), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU2), (kernel_ulong_t)&rpl_p_cfg },
{ PCI_VDEVICE(INTEL, DID_RPL_P_SKU3), (kernel_ulong_t)&rpl_p_cfg },
@@ -798,7 +800,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_read_config_word(imc->pdev, ERRCMD_OFFSET, &errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
if (enable)
errcmd |= ERRCMD_CE | ERRSTS_UE;
@@ -807,7 +809,7 @@ static int errcmd_enable_error_reporting(bool enable)
rc = pci_write_config_word(imc->pdev, ERRCMD_OFFSET, errcmd);
if (rc)
- return rc;
+ return pcibios_err_to_errno(rc);
return 0;
}
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ec8b6c9fed..8130c3dc64 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -584,7 +584,7 @@ static void decode_mc3_mce(struct mce *m)
static void decode_mc4_mce(struct mce *m)
{
unsigned int fam = x86_family(m->cpuid);
- int node_id = topology_die_id(m->extcpu);
+ int node_id = topology_amd_node_id(m->extcpu);
u16 ec = EC(m->status);
u8 xec = XEC(m->status, 0x1f);
u8 offset = 0;
@@ -746,7 +746,7 @@ static void decode_smca_error(struct mce *m)
if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
xec == 0 && decode_dram_ecc)
- decode_dram_ecc(topology_die_id(m->extcpu), m);
+ decode_dram_ecc(topology_amd_node_id(m->extcpu), m);
}
static inline void amd_decode_err_code(u16 ec)
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 709babce43..ea7a9a342d 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -9,6 +9,7 @@
#include <linux/edac.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of.h>
@@ -299,6 +300,7 @@ struct synps_ecc_status {
/**
* struct synps_edac_priv - DDR memory controller private instance data.
* @baseaddr: Base address of the DDR controller.
+ * @reglock: Concurrent CSRs access lock.
* @message: Buffer for framing the event specific info.
* @stat: ECC status information.
* @p_data: Platform data.
@@ -313,6 +315,7 @@ struct synps_ecc_status {
*/
struct synps_edac_priv {
void __iomem *baseaddr;
+ spinlock_t reglock;
char message[SYNPS_EDAC_MSG_SIZE];
struct synps_ecc_status stat;
const struct synps_platform_data *p_data;
@@ -408,7 +411,8 @@ out:
static int zynqmp_get_error_info(struct synps_edac_priv *priv)
{
struct synps_ecc_status *p;
- u32 regval, clearval = 0;
+ u32 regval, clearval;
+ unsigned long flags;
void __iomem *base;
base = priv->baseaddr;
@@ -452,10 +456,14 @@ ue_err:
p->ueinfo.blknr = (regval & ECC_CEADDR1_BLKNR_MASK);
p->ueinfo.data = readl(base + ECC_UESYND0_OFST);
out:
- clearval = ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT;
- clearval |= ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ clearval = readl(base + ECC_CLR_OFST) |
+ ECC_CTRL_CLR_CE_ERR | ECC_CTRL_CLR_CE_ERRCNT |
+ ECC_CTRL_CLR_UE_ERR | ECC_CTRL_CLR_UE_ERRCNT;
writel(clearval, base + ECC_CLR_OFST);
- writel(0x0, base + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
return 0;
}
@@ -515,24 +523,41 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
static void enable_intr(struct synps_edac_priv *priv)
{
+ unsigned long flags;
+
/* Enable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(DDR_UE_MASK | DDR_CE_MASK,
- priv->baseaddr + ECC_CLR_OFST);
- else
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_EN_OFST);
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(DDR_UE_MASK | DDR_CE_MASK,
+ priv->baseaddr + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
}
static void disable_intr(struct synps_edac_priv *priv)
{
+ unsigned long flags;
+
/* Disable UE/CE Interrupts */
- if (priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)
- writel(0x0, priv->baseaddr + ECC_CLR_OFST);
- else
+ if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR)) {
writel(DDR_QOSUE_MASK | DDR_QOSCE_MASK,
priv->baseaddr + DDR_QOS_IRQ_DB_OFST);
+
+ return;
+ }
+
+ spin_lock_irqsave(&priv->reglock, flags);
+
+ writel(0, priv->baseaddr + ECC_CLR_OFST);
+
+ spin_unlock_irqrestore(&priv->reglock, flags);
}
/**
@@ -576,8 +601,6 @@ static irqreturn_t intr_handler(int irq, void *dev_id)
/* v3.0 of the controller does not have this register */
if (!(priv->p_data->quirks & DDR_ECC_INTR_SELF_CLEAR))
writel(regval, priv->baseaddr + DDR_QOS_IRQ_STAT_OFST);
- else
- enable_intr(priv);
return IRQ_HANDLED;
}
@@ -1324,11 +1347,9 @@ static int mc_probe(struct platform_device *pdev)
struct synps_edac_priv *priv;
struct mem_ctl_info *mci;
void __iomem *baseaddr;
- struct resource *res;
int rc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- baseaddr = devm_ioremap_resource(&pdev->dev, res);
+ baseaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(baseaddr))
return PTR_ERR(baseaddr);
@@ -1359,6 +1380,7 @@ static int mc_probe(struct platform_device *pdev)
priv = mci->pvt_info;
priv->baseaddr = baseaddr;
priv->p_data = p_data;
+ spin_lock_init(&priv->reglock);
mc_init(mci, pdev);
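
The synopsys_edac.c changes serialize every read-modify-write of the shared ECC_CLR_OFST register behind the new reglock spinlock, so the interrupt path and the enable/disable paths can no longer interleave their accesses. A minimal sketch of that pattern, using placeholder names and a placeholder offset rather than the driver's own:

#include <linux/io.h>
#include <linux/spinlock.h>

struct example_priv {
	void __iomem *baseaddr;
	spinlock_t reglock;
};

#define EXAMPLE_ECC_CLR_OFST	0x0	/* placeholder register offset */

/* Read-modify-write the shared clear register under the lock. */
static void example_set_clr_bits(struct example_priv *priv, u32 bits)
{
	unsigned long flags;
	u32 regval;

	spin_lock_irqsave(&priv->reglock, flags);

	regval = readl(priv->baseaddr + EXAMPLE_ECC_CLR_OFST);
	writel(regval | bits, priv->baseaddr + EXAMPLE_ECC_CLR_OFST);

	spin_unlock_irqrestore(&priv->reglock, flags);
}

spin_lock_irqsave() is used rather than a plain spin_lock() because one of the writers runs in interrupt context.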
diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c
index 4d5b68b6be..a556d23e82 100644
--- a/drivers/edac/versal_edac.c
+++ b/drivers/edac/versal_edac.c
@@ -42,8 +42,11 @@
#define ECCW0_FLIP_CTRL 0x109C
#define ECCW0_FLIP0_OFFSET 0x10A0
+#define ECCW0_FLIP0_BITS 31
+#define ECCW0_FLIP1_OFFSET 0x10A4
#define ECCW1_FLIP_CTRL 0x10AC
#define ECCW1_FLIP0_OFFSET 0x10B0
+#define ECCW1_FLIP1_OFFSET 0x10B4
#define ECCR0_CERR_STAT_OFFSET 0x10BC
#define ECCR0_CE_ADDR_LO_OFFSET 0x10C0
#define ECCR0_CE_ADDR_HI_OFFSET 0x10C4
@@ -116,9 +119,6 @@
#define XDDR_BUS_WIDTH_32 1
#define XDDR_BUS_WIDTH_16 2
-#define ECC_CEPOISON_MASK 0x1
-#define ECC_UEPOISON_MASK 0x3
-
#define XDDR_MAX_ROW_CNT 18
#define XDDR_MAX_COL_CNT 10
#define XDDR_MAX_RANK_CNT 2
@@ -133,6 +133,7 @@
* https://docs.xilinx.com/r/en-US/am012-versal-register-reference/PCSR_LOCK-XRAM_SLCR-Register
*/
#define PCSR_UNLOCK_VAL 0xF9E8D7C6
+#define PCSR_LOCK_VAL 1
#define XDDR_ERR_TYPE_CE 0
#define XDDR_ERR_TYPE_UE 1
@@ -142,6 +143,7 @@
#define XILINX_DRAM_SIZE_12G 3
#define XILINX_DRAM_SIZE_16G 4
#define XILINX_DRAM_SIZE_32G 5
+#define NUM_UE_BITPOS 2
/**
* struct ecc_error_info - ECC error log information.
@@ -479,7 +481,7 @@ static void err_callback(const u32 *payload, void *data)
writel(regval, priv->ddrmc_baseaddr + XDDR_ISR_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
edac_dbg(3, "Total error count CE %d UE %d\n",
priv->ce_cnt, priv->ue_cnt);
}
@@ -650,7 +652,7 @@ static void enable_intr(struct edac_priv *priv)
writel(XDDR_IRQ_UE_MASK,
priv->ddrmc_baseaddr + XDDR_IRQ1_EN_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
static void disable_intr(struct edac_priv *priv)
@@ -663,7 +665,7 @@ static void disable_intr(struct edac_priv *priv)
priv->ddrmc_baseaddr + XDDR_IRQ_DIS_OFFSET);
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
}
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
@@ -734,38 +736,63 @@ static void poison_setup(struct edac_priv *priv)
writel(regval, priv->ddrmc_noc_baseaddr + XDDR_NOC_REG_ADEC15_OFFSET);
}
-static ssize_t xddr_inject_data_poison_store(struct mem_ctl_info *mci,
- const char __user *data)
+static void xddr_inject_data_ce_store(struct mem_ctl_info *mci, u8 ce_bitpos)
{
+ u32 ecc0_flip0, ecc1_flip0, ecc0_flip1, ecc1_flip1;
struct edac_priv *priv = mci->pvt_info;
- writel(0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
- writel(0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
-
- if (strncmp(data, "CE", 2) == 0) {
- writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW0_FLIP0_OFFSET);
- writel(ECC_CEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW1_FLIP0_OFFSET);
+ if (ce_bitpos < ECCW0_FLIP0_BITS) {
+ ecc0_flip0 = BIT(ce_bitpos);
+ ecc1_flip0 = BIT(ce_bitpos);
+ ecc0_flip1 = 0;
+ ecc1_flip1 = 0;
} else {
- writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW0_FLIP0_OFFSET);
- writel(ECC_UEPOISON_MASK, priv->ddrmc_baseaddr +
- ECCW1_FLIP0_OFFSET);
+ ce_bitpos = ce_bitpos - ECCW0_FLIP0_BITS;
+ ecc0_flip1 = BIT(ce_bitpos);
+ ecc1_flip1 = BIT(ce_bitpos);
+ ecc0_flip0 = 0;
+ ecc1_flip0 = 0;
}
- /* Lock the PCSR registers */
- writel(1, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
-
- return 0;
+ writel(ecc0_flip0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+ writel(ecc1_flip0, priv->ddrmc_baseaddr + ECCW1_FLIP0_OFFSET);
+ writel(ecc0_flip1, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
+ writel(ecc1_flip1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
}
-static ssize_t inject_data_poison_store(struct file *file, const char __user *data,
- size_t count, loff_t *ppos)
+/*
+ * To inject a correctable error, the following steps are needed:
+ *
+ * - Write the correctable error bit position value:
+ * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ce
+ *
+ * poison_setup() derives the row, column, bank, group and rank and
+ * writes to the ADEC registers based on the address given by the user.
+ *
+ * The ADEC12 and ADEC13 are mask registers; write 0 to make sure default
+ * configuration is there and no addresses are masked.
+ *
+ * The row, column, bank, group and rank registers are written to the
+ * match ADEC bit to generate errors at the particular address. ADEC14
+ * and ADEC15 have the match bits.
+ *
+ * xddr_inject_data_ce_store() updates the ECC FLIP registers with the
+ * bits to be corrupted based on the bit position given by the user.
+ *
+ * Upon doing a read to the address the errors are injected.
+ */
+static ssize_t inject_data_ce_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
{
struct device *dev = file->private_data;
struct mem_ctl_info *mci = to_mci(dev);
struct edac_priv *priv = mci->pvt_info;
+ u8 ce_bitpos;
+ int ret;
+
+ ret = kstrtou8_from_user(data, count, 0, &ce_bitpos);
+ if (ret)
+ return ret;
/* Unlock the PCSR registers */
writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
@@ -773,17 +800,113 @@ static ssize_t inject_data_poison_store(struct file *file, const char __user *da
poison_setup(priv);
+ xddr_inject_data_ce_store(mci, ce_bitpos);
+ ret = count;
+
/* Lock the PCSR registers */
- writel(1, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+
+ return ret;
+}
+
+static const struct file_operations xddr_inject_ce_fops = {
+ .open = simple_open,
+ .write = inject_data_ce_store,
+ .llseek = generic_file_llseek,
+};
+
+static void xddr_inject_data_ue_store(struct mem_ctl_info *mci, u32 val0, u32 val1)
+{
+ struct edac_priv *priv = mci->pvt_info;
+
+ writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP0_OFFSET);
+ writel(val0, priv->ddrmc_baseaddr + ECCW0_FLIP1_OFFSET);
+ writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
+ writel(val1, priv->ddrmc_baseaddr + ECCW1_FLIP1_OFFSET);
+}
+
+/*
+ * To inject an uncorrectable error, the following steps are needed:
+ * echo <bit_pos val> > /sys/kernel/debug/edac/<controller instance>/inject_ue
+ *
+ * poison_setup() derives the row, column, bank, group and rank and
+ * writes to the ADEC registers based on the address given by the user.
+ *
+ * The ADEC12 and ADEC13 are mask registers; write 0 so that none of the
+ * addresses are masked. The row, column, bank, group and rank registers
+ * are written to the match ADEC bit to generate errors at the
+ * particular address. ADEC14 and ADEC15 have the match bits.
+ *
+ * xddr_inject_data_ue_store() updates the ECC FLIP registers with the
+ * bits to be corrupted based on the bit position given by the user. For
+ * uncorrectable errors
+ * 2 bit errors are injected.
+ *
+ * Upon doing a read to the address the errors are injected.
+ */
+static ssize_t inject_data_ue_store(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct device *dev = file->private_data;
+ struct mem_ctl_info *mci = to_mci(dev);
+ struct edac_priv *priv = mci->pvt_info;
+ char buf[6], *pbuf, *token[2];
+ u32 val0 = 0, val1 = 0;
+ u8 len, ue0, ue1;
+ int i, ret;
+
+ len = min_t(size_t, count, sizeof(buf));
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pbuf = &buf[0];
+ for (i = 0; i < NUM_UE_BITPOS; i++)
+ token[i] = strsep(&pbuf, ",");
- xddr_inject_data_poison_store(mci, data);
+ if (!token[0] || !token[1])
+ return -EFAULT;
+
+ ret = kstrtou8(token[0], 0, &ue0);
+ if (ret)
+ return ret;
+
+ ret = kstrtou8(token[1], 0, &ue1);
+ if (ret)
+ return ret;
+
+ if (ue0 < ECCW0_FLIP0_BITS) {
+ val0 = BIT(ue0);
+ } else {
+ ue0 = ue0 - ECCW0_FLIP0_BITS;
+ val1 = BIT(ue0);
+ }
+
+ if (ue1 < ECCW0_FLIP0_BITS) {
+ val0 |= BIT(ue1);
+ } else {
+ ue1 = ue1 - ECCW0_FLIP0_BITS;
+ val1 |= BIT(ue1);
+ }
+ /* Unlock the PCSR registers */
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_UNLOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+
+ poison_setup(priv);
+
+ xddr_inject_data_ue_store(mci, val0, val1);
+
+ /* Lock the PCSR registers */
+ writel(PCSR_LOCK_VAL, priv->ddrmc_noc_baseaddr + XDDR_PCSR_OFFSET);
+ writel(PCSR_LOCK_VAL, priv->ddrmc_baseaddr + XDDR_PCSR_OFFSET);
return count;
}
-static const struct file_operations xddr_inject_enable_fops = {
+static const struct file_operations xddr_inject_ue_fops = {
.open = simple_open,
- .write = inject_data_poison_store,
+ .write = inject_data_ue_store,
.llseek = generic_file_llseek,
};
@@ -795,8 +918,17 @@ static void create_debugfs_attributes(struct mem_ctl_info *mci)
if (!priv->debugfs)
return;
- edac_debugfs_create_file("inject_error", 0200, priv->debugfs,
- &mci->dev, &xddr_inject_enable_fops);
+ if (!edac_debugfs_create_file("inject_ce", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_ce_fops)) {
+ debugfs_remove_recursive(priv->debugfs);
+ return;
+ }
+
+ if (!edac_debugfs_create_file("inject_ue", 0200, priv->debugfs,
+ &mci->dev, &xddr_inject_ue_fops)) {
+ debugfs_remove_recursive(priv->debugfs);
+ return;
+ }
debugfs_create_x64("address", 0600, priv->debugfs,
&priv->err_inject_addr);
mci->debugfs = priv->debugfs;
@@ -1030,7 +1162,7 @@ free_edac_mc:
return rc;
}
-static int mc_remove(struct platform_device *pdev)
+static void mc_remove(struct platform_device *pdev)
{
struct mem_ctl_info *mci = platform_get_drvdata(pdev);
struct edac_priv *priv = mci->pvt_info;
@@ -1046,8 +1178,6 @@ static int mc_remove(struct platform_device *pdev)
XPM_EVENT_ERROR_MASK_DDRMC_NCR, err_callback, mci);
edac_mc_del_mc(&pdev->dev);
edac_mc_free(mci);
-
- return 0;
}
static struct platform_driver xilinx_ddr_edac_mc_driver = {
@@ -1056,7 +1186,7 @@ static struct platform_driver xilinx_ddr_edac_mc_driver = {
.of_match_table = xlnx_edac_match,
},
.probe = mc_probe,
- .remove = mc_remove,
+ .remove_new = mc_remove,
};
module_platform_driver(xilinx_ddr_edac_mc_driver);
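
The versal_edac.c rework splits the old inject_error debugfs node into separate inject_ce and inject_ue files. inject_ce takes a single bit position parsed with kstrtou8_from_user(); inject_ue takes two comma-separated bit positions split with strsep() and flips two bits. A minimal sketch of the write-handler shape, with an illustrative example_ prefix rather than the driver's own names:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* Parse a single bit position written by the user and consume the write. */
static ssize_t example_inject_write(struct file *file, const char __user *data,
				    size_t count, loff_t *ppos)
{
	u8 bitpos;
	int ret;

	ret = kstrtou8_from_user(data, count, 0, &bitpos);
	if (ret)
		return ret;

	/* ... unlock PCSR, program the ADEC/FLIP registers, lock PCSR ... */

	return count;
}

static const struct file_operations example_inject_fops = {
	.open	= simple_open,
	.write	= example_inject_write,
	.llseek	= generic_file_llseek,
};

In use, the file would be driven the way the in-diff comments describe, e.g. writing a bit position to inject_ce and two comma-separated positions to inject_ue, with the error reported on the next read of the poisoned address.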