author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:57 +0000
commit     dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree       c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/cxl
parent     Adding debian version 6.6.15-2. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cxl')
 drivers/cxl/acpi.c        |  49
 drivers/cxl/core/core.h   |   1
 drivers/cxl/core/hdm.c    |  48
 drivers/cxl/core/mbox.c   |   5
 drivers/cxl/core/pci.c    | 321
 drivers/cxl/core/port.c   |  94
 drivers/cxl/core/region.c |   2
 drivers/cxl/core/regs.c   |  47
 drivers/cxl/cxl.h         |  33
 drivers/cxl/cxlmem.h      |   4
 drivers/cxl/mem.c         |   7
 drivers/cxl/pci.c         |  29
 drivers/cxl/port.c        |   3
 13 files changed, 502 insertions(+), 141 deletions(-)
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 40d055560e..b3cec89126 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -194,31 +194,27 @@ struct cxl_cfmws_context {
int id;
};
-static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
- const unsigned long end)
+static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
+ struct cxl_cfmws_context *ctx)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
- struct cxl_cfmws_context *ctx = arg;
struct cxl_port *root_port = ctx->root_port;
struct resource *cxl_res = ctx->cxl_res;
struct cxl_cxims_context cxims_ctx;
struct cxl_root_decoder *cxlrd;
struct device *dev = ctx->dev;
- struct acpi_cedt_cfmws *cfmws;
cxl_calc_hb_fn cxl_calc_hb;
struct cxl_decoder *cxld;
unsigned int ways, i, ig;
struct resource *res;
int rc;
- cfmws = (struct acpi_cedt_cfmws *) header;
-
rc = cxl_acpi_cfmws_verify(dev, cfmws);
if (rc) {
dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
- return 0;
+ return rc;
}
rc = eiw_to_ways(cfmws->interleave_ways, &ways);
@@ -254,7 +250,7 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
if (IS_ERR(cxlrd))
- return 0;
+ return PTR_ERR(cxlrd);
cxld = &cxlrd->cxlsd.cxld;
cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
@@ -289,22 +285,16 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
}
}
}
+
+ cxlrd->qos_class = cfmws->qtg_id;
+
rc = cxl_decoder_add(cxld, target_map);
err_xormap:
if (rc)
put_device(&cxld->dev);
else
rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decode range: %pr", res);
- return rc;
- }
- dev_dbg(dev, "add: %s node: %d range [%#llx - %#llx]\n",
- dev_name(&cxld->dev),
- phys_to_target_node(cxld->hpa_range.start),
- cxld->hpa_range.start, cxld->hpa_range.end);
-
- return 0;
+ return rc;
err_insert:
kfree(res->name);
@@ -313,6 +303,29 @@ err_name:
return -ENOMEM;
}
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
+ struct cxl_cfmws_context *ctx = arg;
+ struct device *dev = ctx->dev;
+ int rc;
+
+ rc = __cxl_parse_cfmws(cfmws, ctx);
+ if (rc)
+ dev_err(dev,
+ "Failed to add decode range: [%#llx - %#llx] (%d)\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1, rc);
+ else
+ dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
+ phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+
+ /* never fail cxl_acpi load for a single window failure */
+ return 0;
+}
+
__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
struct device *dev)
{
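
Note on the acpi.c refactor above: it splits CFMWS handling in two. __cxl_parse_cfmws() now returns real error codes, while the thin cxl_parse_cfmws() subtable callback logs the result and always returns 0, so a single malformed window neither passes silently nor aborts the whole CEDT walk. A minimal sketch of that pattern, with hypothetical parse_one()/parse_cb()/entry_valid() names standing in for the CXL specifics:

	/* inner helper: report failure honestly to the caller */
	static int parse_one(struct entry *e)
	{
		if (!entry_valid(e))
			return -EINVAL;
		return 0;
	}

	/* table-walk callback: log per-entry failures, keep iterating */
	static int parse_cb(struct entry *e, void *arg)
	{
		int rc = parse_one(e);

		if (rc)
			pr_err("entry rejected (%d)\n", rc);
		return 0;	/* never fail the walk for one bad entry */
	}
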
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 8e5f3d8431..86d7ba2323 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -73,6 +73,7 @@ struct cxl_rcrb_info;
resource_size_t __rcrb_to_component(struct device *dev,
struct cxl_rcrb_info *ri,
enum cxl_rcrb which);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
extern struct rw_semaphore cxl_dpa_rwsem;
extern struct rw_semaphore cxl_region_rwsem;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 90664659d5..7d97790b89 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -81,26 +81,6 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
cxlhdm->interleave_mask |= GENMASK(14, 12);
}
-static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
- struct cxl_component_regs *regs)
-{
- struct cxl_register_map map = {
- .host = &port->dev,
- .resource = port->component_reg_phys,
- .base = crb,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
- };
-
- cxl_probe_component_regs(&port->dev, crb, &map.component_map);
- if (!map.component_map.hdm_decoder.valid) {
- dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
- /* unique error code to indicate no HDM decoder capability */
- return -ENODEV;
- }
-
- return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
-}
-
static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
{
struct cxl_hdm *cxlhdm;
@@ -153,9 +133,9 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
struct cxl_endpoint_dvsec_info *info)
{
+ struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
struct cxl_hdm *cxlhdm;
- void __iomem *crb;
int rc;
cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
@@ -164,19 +144,29 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
cxlhdm->port = port;
dev_set_drvdata(dev, cxlhdm);
- crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
- if (!crb && info && info->mem_enabled) {
+ /* Memory devices can configure device HDM using DVSEC range regs. */
+ if (reg_map->resource == CXL_RESOURCE_NONE) {
+ if (!info || !info->mem_enabled) {
+ dev_err(dev, "No component registers mapped\n");
+ return ERR_PTR(-ENXIO);
+ }
+
cxlhdm->decoder_count = info->ranges;
return cxlhdm;
- } else if (!crb) {
- dev_err(dev, "No component registers mapped\n");
- return ERR_PTR(-ENXIO);
}
- rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
- iounmap(crb);
- if (rc)
+ if (!reg_map->component_map.hdm_decoder.valid) {
+ dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+ /* unique error code to indicate no HDM decoder capability */
+ return ERR_PTR(-ENODEV);
+ }
+
+ rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
+ BIT(CXL_CM_CAP_CAP_ID_HDM));
+ if (rc) {
+ dev_err(dev, "Failed to map HDM capability.\n");
return ERR_PTR(rc);
+ }
parse_hdm_decoder_caps(cxlhdm);
if (cxlhdm->decoder_count == 0) {
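
In hdm.c, devm_cxl_setup_hdm() stops ioremap()ing the CRB itself and instead consumes the register enumeration cached in port->reg_map, keeping -ENODEV as the sentinel for "no HDM decoder capability" as distinct from a mapping failure. A hedged sketch of how a caller can key off that sentinel; the fallback branch is illustrative, not lifted from the driver:

	cxlhdm = devm_cxl_setup_hdm(port, info);
	if (IS_ERR(cxlhdm)) {
		if (PTR_ERR(cxlhdm) == -ENODEV) {
			/* capability absent, not an I/O error: a caller
			 * may fall back to DVSEC range registers here */
		}
		return PTR_ERR(cxlhdm);
	}
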
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index b12986b968..36270dcfb4 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1249,8 +1249,7 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
return 0;
}
- cxlds->dpa_res =
- (struct resource)DEFINE_RES_MEM(0, mds->total_bytes);
+ cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
if (mds->partition_align_bytes == 0) {
rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
@@ -1402,6 +1401,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mutex_init(&mds->mbox_mutex);
mutex_init(&mds->event.log_lock);
mds->cxlds.dev = dev;
+ mds->cxlds.reg_map.host = dev;
+ mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
return mds;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c7a7887ebd..deae3289de 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-doe.h>
+#include <linux/aer.h>
#include <cxlpci.h>
#include <cxlmem.h>
#include <cxl.h>
@@ -475,9 +476,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
allowed++;
}
- if (!allowed) {
- cxl_set_mem_enable(cxlds, 0);
- info->mem_enabled = 0;
+ if (!allowed && info->mem_enabled) {
+ dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+ return -ENXIO;
}
/*
@@ -595,6 +596,16 @@ static int cxl_cdat_read_table(struct device *dev,
return 0;
}
+static unsigned char cdat_checksum(void *buf, size_t size)
+{
+ unsigned char sum, *data = buf;
+ size_t i;
+
+ for (sum = 0, i = 0; i < size; i++)
+ sum += data[i];
+ return sum;
+}
+
/**
* read_cdat_data - Read the CDAT data on this port
* @port: Port to read data from
@@ -603,18 +614,30 @@ static int cxl_cdat_read_table(struct device *dev,
*/
void read_cdat_data(struct cxl_port *port)
{
- struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct device *host = cxlmd->dev.parent;
+ struct device *uport = port->uport_dev;
struct device *dev = &port->dev;
struct pci_doe_mb *cdat_doe;
+ struct pci_dev *pdev = NULL;
+ struct cxl_memdev *cxlmd;
size_t cdat_length;
- void *cdat_table;
+ void *cdat_table, *cdat_buf;
int rc;
- if (!dev_is_pci(host))
+ if (is_cxl_memdev(uport)) {
+ struct device *host;
+
+ cxlmd = to_cxl_memdev(uport);
+ host = cxlmd->dev.parent;
+ if (dev_is_pci(host))
+ pdev = to_pci_dev(host);
+ } else if (dev_is_pci(uport)) {
+ pdev = to_pci_dev(uport);
+ }
+
+ if (!pdev)
return;
- cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
- PCI_DVSEC_VENDOR_ID_CXL,
+
+ cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
CXL_DOE_PROTOCOL_TABLE_ACCESS);
if (!cdat_doe) {
dev_dbg(dev, "No CDAT mailbox\n");
@@ -628,50 +651,59 @@ void read_cdat_data(struct cxl_port *port)
return;
}
- cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
- GFP_KERNEL);
- if (!cdat_table)
+ cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
+ if (!cdat_buf)
return;
- rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
- if (rc) {
- /* Don't leave table data allocated on error */
- devm_kfree(dev, cdat_table);
- dev_err(dev, "CDAT data read error\n");
- return;
- }
+ rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
+ if (rc)
+ goto err;
+
+ cdat_table = cdat_buf + sizeof(__le32);
+ if (cdat_checksum(cdat_table, cdat_length))
+ goto err;
- port->cdat.table = cdat_table + sizeof(__le32);
+ port->cdat.table = cdat_table;
port->cdat.length = cdat_length;
+ return;
+
+err:
+ /* Don't leave table data allocated on error */
+ devm_kfree(dev, cdat_buf);
+ dev_err(dev, "Failed to read/validate CDAT.\n");
}
EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
-void cxl_cor_error_detected(struct pci_dev *pdev)
+static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
+ void __iomem *ras_base)
{
- struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
void __iomem *addr;
u32 status;
- if (!cxlds->regs.ras)
+ if (!ras_base)
return;
- addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+ addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
status = readl(addr);
if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
}
}
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
+{
+ return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
+}
/* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+static void header_log_copy(void __iomem *ras_base, u32 *log)
{
void __iomem *addr;
u32 *log_addr;
int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
- addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+ addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
log_addr = log;
for (i = 0; i < log_u32_size; i++) {
@@ -685,17 +717,18 @@ static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
* Log the state of the RAS status registers and prepare them to log the
* next error status. Return 1 if reset needed.
*/
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
+ void __iomem *ras_base)
{
u32 hl[CXL_HEADERLOG_SIZE_U32];
void __iomem *addr;
u32 status;
u32 fe;
- if (!cxlds->regs.ras)
+ if (!ras_base)
return false;
- addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+ addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
status = readl(addr);
if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
return false;
@@ -703,7 +736,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
/* If multiple errors, log header points to first error from ctrl reg */
if (hweight32(status) > 1) {
void __iomem *rcc_addr =
- cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+ ras_base + CXL_RAS_CAP_CONTROL_OFFSET;
fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
readl(rcc_addr)));
@@ -711,13 +744,211 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
fe = status;
}
- header_log_copy(cxlds, hl);
+ header_log_copy(ras_base, hl);
trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
return true;
}
+static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
+{
+ return __cxl_handle_ras(cxlds, cxlds->regs.ras);
+}
+
+#ifdef CONFIG_PCIEAER_CXL
+
+static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
+{
+ struct cxl_rcrb_info *ri = &dport->rcrb;
+ void __iomem *dport_aer = NULL;
+ resource_size_t aer_phys;
+ struct device *host;
+
+ if (dport->rch && ri->aer_cap) {
+ host = dport->reg_map.host;
+ aer_phys = ri->aer_cap + ri->base;
+ dport_aer = devm_cxl_iomap_block(host, aer_phys,
+ sizeof(struct aer_capability_regs));
+ }
+
+ dport->regs.dport_aer = dport_aer;
+}
+
+static void cxl_dport_map_regs(struct cxl_dport *dport)
+{
+ struct cxl_register_map *map = &dport->reg_map;
+ struct device *dev = dport->dport_dev;
+
+ if (!map->component_map.ras.valid)
+ dev_dbg(dev, "RAS registers not found\n");
+ else if (cxl_map_component_regs(map, &dport->regs.component,
+ BIT(CXL_CM_CAP_CAP_ID_RAS)))
+ dev_dbg(dev, "Failed to map RAS capability.\n");
+
+ if (dport->rch)
+ cxl_dport_map_rch_aer(dport);
+}
+
+static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
+{
+ void __iomem *aer_base = dport->regs.dport_aer;
+ struct pci_host_bridge *bridge;
+ u32 aer_cmd_mask, aer_cmd;
+
+ if (!aer_base)
+ return;
+
+ bridge = to_pci_host_bridge(dport->dport_dev);
+
+ /*
+ * Disable RCH root port command interrupts.
+ * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
+ *
+ * This sequence may not be necessary. CXL spec states disabling
+ * the root cmd register's interrupts is required. But, PCI spec
+ * shows these are disabled by default on reset.
+ */
+ if (bridge->native_aer) {
+ aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+ PCI_ERR_ROOT_CMD_NONFATAL_EN |
+ PCI_ERR_ROOT_CMD_FATAL_EN);
+ aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+ aer_cmd &= ~aer_cmd_mask;
+ writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
+ }
+}
+
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+{
+ struct device *dport_dev = dport->dport_dev;
+ struct pci_host_bridge *host_bridge;
+
+ host_bridge = to_pci_host_bridge(dport_dev);
+ if (host_bridge->native_aer)
+ dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+
+ dport->reg_map.host = host;
+ cxl_dport_map_regs(dport);
+
+ if (dport->rch)
+ cxl_disable_rch_root_ints(dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+
+static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
+ struct cxl_dport *dport)
+{
+ return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
+}
+
+static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
+ struct cxl_dport *dport)
+{
+ return __cxl_handle_ras(cxlds, dport->regs.ras);
+}
+
+/*
+ * Copy the AER capability registers using 32 bit read accesses.
+ * This is necessary because RCRB AER capability is MMIO mapped. Clear the
+ * status after copying.
+ *
+ * @aer_base: base address of AER capability block in RCRB
+ * @aer_regs: destination for copying AER capability
+ */
+static bool cxl_rch_get_aer_info(void __iomem *aer_base,
+ struct aer_capability_regs *aer_regs)
+{
+ int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
+ u32 *aer_regs_buf = (u32 *)aer_regs;
+ int n;
+
+ if (!aer_base)
+ return false;
+
+ /* Use readl() to guarantee 32-bit accesses */
+ for (n = 0; n < read_cnt; n++)
+ aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
+
+ writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
+ writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
+
+ return true;
+}
+
+/* Get AER severity. Return false if there is no error. */
+static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
+ int *severity)
+{
+ if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
+ if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
+ *severity = AER_FATAL;
+ else
+ *severity = AER_NONFATAL;
+ return true;
+ }
+
+ if (aer_regs->cor_status & ~aer_regs->cor_mask) {
+ *severity = AER_CORRECTABLE;
+ return true;
+ }
+
+ return false;
+}
+
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
+{
+ struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+ struct aer_capability_regs aer_regs;
+ struct cxl_dport *dport;
+ struct cxl_port *port;
+ int severity;
+
+ port = cxl_pci_find_port(pdev, &dport);
+ if (!port)
+ return;
+
+ put_device(&port->dev);
+
+ if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
+ return;
+
+ if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
+ return;
+
+ pci_print_aer(pdev, severity, &aer_regs);
+
+ if (severity == AER_CORRECTABLE)
+ cxl_handle_rdport_cor_ras(cxlds, dport);
+ else
+ cxl_handle_rdport_ras(cxlds, dport);
+}
+
+#else
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+#endif
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+ struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+ struct device *dev = &cxlds->cxlmd->dev;
+
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return;
+ }
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+
+ cxl_handle_endpoint_cor_ras(cxlds);
+ }
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
@@ -726,13 +957,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
struct device *dev = &cxlmd->dev;
bool ue;
- /*
- * A frozen channel indicates an impending reset which is fatal to
- * CXL.mem operation, and will likely crash the system. On the off
- * chance the situation is recoverable dump the status of the RAS
- * capability registers and bounce the active state of the memdev.
- */
- ue = cxl_report_and_clear(cxlds);
+ scoped_guard(device, dev) {
+ if (!dev->driver) {
+ dev_warn(&pdev->dev,
+ "%s: memdev disabled, abort error handling\n",
+ dev_name(dev));
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (cxlds->rcd)
+ cxl_handle_rdport_errors(cxlds);
+ /*
+ * A frozen channel indicates an impending reset which is fatal to
+ * CXL.mem operation, and will likely crash the system. On the off
+ * chance the situation is recoverable dump the status of the RAS
+ * capability registers and bounce the active state of the memdev.
+ */
+ ue = cxl_handle_endpoint_ras(cxlds);
+ }
+
switch (state) {
case pci_channel_io_normal:
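
Two things stand out in core/pci.c: the RAS handlers are parameterized on an explicit ras_base so the same logic serves both endpoint registers and the RCH downstream-port copies, and read_cdat_data() now validates the table with an ACPI-style checksum, where the stored checksum byte makes all bytes of the table sum to zero modulo 256, so any nonzero cdat_checksum() result rejects the read. A toy demonstration of that invariant (the checksum byte position here is arbitrary, not real CDAT layout):

	unsigned char tbl[8] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00 };

	tbl[7] = -cdat_checksum(tbl, 7);	/* fix up our checksum byte */
	/* cdat_checksum(tbl, 8) == 0: the table would now be accepted */
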
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index c67cc8c9d5..6b386771cc 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -287,6 +287,15 @@ static ssize_t interleave_ways_show(struct device *dev,
static DEVICE_ATTR_RO(interleave_ways);
+static ssize_t qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+ return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
+}
+static DEVICE_ATTR_RO(qos_class);
+
static struct attribute *cxl_decoder_base_attrs[] = {
&dev_attr_start.attr,
&dev_attr_size.attr,
@@ -306,6 +315,7 @@ static struct attribute *cxl_decoder_root_attrs[] = {
&dev_attr_cap_type2.attr,
&dev_attr_cap_type3.attr,
&dev_attr_target_list.attr,
+ &dev_attr_qos_class.attr,
SET_CXL_REGION_ATTR(create_pmem_region)
SET_CXL_REGION_ATTR(create_ram_region)
SET_CXL_REGION_ATTR(delete_region)
@@ -530,8 +540,33 @@ static void cxl_port_release(struct device *dev)
kfree(port);
}
+static ssize_t decoders_committed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_port *port = to_cxl_port(dev);
+ int rc;
+
+ down_read(&cxl_region_rwsem);
+ rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
+ up_read(&cxl_region_rwsem);
+
+ return rc;
+}
+
+static DEVICE_ATTR_RO(decoders_committed);
+
+static struct attribute *cxl_port_attrs[] = {
+ &dev_attr_decoders_committed.attr,
+ NULL,
+};
+
+static struct attribute_group cxl_port_attribute_group = {
+ .attrs = cxl_port_attrs,
+};
+
static const struct attribute_group *cxl_port_attribute_groups[] = {
&cxl_base_attribute_group,
+ &cxl_port_attribute_group,
NULL,
};
@@ -628,7 +663,6 @@ static int devm_cxl_link_parent_dport(struct device *host,
static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
- resource_size_t component_reg_phys,
struct cxl_dport *parent_dport)
{
struct cxl_port *port;
@@ -679,7 +713,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
} else
dev->parent = uport_dev;
- port->component_reg_phys = component_reg_phys;
ida_init(&port->decoder_ida);
port->hdm_end = -1;
port->commit_end = -1;
@@ -703,16 +736,18 @@ err:
static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
resource_size_t component_reg_phys)
{
- if (component_reg_phys == CXL_RESOURCE_NONE)
- return 0;
-
*map = (struct cxl_register_map) {
.host = host,
- .reg_type = CXL_REGLOC_RBI_COMPONENT,
+ .reg_type = CXL_REGLOC_RBI_EMPTY,
.resource = component_reg_phys,
- .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
};
+ if (component_reg_phys == CXL_RESOURCE_NONE)
+ return 0;
+
+ map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+ map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
+
return cxl_setup_regs(map);
}
@@ -721,7 +756,7 @@ static int cxl_port_setup_regs(struct cxl_port *port,
{
if (dev_is_platform(port->uport_dev))
return 0;
- return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+ return cxl_setup_comp_regs(&port->dev, &port->reg_map,
component_reg_phys);
}
@@ -738,9 +773,9 @@ static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
* register probing, and fixup @host after the fact, since @host may be
* NULL.
*/
- rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+ rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
component_reg_phys);
- dport->comp_map.host = host;
+ dport->reg_map.host = host;
return rc;
}
@@ -753,21 +788,36 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
struct device *dev;
int rc;
- port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
+ port = cxl_port_alloc(uport_dev, parent_dport);
if (IS_ERR(port))
return port;
dev = &port->dev;
- if (is_cxl_memdev(uport_dev))
+ if (is_cxl_memdev(uport_dev)) {
+ struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
rc = dev_set_name(dev, "endpoint%d", port->id);
- else if (parent_dport)
+ if (rc)
+ goto err;
+
+ /*
+ * The endpoint driver already enumerated the component and RAS
+ * registers. Reuse that enumeration while prepping them to be
+ * mapped by the cxl_port driver.
+ */
+ port->reg_map = cxlds->reg_map;
+ port->reg_map.host = &port->dev;
+ } else if (parent_dport) {
rc = dev_set_name(dev, "port%d", port->id);
- else
- rc = dev_set_name(dev, "root%d", port->id);
- if (rc)
- goto err;
+ if (rc)
+ goto err;
- rc = cxl_port_setup_regs(port, component_reg_phys);
+ rc = cxl_port_setup_regs(port, component_reg_phys);
+ if (rc)
+ goto err;
+ } else
+ rc = dev_set_name(dev, "root%d", port->id);
if (rc)
goto err;
@@ -1500,7 +1550,11 @@ retry:
struct cxl_dport *dport;
struct cxl_port *port;
- if (!dport_dev)
+ /*
+ * The terminal "grandparent" in PCI is NULL and @platform_bus
+ * for platform devices
+ */
+ if (!dport_dev || dport_dev == &platform_bus)
return 0;
uport_dev = dport_dev->parent;
@@ -1719,6 +1773,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
}
atomic_set(&cxlrd->region_id, rc);
+ cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
return cxlrd;
}
EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -2090,3 +2145,4 @@ static void cxl_core_exit(void)
subsys_initcall(cxl_core_init);
module_exit(cxl_core_exit);
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CXL);
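
In core/port.c, cxl_setup_comp_regs() now always initializes the map, marking it CXL_REGLOC_RBI_EMPTY when no component registers exist, instead of leaving it untouched; endpoint ports likewise inherit the memdev's enumeration via cxlds->reg_map, re-hosted to the port device. A hedged sketch of the new contract from a caller's perspective:

	struct cxl_register_map map;
	int rc;

	rc = cxl_setup_comp_regs(host, &map, CXL_RESOURCE_NONE);
	if (rc == 0 && map.resource == CXL_RESOURCE_NONE) {
		/* the map is initialized but "empty": no component
		 * registers found, so consumers can opt for emulation */
	}
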
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 6ebd12f797..7bb656237f 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -123,7 +123,7 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
if (!cpu_cache_has_invalidate_memregion()) {
if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
- dev_warn_once(
+ dev_info_once(
&cxlr->dev,
"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
return 0;
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index e0fbe964f6..372786f809 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -216,16 +216,16 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
struct mapinfo *mi = &mapinfo[i];
- resource_size_t phys_addr;
+ resource_size_t addr;
resource_size_t length;
if (!mi->rmap->valid)
continue;
if (!test_bit(mi->rmap->id, &map_mask))
continue;
- phys_addr = map->resource + mi->rmap->offset;
+ addr = map->resource + mi->rmap->offset;
length = mi->rmap->size;
- *(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length);
+ *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
if (!*(mi->addr))
return -ENOMEM;
}
@@ -386,10 +386,9 @@ int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
}
EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
- struct cxl_register_map *map)
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = map->host;
resource_size_t phys_addr;
phys_addr = map->resource;
@@ -470,6 +469,42 @@ int cxl_setup_regs(struct cxl_register_map *map)
}
EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
+{
+ void __iomem *addr;
+ u16 offset = 0;
+ u32 cap_hdr;
+
+ if (WARN_ON_ONCE(rcrb == CXL_RESOURCE_NONE))
+ return 0;
+
+ if (!request_mem_region(rcrb, SZ_4K, dev_name(dev)))
+ return 0;
+
+ addr = ioremap(rcrb, SZ_4K);
+ if (!addr)
+ goto out;
+
+ cap_hdr = readl(addr + offset);
+ while (PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR) {
+ offset = PCI_EXT_CAP_NEXT(cap_hdr);
+
+ /* Offset 0 terminates capability list. */
+ if (!offset)
+ break;
+ cap_hdr = readl(addr + offset);
+ }
+
+ if (offset)
+ dev_dbg(dev, "found AER extended capability (0x%x)\n", offset);
+
+ iounmap(addr);
+out:
+ release_mem_region(rcrb, SZ_4K);
+
+ return offset;
+}
+
resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
enum cxl_rcrb which)
{
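
cxl_rcrb_to_aer() in regs.c walks a standard PCIe extended-capability chain, but over the MMIO-mapped RCRB: note the first readl() is at offset 0, since the RCRB lays the chain out from the start of its 4K block rather than at offset 0x100 as in ordinary config space, and a 'next' field of 0 cleanly terminates the search with a "not found" return of 0. A short usage sketch mirroring cxl_setup_parent_dport() above:

	resource_size_t aer_phys;
	u16 aer_cap;

	aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
	if (aer_cap)	/* offset is relative to the RCRB base */
		aer_phys = dport->rcrb.base + aer_cap;
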
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index de2c250c89..62fa96f856 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -221,6 +221,14 @@ struct cxl_regs {
struct_group_tagged(cxl_pmu_regs, pmu_regs,
void __iomem *pmu;
);
+
+ /*
+ * RCH downstream port specific RAS register
+ * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB
+ */
+ struct_group_tagged(cxl_rch_regs, rch_regs,
+ void __iomem *dport_aer;
+ );
};
struct cxl_reg_map {
@@ -278,8 +286,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
unsigned long map_mask);
int cxl_map_device_regs(const struct cxl_register_map *map,
struct cxl_device_regs *regs);
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
- struct cxl_register_map *map);
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
enum cxl_regloc_type;
int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
@@ -321,6 +328,7 @@ enum cxl_decoder_type {
*/
#define CXL_DECODER_MAX_INTERLEAVE 16
+#define CXL_QOS_CLASS_INVALID -1
/**
* struct cxl_decoder - Common CXL HDM Decoder Attributes
@@ -430,6 +438,7 @@ typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd,
* @calc_hb: which host bridge covers the n'th position by granularity
* @platform_data: platform specific configuration data
* @range_lock: sync region autodiscovery by address range
+ * @qos_class: QoS performance class cookie
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
@@ -438,6 +447,7 @@ struct cxl_root_decoder {
cxl_calc_hb_fn calc_hb;
void *platform_data;
struct mutex range_lock;
+ int qos_class;
struct cxl_switch_decoder cxlsd;
};
@@ -570,11 +580,10 @@ struct cxl_dax_region {
* @regions: cxl_region_ref instances, regions mapped by this port
* @parent_dport: dport that points to this port in the parent
* @decoder_ida: allocator for decoder ids
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
* @nr_dports: number of entries in @dports
* @hdm_end: track last allocated HDM decoder instance for allocation ordering
* @commit_end: cursor to track highest committed decoder for commit ordering
- * @component_reg_phys: component register capability base address (optional)
* @dead: last ep has been removed, force port re-creation
* @depth: How deep this port is relative to the root. depth 0 is the root.
* @cdat: Cached CDAT data
@@ -590,11 +599,10 @@ struct cxl_port {
struct xarray regions;
struct cxl_dport *parent_dport;
struct ida decoder_ida;
- struct cxl_register_map comp_map;
+ struct cxl_register_map reg_map;
int nr_dports;
int hdm_end;
int commit_end;
- resource_size_t component_reg_phys;
bool dead;
unsigned int depth;
struct cxl_cdat {
@@ -618,19 +626,21 @@ struct cxl_rcrb_info {
/**
* struct cxl_dport - CXL downstream port
* @dport_dev: PCI bridge or firmware device representing the downstream link
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
* @port_id: unique hardware identifier for dport in decoder target list
* @rcrb: Data about the Root Complex Register Block layout
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
+ * @regs: Dport parsed register blocks
*/
struct cxl_dport {
struct device *dport_dev;
- struct cxl_register_map comp_map;
+ struct cxl_register_map reg_map;
int port_id;
struct cxl_rcrb_info rcrb;
bool rch;
struct cxl_port *port;
+ struct cxl_regs regs;
};
/**
@@ -705,6 +715,13 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
struct device *dport_dev, int port_id,
resource_size_t rcrb);
+#ifdef CONFIG_PCIEAER_CXL
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
+#else
+static inline void cxl_setup_parent_dport(struct device *host,
+ struct cxl_dport *dport) { }
+#endif
+
struct cxl_decoder *to_cxl_decoder(struct device *dev);
struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 6933bc20e7..a2fcbca253 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -400,6 +400,7 @@ enum cxl_devtype {
*
* @dev: The device associated with this CXL state
* @cxlmd: The device representing the CXL.mem capabilities of @dev
+ * @reg_map: component and ras register mapping parameters
* @regs: Parsed register blocks
* @cxl_dvsec: Offset to the PCIe device DVSEC
* @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
@@ -407,13 +408,13 @@ enum cxl_devtype {
* @dpa_res: Overall DPA resource tree for the device
* @pmem_res: Active Persistent memory capacity configuration
* @ram_res: Active Volatile memory capacity configuration
- * @component_reg_phys: register base of component registers
* @serial: PCIe Device Serial Number
* @type: Generic Memory Class device or Vendor Specific Memory device
*/
struct cxl_dev_state {
struct device *dev;
struct cxl_memdev *cxlmd;
+ struct cxl_register_map reg_map;
struct cxl_regs regs;
int cxl_dvsec;
bool rcd;
@@ -421,7 +422,6 @@ struct cxl_dev_state {
struct resource dpa_res;
struct resource pmem_res;
struct resource ram_res;
- resource_size_t component_reg_phys;
u64 serial;
enum cxl_devtype type;
};
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 317c7548e4..e087febf9a 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -49,7 +49,6 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
struct cxl_dport *parent_dport)
{
struct cxl_port *parent_port = parent_dport->port;
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_port *endpoint, *iter, *down;
int rc;
@@ -65,8 +64,8 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
ep->next = down;
}
- endpoint = devm_cxl_add_port(host, &cxlmd->dev,
- cxlds->component_reg_phys,
+ /* Note: endpoint port component registers are derived from @cxlds */
+ endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
parent_dport);
if (IS_ERR(endpoint))
return PTR_ERR(endpoint);
@@ -158,6 +157,8 @@ static int cxl_mem_probe(struct device *dev)
else
endpoint_parent = &parent_port->dev;
+ cxl_setup_parent_dport(dev, dport);
+
device_lock(endpoint_parent);
if (!endpoint_parent->driver) {
dev_err(dev, "CXL port topology %s not enabled\n",
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 8bece1e2e2..0155fb66b5 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -85,25 +85,28 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
status & CXLMDEV_DEV_FATAL ? " fatal" : "", \
status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
+/*
+ * Threaded irq dev_id's must be globally unique. cxl_dev_id provides a unique
+ * wrapper object for each irq within the same cxlds.
+ */
struct cxl_dev_id {
struct cxl_dev_state *cxlds;
};
static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
- irq_handler_t handler, irq_handler_t thread_fn)
+ irq_handler_t thread_fn)
{
struct device *dev = cxlds->dev;
struct cxl_dev_id *dev_id;
- /* dev_id must be globally unique and must contain the cxlds */
dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
if (!dev_id)
return -ENOMEM;
dev_id->cxlds = cxlds;
- return devm_request_threaded_irq(dev, irq, handler, thread_fn,
- IRQF_SHARED | IRQF_ONESHOT,
- NULL, dev_id);
+ return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
+ IRQF_SHARED | IRQF_ONESHOT, NULL,
+ dev_id);
}
static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
@@ -445,7 +448,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
if (irq < 0)
return 0;
- if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
+ if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq))
return 0;
dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
@@ -643,7 +646,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
if (irq < 0)
return irq;
- return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
+ return cxl_request_irq(cxlds, irq, cxl_event_thread);
}
static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
@@ -824,16 +827,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* If the component registers can't be found, the cxl_pci driver may
* still be useful for management functions so don't return an error.
*/
- cxlds->component_reg_phys = CXL_RESOURCE_NONE;
- rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
+ &cxlds->reg_map);
if (rc)
dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
- else if (!map.component_map.ras.valid)
+ else if (!cxlds->reg_map.component_map.ras.valid)
dev_dbg(&pdev->dev, "RAS registers not found\n");
- cxlds->component_reg_phys = map.resource;
-
- rc = cxl_map_component_regs(&map, &cxlds->regs.component,
+ rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
BIT(CXL_CM_CAP_CAP_ID_RAS));
if (rc)
dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
@@ -894,7 +895,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
break;
}
- rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
+ rc = cxl_map_pmu_regs(&map, &pmu_regs);
if (rc) {
dev_dbg(&pdev->dev, "Could not map PMU regs\n");
break;
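
drivers/cxl/pci.c drops the always-NULL primary-handler argument from cxl_request_irq(): every interrupt here is thread-only, and devm_request_threaded_irq() with a NULL handler installs a default primary that simply wakes the thread. The relocated comment also explains the cxl_dev_id wrapper: on a shared line the IRQ core distinguishes registrations by dev_id, so each request needs a unique cookie even when all of them resolve to the same cxlds. A sketch with hypothetical fn1/fn2 thread handlers:

	struct cxl_dev_id *a, *b;

	a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
	b = devm_kzalloc(dev, sizeof(*b), GFP_KERNEL);
	if (!a || !b)
		return -ENOMEM;
	a->cxlds = b->cxlds = cxlds;

	/* same shared vector, two distinct dev_id cookies */
	devm_request_threaded_irq(dev, irq, NULL, fn1,
				  IRQF_SHARED | IRQF_ONESHOT, NULL, a);
	devm_request_threaded_irq(dev, irq, NULL, fn2,
				  IRQF_SHARED | IRQF_ONESHOT, NULL, b);
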
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 6240e05b95..47bc8e0b85 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -62,6 +62,9 @@ static int cxl_switch_port_probe(struct cxl_port *port)
struct cxl_hdm *cxlhdm;
int rc;
+ /* Cache the data early to ensure is_visible() works */
+ read_cdat_data(port);
+
rc = devm_cxl_port_enumerate_dports(port);
if (rc < 0)
return rc;
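
The final port.c hunk moves read_cdat_data() to the top of switch-port probe so that, per its comment, port->cdat is already populated by the time the port's sysfs attribute groups have their is_visible() callbacks evaluated. A hypothetical sketch of that dependency (names illustrative, not the driver's actual callback):

	static umode_t cdat_visible(struct kobject *kobj,
				    struct bin_attribute *a, int n)
	{
		struct cxl_port *port = to_cxl_port(kobj_to_dev(kobj));

		/* hide the CDAT blob unless read_cdat_data() succeeded */
		return port->cdat.table ? a->attr.mode : 0;
	}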