From 85c675d0d09a45a135bddd15d7b385f8758c32fb Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 18 May 2024 19:35:05 +0200
Subject: Adding upstream version 6.7.7.

Signed-off-by: Daniel Baumann
---
 drivers/cxl/core/core.h   |   1 +
 drivers/cxl/core/hdm.c    |  48 +++----
 drivers/cxl/core/mbox.c   |   5 +-
 drivers/cxl/core/pci.c    | 321 ++++++++++++++++++++++++++++++++++++++++------
 drivers/cxl/core/port.c   |  94 +++++++++++---
 drivers/cxl/core/region.c |   2 +-
 drivers/cxl/core/regs.c   |  47 ++++++-
 7 files changed, 422 insertions(+), 96 deletions(-)

(limited to 'drivers/cxl/core')

diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 8e5f3d8431..86d7ba2323 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -73,6 +73,7 @@ struct cxl_rcrb_info;
 resource_size_t __rcrb_to_component(struct device *dev,
 				    struct cxl_rcrb_info *ri,
 				    enum cxl_rcrb which);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
 
 extern struct rw_semaphore cxl_dpa_rwsem;
 extern struct rw_semaphore cxl_region_rwsem;
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 90664659d5..7d97790b89 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -81,26 +81,6 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
 		cxlhdm->interleave_mask |= GENMASK(14, 12);
 }
 
-static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
-				struct cxl_component_regs *regs)
-{
-	struct cxl_register_map map = {
-		.host = &port->dev,
-		.resource = port->component_reg_phys,
-		.base = crb,
-		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
-	};
-
-	cxl_probe_component_regs(&port->dev, crb, &map.component_map);
-	if (!map.component_map.hdm_decoder.valid) {
-		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
-		/* unique error code to indicate no HDM decoder capability */
-		return -ENODEV;
-	}
-
-	return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
-}
-
 static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 {
 	struct cxl_hdm *cxlhdm;
@@ -153,9 +133,9 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
 				   struct cxl_endpoint_dvsec_info *info)
 {
+	struct cxl_register_map *reg_map = &port->reg_map;
 	struct device *dev = &port->dev;
 	struct cxl_hdm *cxlhdm;
-	void __iomem *crb;
 	int rc;
 
 	cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
@@ -164,19 +144,29 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
 	cxlhdm->port = port;
 	dev_set_drvdata(dev, cxlhdm);
 
-	crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
-	if (!crb && info && info->mem_enabled) {
+	/* Memory devices can configure device HDM using DVSEC range regs. */
+	if (reg_map->resource == CXL_RESOURCE_NONE) {
+		if (!info || !info->mem_enabled) {
+			dev_err(dev, "No component registers mapped\n");
+			return ERR_PTR(-ENXIO);
+		}
+
 		cxlhdm->decoder_count = info->ranges;
 		return cxlhdm;
-	} else if (!crb) {
-		dev_err(dev, "No component registers mapped\n");
-		return ERR_PTR(-ENXIO);
 	}
 
-	rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
-	iounmap(crb);
-	if (rc)
+	if (!reg_map->component_map.hdm_decoder.valid) {
+		dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+		/* unique error code to indicate no HDM decoder capability */
+		return ERR_PTR(-ENODEV);
+	}
+
+	rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
+				    BIT(CXL_CM_CAP_CAP_ID_HDM));
+	if (rc) {
+		dev_err(dev, "Failed to map HDM capability.\n");
 		return ERR_PTR(rc);
+	}
 
 	parse_hdm_decoder_caps(cxlhdm);
 	if (cxlhdm->decoder_count == 0) {
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index b12986b968..36270dcfb4 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -1249,8 +1249,7 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
 		return 0;
 	}
 
-	cxlds->dpa_res =
-		(struct resource)DEFINE_RES_MEM(0, mds->total_bytes);
+	cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
 
 	if (mds->partition_align_bytes == 0) {
 		rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
@@ -1402,6 +1401,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
 	mutex_init(&mds->mbox_mutex);
 	mutex_init(&mds->event.log_lock);
 	mds->cxlds.dev = dev;
+	mds->cxlds.reg_map.host = dev;
+	mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
 	mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
 
 	return mds;
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index c7a7887ebd..deae3289de 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -475,9 +476,9 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
 		allowed++;
 	}
 
-	if (!allowed) {
-		cxl_set_mem_enable(cxlds, 0);
-		info->mem_enabled = 0;
+	if (!allowed && info->mem_enabled) {
+		dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n");
+		return -ENXIO;
 	}
 
 	/*
@@ -595,6 +596,16 @@ static int cxl_cdat_read_table(struct device *dev,
 	return 0;
 }
 
+static unsigned char cdat_checksum(void *buf, size_t size)
+{
+	unsigned char sum, *data = buf;
+	size_t i;
+
+	for (sum = 0, i = 0; i < size; i++)
+		sum += data[i];
+	return sum;
+}
+
 /**
  * read_cdat_data - Read the CDAT data on this port
  * @port: Port to read data from
@@ -603,18 +614,30 @@ static int cxl_cdat_read_table(struct device *dev,
  */
 void read_cdat_data(struct cxl_port *port)
 {
-	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
-	struct device *host = cxlmd->dev.parent;
+	struct device *uport = port->uport_dev;
 	struct device *dev = &port->dev;
 	struct pci_doe_mb *cdat_doe;
+	struct pci_dev *pdev = NULL;
+	struct cxl_memdev *cxlmd;
 	size_t cdat_length;
-	void *cdat_table;
+	void *cdat_table, *cdat_buf;
 	int rc;
 
-	if (!dev_is_pci(host))
+	if (is_cxl_memdev(uport)) {
+		struct device *host;
+
+		cxlmd = to_cxl_memdev(uport);
+		host = cxlmd->dev.parent;
+		if (dev_is_pci(host))
+			pdev = to_pci_dev(host);
+	} else if (dev_is_pci(uport)) {
+		pdev = to_pci_dev(uport);
+	}
+
+	if (!pdev)
 		return;
-	cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
-					PCI_DVSEC_VENDOR_ID_CXL,
+
+	cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
 					CXL_DOE_PROTOCOL_TABLE_ACCESS);
 	if (!cdat_doe) {
 		dev_dbg(dev, "No CDAT mailbox\n");
@@ -628,50 +651,59 @@ void read_cdat_data(struct cxl_port *port)
 		return;
 	}
 
-	cdat_table = devm_kzalloc(dev, cdat_length + sizeof(__le32),
-				  GFP_KERNEL);
-	if (!cdat_table)
+	cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
+	if (!cdat_buf)
 		return;
 
-	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
-	if (rc) {
-		/* Don't leave table data allocated on error */
-		devm_kfree(dev, cdat_table);
-		dev_err(dev, "CDAT data read error\n");
-		return;
-	}
+	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
+	if (rc)
+		goto err;
+
+	cdat_table = cdat_buf + sizeof(__le32);
+	if (cdat_checksum(cdat_table, cdat_length))
+		goto err;
 
-	port->cdat.table = cdat_table + sizeof(__le32);
+	port->cdat.table = cdat_table;
 	port->cdat.length = cdat_length;
+	return;
+
+err:
+	/* Don't leave table data allocated on error */
+	devm_kfree(dev, cdat_buf);
+	dev_err(dev, "Failed to read/validate CDAT.\n");
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
 
-void cxl_cor_error_detected(struct pci_dev *pdev)
+static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
+				 void __iomem *ras_base)
 {
-	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
 	void __iomem *addr;
 	u32 status;
 
-	if (!cxlds->regs.ras)
+	if (!ras_base)
 		return;
 
-	addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+	addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
 	status = readl(addr);
 	if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
 		writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
 		trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
 	}
 }
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
+{
+	return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
+}
 
 /* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+static void header_log_copy(void __iomem *ras_base, u32 *log)
 {
 	void __iomem *addr;
 	u32 *log_addr;
 	int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
 
-	addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+	addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
 	log_addr = log;
 
 	for (i = 0; i < log_u32_size; i++) {
@@ -685,17 +717,18 @@ static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
  * Log the state of the RAS status registers and prepare them to log the
  * next error status. Return 1 if reset needed.
  */
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
+			     void __iomem *ras_base)
 {
 	u32 hl[CXL_HEADERLOG_SIZE_U32];
 	void __iomem *addr;
 	u32 status;
 	u32 fe;
 
-	if (!cxlds->regs.ras)
+	if (!ras_base)
 		return false;
 
-	addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+	addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
 	status = readl(addr);
 	if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
 		return false;
@@ -703,7 +736,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
 	/* If multiple errors, log header points to first error from ctrl reg */
 	if (hweight32(status) > 1) {
 		void __iomem *rcc_addr =
-			cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+			ras_base + CXL_RAS_CAP_CONTROL_OFFSET;
 
 		fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
 				   readl(rcc_addr)));
@@ -711,13 +744,211 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
 		fe = status;
 	}
 
-	header_log_copy(cxlds, hl);
+	header_log_copy(ras_base, hl);
 	trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
 	writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
 
 	return true;
 }
 
+static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
+{
+	return __cxl_handle_ras(cxlds, cxlds->regs.ras);
+}
+
+#ifdef CONFIG_PCIEAER_CXL
+
+static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
+{
+	struct cxl_rcrb_info *ri = &dport->rcrb;
+	void __iomem *dport_aer = NULL;
+	resource_size_t aer_phys;
+	struct device *host;
+
+	if (dport->rch && ri->aer_cap) {
+		host = dport->reg_map.host;
+		aer_phys = ri->aer_cap + ri->base;
+		dport_aer = devm_cxl_iomap_block(host, aer_phys,
+				sizeof(struct aer_capability_regs));
+	}
+
+	dport->regs.dport_aer = dport_aer;
+}
+
+static void cxl_dport_map_regs(struct cxl_dport *dport)
+{
+	struct cxl_register_map *map = &dport->reg_map;
+	struct device *dev = dport->dport_dev;
+
+	if (!map->component_map.ras.valid)
+		dev_dbg(dev, "RAS registers not found\n");
+	else if (cxl_map_component_regs(map, &dport->regs.component,
+					BIT(CXL_CM_CAP_CAP_ID_RAS)))
+		dev_dbg(dev, "Failed to map RAS capability.\n");
+
+	if (dport->rch)
+		cxl_dport_map_rch_aer(dport);
+}
+
+static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
+{
+	void __iomem *aer_base = dport->regs.dport_aer;
+	struct pci_host_bridge *bridge;
+	u32 aer_cmd_mask, aer_cmd;
+
+	if (!aer_base)
+		return;
+
+	bridge = to_pci_host_bridge(dport->dport_dev);
+
+	/*
+	 * Disable RCH root port command interrupts.
+	 * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
+	 *
+	 * This sequence may not be necessary. CXL spec states disabling
+	 * the root cmd register's interrupts is required. But, PCI spec
+	 * shows these are disabled by default on reset.
+	 */
+	if (bridge->native_aer) {
+		aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+				PCI_ERR_ROOT_CMD_NONFATAL_EN |
+				PCI_ERR_ROOT_CMD_FATAL_EN);
+		aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+		aer_cmd &= ~aer_cmd_mask;
+		writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
+	}
+}
+
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+{
+	struct device *dport_dev = dport->dport_dev;
+	struct pci_host_bridge *host_bridge;
+
+	host_bridge = to_pci_host_bridge(dport_dev);
+	if (host_bridge->native_aer)
+		dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+
+	dport->reg_map.host = host;
+	cxl_dport_map_regs(dport);
+
+	if (dport->rch)
+		cxl_disable_rch_root_ints(dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+
+static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
+				      struct cxl_dport *dport)
+{
+	return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
+}
+
+static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
+				  struct cxl_dport *dport)
+{
+	return __cxl_handle_ras(cxlds, dport->regs.ras);
+}
+
+/*
+ * Copy the AER capability registers using 32 bit read accesses.
+ * This is necessary because RCRB AER capability is MMIO mapped. Clear the
+ * status after copying.
+ *
+ * @aer_base: base address of AER capability block in RCRB
+ * @aer_regs: destination for copying AER capability
+ */
+static bool cxl_rch_get_aer_info(void __iomem *aer_base,
+				 struct aer_capability_regs *aer_regs)
+{
+	int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
+	u32 *aer_regs_buf = (u32 *)aer_regs;
+	int n;
+
+	if (!aer_base)
+		return false;
+
+	/* Use readl() to guarantee 32-bit accesses */
+	for (n = 0; n < read_cnt; n++)
+		aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
+
+	writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
+	writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
+
+	return true;
+}
+
+/* Get AER severity. Return false if there is no error. */
+static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
+				     int *severity)
+{
+	if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
+		if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
+			*severity = AER_FATAL;
+		else
+			*severity = AER_NONFATAL;
+		return true;
+	}
+
+	if (aer_regs->cor_status & ~aer_regs->cor_mask) {
+		*severity = AER_CORRECTABLE;
+		return true;
+	}
+
+	return false;
+}
+
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
+{
+	struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+	struct aer_capability_regs aer_regs;
+	struct cxl_dport *dport;
+	struct cxl_port *port;
+	int severity;
+
+	port = cxl_pci_find_port(pdev, &dport);
+	if (!port)
+		return;
+
+	put_device(&port->dev);
+
+	if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
+		return;
+
+	if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
+		return;
+
+	pci_print_aer(pdev, severity, &aer_regs);
+
+	if (severity == AER_CORRECTABLE)
+		cxl_handle_rdport_cor_ras(cxlds, dport);
+	else
+		cxl_handle_rdport_ras(cxlds, dport);
+}
+
+#else
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+#endif
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct device *dev = &cxlds->cxlmd->dev;
+
+	scoped_guard(device, dev) {
+		if (!dev->driver) {
+			dev_warn(&pdev->dev,
+				 "%s: memdev disabled, abort error handling\n",
+				 dev_name(dev));
+			return;
+		}
+
+		if (cxlds->rcd)
+			cxl_handle_rdport_errors(cxlds);
+
+		cxl_handle_endpoint_cor_ras(cxlds);
+	}
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
 pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
 				    pci_channel_state_t state)
 {
@@ -726,13 +957,25 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
 	struct device *dev = &cxlmd->dev;
 	bool ue;
 
-	/*
-	 * A frozen channel indicates an impending reset which is fatal to
-	 * CXL.mem operation, and will likely crash the system. On the off
-	 * chance the situation is recoverable dump the status of the RAS
-	 * capability registers and bounce the active state of the memdev.
-	 */
-	ue = cxl_report_and_clear(cxlds);
+	scoped_guard(device, dev) {
+		if (!dev->driver) {
+			dev_warn(&pdev->dev,
+				 "%s: memdev disabled, abort error handling\n",
+				 dev_name(dev));
+			return PCI_ERS_RESULT_DISCONNECT;
+		}
+
+		if (cxlds->rcd)
+			cxl_handle_rdport_errors(cxlds);
+		/*
+		 * A frozen channel indicates an impending reset which is fatal to
+		 * CXL.mem operation, and will likely crash the system. On the off
+		 * chance the situation is recoverable dump the status of the RAS
+		 * capability registers and bounce the active state of the memdev.
+		 */
+		ue = cxl_handle_endpoint_ras(cxlds);
+	}
+
 	switch (state) {
 	case pci_channel_io_normal:
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index c67cc8c9d5..6b386771cc 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -287,6 +287,15 @@ static ssize_t interleave_ways_show(struct device *dev,
 
 static DEVICE_ATTR_RO(interleave_ways);
 
+static ssize_t qos_class_show(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+	return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
+}
+static DEVICE_ATTR_RO(qos_class);
+
 static struct attribute *cxl_decoder_base_attrs[] = {
 	&dev_attr_start.attr,
 	&dev_attr_size.attr,
@@ -306,6 +315,7 @@ static struct attribute *cxl_decoder_root_attrs[] = {
 	&dev_attr_cap_type2.attr,
 	&dev_attr_cap_type3.attr,
 	&dev_attr_target_list.attr,
+	&dev_attr_qos_class.attr,
 	SET_CXL_REGION_ATTR(create_pmem_region)
 	SET_CXL_REGION_ATTR(create_ram_region)
 	SET_CXL_REGION_ATTR(delete_region)
@@ -530,8 +540,33 @@ static void cxl_port_release(struct device *dev)
 	kfree(port);
 }
 
+static ssize_t decoders_committed_show(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct cxl_port *port = to_cxl_port(dev);
+	int rc;
+
+	down_read(&cxl_region_rwsem);
+	rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
+	up_read(&cxl_region_rwsem);
+
+	return rc;
+}
+
+static DEVICE_ATTR_RO(decoders_committed);
+
+static struct attribute *cxl_port_attrs[] = {
+	&dev_attr_decoders_committed.attr,
+	NULL,
+};
+
+static struct attribute_group cxl_port_attribute_group = {
+	.attrs = cxl_port_attrs,
+};
+
 static const struct attribute_group *cxl_port_attribute_groups[] = {
 	&cxl_base_attribute_group,
+	&cxl_port_attribute_group,
 	NULL,
 };
 
@@ -628,7 +663,6 @@ static int devm_cxl_link_parent_dport(struct device *host,
 static struct lock_class_key cxl_port_key;
 
 static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
-					resource_size_t component_reg_phys,
 					struct cxl_dport *parent_dport)
 {
 	struct cxl_port *port;
@@ -679,7 +713,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
 	} else
 		dev->parent = uport_dev;
 
-	port->component_reg_phys = component_reg_phys;
 	ida_init(&port->decoder_ida);
 	port->hdm_end = -1;
 	port->commit_end = -1;
@@ -703,16 +736,18 @@ err:
 static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
 			       resource_size_t component_reg_phys)
 {
-	if (component_reg_phys == CXL_RESOURCE_NONE)
-		return 0;
-
 	*map = (struct cxl_register_map) {
 		.host = host,
-		.reg_type = CXL_REGLOC_RBI_COMPONENT,
+		.reg_type = CXL_REGLOC_RBI_EMPTY,
 		.resource = component_reg_phys,
-		.max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
 	};
 
+	if (component_reg_phys == CXL_RESOURCE_NONE)
+		return 0;
+
+	map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+	map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
+
 	return cxl_setup_regs(map);
 }
 
@@ -721,7 +756,7 @@ static int cxl_port_setup_regs(struct cxl_port *port,
 {
 	if (dev_is_platform(port->uport_dev))
 		return 0;
-	return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+	return cxl_setup_comp_regs(&port->dev, &port->reg_map,
 				   component_reg_phys);
 }
 
@@ -738,9 +773,9 @@ static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
 	 * register probing, and fixup @host after the fact, since @host may be
 	 * NULL.
 	 */
-	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
+	rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
 				 component_reg_phys);
-	dport->comp_map.host = host;
+	dport->reg_map.host = host;
 
 	return rc;
 }
@@ -753,21 +788,36 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
 	struct device *dev;
 	int rc;
 
-	port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
+	port = cxl_port_alloc(uport_dev, parent_dport);
 	if (IS_ERR(port))
 		return port;
 
 	dev = &port->dev;
-	if (is_cxl_memdev(uport_dev))
+	if (is_cxl_memdev(uport_dev)) {
+		struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+		struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
 		rc = dev_set_name(dev, "endpoint%d", port->id);
-	else if (parent_dport)
+		if (rc)
+			goto err;
+
+		/*
+		 * The endpoint driver already enumerated the component and RAS
+		 * registers. Reuse that enumeration while prepping them to be
+		 * mapped by the cxl_port driver.
+		 */
+		port->reg_map = cxlds->reg_map;
+		port->reg_map.host = &port->dev;
+	} else if (parent_dport) {
 		rc = dev_set_name(dev, "port%d", port->id);
-	else
-		rc = dev_set_name(dev, "root%d", port->id);
-	if (rc)
-		goto err;
+		if (rc)
+			goto err;
 
-	rc = cxl_port_setup_regs(port, component_reg_phys);
+		rc = cxl_port_setup_regs(port, component_reg_phys);
+		if (rc)
+			goto err;
+	} else
+		rc = dev_set_name(dev, "root%d", port->id);
 	if (rc)
 		goto err;
 
@@ -1500,7 +1550,11 @@ retry:
 		struct cxl_dport *dport;
 		struct cxl_port *port;
 
-		if (!dport_dev)
+		/*
+		 * The terminal "grandparent" in PCI is NULL and @platform_bus
+		 * for platform devices
+		 */
+		if (!dport_dev || dport_dev == &platform_bus)
 			return 0;
 
 		uport_dev = dport_dev->parent;
@@ -1719,6 +1773,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
 	}
 
 	atomic_set(&cxlrd->region_id, rc);
+	cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
 	return cxlrd;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -2090,3 +2145,4 @@ static void cxl_core_exit(void)
 subsys_initcall(cxl_core_init);
 module_exit(cxl_core_exit);
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 6ebd12f797..7bb656237f 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -123,7 +123,7 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
 {
 	if (!cpu_cache_has_invalidate_memregion()) {
 		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
-			dev_warn_once(
+			dev_info_once(
 				&cxlr->dev,
 				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
 			return 0;
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index e0fbe964f6..372786f809 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -216,16 +216,16 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
 
 	for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
 		struct mapinfo *mi = &mapinfo[i];
-		resource_size_t phys_addr;
+		resource_size_t addr;
 		resource_size_t length;
 
 		if (!mi->rmap->valid)
 			continue;
 		if (!test_bit(mi->rmap->id, &map_mask))
 			continue;
-		phys_addr = map->resource + mi->rmap->offset;
+		addr = map->resource + mi->rmap->offset;
 		length = mi->rmap->size;
-		*(mi->addr) = devm_cxl_iomap_block(host, phys_addr, length);
+		*(mi->addr) = devm_cxl_iomap_block(host, addr, length);
 		if (!*(mi->addr))
 			return -ENOMEM;
 	}
@@ -386,10 +386,9 @@ int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
 
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
-		     struct cxl_register_map *map)
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
 {
-	struct device *dev = &pdev->dev;
+	struct device *dev = map->host;
 	resource_size_t phys_addr;
 
 	phys_addr = map->resource;
@@ -470,6 +469,42 @@ int cxl_setup_regs(struct cxl_register_map *map)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
 
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
+{
+	void __iomem *addr;
+	u16 offset = 0;
+	u32 cap_hdr;
+
+	if (WARN_ON_ONCE(rcrb == CXL_RESOURCE_NONE))
+		return 0;
+
+	if (!request_mem_region(rcrb, SZ_4K, dev_name(dev)))
+		return 0;
+
+	addr = ioremap(rcrb, SZ_4K);
+	if (!addr)
+		goto out;
+
+	cap_hdr = readl(addr + offset);
+	while (PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR) {
+		offset = PCI_EXT_CAP_NEXT(cap_hdr);
+
+		/* Offset 0 terminates capability list. */
+		if (!offset)
+			break;
+		cap_hdr = readl(addr + offset);
+	}
+
+	if (offset)
+		dev_dbg(dev, "found AER extended capability (0x%x)\n", offset);
+
+	iounmap(addr);
+out:
+	release_mem_region(rcrb, SZ_4K);
+
+	return offset;
+}
+
 resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
 				    enum cxl_rcrb which)
 {