author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit    b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree      42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/cxl
parent    Adding upstream version 6.8.12. (diff)
download  linux-b20732900e4636a467c0183a47f7396700f5f743.tar.xz
          linux-b20732900e4636a467c0183a47f7396700f5f743.zip
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cxl')
-rw-r--r--  drivers/cxl/Kconfig       |  13
-rw-r--r--  drivers/cxl/acpi.c        |  13
-rw-r--r--  drivers/cxl/core/cdat.c   | 180
-rw-r--r--  drivers/cxl/core/core.h   |   4
-rw-r--r--  drivers/cxl/core/pci.c    | 128
-rw-r--r--  drivers/cxl/core/port.c   | 145
-rw-r--r--  drivers/cxl/core/region.c | 188
-rw-r--r--  drivers/cxl/core/trace.h  |   4
-rw-r--r--  drivers/cxl/cxl.h         |  17
-rw-r--r--  drivers/cxl/cxlmem.h      |   2
-rw-r--r--  drivers/cxl/cxlpci.h      |  24
-rw-r--r--  drivers/cxl/pci.c         |  22
12 files changed, 566 insertions, 174 deletions
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 67998dbd1d..5f3c9c5529 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -144,17 +144,4 @@ config CXL_REGION_INVALIDATION_TEST
 	  If unsure, or if this kernel is meant for production environments,
 	  say N.
 
-config CXL_PMU
-	tristate "CXL Performance Monitoring Unit"
-	default CXL_BUS
-	depends on PERF_EVENTS
-	help
-	  Support performance monitoring as defined in CXL rev 3.0
-	  section 13.2: Performance Monitoring. CXL components may have
-	  one or more CXL Performance Monitoring Units (CPMUs).
-
-	  Say 'y/m' to enable a driver that will attach to performance
-	  monitoring units and provide standard perf based interfaces.
-
-	  If unsure say 'm'.
 endif
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index af5cb818f8..cb8c155a2c 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -525,22 +525,11 @@
 static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
 {
 	struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
 	u32 uid;
-	int rc;
 
 	if (kstrtou32(acpi_device_uid(hb), 0, &uid))
 		return -EINVAL;
 
-	rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
-	if (rc < 0)
-		return rc;
-
-	/* Adjust back to picoseconds from nanoseconds */
-	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
-		dport->hb_coord[i].read_latency *= 1000;
-		dport->hb_coord[i].write_latency *= 1000;
-	}
-
-	return 0;
+	return acpi_get_genport_coordinates(uid, dport->coord);
 }
 
 static int add_host_bridge_dport(struct device *match, void *arg)
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index fbf167f9d5..bb83867d9f 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -9,16 +9,47 @@
 #include "cxlmem.h"
 #include "core.h"
 #include "cxl.h"
+#include "core.h"
 
 struct dsmas_entry {
 	struct range dpa_range;
 	u8 handle;
-	struct access_coordinate coord;
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 	int entries;
 	int qos_class;
 };
 
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+	u32 value;
+
+	/*
+	 * Check for invalid and overflow values
+	 */
+	if (entry == 0xffff || !entry)
+		return 0;
+	else if (base > (UINT_MAX / (entry)))
+		return 0;
+
+	/*
+	 * CDAT fields follow the format of HMAT fields. See table 5 Device
+	 * Scoped Latency and Bandwidth Information Structure in Coherent Device
+	 * Attribute Table (CDAT) Specification v1.01.
+	 */
+	value = entry * base;
+	switch (type) {
+	case ACPI_HMAT_ACCESS_LATENCY:
+	case ACPI_HMAT_READ_LATENCY:
+	case ACPI_HMAT_WRITE_LATENCY:
+		value = DIV_ROUND_UP(value, 1000);
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
 static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
 			      const unsigned long end)
 {
@@ -57,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
 	return 0;
 }
 
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
-				      int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+					int access, unsigned int val)
 {
 	switch (access) {
 	case ACPI_HMAT_ACCESS_LATENCY:
@@ -84,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
 	}
 }
 
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+				      int access, unsigned int val)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		__cxl_access_coordinate_set(&coord[i], access, val);
+}
+
 static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 			       const unsigned long end)
 {
@@ -96,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 	__le16 le_val;
 	u64 val;
 	u16 len;
-	int rc;
 
 	len = le16_to_cpu((__force __le16)hdr->length);
 	if (len != size || (unsigned long)hdr + len > end) {
@@ -123,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 
 	le_base = (__force __le64)dslbis->entry_base_unit;
 	le_val = (__force __le16)dslbis->entry[0];
-	rc = check_mul_overflow(le64_to_cpu(le_base),
-				le16_to_cpu(le_val), &val);
-	if (rc)
-		pr_warn("DSLBIS value overflowed.\n");
+	val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+			     dslbis->data_type);
 
-	cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+	cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
 
 	return 0;
 }
@@ -149,38 +184,31 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
 	int rc;
 
 	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
-			      dsmas_xa, port->cdat.table);
+			      dsmas_xa, port->cdat.table, port->cdat.length);
 	rc = cdat_table_parse_output(rc);
 	if (rc)
 		return rc;
 
 	rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
-			      dsmas_xa, port->cdat.table);
+			      dsmas_xa, port->cdat.table, port->cdat.length);
 	return cdat_table_parse_output(rc);
 }
 
 static int cxl_port_perf_data_calculate(struct cxl_port *port,
 					struct xarray *dsmas_xa)
 {
-	struct access_coordinate ep_c;
-	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
 	struct dsmas_entry *dent;
 	int valid_entries = 0;
 	unsigned long index;
 	int rc;
 
-	rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+	rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
 	if (rc) {
 		dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
 		return rc;
 	}
 
-	rc = cxl_hb_get_perf_coordinates(port, coord);
-	if (rc) {
-		dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
-		return rc;
-	}
-
 	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
 
 	if (!cxl_root)
@@ -192,18 +220,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 	xa_for_each(dsmas_xa, index, dent) {
 		int qos_class;
 
-		cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
-		/*
-		 * Keeping the host bridge coordinates separate from the dsmas
-		 * coordinates in order to allow calculation of access class
-		 * 0 and 1 for region later.
-		 */
-		cxl_coordinates_combine(&coord[ACCESS_COORDINATE_LOCAL],
-					&coord[ACCESS_COORDINATE_LOCAL],
-					&dent->coord);
+		cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
 		dent->entries = 1;
 		rc = cxl_root->ops->qos_class(cxl_root,
-					      &coord[ACCESS_COORDINATE_LOCAL],
+					      &dent->coord[ACCESS_COORDINATE_CPU],
 					      1, &qos_class);
 		if (rc != 1)
 			continue;
@@ -221,14 +241,17 @@
 static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
 			      struct cxl_dpa_perf *dpa_perf)
 {
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		dpa_perf->coord[i] = dent->coord[i];
 	dpa_perf->dpa_range = dent->dpa_range;
-	dpa_perf->coord = dent->coord;
 	dpa_perf->qos_class = dent->qos_class;
 	dev_dbg(dev,
 		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
 		dent->dpa_range.start, dpa_perf->qos_class,
-		dent->coord.read_bandwidth, dent->coord.write_bandwidth,
-		dent->coord.read_latency, dent->coord.write_latency);
+		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+		dent->coord[ACCESS_COORDINATE_CPU].write_latency);
 }
 
 static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -460,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
 
 		le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
 		le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
-
-		if (check_mul_overflow(le64_to_cpu(le_base),
-				       le16_to_cpu(le_val), &val))
-			dev_warn(dev, "SSLBIS value overflowed!\n");
+		val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+				     sslbis->data_type);
 
 		xa_for_each(&port->dports, index, dport) {
 			if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
-			    dsp_id == dport->port_id)
-				cxl_access_coordinate_set(&dport->sw_coord,
+			    dsp_id == dport->port_id) {
+				cxl_access_coordinate_set(dport->coord,
 							  sslbis->data_type,
 							  val);
+			}
 		}
 	}
 
@@ -485,13 +507,28 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
 		return;
 
 	rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
-			      port, port->cdat.table);
+			      port, port->cdat.table, port->cdat.length);
 	rc = cdat_table_parse_output(rc);
 	if (rc)
 		dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
 }
 EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
 
+static void __cxl_coordinates_combine(struct access_coordinate *out,
+				      struct access_coordinate *c1,
+				      struct access_coordinate *c2)
+{
+	if (c1->write_bandwidth && c2->write_bandwidth)
+		out->write_bandwidth = min(c1->write_bandwidth,
+					   c2->write_bandwidth);
+	out->write_latency = c1->write_latency + c2->write_latency;
+
+	if (c1->read_bandwidth && c2->read_bandwidth)
+		out->read_bandwidth = min(c1->read_bandwidth,
+					  c2->read_bandwidth);
+	out->read_latency = c1->read_latency + c2->read_latency;
+}
+
 /**
  * cxl_coordinates_combine - Combine the two input coordinates
  *
@@ -503,15 +540,60 @@
 void cxl_coordinates_combine(struct access_coordinate *out,
 			     struct access_coordinate *c1,
 			     struct access_coordinate *c2)
 {
-	if (c1->write_bandwidth && c2->write_bandwidth)
-		out->write_bandwidth = min(c1->write_bandwidth,
-					   c2->write_bandwidth);
-	out->write_latency = c1->write_latency + c2->write_latency;
-
-	if (c1->read_bandwidth && c2->read_bandwidth)
-		out->read_bandwidth = min(c1->read_bandwidth,
-					  c2->read_bandwidth);
-	out->read_latency = c1->read_latency + c2->read_latency;
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		__cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
 }
 
 MODULE_IMPORT_NS(CXL);
+
+void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
+				    struct cxl_endpoint_decoder *cxled)
+{
+	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+	struct cxl_dev_state *cxlds = cxlmd->cxlds;
+	struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+	struct range dpa = {
+			.start = cxled->dpa_res->start,
+			.end = cxled->dpa_res->end,
+	};
+	struct cxl_dpa_perf *perf;
+
+	switch (cxlr->mode) {
+	case CXL_DECODER_RAM:
+		perf = &mds->ram_perf;
+		break;
+	case CXL_DECODER_PMEM:
+		perf = &mds->pmem_perf;
+		break;
+	default:
+		return;
+	}
+
+	lockdep_assert_held(&cxl_dpa_rwsem);
+
+	if (!range_contains(&perf->dpa_range, &dpa))
+		return;
+
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		/* Get total bandwidth and the worst latency for the cxl region */
+		cxlr->coord[i].read_latency = max_t(unsigned int,
+						    cxlr->coord[i].read_latency,
+						    perf->coord[i].read_latency);
+		cxlr->coord[i].write_latency = max_t(unsigned int,
+						     cxlr->coord[i].write_latency,
+						     perf->coord[i].write_latency);
+		cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
+		cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
+	}
+}
+
+int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
+				       enum access_coordinate_class access)
+{
+	return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
+}
+
+bool cxl_need_node_perf_attrs_update(int nid)
+{
+	return !acpi_node_backed_by_real_pxm(nid);
+}
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 3b64fb1b9e..bc5a95665a 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -90,4 +90,8 @@ enum cxl_poison_trace_type {
 
 long cxl_pci_get_latency(struct pci_dev *pdev);
 
+int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
+				       enum access_coordinate_class access);
+bool cxl_need_node_perf_attrs_update(int nid);
+
 #endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index e9e6c81ce0..2773f05adb 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -518,14 +518,14 @@ EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL);
 	 FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle)))
 
 static int cxl_cdat_get_length(struct device *dev,
-			       struct pci_doe_mb *cdat_doe,
+			       struct pci_doe_mb *doe_mb,
 			       size_t *length)
 {
 	__le32 request = CDAT_DOE_REQ(0);
 	__le32 response[2];
 	int rc;
 
-	rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+	rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
 		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
 		     &request, sizeof(request),
 		     &response, sizeof(response));
@@ -543,56 +543,58 @@ static int cxl_cdat_get_length(struct device *dev,
 }
 
 static int cxl_cdat_read_table(struct device *dev,
-			       struct pci_doe_mb *cdat_doe,
-			       void *cdat_table, size_t *cdat_length)
+			       struct pci_doe_mb *doe_mb,
+			       struct cdat_doe_rsp *rsp, size_t *length)
 {
-	size_t length = *cdat_length + sizeof(__le32);
-	__le32 *data = cdat_table;
-	int entry_handle = 0;
+	size_t received, remaining = *length;
+	unsigned int entry_handle = 0;
+	union cdat_data *data;
 	__le32 saved_dw = 0;
 
 	do {
 		__le32 request = CDAT_DOE_REQ(entry_handle);
-		struct cdat_entry_header *entry;
-		size_t entry_dw;
 		int rc;
 
-		rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL,
+		rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
 			     CXL_DOE_PROTOCOL_TABLE_ACCESS,
 			     &request, sizeof(request),
-			     data, length);
+			     rsp, sizeof(*rsp) + remaining);
 		if (rc < 0) {
 			dev_err(dev, "DOE failed: %d", rc);
 			return rc;
 		}
 
-		/* 1 DW Table Access Response Header + CDAT entry */
-		entry = (struct cdat_entry_header *)(data + 1);
-		if ((entry_handle == 0 &&
-		     rc != sizeof(__le32) + sizeof(struct cdat_header)) ||
-		    (entry_handle > 0 &&
-		     (rc < sizeof(__le32) + sizeof(*entry) ||
-		      rc != sizeof(__le32) + le16_to_cpu(entry->length))))
+		if (rc < sizeof(*rsp))
 			return -EIO;
 
+		data = (union cdat_data *)rsp->data;
+		received = rc - sizeof(*rsp);
+
+		if (entry_handle == 0) {
+			if (received != sizeof(data->header))
+				return -EIO;
+		} else {
+			if (received < sizeof(data->entry) ||
+			    received != le16_to_cpu(data->entry.length))
+				return -EIO;
+		}
+
 		/* Get the CXL table access header entry handle */
 		entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE,
-					 le32_to_cpu(data[0]));
-		entry_dw = rc / sizeof(__le32);
-		/* Skip Header */
-		entry_dw -= 1;
+					 le32_to_cpu(rsp->doe_header));
+
 		/*
 		 * Table Access Response Header overwrote the last DW of
 		 * previous entry, so restore that DW
 		 */
-		*data = saved_dw;
-		length -= entry_dw * sizeof(__le32);
-		data += entry_dw;
-		saved_dw = *data;
+		rsp->doe_header = saved_dw;
+		remaining -= received;
+		rsp = (void *)rsp + received;
+		saved_dw = rsp->doe_header;
 	} while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY);
 
 	/* Length in CDAT header may exceed concatenation of CDAT entries */
-	*cdat_length -= length - sizeof(__le32);
+	*length -= remaining;
 
 	return 0;
 }
@@ -617,11 +619,11 @@
 void read_cdat_data(struct cxl_port *port)
 {
 	struct device *uport = port->uport_dev;
 	struct device *dev = &port->dev;
-	struct pci_doe_mb *cdat_doe;
+	struct pci_doe_mb *doe_mb;
 	struct pci_dev *pdev = NULL;
 	struct cxl_memdev *cxlmd;
-	size_t cdat_length;
-	void *cdat_table, *cdat_buf;
+	struct cdat_doe_rsp *buf;
+	size_t table_length, length;
 	int rc;
 
 	if (is_cxl_memdev(uport)) {
@@ -638,39 +640,48 @@ void read_cdat_data(struct cxl_port *port)
 	if (!pdev)
 		return;
 
-	cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
-					CXL_DOE_PROTOCOL_TABLE_ACCESS);
-	if (!cdat_doe) {
+	doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+				      CXL_DOE_PROTOCOL_TABLE_ACCESS);
+	if (!doe_mb) {
 		dev_dbg(dev, "No CDAT mailbox\n");
 		return;
 	}
 
 	port->cdat_available = true;
 
-	if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) {
+	if (cxl_cdat_get_length(dev, doe_mb, &length)) {
 		dev_dbg(dev, "No CDAT length\n");
 		return;
 	}
 
-	cdat_buf = devm_kzalloc(dev, cdat_length + sizeof(__le32), GFP_KERNEL);
-	if (!cdat_buf)
-		return;
+	/*
+	 * The begin of the CDAT buffer needs space for additional 4
+	 * bytes for the DOE header. Table data starts afterwards.
+	 */
+	buf = devm_kzalloc(dev, sizeof(*buf) + length, GFP_KERNEL);
+	if (!buf)
+		goto err;
+
+	table_length = length;
 
-	rc = cxl_cdat_read_table(dev, cdat_doe, cdat_buf, &cdat_length);
+	rc = cxl_cdat_read_table(dev, doe_mb, buf, &length);
 	if (rc)
 		goto err;
 
-	cdat_table = cdat_buf + sizeof(__le32);
-	if (cdat_checksum(cdat_table, cdat_length))
+	if (table_length != length)
+		dev_warn(dev, "Malformed CDAT table length (%zu:%zu), discarding trailing data\n",
+			 table_length, length);
+
+	if (cdat_checksum(buf->data, length))
 		goto err;
 
-	port->cdat.table = cdat_table;
-	port->cdat.length = cdat_length;
-	return;
+	port->cdat.table = buf->data;
+	port->cdat.length = length;
+
+	return;
 err:
 	/* Don't leave table data allocated on error */
-	devm_kfree(dev, cdat_buf);
+	devm_kfree(dev, buf);
 	dev_err(dev, "Failed to read/validate CDAT.\n");
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
@@ -1034,3 +1045,32 @@ long cxl_pci_get_latency(struct pci_dev *pdev)
 
 	return cxl_flit_size(pdev) * MEGA / bw;
 }
+
+static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data)
+{
+	struct cxl_port *port = data;
+	struct cxl_decoder *cxld;
+	struct cxl_hdm *cxlhdm;
+	void __iomem *hdm;
+	u32 ctrl;
+
+	if (!is_endpoint_decoder(dev))
+		return 0;
+
+	cxld = to_cxl_decoder(dev);
+	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
+		return 0;
+
+	cxlhdm = dev_get_drvdata(&port->dev);
+	hdm = cxlhdm->regs.hdm_decoder;
+	ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
+
+	return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl);
+}
+
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port)
+{
+	return device_for_each_child(&port->dev, port,
+				     __cxl_endpoint_decoder_reset_detected);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL);
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 4ae441ef32..887ed6e358 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -3,6 +3,7 @@
 #include <linux/platform_device.h>
 #include <linux/memregion.h>
 #include <linux/workqueue.h>
+#include <linux/einj-cxl.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/module.h>
@@ -793,6 +794,40 @@ static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
 	return rc;
 }
 
+DEFINE_SHOW_ATTRIBUTE(einj_cxl_available_error_type);
+
+static int cxl_einj_inject(void *data, u64 type)
+{
+	struct cxl_dport *dport = data;
+
+	if (dport->rch)
+		return einj_cxl_inject_rch_error(dport->rcrb.base, type);
+
+	return einj_cxl_inject_error(to_pci_dev(dport->dport_dev), type);
+}
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_einj_inject_fops, NULL, cxl_einj_inject,
+			 "0x%llx\n");
+
+static void cxl_debugfs_create_dport_dir(struct cxl_dport *dport)
+{
+	struct dentry *dir;
+
+	if (!einj_cxl_is_initialized())
+		return;
+
+	/*
+	 * dport_dev needs to be a PCIe port for CXL 2.0+ ports because
+	 * EINJ expects a dport SBDF to be specified for 2.0 error injection.
+	 */
+	if (!dport->rch && !dev_is_pci(dport->dport_dev))
+		return;
+
+	dir = cxl_debugfs_create_dir(dev_name(dport->dport_dev));
+
+	debugfs_create_file("einj_inject", 0200, dir, dport,
+			    &cxl_einj_inject_fops);
+}
+
 static struct cxl_port *__devm_cxl_add_port(struct device *host,
 					    struct device *uport_dev,
 					    resource_size_t component_reg_phys,
@@ -822,6 +857,7 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
 		 */
 		port->reg_map = cxlds->reg_map;
 		port->reg_map.host = &port->dev;
+		cxlmd->endpoint = port;
 	} else if (parent_dport) {
 		rc = dev_set_name(dev, "port%d", port->id);
 		if (rc)
@@ -1149,6 +1185,8 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
 	if (dev_is_pci(dport_dev))
 		dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));
 
+	cxl_debugfs_create_dport_dir(dport);
+
 	return dport;
 }
 
@@ -1374,7 +1412,6 @@ int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
 
 	get_device(host);
 	get_device(&endpoint->dev);
-	cxlmd->endpoint = endpoint;
 	cxlmd->depth = endpoint->depth;
 
 	return devm_add_action_or_reset(dev, delete_endpoint, cxlmd);
 }
@@ -2096,36 +2133,39 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
 
-/**
- * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
- *				 and host bridge
- *
- * @port: endpoint cxl_port
- * @coord: output access coordinates
- *
- * Return: errno on failure, 0 on success.
- */
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
-				struct access_coordinate *coord)
+static void add_latency(struct access_coordinate *c, long latency)
 {
-	struct cxl_port *iter = port;
-	struct cxl_dport *dport;
-
-	if (!is_cxl_endpoint(port))
-		return -EINVAL;
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		c[i].write_latency += latency;
+		c[i].read_latency += latency;
+	}
+}
 
-	dport = iter->parent_dport;
-	while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
-		iter = to_cxl_port(iter->dev.parent);
-		dport = iter->parent_dport;
+static bool coordinates_valid(struct access_coordinate *c)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		if (c[i].read_bandwidth && c[i].write_bandwidth &&
+		    c[i].read_latency && c[i].write_latency)
+			continue;
+		return false;
 	}
 
-	coord[ACCESS_COORDINATE_LOCAL] =
-		dport->hb_coord[ACCESS_COORDINATE_LOCAL];
-	coord[ACCESS_COORDINATE_CPU] =
-		dport->hb_coord[ACCESS_COORDINATE_CPU];
+	return true;
+}
 
-	return 0;
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+		c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
+	}
+}
+
+static void set_access_coordinates(struct access_coordinate *out,
+				   struct access_coordinate *in)
+{
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+		out[i] = in[i];
 }
 
 static bool parent_port_is_cxl_root(struct cxl_port *port)
@@ -2144,13 +2184,21 @@
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 				      struct access_coordinate *coord)
 {
-	struct access_coordinate c = {
-		.read_bandwidth = UINT_MAX,
-		.write_bandwidth = UINT_MAX,
+	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+	struct access_coordinate c[] = {
+		{
+			.read_bandwidth = UINT_MAX,
+			.write_bandwidth = UINT_MAX,
+		},
+		{
+			.read_bandwidth = UINT_MAX,
+			.write_bandwidth = UINT_MAX,
+		},
 	};
 	struct cxl_port *iter = port;
 	struct cxl_dport *dport;
 	struct pci_dev *pdev;
+	struct device *dev;
 	unsigned int bw;
 	bool is_cxl_root;
 
@@ -2158,6 +2206,13 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 		return -EINVAL;
 
 	/*
+	 * Skip calculation for RCD. Expectation is HMAT already covers RCD case
+	 * since RCH does not support hotplug.
+	 */
+	if (cxlmd->cxlds->rcd)
+		return 0;
+
+	/*
 	 * Exit the loop when the parent port of the current iter port is cxl
 	 * root. The iterative loop starts at the endpoint and gathers the
 	 * latency of the CXL link from the current device/port to the connected
@@ -2172,23 +2227,33 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 		 * There's no valid access_coordinate for a root port since RPs do not
 		 * have CDAT and therefore needs to be skipped.
 		 */
-		if (!is_cxl_root)
-			cxl_coordinates_combine(&c, &c, &dport->sw_coord);
-		c.write_latency += dport->link_latency;
-		c.read_latency += dport->link_latency;
+		if (!is_cxl_root) {
+			if (!coordinates_valid(dport->coord))
+				return -EINVAL;
+			cxl_coordinates_combine(c, c, dport->coord);
+		}
+		add_latency(c, dport->link_latency);
 	} while (!is_cxl_root);
 
+	dport = iter->parent_dport;
+	/* Retrieve HB coords */
+	if (!coordinates_valid(dport->coord))
+		return -EINVAL;
+	cxl_coordinates_combine(c, c, dport->coord);
+
+	dev = port->uport_dev->parent;
+	if (!dev_is_pci(dev))
+		return -ENODEV;
+
 	/* Get the calculated PCI paths bandwidth */
-	pdev = to_pci_dev(port->uport_dev->parent);
+	pdev = to_pci_dev(dev);
 	bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
 	if (bw == 0)
 		return -ENXIO;
 	bw /= BITS_PER_BYTE;
 
-	c.write_bandwidth = min(c.write_bandwidth, bw);
-	c.read_bandwidth = min(c.read_bandwidth, bw);
-
-	*coord = c;
+	set_min_bandwidth(c, bw);
+	set_access_coordinates(coord, c);
 
 	return 0;
 }
@@ -2245,6 +2310,10 @@ static __init int cxl_core_init(void)
 
 	cxl_debugfs = debugfs_create_dir("cxl", NULL);
 
+	if (einj_cxl_is_initialized())
+		debugfs_create_file("einj_types", 0400, cxl_debugfs, NULL,
+				    &einj_cxl_available_error_type_fops);
+
 	cxl_mbox_init();
 
 	rc = cxl_memdev_init();
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 4c7fd2d5cc..18b9514964 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -4,6 +4,7 @@
 #include <linux/genalloc.h>
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/slab.h>
 #include <linux/uuid.h>
 #include <linux/sort.h>
@@ -30,6 +31,108 @@
 
 static struct cxl_region *to_cxl_region(struct device *dev);
 
+#define __ACCESS_ATTR_RO(_level, _name) {				\
+	.attr	= { .name = __stringify(_name), .mode = 0444 },		\
+	.show	= _name##_access##_level##_show,			\
+}
+
+#define ACCESS_DEVICE_ATTR_RO(level, name)	\
+	struct device_attribute dev_attr_access##level##_##name = __ACCESS_ATTR_RO(level, name)
+
+#define ACCESS_ATTR_RO(level, attrib)					\
+static ssize_t attrib##_access##level##_show(struct device *dev,	\
+					     struct device_attribute *attr, \
+					     char *buf)			\
+{									\
+	struct cxl_region *cxlr = to_cxl_region(dev);			\
+									\
+	if (cxlr->coord[level].attrib == 0)				\
+		return -ENOENT;						\
+									\
+	return sysfs_emit(buf, "%u\n", cxlr->coord[level].attrib);	\
+}									\
+static ACCESS_DEVICE_ATTR_RO(level, attrib)
+
+ACCESS_ATTR_RO(0, read_bandwidth);
+ACCESS_ATTR_RO(0, read_latency);
+ACCESS_ATTR_RO(0, write_bandwidth);
+ACCESS_ATTR_RO(0, write_latency);
+
+#define ACCESS_ATTR_DECLARE(level, attrib)	\
+	(&dev_attr_access##level##_##attrib.attr)
+
+static struct attribute *access0_coordinate_attrs[] = {
+	ACCESS_ATTR_DECLARE(0, read_bandwidth),
+	ACCESS_ATTR_DECLARE(0, write_bandwidth),
+	ACCESS_ATTR_DECLARE(0, read_latency),
+	ACCESS_ATTR_DECLARE(0, write_latency),
+	NULL
+};
+
+ACCESS_ATTR_RO(1, read_bandwidth);
+ACCESS_ATTR_RO(1, read_latency);
+ACCESS_ATTR_RO(1, write_bandwidth);
+ACCESS_ATTR_RO(1, write_latency);
+
+static struct attribute *access1_coordinate_attrs[] = {
+	ACCESS_ATTR_DECLARE(1, read_bandwidth),
+	ACCESS_ATTR_DECLARE(1, write_bandwidth),
+	ACCESS_ATTR_DECLARE(1, read_latency),
+	ACCESS_ATTR_DECLARE(1, write_latency),
+	NULL
+};
+
+#define ACCESS_VISIBLE(level)						\
+static umode_t cxl_region_access##level##_coordinate_visible(		\
+		struct kobject *kobj, struct attribute *a, int n)	\
+{									\
+	struct device *dev = kobj_to_dev(kobj);				\
+	struct cxl_region *cxlr = to_cxl_region(dev);			\
+									\
+	if (a == &dev_attr_access##level##_read_latency.attr &&		\
+	    cxlr->coord[level].read_latency == 0)			\
+		return 0;						\
+									\
+	if (a == &dev_attr_access##level##_write_latency.attr &&	\
+	    cxlr->coord[level].write_latency == 0)			\
+		return 0;						\
+									\
+	if (a == &dev_attr_access##level##_read_bandwidth.attr &&	\
+	    cxlr->coord[level].read_bandwidth == 0)			\
+		return 0;						\
+									\
+	if (a == &dev_attr_access##level##_write_bandwidth.attr &&	\
+	    cxlr->coord[level].write_bandwidth == 0)			\
+		return 0;						\
+									\
+	return a->mode;							\
+}
+
+ACCESS_VISIBLE(0);
+ACCESS_VISIBLE(1);
+
+static const struct attribute_group cxl_region_access0_coordinate_group = {
+	.name = "access0",
+	.attrs = access0_coordinate_attrs,
+	.is_visible = cxl_region_access0_coordinate_visible,
+};
+
+static const struct attribute_group *get_cxl_region_access0_group(void)
+{
+	return &cxl_region_access0_coordinate_group;
+}
+
+static const struct attribute_group cxl_region_access1_coordinate_group = {
+	.name = "access1",
+	.attrs = access1_coordinate_attrs,
+	.is_visible = cxl_region_access1_coordinate_visible,
+};
+
+static const struct attribute_group *get_cxl_region_access1_group(void)
+{
+	return &cxl_region_access1_coordinate_group;
+}
+
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
@@ -1752,6 +1855,8 @@ static int cxl_region_attach(struct cxl_region *cxlr,
 		return -EINVAL;
 	}
 
+	cxl_region_perf_data_calculate(cxlr, cxled);
+
 	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
 		int i;
 
@@ -2067,6 +2172,8 @@ static const struct attribute_group *region_groups[] = {
 	&cxl_base_attribute_group,
 	&cxl_region_group,
 	&cxl_region_target_group,
+	&cxl_region_access0_coordinate_group,
+	&cxl_region_access1_coordinate_group,
 	NULL,
 };
 
@@ -2120,6 +2227,7 @@ static void unregister_region(void *_cxlr)
 	struct cxl_region_params *p = &cxlr->params;
 	int i;
 
+	unregister_memory_notifier(&cxlr->memory_notifier);
 	device_del(&cxlr->dev);
 
 	/*
@@ -2164,6 +2272,63 @@ static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int i
 	return cxlr;
 }
 
+static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
+{
+	int cset = 0;
+	int rc;
+
+	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+		if (cxlr->coord[i].read_bandwidth) {
+			rc = 0;
+			if (cxl_need_node_perf_attrs_update(nid))
+				node_set_perf_attrs(nid, &cxlr->coord[i], i);
+			else
+				rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
+
+			if (rc == 0)
+				cset++;
+		}
+	}
+
+	if (!cset)
+		return false;
+
+	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access0_group());
+	if (rc)
+		dev_dbg(&cxlr->dev, "Failed to update access0 group\n");
+
+	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_access1_group());
+	if (rc)
+		dev_dbg(&cxlr->dev, "Failed to update access1 group\n");
+
+	return true;
+}
+
+static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
+					  unsigned long action, void *arg)
+{
+	struct cxl_region *cxlr = container_of(nb, struct cxl_region,
+					       memory_notifier);
+	struct cxl_region_params *p = &cxlr->params;
+	struct cxl_endpoint_decoder *cxled = p->targets[0];
+	struct cxl_decoder *cxld = &cxled->cxld;
+	struct memory_notify *mnb = arg;
+	int nid = mnb->status_change_nid;
+	int region_nid;
+
+	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
+		return NOTIFY_DONE;
+
+	region_nid = phys_to_target_node(cxld->hpa_range.start);
+	if (nid != region_nid)
+		return NOTIFY_DONE;
+
+	if (!cxl_region_update_coordinates(cxlr, nid))
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
 /**
  * devm_cxl_add_region - Adds a region to a decoder
  * @cxlrd: root decoder
@@ -2187,15 +2352,6 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
 	struct device *dev;
 	int rc;
 
-	switch (mode) {
-	case CXL_DECODER_RAM:
-	case CXL_DECODER_PMEM:
-		break;
-	default:
-		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
-		return ERR_PTR(-EINVAL);
-	}
-
 	cxlr = cxl_region_alloc(cxlrd, id);
 	if (IS_ERR(cxlr))
 		return cxlr;
@@ -2211,6 +2367,10 @@ static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
 	if (rc)
 		goto err;
 
+	cxlr->memory_notifier.notifier_call = cxl_region_perf_attrs_callback;
+	cxlr->memory_notifier.priority = CXL_CALLBACK_PRI;
+	register_memory_notifier(&cxlr->memory_notifier);
+
 	rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
 	if (rc)
 		return ERR_PTR(rc);
@@ -2246,6 +2406,15 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
 {
 	int rc;
 
+	switch (mode) {
+	case CXL_DECODER_RAM:
+	case CXL_DECODER_PMEM:
+		break;
+	default:
+		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
+		return ERR_PTR(-EINVAL);
+	}
+
 	rc = memregion_alloc(GFP_KERNEL);
 	if (rc < 0)
 		return ERR_PTR(rc);
@@ -2550,6 +2719,7 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
 		if (i == 0) {
 			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
 			if (!cxl_nvb) {
+				kfree(cxlr_pmem);
 				cxlr_pmem = ERR_PTR(-ENODEV);
 				goto out;
 			}
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index e5f13260fc..7c5cd069f1 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event,
  * DRAM Event Record
  * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
  */
-#define CXL_DPA_FLAGS_MASK	0x3F
-#define CXL_DPA_MASK		(~CXL_DPA_FLAGS_MASK)
+#define CXL_DPA_FLAGS_MASK	GENMASK(1, 0)
+#define CXL_DPA_MASK		GENMASK_ULL(63, 6)
 
 #define CXL_DPA_VOLATILE	BIT(0)
 #define CXL_DPA_NOT_REPAIRABLE	BIT(1)
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index de477eb7f5..72fa477407 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -6,6 +6,7 @@
 
 #include <linux/libnvdimm.h>
 #include <linux/bitfield.h>
+#include <linux/notifier.h>
 #include <linux/bitops.h>
 #include <linux/log2.h>
 #include <linux/node.h>
@@ -517,6 +518,8 @@ struct cxl_region_params {
  * @cxlr_pmem: (for pmem regions) cached copy of the nvdimm bridge
  * @flags: Region state flags
  * @params: active + config params for the region
+ * @coord: QoS access coordinates for the region
+ * @memory_notifier: notifier for setting the access coordinates to node
  */
 struct cxl_region {
 	struct device dev;
@@ -527,6 +530,8 @@ struct cxl_region {
 	struct cxl_pmem_region *cxlr_pmem;
 	unsigned long flags;
 	struct cxl_region_params params;
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+	struct notifier_block memory_notifier;
 };
 
 struct cxl_nvdimm_bridge {
@@ -658,8 +663,7 @@ struct cxl_rcrb_info {
  * @rch: Indicate whether this dport was enumerated in RCH or VH mode
  * @port: reference to cxl_port that contains this downstream port
  * @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
  * @link_latency: calculated PCIe downstream latency
  */
 struct cxl_dport {
@@ -670,8 +674,7 @@ struct cxl_dport {
 	bool rch;
 	struct cxl_port *port;
 	struct cxl_regs regs;
-	struct access_coordinate sw_coord;
-	struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 	long link_latency;
 };
 
@@ -879,8 +882,8 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
 				      struct access_coordinate *coord);
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
-				struct access_coordinate *coord);
+void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
+				    struct cxl_endpoint_decoder *cxled);
 
 void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
 
@@ -888,6 +891,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
 			     struct access_coordinate *c1,
 			     struct access_coordinate *c2);
 
+bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+
 /*
  * Unit test builds overrides this to __weak, find the 'strong' version
  * of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 20fb3b35e8..36cee9c30c 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -401,7 +401,7 @@ enum cxl_devtype {
  */
 struct cxl_dpa_perf {
 	struct range dpa_range;
-	struct access_coordinate coord;
+	struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 	int qos_class;
 };
 
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 711b05d9a3..93992a1c8e 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -71,6 +71,15 @@ enum cxl_regloc_type {
 	CXL_REGLOC_RBI_TYPES
 };
 
+/*
+ * Table Access DOE, CDAT Read Entry Response
+ *
+ * Spec refs:
+ *
+ * CXL 3.1 8.1.11, Table 8-14: Read Entry Response
+ * CDAT Specification 1.03: 2 CDAT Data Structures
+ */
+
 struct cdat_header {
 	__le32 length;
 	u8 revision;
@@ -86,6 +95,21 @@ struct cdat_entry_header {
 } __packed;
 
 /*
+ * The DOE CDAT read response contains a CDAT read entry (either the
+ * CDAT header or a structure).
+ */
+union cdat_data {
+	struct cdat_header header;
+	struct cdat_entry_header entry;
+} __packed;
+
+/* There is an additional CDAT response header of 4 bytes. */
+struct cdat_doe_rsp {
+	__le32 doe_header;
+	u8 data[];
+} __packed;
+
+/*
  * CXL v3.0 6.2.3 Table 6-4
  * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
  * mode, otherwise it's 68B flits mode.
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 2ff361e756..659f9d46b1 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -957,11 +957,33 @@ static void cxl_error_resume(struct pci_dev *pdev)
 		dev->driver ? "successful" : "failed");
 }
 
+static void cxl_reset_done(struct pci_dev *pdev)
+{
+	struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *dev = &pdev->dev;
+
+	/*
+	 * FLR does not expect to touch the HDM decoders and related
+	 * registers. SBR, however, will wipe all device configurations.
+	 * Issue a warning if there was an active decoder before the reset
+	 * that no longer exists.
+	 */
+	guard(device)(&cxlmd->dev);
+	if (cxlmd->endpoint &&
+	    cxl_endpoint_decoder_reset_detected(cxlmd->endpoint)) {
+		dev_crit(dev, "SBR happened without memory regions removal.\n");
+		dev_crit(dev, "System may be unstable if regions hosted system memory.\n");
+		add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+	}
+}
+
 static const struct pci_error_handlers cxl_error_handlers = {
 	.error_detected		= cxl_error_detected,
 	.slot_reset		= cxl_slot_reset,
 	.resume			= cxl_error_resume,
 	.cor_error_detected	= cxl_cor_error_detected,
+	.reset_done		= cxl_reset_done,
 };
 
 static struct pci_driver cxl_pci_driver = {
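
A note on the cdat.c change: cdat_normalize() replaces the check_mul_overflow() calls and also folds the picosecond-to-nanosecond conversion (previously done once in acpi.c) into a single helper. Below is a minimal userspace sketch of the same arithmetic; the HMAT type codes are stand-in assumptions for the ACPI_HMAT_* constants, and the rounding is a plain-C equivalent of the kernel's DIV_ROUND_UP().

/* Build: cc -o cdat_norm cdat_norm.c */
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the ACPI_HMAT_* data-type codes (assumed values). */
enum hmat_type {
        HMAT_ACCESS_LATENCY,
        HMAT_READ_LATENCY,
        HMAT_WRITE_LATENCY,
        HMAT_READ_BANDWIDTH,
};

/* Same arithmetic as the patch's cdat_normalize(): reject the 0xffff
 * "invalid" marker and any entry * base product that would overflow
 * 32 bits, then convert latency entries from picoseconds to
 * nanoseconds, rounding up. */
static uint32_t cdat_normalize(uint16_t entry, uint64_t base, enum hmat_type type)
{
        uint32_t value;

        if (entry == 0xffff || !entry)
                return 0;
        if (base > UINT32_MAX / entry)
                return 0;

        value = (uint32_t)(entry * base);
        switch (type) {
        case HMAT_ACCESS_LATENCY:
        case HMAT_READ_LATENCY:
        case HMAT_WRITE_LATENCY:
                /* DIV_ROUND_UP(value, 1000) without the +999 overflow */
                value = value / 1000 + (value % 1000 != 0);
                break;
        default:
                break;
        }
        return value;
}

int main(void)
{
        /* 500 * 100 ps = 50000 ps -> 50 ns */
        printf("%u\n", (unsigned)cdat_normalize(500, 100, HMAT_READ_LATENCY));
        /* bandwidth entries pass through unscaled: 16 * 1024 = 16384 */
        printf("%u\n", (unsigned)cdat_normalize(16, 1024, HMAT_READ_BANDWIDTH));
        /* the DSLBIS "invalid" marker maps to 0 */
        printf("%u\n", (unsigned)cdat_normalize(0xffff, 100, HMAT_READ_LATENCY));
        return 0;
}

Returning 0 instead of warning on overflow means a bogus CDAT value simply yields an "unknown" coordinate, which coordinates_valid() in port.c will then reject.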
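The patch also converts every access_coordinate into a two-element array indexed by access class (0: local initiator, 1: nearest CPU). Two operations recur: combining coordinates along a path (__cxl_coordinates_combine) and rolling endpoint performance up into a region (cxl_region_perf_data_calculate). The sketch below reproduces both rules with illustrative numbers; it is a self-contained model, not the driver code.

/* Build: cc -o coords coords.c */
#include <stdio.h>

#define ACCESS_COORDINATE_MAX 2	/* access class 0 and access class 1 */

struct access_coordinate {
        unsigned int read_bandwidth;	/* MB/s */
        unsigned int write_bandwidth;	/* MB/s */
        unsigned int read_latency;	/* ns */
        unsigned int write_latency;	/* ns */
};

/* Path rule: latency accumulates per hop, bandwidth is capped by the
 * narrowest hop; zero bandwidth means "unknown" and must not clamp. */
static void coordinates_combine(struct access_coordinate *out,
                                struct access_coordinate *c1,
                                struct access_coordinate *c2)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                if (c1[i].write_bandwidth && c2[i].write_bandwidth)
                        out[i].write_bandwidth =
                                c1[i].write_bandwidth < c2[i].write_bandwidth ?
                                c1[i].write_bandwidth : c2[i].write_bandwidth;
                out[i].write_latency = c1[i].write_latency + c2[i].write_latency;

                if (c1[i].read_bandwidth && c2[i].read_bandwidth)
                        out[i].read_bandwidth =
                                c1[i].read_bandwidth < c2[i].read_bandwidth ?
                                c1[i].read_bandwidth : c2[i].read_bandwidth;
                out[i].read_latency = c1[i].read_latency + c2[i].read_latency;
        }
}

/* Region rule: an interleaved region sums its members' bandwidth and
 * reports the worst (largest) latency, as in the patch's region loop. */
static void region_account_endpoint(struct access_coordinate *region,
                                    struct access_coordinate *ep)
{
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
                if (ep[i].read_latency > region[i].read_latency)
                        region[i].read_latency = ep[i].read_latency;
                if (ep[i].write_latency > region[i].write_latency)
                        region[i].write_latency = ep[i].write_latency;
                region[i].read_bandwidth += ep[i].read_bandwidth;
                region[i].write_bandwidth += ep[i].write_bandwidth;
        }
}

int main(void)
{
        /* Illustrative numbers: one switch link plus the device itself */
        struct access_coordinate link[2] = {
                { 16000, 16000, 100, 100 }, { 16000, 16000, 110, 110 } };
        struct access_coordinate dev[2] = {
                {  8000,  8000, 250, 300 }, {  8000,  8000, 260, 310 } };
        struct access_coordinate ep[2] = { { 0 } };
        struct access_coordinate region[2] = { { 0 } };

        coordinates_combine(ep, link, dev);
        region_account_endpoint(region, ep);	/* endpoint 0 */
        region_account_endpoint(region, ep);	/* endpoint 1, same perf */

        printf("region class0: read %u MB/s @ %u ns\n",
               region[0].read_bandwidth, region[0].read_latency);
        return 0;
}

With two identical endpoints this prints 16000 MB/s at 350 ns: bandwidth doubles through interleaving while path latency stays at the worst member's value, which is exactly why the region sysfs access0/access1 groups expose both numbers separately.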
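The reworked cxl_cdat_read_table() keeps one subtle trick from the old code: each DOE response is read directly into the final table buffer, so the 4-byte Table Access response header of entry N lands on top of the last dword of entry N-1. The driver saves that dword before it is clobbered and restores it afterwards, which lets the entries concatenate in place without a bounce buffer. A small simulation of the buffer walk, with pci_doe() replaced by a stand-in:

/* Build: cc -o doe_walk doe_walk.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct cdat_doe_rsp: 4-byte DOE header, then entry payload. */
struct doe_rsp {
        uint32_t doe_header;
        uint8_t data[];
};

/* Stand-in for pci_doe(): writes a header carrying the next handle plus
 * `len` payload bytes, and returns the total size like the real call. */
static int fake_doe_read(struct doe_rsp *rsp, uint32_t next_handle, size_t len)
{
        rsp->doe_header = next_handle;
        memset(rsp->data, 0xA0 + (int)next_handle, len);
        return (int)(sizeof(*rsp) + len);
}

int main(void)
{
        uint32_t table[16] = { 0 };	/* keeps doe_header accesses aligned */
        struct doe_rsp *rsp = (struct doe_rsp *)table;
        uint32_t saved_dw = 0;
        size_t lens[] = { 12, 12, 8 };	/* dword-multiple entry sizes */
        size_t total = 0;

        for (uint32_t i = 0; i < 3; i++) {
                int rc = fake_doe_read(rsp, i + 1, lens[i]);
                size_t received = rc - sizeof(*rsp);

                /* The 4-byte response header just landed on top of the
                 * last dword of the previous entry: put the saved copy
                 * back, exactly as cxl_cdat_read_table() does. */
                rsp->doe_header = saved_dw;

                /* Advance by the payload size only, so the next header
                 * will again overlap this entry's tail; save that dword
                 * before it gets clobbered. */
                total += received;
                rsp = (struct doe_rsp *)((uint8_t *)rsp + received);
                saved_dw = rsp->doe_header;
        }

        printf("concatenated %zu bytes of CDAT entries\n", total);
        return 0;
}

The struct cdat_doe_rsp / union cdat_data types added to cxlpci.h make the same layout explicit in the driver, replacing the old dword-pointer arithmetic, and the new per-entry length checks are what allow the "discarding trailing data" warning instead of a hard failure.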
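Finally, the new reset_done handler distinguishes a Function Level Reset (which leaves HDM decoders alone) from a Secondary Bus Reset (which wipes them): an endpoint decoder that the driver believes is enabled but whose COMMITTED bit reads back clear implies an SBR destroyed the decode configuration while regions were still live. A tiny sketch of that predicate; the bit position here is illustrative only, the driver uses FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl) on the real register.

/* Build: cc -o committed committed.c */
#include <stdint.h>
#include <stdio.h>

#define HDM_DECODER_CTRL_COMMITTED	(1u << 10)	/* assumed bit */

/* Enabled decoder + clear COMMITTED bit after reset => SBR detected. */
static int decoder_reset_detected(uint32_t ctrl, int enabled)
{
        if (!enabled)
                return 0;
        return !(ctrl & HDM_DECODER_CTRL_COMMITTED);
}

int main(void)
{
        printf("%d\n", decoder_reset_detected(0x0, 1));	    /* 1: wiped */
        printf("%d\n", decoder_reset_detected(1u << 10, 1)); /* 0: intact */
        return 0;
}

When the check fires, the driver can only warn and taint the kernel, since memory regions backed by the device may already be mapped as system RAM.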