author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 18:50:12 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 18:50:12 +0000
commit     8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree       8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/cxl
parent     Adding debian version 6.7.12-1. (diff)
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cxl')
-rw-r--r--  drivers/cxl/Kconfig          3
-rw-r--r--  drivers/cxl/acpi.c         157
-rw-r--r--  drivers/cxl/core/Makefile    1
-rw-r--r--  drivers/cxl/core/cdat.c    517
-rw-r--r--  drivers/cxl/core/core.h      2
-rw-r--r--  drivers/cxl/core/mbox.c    122
-rw-r--r--  drivers/cxl/core/memdev.c   65
-rw-r--r--  drivers/cxl/core/pci.c      36
-rw-r--r--  drivers/cxl/core/pmem.c      8
-rw-r--r--  drivers/cxl/core/port.c    167
-rw-r--r--  drivers/cxl/core/region.c   13
-rw-r--r--  drivers/cxl/core/regs.c      5
-rw-r--r--  drivers/cxl/core/trace.h    14
-rw-r--r--  drivers/cxl/cxl.h           53
-rw-r--r--  drivers/cxl/cxlmem.h       130
-rw-r--r--  drivers/cxl/cxlpci.h        13
-rw-r--r--  drivers/cxl/mem.c           11
-rw-r--r--  drivers/cxl/pci.c           29
-rw-r--r--  drivers/cxl/port.c           8
19 files changed, 1147 insertions(+), 207 deletions(-)
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 8ea1d340e4..67998dbd1d 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -5,6 +5,7 @@ menuconfig CXL_BUS
select FW_LOADER
select FW_UPLOAD
select PCI_DOE
+ select FIRMWARE_TABLE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@@ -54,8 +55,10 @@ config CXL_MEM_RAW_COMMANDS
config CXL_ACPI
tristate "CXL ACPI: Platform Support"
depends on ACPI
+ depends on ACPI_NUMA
default CXL_BUS
select ACPI_TABLE_LIB
+ select ACPI_HMAT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index b3cec89126..af5cb818f8 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
+#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"
@@ -17,6 +18,10 @@ struct cxl_cxims_data {
u64 xormaps[] __counted_by(nr_maps);
};
+static const guid_t acpi_cxl_qtg_id_guid =
+ GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
+ 0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
+
/*
* Find a targets entry (n) in the host bridge interleave list.
* CXL Specification 3.0 Table 9-22
@@ -194,6 +199,123 @@ struct cxl_cfmws_context {
int id;
};
+/**
+ * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
+ * @handle: ACPI handle
+ * @coord: performance access coordinates
+ * @entries: number of QTG IDs to return
+ * @qos_class: int array provided by caller to return QTG IDs
+ *
+ * Return: number of QTG IDs returned, or -errno for errors
+ *
+ * Issue the QTG _DSM with the accompanying bandwidth and latency data to get
+ * the QTG IDs that are suitable for the performance point, ordered from most
+ * to least suitable. Write back the array of QTG IDs and return the actual
+ * number of QTG IDs written back.
+ */
+static int
+cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
+ int entries, int *qos_class)
+{
+ union acpi_object *out_obj, *out_buf, *obj;
+ union acpi_object in_array[4] = {
+ [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
+ [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
+ [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
+ [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
+ };
+ union acpi_object in_obj = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = 4,
+ .elements = in_array,
+ },
+ };
+ int count, pkg_entries, i;
+ u16 max_qtg;
+ int rc;
+
+ if (!entries)
+ return -EINVAL;
+
+ out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
+ if (!out_obj)
+ return -ENXIO;
+
+ if (out_obj->type != ACPI_TYPE_PACKAGE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* Check Max QTG ID */
+ obj = &out_obj->package.elements[0];
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ max_qtg = obj->integer.value;
+
+ /* It's legal to have 0 QTG entries */
+ pkg_entries = out_obj->package.count;
+ if (pkg_entries <= 1) {
+ rc = 0;
+ goto out;
+ }
+
+ /* Retrieve QTG IDs package */
+ obj = &out_obj->package.elements[1];
+ if (obj->type != ACPI_TYPE_PACKAGE) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ pkg_entries = obj->package.count;
+ count = min(entries, pkg_entries);
+ for (i = 0; i < count; i++) {
+ u16 qtg_id;
+
+ out_buf = &obj->package.elements[i];
+ if (out_buf->type != ACPI_TYPE_INTEGER) {
+ rc = -ENXIO;
+ goto out;
+ }
+
+ qtg_id = out_buf->integer.value;
+ if (qtg_id > max_qtg)
+ pr_warn("QTG ID %u greater than MAX %u\n",
+ qtg_id, max_qtg);
+
+ qos_class[i] = qtg_id;
+ }
+ rc = count;
+
+out:
+ ACPI_FREE(out_obj);
+ return rc;
+}
+
+static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
+ struct access_coordinate *coord, int entries,
+ int *qos_class)
+{
+ struct device *dev = cxl_root->port.uport_dev;
+ acpi_handle handle;
+
+ if (!dev_is_platform(dev))
+ return -ENODEV;
+
+ handle = ACPI_HANDLE(dev);
+ if (!handle)
+ return -ENODEV;
+
+ return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
+}
+
+static const struct cxl_root_ops acpi_root_ops = {
+ .qos_class = cxl_acpi_qos_class,
+};
+
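For orientation, the parsing logic in cxl_acpi_evaluate_qtg_dsm() above implies a _DSM output package shaped roughly as sketched below. This layout is inferred from the handler in this hunk, not quoted from the ACPI/CXL specification:

	Package {
		Max_QTG_ID,                          /* element [0], Integer */
		Package { QTG_ID_0, QTG_ID_1, ... }  /* element [1], Integers, most to least suitable */
	}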
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
@@ -399,8 +521,31 @@ static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
return 0;
}
+static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
+{
+ struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
+ u32 uid;
+ int rc;
+
+ if (kstrtou32(acpi_device_uid(hb), 0, &uid))
+ return -EINVAL;
+
+ rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
+ if (rc < 0)
+ return rc;
+
+ /* Adjust back to picoseconds from nanoseconds */
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ dport->hb_coord[i].read_latency *= 1000;
+ dport->hb_coord[i].write_latency *= 1000;
+ }
+
+ return 0;
+}
+
static int add_host_bridge_dport(struct device *match, void *arg)
{
+ int ret;
acpi_status rc;
struct device *bridge;
struct cxl_dport *dport;
@@ -450,6 +595,10 @@ static int add_host_bridge_dport(struct device *match, void *arg)
if (IS_ERR(dport))
return PTR_ERR(dport);
+ ret = get_genport_coordinates(match, dport);
+ if (ret)
+ dev_dbg(match, "Failed to get generic port perf coordinates.\n");
+
return 0;
}
@@ -666,6 +815,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
struct resource *cxl_res;
+ struct cxl_root *cxl_root;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@@ -685,9 +835,10 @@ static int cxl_acpi_probe(struct platform_device *pdev)
cxl_res->end = -1;
cxl_res->flags = IORESOURCE_MEM;
- root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
- if (IS_ERR(root_port))
- return PTR_ERR(root_port);
+ cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
+ if (IS_ERR(cxl_root))
+ return PTR_ERR(cxl_root);
+ root_port = &cxl_root->port;
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport);
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 1f66b5d4d9..9259bcc677 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -13,5 +13,6 @@ cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
cxl_core-y += pmu.o
+cxl_core-y += cdat.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
new file mode 100644
index 0000000000..fbf167f9d5
--- /dev/null
+++ b/drivers/cxl/core/cdat.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
+#include <linux/acpi.h>
+#include <linux/xarray.h>
+#include <linux/fw_table.h>
+#include <linux/node.h>
+#include <linux/overflow.h>
+#include "cxlpci.h"
+#include "cxlmem.h"
+#include "core.h"
+#include "cxl.h"
+
+struct dsmas_entry {
+ struct range dpa_range;
+ u8 handle;
+ struct access_coordinate coord;
+
+ int entries;
+ int qos_class;
+};
+
+static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cdat_header *hdr = &header->cdat;
+ struct acpi_cdat_dsmas *dsmas;
+ int size = sizeof(*hdr) + sizeof(*dsmas);
+ struct xarray *dsmas_xa = arg;
+ struct dsmas_entry *dent;
+ u16 len;
+ int rc;
+
+ len = le16_to_cpu((__force __le16)hdr->length);
+ if (len != size || (unsigned long)hdr + len > end) {
+ pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
+ return -EINVAL;
+ }
+
+ /* Skip common header */
+ dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);
+
+ dent = kzalloc(sizeof(*dent), GFP_KERNEL);
+ if (!dent)
+ return -ENOMEM;
+
+ dent->handle = dsmas->dsmad_handle;
+ dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
+ dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
+ le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
+
+ rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
+ if (rc) {
+ kfree(dent);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+ int access, unsigned int val)
+{
+ switch (access) {
+ case ACPI_HMAT_ACCESS_LATENCY:
+ coord->read_latency = val;
+ coord->write_latency = val;
+ break;
+ case ACPI_HMAT_READ_LATENCY:
+ coord->read_latency = val;
+ break;
+ case ACPI_HMAT_WRITE_LATENCY:
+ coord->write_latency = val;
+ break;
+ case ACPI_HMAT_ACCESS_BANDWIDTH:
+ coord->read_bandwidth = val;
+ coord->write_bandwidth = val;
+ break;
+ case ACPI_HMAT_READ_BANDWIDTH:
+ coord->read_bandwidth = val;
+ break;
+ case ACPI_HMAT_WRITE_BANDWIDTH:
+ coord->write_bandwidth = val;
+ break;
+ }
+}
+
+static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cdat_header *hdr = &header->cdat;
+ struct acpi_cdat_dslbis *dslbis;
+ int size = sizeof(*hdr) + sizeof(*dslbis);
+ struct xarray *dsmas_xa = arg;
+ struct dsmas_entry *dent;
+ __le64 le_base;
+ __le16 le_val;
+ u64 val;
+ u16 len;
+ int rc;
+
+ len = le16_to_cpu((__force __le16)hdr->length);
+ if (len != size || (unsigned long)hdr + len > end) {
+ pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
+ return -EINVAL;
+ }
+
+ /* Skip common header */
+ dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);
+
+ /* Skip unrecognized data type */
+ if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
+ return 0;
+
+ /* Not a memory type, skip */
+ if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
+ return 0;
+
+ dent = xa_load(dsmas_xa, dslbis->handle);
+ if (!dent) {
+ pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
+ return 0;
+ }
+
+ le_base = (__force __le64)dslbis->entry_base_unit;
+ le_val = (__force __le16)dslbis->entry[0];
+ rc = check_mul_overflow(le64_to_cpu(le_base),
+ le16_to_cpu(le_val), &val);
+ if (rc)
+ pr_warn("DSLBIS value overflowed.\n");
+
+ cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+
+ return 0;
+}
+
+static int cdat_table_parse_output(int rc)
+{
+ if (rc < 0)
+ return rc;
+ if (rc == 0)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int cxl_cdat_endpoint_process(struct cxl_port *port,
+ struct xarray *dsmas_xa)
+{
+ int rc;
+
+ rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
+ dsmas_xa, port->cdat.table);
+ rc = cdat_table_parse_output(rc);
+ if (rc)
+ return rc;
+
+ rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
+ dsmas_xa, port->cdat.table);
+ return cdat_table_parse_output(rc);
+}
+
+static int cxl_port_perf_data_calculate(struct cxl_port *port,
+ struct xarray *dsmas_xa)
+{
+ struct access_coordinate ep_c;
+ struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+ struct dsmas_entry *dent;
+ int valid_entries = 0;
+ unsigned long index;
+ int rc;
+
+ rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+ if (rc) {
+ dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
+ return rc;
+ }
+
+ rc = cxl_hb_get_perf_coordinates(port, coord);
+ if (rc) {
+ dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
+ return rc;
+ }
+
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+
+ if (!cxl_root)
+ return -ENODEV;
+
+ if (!cxl_root->ops || !cxl_root->ops->qos_class)
+ return -EOPNOTSUPP;
+
+ xa_for_each(dsmas_xa, index, dent) {
+ int qos_class;
+
+ cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
+ /*
+ * Keep the host bridge coordinates separate from the dsmas
+ * coordinates so that access classes 0 and 1 can be calculated
+ * for the region later.
+ */
+ cxl_coordinates_combine(&coord[ACCESS_COORDINATE_LOCAL],
+ &coord[ACCESS_COORDINATE_LOCAL],
+ &dent->coord);
+ dent->entries = 1;
+ rc = cxl_root->ops->qos_class(cxl_root,
+ &coord[ACCESS_COORDINATE_LOCAL],
+ 1, &qos_class);
+ if (rc != 1)
+ continue;
+
+ valid_entries++;
+ dent->qos_class = qos_class;
+ }
+
+ if (!valid_entries)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ dpa_perf->dpa_range = dent->dpa_range;
+ dpa_perf->coord = dent->coord;
+ dpa_perf->qos_class = dent->qos_class;
+ dev_dbg(dev,
+ "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
+ dent->dpa_range.start, dpa_perf->qos_class,
+ dent->coord.read_bandwidth, dent->coord.write_bandwidth,
+ dent->coord.read_latency, dent->coord.write_latency);
+}
+
+static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
+ struct xarray *dsmas_xa)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct device *dev = cxlds->dev;
+ struct range pmem_range = {
+ .start = cxlds->pmem_res.start,
+ .end = cxlds->pmem_res.end,
+ };
+ struct range ram_range = {
+ .start = cxlds->ram_res.start,
+ .end = cxlds->ram_res.end,
+ };
+ struct dsmas_entry *dent;
+ unsigned long index;
+
+ xa_for_each(dsmas_xa, index, dent) {
+ if (resource_size(&cxlds->ram_res) &&
+ range_contains(&ram_range, &dent->dpa_range))
+ update_perf_entry(dev, dent, &mds->ram_perf);
+ else if (resource_size(&cxlds->pmem_res) &&
+ range_contains(&pmem_range, &dent->dpa_range))
+ update_perf_entry(dev, dent, &mds->pmem_perf);
+ else
+ dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
+ dent->dpa_range.start);
+ }
+}
+
+static int match_cxlrd_qos_class(struct device *dev, void *data)
+{
+ int dev_qos_class = *(int *)data;
+ struct cxl_root_decoder *cxlrd;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxlrd = to_cxl_root_decoder(dev);
+ if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ if (cxlrd->qos_class == dev_qos_class)
+ return 1;
+
+ return 0;
+}
+
+static void reset_dpa_perf(struct cxl_dpa_perf *dpa_perf)
+{
+ *dpa_perf = (struct cxl_dpa_perf) {
+ .qos_class = CXL_QOS_CLASS_INVALID,
+ };
+}
+
+static bool cxl_qos_match(struct cxl_port *root_port,
+ struct cxl_dpa_perf *dpa_perf)
+{
+ if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
+ return false;
+
+ if (!device_for_each_child(&root_port->dev, &dpa_perf->qos_class,
+ match_cxlrd_qos_class))
+ return false;
+
+ return true;
+}
+
+static int match_cxlrd_hb(struct device *dev, void *data)
+{
+ struct device *host_bridge = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_root_decoder *cxlrd;
+
+ if (!is_root_decoder(dev))
+ return 0;
+
+ cxlrd = to_cxl_root_decoder(dev);
+ cxlsd = &cxlrd->cxlsd;
+
+ guard(rwsem_read)(&cxl_region_rwsem);
+ for (int i = 0; i < cxlsd->nr_targets; i++) {
+ if (host_bridge == cxlsd->target[i]->dport_dev)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
+{
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+ struct cxl_port *root_port;
+ int rc;
+
+ struct cxl_root *cxl_root __free(put_cxl_root) =
+ find_cxl_root(cxlmd->endpoint);
+
+ if (!cxl_root)
+ return -ENODEV;
+
+ root_port = &cxl_root->port;
+
+ /* Check that the QTG IDs are all sane between end device and root decoders */
+ if (!cxl_qos_match(root_port, &mds->ram_perf))
+ reset_dpa_perf(&mds->ram_perf);
+ if (!cxl_qos_match(root_port, &mds->pmem_perf))
+ reset_dpa_perf(&mds->pmem_perf);
+
+ /* Check to make sure that the device's host bridge is under a root decoder */
+ rc = device_for_each_child(&root_port->dev,
+ cxlmd->endpoint->host_bridge, match_cxlrd_hb);
+ if (!rc) {
+ reset_dpa_perf(&mds->ram_perf);
+ reset_dpa_perf(&mds->pmem_perf);
+ }
+
+ return rc;
+}
+
+static void discard_dsmas(struct xarray *xa)
+{
+ unsigned long index;
+ void *ent;
+
+ xa_for_each(xa, index, ent) {
+ xa_erase(xa, index);
+ kfree(ent);
+ }
+ xa_destroy(xa);
+}
+DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
+
+void cxl_endpoint_parse_cdat(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct xarray __dsmas_xa;
+ struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
+ int rc;
+
+ xa_init(&__dsmas_xa);
+ if (!port->cdat.table)
+ return;
+
+ rc = cxl_cdat_endpoint_process(port, dsmas_xa);
+ if (rc < 0) {
+ dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
+ return;
+ }
+
+ rc = cxl_port_perf_data_calculate(port, dsmas_xa);
+ if (rc) {
+ dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
+ return;
+ }
+
+ cxl_memdev_set_qos_class(cxlds, dsmas_xa);
+ cxl_qos_class_verify(cxlmd);
+ cxl_memdev_update_perf(cxlmd);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
+
+static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct acpi_cdat_sslbis_table {
+ struct acpi_cdat_header header;
+ struct acpi_cdat_sslbis sslbis_header;
+ struct acpi_cdat_sslbe entries[];
+ } *tbl = (struct acpi_cdat_sslbis_table *)header;
+ int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
+ struct acpi_cdat_sslbis *sslbis;
+ struct cxl_port *port = arg;
+ struct device *dev = &port->dev;
+ int remain, entries, i;
+ u16 len;
+
+ len = le16_to_cpu((__force __le16)header->cdat.length);
+ remain = len - size;
+ if (!remain || remain % sizeof(tbl->entries[0]) ||
+ (unsigned long)header + len > end) {
+ dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
+ return -EINVAL;
+ }
+
+ sslbis = &tbl->sslbis_header;
+ /* Unrecognized data type, we can skip */
+ if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
+ return 0;
+
+ entries = remain / sizeof(tbl->entries[0]);
+ if (struct_size(tbl, entries, entries) != len)
+ return -EINVAL;
+
+ for (i = 0; i < entries; i++) {
+ u16 x = le16_to_cpu((__force __le16)tbl->entries[i].portx_id);
+ u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
+ __le64 le_base;
+ __le16 le_val;
+ struct cxl_dport *dport;
+ unsigned long index;
+ u16 dsp_id;
+ u64 val;
+
+ switch (x) {
+ case ACPI_CDAT_SSLBIS_US_PORT:
+ dsp_id = y;
+ break;
+ case ACPI_CDAT_SSLBIS_ANY_PORT:
+ switch (y) {
+ case ACPI_CDAT_SSLBIS_US_PORT:
+ dsp_id = x;
+ break;
+ case ACPI_CDAT_SSLBIS_ANY_PORT:
+ dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
+ break;
+ default:
+ dsp_id = y;
+ break;
+ }
+ break;
+ default:
+ dsp_id = x;
+ break;
+ }
+
+ le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
+ le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
+
+ if (check_mul_overflow(le64_to_cpu(le_base),
+ le16_to_cpu(le_val), &val))
+ dev_warn(dev, "SSLBIS value overflowed!\n");
+
+ xa_for_each(&port->dports, index, dport) {
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id)
+ cxl_access_coordinate_set(&dport->sw_coord,
+ sslbis->data_type,
+ val);
+ }
+ }
+
+ return 0;
+}
+
+void cxl_switch_parse_cdat(struct cxl_port *port)
+{
+ int rc;
+
+ if (!port->cdat.table)
+ return;
+
+ rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
+ port, port->cdat.table);
+ rc = cdat_table_parse_output(rc);
+ if (rc)
+ dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
+
+/**
+ * cxl_coordinates_combine - Combine the two input coordinates
+ *
+ * @out: Output coordinate of c1 and c2 combined
+ * @c1: input coordinates
+ * @c2: input coordinates
+ */
+void cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2)
+{
+ if (c1->write_bandwidth && c2->write_bandwidth)
+ out->write_bandwidth = min(c1->write_bandwidth,
+ c2->write_bandwidth);
+ out->write_latency = c1->write_latency + c2->write_latency;
+
+ if (c1->read_bandwidth && c2->read_bandwidth)
+ out->read_bandwidth = min(c1->read_bandwidth,
+ c2->read_bandwidth);
+ out->read_latency = c1->read_latency + c2->read_latency;
+}
+
+MODULE_IMPORT_NS(CXL);
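As a quick illustration of the combine rule implemented by cxl_coordinates_combine() above (when both inputs report a bandwidth, the minimum wins; latencies always accumulate), here is a minimal userspace sketch with made-up numbers; struct access_coordinate is stubbed locally so it builds outside the kernel:

	#include <stdio.h>

	/* local stand-in for the kernel's struct access_coordinate */
	struct access_coordinate {
		unsigned int read_bandwidth, write_bandwidth;	/* MB/s */
		unsigned int read_latency, write_latency;	/* ps */
	};

	int main(void)
	{
		struct access_coordinate link = { 4000, 4000, 17000, 17000 };
		struct access_coordinate sw = { 8000, 8000, 5000, 5000 };
		struct access_coordinate out = { 0 };

		/* same rule as cxl_coordinates_combine(): min bandwidth, summed latency */
		out.read_bandwidth = link.read_bandwidth < sw.read_bandwidth ?
				     link.read_bandwidth : sw.read_bandwidth;
		out.write_bandwidth = link.write_bandwidth < sw.write_bandwidth ?
				      link.write_bandwidth : sw.write_bandwidth;
		out.read_latency = link.read_latency + sw.read_latency;
		out.write_latency = link.write_latency + sw.write_latency;

		/* prints: bw 4000 MB/s, latency 22000 ps */
		printf("bw %u MB/s, latency %u ps\n", out.read_bandwidth, out.read_latency);
		return 0;
	}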
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 86d7ba2323..3b64fb1b9e 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -88,4 +88,6 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
+long cxl_pci_get_latency(struct pci_dev *pdev);
+
#endif /* __CXL_CORE_H__ */
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 36270dcfb4..65185c9fa0 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -63,6 +63,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
+ CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
/*
@@ -836,54 +837,37 @@ out:
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-static const uuid_t gen_media_event_uuid =
- UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
- 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6);
-
-/*
- * DRAM Event Record
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
- */
-static const uuid_t dram_event_uuid =
- UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
- 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24);
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-static const uuid_t mem_mod_event_uuid =
- UUID_INIT(0xfe927475, 0xdd59, 0x4339,
- 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
-
-static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
- enum cxl_event_log_type type,
- struct cxl_event_record_raw *record)
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ enum cxl_event_type event_type,
+ const uuid_t *uuid, union cxl_event *evt)
{
- uuid_t *id = &record->hdr.id;
-
- if (uuid_equal(id, &gen_media_event_uuid)) {
- struct cxl_event_gen_media *rec =
- (struct cxl_event_gen_media *)record;
+ if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
+ trace_cxl_general_media(cxlmd, type, &evt->gen_media);
+ else if (event_type == CXL_CPER_EVENT_DRAM)
+ trace_cxl_dram(cxlmd, type, &evt->dram);
+ else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
+ trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
+ else
+ trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
- trace_cxl_general_media(cxlmd, type, rec);
- } else if (uuid_equal(id, &dram_event_uuid)) {
- struct cxl_event_dram *rec = (struct cxl_event_dram *)record;
+static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ struct cxl_event_record_raw *record)
+{
+ enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
+ const uuid_t *uuid = &record->id;
- trace_cxl_dram(cxlmd, type, rec);
- } else if (uuid_equal(id, &mem_mod_event_uuid)) {
- struct cxl_event_mem_module *rec =
- (struct cxl_event_mem_module *)record;
+ if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
+ ev_type = CXL_CPER_EVENT_GEN_MEDIA;
+ else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
+ ev_type = CXL_CPER_EVENT_DRAM;
+ else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
+ ev_type = CXL_CPER_EVENT_MEM_MODULE;
- trace_cxl_memory_module(cxlmd, type, rec);
- } else {
- /* For unknown record types print just the header */
- trace_cxl_generic_event(cxlmd, type, record);
- }
+ cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
static int cxl_clear_event_record(struct cxl_memdev_state *mds,
@@ -926,9 +910,12 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
*/
i = 0;
for (cnt = 0; cnt < total; cnt++) {
- payload->handles[i++] = get_pl->records[cnt].hdr.handle;
+ struct cxl_event_record_raw *raw = &get_pl->records[cnt];
+ struct cxl_event_generic *gen = &raw->event.generic;
+
+ payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
- le16_to_cpu(payload->handles[i]));
+ le16_to_cpu(payload->handles[i - 1]));
if (i == max_handles) {
payload->nr_recs = i;
@@ -959,24 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
struct device *dev = mds->cxlds.dev;
struct cxl_get_event_payload *payload;
- struct cxl_mbox_cmd mbox_cmd;
u8 log_type = type;
u16 nr_rec;
mutex_lock(&mds->event.log_lock);
payload = mds->event.buf;
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
- .payload_in = &log_type,
- .size_in = sizeof(log_type),
- .payload_out = payload,
- .size_out = mds->payload_size,
- .min_out = struct_size(payload, records, 0),
- };
-
do {
int rc, i;
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+ .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+ .payload_in = &log_type,
+ .size_in = sizeof(log_type),
+ .payload_out = payload,
+ .size_out = mds->payload_size,
+ .min_out = struct_size(payload, records, 0),
+ };
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc) {
@@ -991,8 +976,8 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
break;
for (i = 0; i < nr_rec; i++)
- cxl_event_trace_record(cxlmd, type,
- &payload->records[i]);
+ __cxl_event_trace_record(cxlmd, type,
+ &payload->records[i]);
if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
trace_cxl_overflow(cxlmd, type, payload);
@@ -1309,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
struct cxl_mbox_poison_out *po;
struct cxl_mbox_poison_in pi;
- struct cxl_mbox_cmd mbox_cmd;
int nr_records = 0;
int rc;
@@ -1321,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
pi.offset = cpu_to_le64(offset);
pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
- mbox_cmd = (struct cxl_mbox_cmd) {
- .opcode = CXL_MBOX_OP_GET_POISON,
- .size_in = sizeof(pi),
- .payload_in = &pi,
- .size_out = mds->payload_size,
- .payload_out = po,
- .min_out = struct_size(po, record, 0),
- };
-
do {
+ struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+ .opcode = CXL_MBOX_OP_GET_POISON,
+ .size_in = sizeof(pi),
+ .payload_in = &pi,
+ .size_out = mds->payload_size,
+ .payload_out = po,
+ .min_out = struct_size(po, record, 0),
+ };
+
rc = cxl_internal_send_cmd(mds, &mbox_cmd);
if (rc)
break;
@@ -1404,6 +1388,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
+ mds->ram_perf.qos_class = CXL_QOS_CLASS_INVALID;
+ mds->pmem_perf.qos_class = CXL_QOS_CLASS_INVALID;
return mds;
}
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 2f43d368ba..d4e259f3a7 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -114,7 +114,7 @@ static DEVICE_ATTR_RO(serial);
static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_to_node(dev));
+ return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);
@@ -447,13 +447,41 @@ static struct attribute *cxl_memdev_attributes[] = {
NULL,
};
+static ssize_t pmem_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->pmem_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_pmem_qos_class =
+ __ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_pmem_attributes[] = {
&dev_attr_pmem_size.attr,
+ &dev_attr_pmem_qos_class.attr,
NULL,
};
+static ssize_t ram_qos_class_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+
+ return sysfs_emit(buf, "%d\n", mds->ram_perf.qos_class);
+}
+
+static struct device_attribute dev_attr_ram_qos_class =
+ __ATTR(qos_class, 0444, ram_qos_class_show, NULL);
+
static struct attribute *cxl_memdev_ram_attributes[] = {
&dev_attr_ram_size.attr,
+ &dev_attr_ram_qos_class.attr,
NULL,
};
@@ -477,14 +505,42 @@ static struct attribute_group cxl_memdev_attribute_group = {
.is_visible = cxl_memdev_visible,
};
+static umode_t cxl_ram_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_ram_qos_class.attr)
+ if (mds->ram_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_ram_attribute_group = {
.name = "ram",
.attrs = cxl_memdev_ram_attributes,
+ .is_visible = cxl_ram_visible,
};
+static umode_t cxl_pmem_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ if (a == &dev_attr_pmem_qos_class.attr)
+ if (mds->pmem_perf.qos_class == CXL_QOS_CLASS_INVALID)
+ return 0;
+
+ return a->mode;
+}
+
static struct attribute_group cxl_memdev_pmem_attribute_group = {
.name = "pmem",
.attrs = cxl_memdev_pmem_attributes,
+ .is_visible = cxl_pmem_visible,
};
static umode_t cxl_memdev_security_visible(struct kobject *kobj,
@@ -519,6 +575,13 @@ static const struct attribute_group *cxl_memdev_attribute_groups[] = {
NULL,
};
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd)
+{
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_ram_attribute_group);
+ sysfs_update_group(&cxlmd->dev.kobj, &cxl_memdev_pmem_attribute_group);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_memdev_update_perf, CXL);
+
static const struct device_type cxl_memdev_type = {
.name = "cxl_memdev",
.release = cxl_memdev_release,
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index deae3289de..e9e6c81ce0 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
+#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -998,3 +999,38 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
+
+static int cxl_flit_size(struct pci_dev *pdev)
+{
+ if (cxl_pci_flit_256(pdev))
+ return 256;
+
+ return 68;
+}
+
+/**
+ * cxl_pci_get_latency - calculate the link latency for the PCIe link
+ * @pdev: PCI device
+ *
+ * return: calculated latency or 0 for no latency
+ *
+ * CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
+ * Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
+ * LinkPropagationLatency is negligible, so 0 will be used
+ * RetimerLatency is assumed to be negligible and 0 will be used
+ * FlitLatency = FlitSize / LinkBandwidth
+ * FlitSize is defined by the spec, CXL rev3.0 4.2.1:
+ * a 68B flit is used up to 32GT/s; above 32GT/s, a 256B flit is used.
+ * The FlitLatency is converted to picoseconds.
+ */
+long cxl_pci_get_latency(struct pci_dev *pdev)
+{
+ long bw;
+
+ bw = pcie_link_speed_mbps(pdev);
+ if (bw < 0)
+ return 0;
+ bw /= BITS_PER_BYTE;
+
+ return cxl_flit_size(pdev) * MEGA / bw;
+}
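Plugging illustrative numbers into the formula documented above gives a quick sanity check (the 32000 Mbps figure assumes what pcie_link_speed_mbps() reports for a 32 GT/s link; this worked example is not part of the patch):

	bw          = 32000 Mbps / BITS_PER_BYTE         = 4000 MB/s
	FlitLatency = FlitSize * MEGA / bw = 68 * 1000000 / 4000 = 17000 ps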
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index fc94f52403..e69625a8d6 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -64,14 +64,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
- struct cxl_port *port = find_cxl_root(cxlmd->endpoint);
+ struct cxl_root *cxl_root __free(put_cxl_root) =
+ find_cxl_root(cxlmd->endpoint);
struct device *dev;
- if (!port)
+ if (!cxl_root)
return NULL;
- dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
- put_device(&port->dev);
+ dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
if (!dev)
return NULL;
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 6b386771cc..4ae441ef32 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/node.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
@@ -537,7 +538,10 @@ static void cxl_port_release(struct device *dev)
xa_destroy(&port->dports);
xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
- kfree(port);
+ if (is_cxl_root(port))
+ kfree(to_cxl_root(port));
+ else
+ kfree(port);
}
static ssize_t decoders_committed_show(struct device *dev,
@@ -665,17 +669,31 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
struct cxl_dport *parent_dport)
{
- struct cxl_port *port;
+ struct cxl_root *cxl_root __free(kfree) = NULL;
+ struct cxl_port *port, *_port __free(kfree) = NULL;
struct device *dev;
int rc;
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!port)
- return ERR_PTR(-ENOMEM);
+ /* No parent_dport, root cxl_port */
+ if (!parent_dport) {
+ cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
+ if (!cxl_root)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ _port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!_port)
+ return ERR_PTR(-ENOMEM);
+ }
rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
if (rc < 0)
- goto err;
+ return ERR_PTR(rc);
+
+ if (cxl_root)
+ port = &no_free_ptr(cxl_root)->port;
+ else
+ port = no_free_ptr(_port);
+
port->id = rc;
port->uport_dev = uport_dev;
@@ -727,10 +745,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
dev->type = &cxl_port_type;
return port;
-
-err:
- kfree(port);
- return ERR_PTR(rc);
}
static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
@@ -837,6 +851,9 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
if (rc)
return ERR_PTR(rc);
+ if (parent_dport && dev_is_pci(uport_dev))
+ port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));
+
return port;
err:
@@ -880,6 +897,22 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
+struct cxl_root *devm_cxl_add_root(struct device *host,
+ const struct cxl_root_ops *ops)
+{
+ struct cxl_root *cxl_root;
+ struct cxl_port *port;
+
+ port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
+ if (IS_ERR(port))
+ return (struct cxl_root *)port;
+
+ cxl_root = to_cxl_root(port);
+ cxl_root->ops = ops;
+ return cxl_root;
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
+
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
/* There is no pci_bus associated with a CXL platform-root port */
@@ -935,7 +968,7 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false;
}
-struct cxl_port *find_cxl_root(struct cxl_port *port)
+struct cxl_root *find_cxl_root(struct cxl_port *port)
{
struct cxl_port *iter = port;
@@ -945,10 +978,19 @@ struct cxl_port *find_cxl_root(struct cxl_port *port)
if (!iter)
return NULL;
get_device(&iter->dev);
- return iter;
+ return to_cxl_root(iter);
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
+void put_cxl_root(struct cxl_root *cxl_root)
+{
+ if (!cxl_root)
+ return;
+
+ put_device(&cxl_root->port.dev);
+}
+EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
+
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
@@ -1104,6 +1146,9 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
+ if (dev_is_pci(dport_dev))
+ dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));
+
return dport;
}
@@ -2051,6 +2096,104 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
+/**
+ * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
+ * and host bridge
+ *
+ * @port: endpoint cxl_port
+ * @coord: output access coordinates
+ *
+ * Return: errno on failure, 0 on success.
+ */
+int cxl_hb_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord)
+{
+ struct cxl_port *iter = port;
+ struct cxl_dport *dport;
+
+ if (!is_cxl_endpoint(port))
+ return -EINVAL;
+
+ dport = iter->parent_dport;
+ while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
+ iter = to_cxl_port(iter->dev.parent);
+ dport = iter->parent_dport;
+ }
+
+ coord[ACCESS_COORDINATE_LOCAL] =
+ dport->hb_coord[ACCESS_COORDINATE_LOCAL];
+ coord[ACCESS_COORDINATE_CPU] =
+ dport->hb_coord[ACCESS_COORDINATE_CPU];
+
+ return 0;
+}
+
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+ return is_cxl_root(to_cxl_port(port->dev.parent));
+}
+
+/**
+ * cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
+ * of CXL path
+ * @port: endpoint cxl_port
+ * @coord: output performance data
+ *
+ * Return: errno on failure, 0 on success.
+ */
+int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord)
+{
+ struct access_coordinate c = {
+ .read_bandwidth = UINT_MAX,
+ .write_bandwidth = UINT_MAX,
+ };
+ struct cxl_port *iter = port;
+ struct cxl_dport *dport;
+ struct pci_dev *pdev;
+ unsigned int bw;
+ bool is_cxl_root;
+
+ if (!is_cxl_endpoint(port))
+ return -EINVAL;
+
+ /*
+ * Exit the loop when the parent port of the current iter port is cxl
+ * root. The iterative loop starts at the endpoint and gathers the
+ * latency of the CXL link from the current device/port to the connected
+ * downstream port each iteration.
+ */
+ do {
+ dport = iter->parent_dport;
+ iter = to_cxl_port(iter->dev.parent);
+ is_cxl_root = parent_port_is_cxl_root(iter);
+
+ /*
+ * There's no valid access_coordinate for a root port since RPs do not
+ * have CDAT and therefore need to be skipped.
+ */
+ if (!is_cxl_root)
+ cxl_coordinates_combine(&c, &c, &dport->sw_coord);
+ c.write_latency += dport->link_latency;
+ c.read_latency += dport->link_latency;
+ } while (!is_cxl_root);
+
+ /* Get the calculated PCI paths bandwidth */
+ pdev = to_pci_dev(port->uport_dev->parent);
+ bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
+ if (bw == 0)
+ return -ENXIO;
+ bw /= BITS_PER_BYTE;
+
+ c.write_bandwidth = min(c.write_bandwidth, bw);
+ c.read_bandwidth = min(c.read_bandwidth, bw);
+
+ *coord = c;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
+
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 8f0a2507dd..4c7fd2d5cc 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -552,8 +552,9 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
dev_name(&cxlr->dev));
if (IS_ERR(res)) {
- dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
- PTR_ERR(res));
+ dev_dbg(&cxlr->dev,
+ "HPA allocation error (%ld) for size:%pap in %s %pr\n",
+ PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
return PTR_ERR(res);
}
@@ -2113,13 +2114,13 @@ static struct cxl_region *to_cxl_region(struct device *dev)
return container_of(dev, struct cxl_region, dev);
}
-static void unregister_region(void *dev)
+static void unregister_region(void *_cxlr)
{
- struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region *cxlr = _cxlr;
struct cxl_region_params *p = &cxlr->params;
int i;
- device_del(dev);
+ device_del(&cxlr->dev);
/*
* Now that region sysfs is shutdown, the parameter block is now
@@ -2130,7 +2131,7 @@ static void unregister_region(void *dev)
detach_target(cxlr, i);
cxl_region_iomem_release(cxlr);
- put_device(dev);
+ put_device(&cxlr->dev);
}
static struct lock_class_key cxl_region_key;
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 372786f809..3c42f984ee 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
struct cxl_register_map *map)
{
+ u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
u64 offset = ((u64)reg_hi << 32) |
(reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
if (offset > pci_resource_len(pdev, bar)) {
dev_warn(&pdev->dev,
"BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
- &pdev->resource[bar], &offset, map->reg_type);
+ &pdev->resource[bar], &offset, reg_type);
return false;
}
- map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+ map->reg_type = reg_type;
map->resource = pci_resource_start(pdev, bar) + offset;
map->max_size = pci_resource_len(pdev, bar) - offset;
return true;
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index f01d0709c9..e5f13260fc 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -181,6 +181,7 @@ TRACE_EVENT(cxl_overflow,
* 1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
* 2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
* pass the dev, log, and CXL event header
+ * NOTE: The uuid must be assigned by the specific trace event
* 3) Use CXL_EVT_TP_printk() instead of TP_printk()
*
* See the generic_event tracepoint as an example.
@@ -203,7 +204,6 @@ TRACE_EVENT(cxl_overflow,
__assign_str(host, dev_name((cxlmd)->dev.parent)); \
__entry->log = (l); \
__entry->serial = (cxlmd)->cxlds->serial; \
- memcpy(&__entry->hdr_uuid, &(hdr).id, sizeof(uuid_t)); \
__entry->hdr_length = (hdr).length; \
__entry->hdr_flags = get_unaligned_le24((hdr).flags); \
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
@@ -225,9 +225,9 @@ TRACE_EVENT(cxl_overflow,
TRACE_EVENT(cxl_generic_event,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
- struct cxl_event_record_raw *rec),
+ const uuid_t *uuid, struct cxl_event_generic *gen_rec),
- TP_ARGS(cxlmd, log, rec),
+ TP_ARGS(cxlmd, log, uuid, gen_rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@@ -235,8 +235,9 @@ TRACE_EVENT(cxl_generic_event,
),
TP_fast_assign(
- CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
- memcpy(__entry->data, &rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
+ CXL_EVT_TP_fast_assign(cxlmd, log, gen_rec->hdr);
+ memcpy(&__entry->hdr_uuid, uuid, sizeof(uuid_t));
+ memcpy(__entry->data, gen_rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
),
CXL_EVT_TP_printk("%s",
@@ -337,6 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_GEN_MEDIA_UUID;
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -423,6 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_DRAM_UUID;
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@@ -570,6 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
+ __entry->hdr_uuid = CXL_EVENT_MEM_MODULE_UUID;
/* Memory Module Event */
__entry->event_type = rec->event_type;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 62fa96f856..de477eb7f5 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/log2.h>
+#include <linux/node.h>
#include <linux/io.h>
/**
@@ -588,6 +589,7 @@ struct cxl_dax_region {
* @depth: How deep this port is relative to the root. depth 0 is the root.
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
+ * @pci_latency: Upstream latency in picoseconds
*/
struct cxl_port {
struct device dev;
@@ -610,6 +612,30 @@ struct cxl_port {
size_t length;
} cdat;
bool cdat_available;
+ long pci_latency;
+};
+
+/**
+ * struct cxl_root - logical collection of root cxl_port items
+ *
+ * @port: cxl_port member
+ * @ops: cxl root operations
+ */
+struct cxl_root {
+ struct cxl_port port;
+ const struct cxl_root_ops *ops;
+};
+
+static inline struct cxl_root *
+to_cxl_root(const struct cxl_port *port)
+{
+ return container_of(port, struct cxl_root, port);
+}
+
+struct cxl_root_ops {
+ int (*qos_class)(struct cxl_root *cxl_root,
+ struct access_coordinate *coord, int entries,
+ int *qos_class);
};
static inline struct cxl_dport *
@@ -632,6 +658,9 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
+ * @sw_coord: access coordinates (performance) for switch from CDAT
+ * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
struct device *dport_dev;
@@ -641,6 +670,9 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
+ struct access_coordinate sw_coord;
+ struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+ long link_latency;
};
/**
@@ -698,7 +730,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
-struct cxl_port *find_cxl_root(struct cxl_port *port);
+struct cxl_root *devm_cxl_add_root(struct device *host,
+ const struct cxl_root_ops *ops);
+struct cxl_root *find_cxl_root(struct cxl_port *port);
+void put_cxl_root(struct cxl_root *cxl_root);
+DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@@ -837,6 +874,20 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
}
#endif
+void cxl_endpoint_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_port *port);
+
+int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord);
+int cxl_hb_get_perf_coordinates(struct cxl_port *port,
+ struct access_coordinate *coord);
+
+void cxl_memdev_update_perf(struct cxl_memdev *cxlmd);
+
+void cxl_coordinates_combine(struct access_coordinate *out,
+ struct access_coordinate *c1,
+ struct access_coordinate *c2);
+
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index a2fcbca253..20fb3b35e8 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -6,6 +6,8 @@
#include <linux/cdev.h>
#include <linux/uuid.h>
#include <linux/rcuwait.h>
+#include <linux/cxl-event.h>
+#include <linux/node.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@@ -392,6 +394,18 @@ enum cxl_devtype {
};
/**
+ * struct cxl_dpa_perf - DPA performance property entry
+ * @dpa_range: range for DPA address
+ * @coord: QoS performance data (i.e. latency, bandwidth)
+ * @qos_class: QoS Class cookies
+ */
+struct cxl_dpa_perf {
+ struct range dpa_range;
+ struct access_coordinate coord;
+ int qos_class;
+};
+
+/**
* struct cxl_dev_state - The driver device state
*
* cxl_dev_state represents the CXL driver/device state. It provides an
@@ -455,6 +469,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@@ -475,6 +491,10 @@ struct cxl_memdev_state {
u64 active_persistent_bytes;
u64 next_volatile_bytes;
u64 next_persistent_bytes;
+
+ struct cxl_dpa_perf ram_perf;
+ struct cxl_dpa_perf pmem_perf;
+
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
@@ -503,6 +523,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_FW_INFO = 0x0200,
CXL_MBOX_OP_TRANSFER_FW = 0x0201,
CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
+ CXL_MBOX_OP_GET_TIMESTAMP = 0x0300,
CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
CXL_MBOX_OP_GET_LOG = 0x0401,
@@ -580,25 +601,28 @@ struct cxl_mbox_identify {
} __packed;
/*
- * Common Event Record Format
- * CXL rev 3.0 section 8.2.9.2.1; Table 8-42
+ * General Media Event Record UUID
+ * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
-struct cxl_event_record_hdr {
- uuid_t id;
- u8 length;
- u8 flags[3];
- __le16 handle;
- __le16 related_handle;
- __le64 timestamp;
- u8 maint_op_class;
- u8 reserved[15];
-} __packed;
+#define CXL_EVENT_GEN_MEDIA_UUID \
+ UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, 0x85, 0xa9, 0x08, 0x8b, 0x16, \
+ 0x21, 0xeb, 0xa6)
-#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
-struct cxl_event_record_raw {
- struct cxl_event_record_hdr hdr;
- u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
-} __packed;
+/*
+ * DRAM Event Record UUID
+ * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
+ */
+#define CXL_EVENT_DRAM_UUID \
+ UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, \
+ 0x5c, 0x96, 0x24)
+
+/*
+ * Memory Module Event Record UUID
+ * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
+ */
+#define CXL_EVENT_MEM_MODULE_UUID \
+ UUID_INIT(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86, 0x79, 0xba, 0xb1, \
+ 0x13, 0xb7, 0x74)
/*
* Get Event Records output payload
@@ -641,74 +665,6 @@ struct cxl_mbox_clear_event_payload {
} __packed;
#define CXL_CLEAR_EVENT_MAX_HANDLES U8_MAX
-/*
- * General Media Event Record
- * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
- */
-#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
-struct cxl_event_gen_media {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
- u8 descriptor;
- u8 type;
- u8 transaction_type;
- u8 validity_flags[2];
- u8 channel;
- u8 rank;
- u8 device[3];
- u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
- u8 reserved[46];
-} __packed;
-
-/*
- * DRAM Event Record - DER
- * CXL rev 3.0 section 8.2.9.2.1.2; Table 3-44
- */
-#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
-struct cxl_event_dram {
- struct cxl_event_record_hdr hdr;
- __le64 phys_addr;
- u8 descriptor;
- u8 type;
- u8 transaction_type;
- u8 validity_flags[2];
- u8 channel;
- u8 rank;
- u8 nibble_mask[3];
- u8 bank_group;
- u8 bank;
- u8 row[3];
- u8 column[2];
- u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
- u8 reserved[0x17];
-} __packed;
-
-/*
- * Get Health Info Record
- * CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
- */
-struct cxl_get_health_info {
- u8 health_status;
- u8 media_status;
- u8 add_status;
- u8 life_used;
- u8 device_temp[2];
- u8 dirty_shutdown_cnt[4];
- u8 cor_vol_err_cnt[4];
- u8 cor_per_err_cnt[4];
-} __packed;
-
-/*
- * Memory Module Event Record
- * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
- */
-struct cxl_event_mem_module {
- struct cxl_event_record_hdr hdr;
- u8 event_type;
- struct cxl_get_health_info info;
- u8 reserved[0x3d];
-} __packed;
-
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
__le64 active_persistent_cap;
@@ -866,6 +822,10 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
+void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
+ enum cxl_event_log_type type,
+ enum cxl_event_type event_type,
+ const uuid_t *uuid, union cxl_event *evt);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 0fa4799ea3..711b05d9a3 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -85,6 +85,19 @@ struct cdat_entry_header {
__le16 length;
} __packed;
+/*
+ * CXL v3.0 6.2.3 Table 6-4
+ * The table indicates that if PCIe Flit Mode is set, then CXL is in 256B flits
+ * mode, otherwise it's 68B flits mode.
+ */
+static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
+{
+ u16 lnksta2;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
+ return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
+}
+
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
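A minimal caller sketch for the cxl_pci_flit_256() helper added above (hypothetical function name; it simply mirrors how cxl_flit_size() in core/pci.c consumes this predicate):

	static int example_cxl_flit_size(struct pci_dev *pdev)
	{
		/* PCIe Flit Mode negotiated means 256B CXL flits, otherwise 68B */
		return cxl_pci_flit_256(pdev) ? 256 : 68;
	}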
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index e087febf9a..0c79d9ce87 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -217,16 +217,15 @@ static DEVICE_ATTR_WO(trigger_poison_list);
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
- if (a == &dev_attr_trigger_poison_list.attr) {
- struct device *dev = kobj_to_dev(kobj);
- struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_memdev_state *mds =
- to_cxl_memdev_state(cxlmd->cxlds);
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+ if (a == &dev_attr_trigger_poison_list.attr)
if (!test_bit(CXL_POISON_ENABLED_LIST,
mds->poison.enabled_cmds))
return 0;
- }
+
return a->mode;
}
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 0155fb66b5..2ff361e756 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <asm-generic/unaligned.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
@@ -381,7 +382,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
return rc;
}
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_dev_state *cxlds = &mds->cxlds;
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
@@ -440,7 +441,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
/* background command interrupts are optional */
- if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+ if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
return 0;
msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
@@ -587,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
}
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
{
int nvecs;
@@ -604,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nvecs < 1) {
dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
- return -ENXIO;
+ return false;
}
- return 0;
+ return true;
}
static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -742,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
}
static int cxl_event_config(struct pci_host_bridge *host_bridge,
- struct cxl_memdev_state *mds)
+ struct cxl_memdev_state *mds, bool irq_avail)
{
struct cxl_event_interrupt_policy policy;
int rc;
@@ -754,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
if (!host_bridge->native_cxl_error)
return 0;
+ if (!irq_avail) {
+ dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
+ return 0;
+ }
+
rc = cxl_mem_alloc_event_buf(mds);
if (rc)
return rc;
@@ -788,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
int i, rc, pmu_count;
+ bool irq_avail;
/*
* Double check the anonymous union trickery in struct cxl_regs
@@ -845,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
else
dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
- rc = cxl_alloc_irq_vectors(pdev);
- if (rc)
- return rc;
+ irq_avail = cxl_alloc_irq_vectors(pdev);
- rc = cxl_pci_setup_mailbox(mds);
+ rc = cxl_pci_setup_mailbox(mds, irq_avail);
if (rc)
return rc;
@@ -908,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
}
- rc = cxl_event_config(host_bridge, mds);
+ rc = cxl_event_config(host_bridge, mds, irq_avail);
if (rc)
return rc;
@@ -969,6 +974,6 @@ static struct pci_driver cxl_pci_driver = {
},
};
-MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
+MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index 47bc8e0b85..97c2156667 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -69,6 +69,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
if (rc < 0)
return rc;
+ cxl_switch_parse_cdat(port);
+
cxlhdm = devm_cxl_setup_hdm(port, NULL);
if (!IS_ERR(cxlhdm))
return devm_cxl_enumerate_decoders(cxlhdm, NULL);
@@ -109,6 +111,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
+ cxl_endpoint_parse_cdat(port);
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
@@ -127,14 +130,15 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
* This can't fail in practice as CXL root exit unregisters all
* descendant ports and that in turn synchronizes with cxl_port_probe()
*/
- root = find_cxl_root(port);
+ struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
+
+ root = &cxl_root->port;
/*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
device_for_each_child(&port->dev, root, discover_region);
- put_device(&root->dev);
return 0;
}