author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:52 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:52 +0000
commit     3afb00d3f86d3d924f88b56fa8285d4e9db85852 (patch)
tree       95a985d3019522cea546b7d8df621369bc44fc6c /drivers/cxl
parent     Adding debian version 6.9.12-1. (diff)
download   linux-3afb00d3f86d3d924f88b56fa8285d4e9db85852.tar.xz
           linux-3afb00d3f86d3d924f88b56fa8285d4e9db85852.zip
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/cxl')
 drivers/cxl/Kconfig       |  1
 drivers/cxl/acpi.c        | 93
 drivers/cxl/core/core.h   |  7
 drivers/cxl/core/hdm.c    | 13
 drivers/cxl/core/mbox.c   | 48
 drivers/cxl/core/pci.c    |  6
 drivers/cxl/core/region.c | 91
 drivers/cxl/core/regs.c   |  2
 drivers/cxl/core/trace.c  | 91
 drivers/cxl/core/trace.h  | 70
 drivers/cxl/cxl.h         |  7
 drivers/cxl/cxlmem.h      | 14
 drivers/cxl/cxlpci.h      |  1
 drivers/cxl/pci.c         | 73
 drivers/cxl/pmem.c        |  2
 15 files changed, 338 insertions(+), 181 deletions(-)
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 5f3c9c5529..99b5c25be0 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -6,6 +6,7 @@ menuconfig CXL_BUS
 	select FW_UPLOAD
 	select PCI_DOE
 	select FIRMWARE_TABLE
+	select NUMA_KEEP_MEMINFO if (NUMA && X86)
 	help
 	  CXL is a bus that is electrically compatible with PCI Express, but
 	  layers three protocols on that signalling (CXL.io, CXL.cache, and
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index cb8c155a2c..571069863c 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -316,28 +316,59 @@ static const struct cxl_root_ops acpi_root_ops = {
 	.qos_class = cxl_acpi_qos_class,
 };
 
+static void del_cxl_resource(struct resource *res)
+{
+	if (!res)
+		return;
+	kfree(res->name);
+	kfree(res);
+}
+
+static struct resource *alloc_cxl_resource(resource_size_t base,
+					   resource_size_t n, int id)
+{
+	struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);
+
+	if (!res)
+		return NULL;
+
+	res->start = base;
+	res->end = base + n - 1;
+	res->flags = IORESOURCE_MEM;
+	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", id);
+	if (!res->name)
+		return NULL;
+
+	return no_free_ptr(res);
+}
+
+static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
+{
+	int rc = insert_resource(parent, res);
+
+	if (rc)
+		del_cxl_resource(res);
+	return rc;
+}
+
+DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
+	    if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
+DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
 static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
 			     struct cxl_cfmws_context *ctx)
 {
 	int target_map[CXL_DECODER_MAX_INTERLEAVE];
 	struct cxl_port *root_port = ctx->root_port;
-	struct resource *cxl_res = ctx->cxl_res;
 	struct cxl_cxims_context cxims_ctx;
-	struct cxl_root_decoder *cxlrd;
 	struct device *dev = ctx->dev;
 	cxl_calc_hb_fn cxl_calc_hb;
 	struct cxl_decoder *cxld;
 	unsigned int ways, i, ig;
-	struct resource *res;
 	int rc;
 
 	rc = cxl_acpi_cfmws_verify(dev, cfmws);
-	if (rc) {
-		dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
-			cfmws->base_hpa,
-			cfmws->base_hpa + cfmws->window_size - 1);
+	if (rc)
 		return rc;
-	}
 
 	rc = eiw_to_ways(cfmws->interleave_ways, &ways);
 	if (rc)
@@ -348,29 +379,23 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
 	for (i = 0; i < ways; i++)
 		target_map[i] = cfmws->interleave_targets[i];
 
-	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
+		cfmws->base_hpa, cfmws->window_size, ctx->id++);
 	if (!res)
 		return -ENOMEM;
 
-	res->name = kasprintf(GFP_KERNEL, "CXL Window %d", ctx->id++);
-	if (!res->name)
-		goto err_name;
-
-	res->start = cfmws->base_hpa;
-	res->end = cfmws->base_hpa + cfmws->window_size - 1;
-	res->flags = IORESOURCE_MEM;
-
 	/* add to the local resource tracking to establish a sort order */
-	rc = insert_resource(cxl_res, res);
+	rc = add_or_reset_cxl_resource(ctx->cxl_res, no_free_ptr(res));
 	if (rc)
-		goto err_insert;
+		return rc;
 
 	if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_MODULO)
 		cxl_calc_hb = cxl_hb_modulo;
 	else
 		cxl_calc_hb = cxl_hb_xor;
 
-	cxlrd = cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
+	struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
+		cxl_root_decoder_alloc(root_port, ways, cxl_calc_hb);
 	if (IS_ERR(cxlrd))
 		return PTR_ERR(cxlrd);
 
@@ -378,8 +403,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
 	cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
 	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
 	cxld->hpa_range = (struct range) {
-		.start = res->start,
-		.end = res->end,
+		.start = cfmws->base_hpa,
+		.end = cfmws->base_hpa + cfmws->window_size - 1,
 	};
 	cxld->interleave_ways = ways;
 	/*
@@ -399,11 +424,10 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
 			rc = acpi_table_parse_cedt(ACPI_CEDT_TYPE_CXIMS,
 						   cxl_parse_cxims, &cxims_ctx);
 			if (rc < 0)
-				goto err_xormap;
+				return rc;
 
 			if (!cxlrd->platform_data) {
 				dev_err(dev, "No CXIMS for HBIG %u\n", ig);
-				rc = -EINVAL;
-				goto err_xormap;
+				return -EINVAL;
 			}
 		}
 	}
@@ -411,18 +435,9 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
 	cxlrd->qos_class = cfmws->qtg_id;
 
 	rc = cxl_decoder_add(cxld, target_map);
-err_xormap:
 	if (rc)
-		put_device(&cxld->dev);
-	else
-		rc = cxl_decoder_autoremove(dev, cxld);
-	return rc;
-
-err_insert:
-	kfree(res->name);
-err_name:
-	kfree(res);
-	return -ENOMEM;
+		return rc;
+	return cxl_root_decoder_autoremove(dev, no_free_ptr(cxlrd));
 }
 
 static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
@@ -683,12 +698,6 @@ static void cxl_acpi_lock_reset_class(void *dev)
 	device_lock_reset_class(dev);
 }
 
-static void del_cxl_resource(struct resource *res)
-{
-	kfree(res->name);
-	kfree(res);
-}
-
 static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
 {
 	priv->desc = (unsigned long) pub;
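The acpi.c rewrite above converts goto-based unwind (err_name/err_insert/err_xormap) to the scope-based cleanup helpers in linux/cleanup.h: a variable annotated __free(name) runs its DEFINE_FREE() handler on every exit from the scope, and no_free_ptr() hands ownership off once a sink such as insert_resource() takes over. A minimal sketch of the pattern, using a hypothetical demo_obj type rather than the driver's structures:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct demo_obj {
	int id;
};

/* Teach __free(demo_obj) how to dispose of a half-built object */
DEFINE_FREE(demo_obj, struct demo_obj *, if (_T) kfree(_T))

static struct demo_obj *demo_obj_create(int id)
{
	/* Freed automatically on every early return below ... */
	struct demo_obj *obj __free(demo_obj) = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (id < 0)
		return NULL;	/* no goto/kfree needed for this error path */

	obj->id = id;

	/* ... except here, where no_free_ptr() transfers ownership out */
	return no_free_ptr(obj);
}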
"ram" : "pmem", - &avail); + cxl_decoder_mode_name(cxled->mode), &avail); rc = -ENOSPC; goto out; } @@ -901,8 +900,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, } rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), &cxld->interleave_granularity); - if (rc) + if (rc) { + dev_warn(&port->dev, + "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n", + port->id, cxld->id, ctrl); return rc; + } dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 65185c9fa0..2626f3fff2 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -56,6 +56,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0), + CXL_CMD(CLEAR_LOG, 0x10, 0, 0), + CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), @@ -331,6 +334,15 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) return false; break; } + case CXL_MBOX_OP_CLEAR_LOG: { + const uuid_t *uuid = (uuid_t *)payload_in; + + /* + * Restrict the ‘Clear log’ action to only apply to + * Vendor debug logs. + */ + return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID); + } default: break; } @@ -842,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, enum cxl_event_type event_type, const uuid_t *uuid, union cxl_event *evt) { - if (event_type == CXL_CPER_EVENT_GEN_MEDIA) - trace_cxl_general_media(cxlmd, type, &evt->gen_media); - else if (event_type == CXL_CPER_EVENT_DRAM) - trace_cxl_dram(cxlmd, type, &evt->dram); - else if (event_type == CXL_CPER_EVENT_MEM_MODULE) + if (event_type == CXL_CPER_EVENT_MEM_MODULE) { trace_cxl_memory_module(cxlmd, type, &evt->mem_module); - else + return; + } + if (event_type == CXL_CPER_EVENT_GENERIC) { trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); + return; + } + + if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { + u64 dpa, hpa = ULLONG_MAX; + struct cxl_region *cxlr; + + /* + * These trace points are annotated with HPA and region + * translations. Take topology mutation locks and lookup + * { HPA, REGION } from { DPA, MEMDEV } in the event record. 
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index 2773f05adb..8567dd11ea 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -525,7 +525,7 @@ static int cxl_cdat_get_length(struct device *dev,
 	__le32 response[2];
 	int rc;
 
-	rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
+	rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
 		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
 		     &request, sizeof(request),
 		     &response, sizeof(response));
@@ -555,7 +555,7 @@ static int cxl_cdat_read_table(struct device *dev,
 	__le32 request = CDAT_DOE_REQ(entry_handle);
 	int rc;
 
-	rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL,
+	rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL,
 		     CXL_DOE_PROTOCOL_TABLE_ACCESS,
 		     &request, sizeof(request),
 		     rsp, sizeof(*rsp) + remaining);
@@ -640,7 +640,7 @@ void read_cdat_data(struct cxl_port *port)
 	if (!pdev)
 		return;
 
-	doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+	doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL,
 				      CXL_DOE_PROTOCOL_TABLE_ACCESS);
 	if (!doe_mb) {
 		dev_dbg(dev, "No CDAT mailbox\n");
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index a600feb8a4..538ebd5a64 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2816,6 +2816,97 @@ struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa)
 	return ctx.cxlr;
 }
 
+static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	int gran = p->interleave_granularity;
+	int ways = p->interleave_ways;
+	u64 offset;
+
+	/* Is the hpa within this region at all */
+	if (hpa < p->res->start || hpa > p->res->end) {
+		dev_dbg(&cxlr->dev,
+			"Addr trans fail: hpa 0x%llx not in region\n", hpa);
+		return false;
+	}
+
+	/* Is the hpa in an expected chunk for its pos(-ition) */
+	offset = hpa - p->res->start;
+	offset = do_div(offset, gran * ways);
+	if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
+		return true;
+
+	dev_dbg(&cxlr->dev,
+		"Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
+
+	return false;
+}
+
+static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
+			  struct cxl_endpoint_decoder *cxled)
+{
+	u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
+	struct cxl_region_params *p = &cxlr->params;
+	int pos = cxled->pos;
+	u16 eig = 0;
+	u8 eiw = 0;
+
+	ways_to_eiw(p->interleave_ways, &eiw);
+	granularity_to_eig(p->interleave_granularity, &eig);
+
+	/*
+	 * The device position in the region interleave set was removed
+	 * from the offset at HPA->DPA translation. To reconstruct the
+	 * HPA, place the 'pos' in the offset.
+	 *
+	 * The placement of 'pos' in the HPA is determined by interleave
+	 * ways and granularity and is defined in the CXL Spec 3.0 Section
+	 * 8.2.4.19.13 Implementation Note: Device Decode Logic
+	 */
+
+	/* Remove the dpa base */
+	dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+
+	mask_upper = GENMASK_ULL(51, eig + 8);
+
+	if (eiw < 8) {
+		hpa_offset = (dpa_offset & mask_upper) << eiw;
+		hpa_offset |= pos << (eig + 8);
+	} else {
+		bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
+		bits_upper = bits_upper * 3;
+		hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
+	}
+
+	/* The lower bits remain unchanged */
+	hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+
+	/* Apply the hpa_offset to the region base address */
+	hpa = hpa_offset + p->res->start;
+
+	if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
+		return ULLONG_MAX;
+
+	return hpa;
+}
+
+u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+		  u64 dpa)
+{
+	struct cxl_region_params *p = &cxlr->params;
+	struct cxl_endpoint_decoder *cxled = NULL;
+
+	for (int i = 0; i < p->nr_targets; i++) {
+		cxled = p->targets[i];
+		if (cxlmd == cxled_to_memdev(cxled))
+			break;
+	}
+	if (!cxled || cxlmd != cxled_to_memdev(cxled))
+		return ULLONG_MAX;
+
+	return cxl_dpa_to_hpa(dpa, cxlr, cxled);
+}
+
 static struct lock_class_key cxl_pmem_region_key;
 
 static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
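The eiw < 8 branch of cxl_dpa_to_hpa() above rebuilds the host address in three moves: shift the DPA bits above the granularity boundary (bit eig + 8) up by eiw to make room for the interleave, drop the device's pos into the freed bits, and keep the low bits as-is. A userspace-runnable sketch of just that branch with made-up example values (2-way interleave at 256-byte granularity, so eiw = 1 and eig = 0): for device position 1, dpa_offset 0x1100 is that device's chunk 17, and the program prints hpa_offset 0x2300, i.e. HPA chunk 17 * 2 + 1 = 35.

#include <stdint.h>
#include <stdio.h>

/* Same bit-range helper the kernel uses, spelled out for userspace */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	uint64_t dpa_offset = 0x1100;	/* offset into this device's DPA span */
	unsigned int pos = 1;		/* device position in the interleave set */
	unsigned int eig = 0;		/* encoded granularity: 256B chunks */
	unsigned int eiw = 1;		/* encoded ways: 2-way interleave */
	uint64_t mask_upper = GENMASK_ULL(51, eig + 8);
	uint64_t hpa_offset;

	/* Upper bits stretch across the interleave ways ... */
	hpa_offset = (dpa_offset & mask_upper) << eiw;
	/* ... 'pos' lands at the granularity boundary ... */
	hpa_offset |= (uint64_t)pos << (eig + 8);
	/* ... and the offset within one chunk is untouched */
	hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);

	printf("dpa_offset %#llx pos %u -> hpa_offset %#llx\n",
	       (unsigned long long)dpa_offset, pos,
	       (unsigned long long)hpa_offset);
	return 0;
}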
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 3c42f984ee..e1082e749c 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -314,7 +314,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
 		.resource = CXL_RESOURCE_NONE,
 	};
 
-	regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL,
+	regloc = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL,
 					   CXL_DVSEC_REG_LOCATOR);
 	if (!regloc)
 		return -ENXIO;
diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c
index d0403dc3c8..7f2a9dd0d0 100644
--- a/drivers/cxl/core/trace.c
+++ b/drivers/cxl/core/trace.c
@@ -6,94 +6,3 @@
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
-
-static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos)
-{
-	struct cxl_region_params *p = &cxlr->params;
-	int gran = p->interleave_granularity;
-	int ways = p->interleave_ways;
-	u64 offset;
-
-	/* Is the hpa within this region at all */
-	if (hpa < p->res->start || hpa > p->res->end) {
-		dev_dbg(&cxlr->dev,
-			"Addr trans fail: hpa 0x%llx not in region\n", hpa);
-		return false;
-	}
-
-	/* Is the hpa in an expected chunk for its pos(-ition) */
-	offset = hpa - p->res->start;
-	offset = do_div(offset, gran * ways);
-	if ((offset >= pos * gran) && (offset < (pos + 1) * gran))
-		return true;
-
-	dev_dbg(&cxlr->dev,
-		"Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa);
-
-	return false;
-}
-
-static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr,
-			  struct cxl_endpoint_decoder *cxled)
-{
-	u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
-	struct cxl_region_params *p = &cxlr->params;
-	int pos = cxled->pos;
-	u16 eig = 0;
-	u8 eiw = 0;
-
-	ways_to_eiw(p->interleave_ways, &eiw);
-	granularity_to_eig(p->interleave_granularity, &eig);
-
-	/*
-	 * The device position in the region interleave set was removed
-	 * from the offset at HPA->DPA translation. To reconstruct the
-	 * HPA, place the 'pos' in the offset.
-	 *
-	 * The placement of 'pos' in the HPA is determined by interleave
-	 * ways and granularity and is defined in the CXL Spec 3.0 Section
-	 * 8.2.4.19.13 Implementation Note: Device Decode Logic
-	 */
-
-	/* Remove the dpa base */
-	dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-
-	mask_upper = GENMASK_ULL(51, eig + 8);
-
-	if (eiw < 8) {
-		hpa_offset = (dpa_offset & mask_upper) << eiw;
-		hpa_offset |= pos << (eig + 8);
-	} else {
-		bits_upper = (dpa_offset & mask_upper) >> (eig + 8);
-		bits_upper = bits_upper * 3;
-		hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8);
-	}
-
-	/* The lower bits remain unchanged */
-	hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
-
-	/* Apply the hpa_offset to the region base address */
-	hpa = hpa_offset + p->res->start;
-
-	if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos))
-		return ULLONG_MAX;
-
-	return hpa;
-}
-
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd,
-		  u64 dpa)
-{
-	struct cxl_region_params *p = &cxlr->params;
-	struct cxl_endpoint_decoder *cxled = NULL;
-
-	for (int i = 0; i < p->nr_targets; i++) {
-		cxled = p->targets[i];
-		if (cxlmd == cxled_to_memdev(cxled))
-			break;
-	}
-	if (!cxled || cxlmd != cxled_to_memdev(cxled))
-		return ULLONG_MAX;
-
-	return cxl_dpa_to_hpa(dpa, cxlr, cxled);
-}
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index 7c5cd069f1..ee5cd4eb2f 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -60,8 +60,8 @@ TRACE_EVENT(cxl_aer_uncorrectable_error,
 		__array(u32, header_log, CXL_HEADERLOG_SIZE_U32)
 	),
 	TP_fast_assign(
-		__assign_str(memdev, dev_name(&cxlmd->dev));
-		__assign_str(host, dev_name(cxlmd->dev.parent));
+		__assign_str(memdev);
+		__assign_str(host);
 		__entry->serial = cxlmd->cxlds->serial;
 		__entry->status = status;
 		__entry->first_error = fe;
@@ -106,8 +106,8 @@ TRACE_EVENT(cxl_aer_correctable_error,
 		__field(u32, status)
 	),
 	TP_fast_assign(
-		__assign_str(memdev, dev_name(&cxlmd->dev));
-		__assign_str(host, dev_name(cxlmd->dev.parent));
+		__assign_str(memdev);
+		__assign_str(host);
 		__entry->serial = cxlmd->cxlds->serial;
 		__entry->status = status;
 	),
@@ -142,8 +142,8 @@ TRACE_EVENT(cxl_overflow,
 	),
 
 	TP_fast_assign(
-		__assign_str(memdev, dev_name(&cxlmd->dev));
-		__assign_str(host, dev_name(cxlmd->dev.parent));
+		__assign_str(memdev);
+		__assign_str(host);
 		__entry->serial = cxlmd->cxlds->serial;
 		__entry->log = log;
 		__entry->count = le16_to_cpu(payload->overflow_err_count);
@@ -200,8 +200,8 @@ TRACE_EVENT(cxl_overflow,
 	__field(u8, hdr_maint_op_class)
 
 #define CXL_EVT_TP_fast_assign(cxlmd, l, hdr)				\
-	__assign_str(memdev, dev_name(&(cxlmd)->dev));			\
-	__assign_str(host, dev_name((cxlmd)->dev.parent));		\
+	__assign_str(memdev);						\
+	__assign_str(host);						\
 	__entry->log = (l);						\
 	__entry->serial = (cxlmd)->cxlds->serial;			\
 	__entry->hdr_length = (hdr).length;				\
@@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event,
 TRACE_EVENT(cxl_general_media,
 
 	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-		 struct cxl_event_gen_media *rec),
+		 struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec),
 
-	TP_ARGS(cxlmd, log, rec),
+	TP_ARGS(cxlmd, log, cxlr, hpa, rec),
 
 	TP_STRUCT__entry(
 		CXL_EVT_TP_entry
@@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media,
 		__field(u8, channel)
 		__field(u32, device)
 		__array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE)
-		__field(u16, validity_flags)
 		/* Following are out of order to pack trace record */
+		__field(u64, hpa)
+		__field_struct(uuid_t, region_uuid)
+		__field(u16, validity_flags)
 		__field(u8, rank)
 		__field(u8, dpa_flags)
+		__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
 	),
 
 	TP_fast_assign(
@@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media,
 		memcpy(__entry->comp_id, &rec->component_id,
 		       CXL_EVENT_GEN_MED_COMP_ID_SIZE);
 		__entry->validity_flags = get_unaligned_le16(&rec->validity_flags);
+		__entry->hpa = hpa;
+		if (cxlr) {
+			__assign_str(region_name);
+			uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+		} else {
+			__assign_str(region_name);
+			uuid_copy(&__entry->region_uuid, &uuid_null);
+		}
 	),
 
 	CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' "			\
 		"descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \
-		"device=%x comp_id=%s validity_flags='%s'",
+		"device=%x comp_id=%s validity_flags='%s' "		\
+		"hpa=%llx region=%s region_uuid=%pUb",
 		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
 		show_event_desc_flags(__entry->descriptor),
 		show_mem_event_type(__entry->type),
 		show_trans_type(__entry->transaction_type),
 		__entry->channel, __entry->rank, __entry->device,
 		__print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE),
-		show_valid_flags(__entry->validity_flags)
+		show_valid_flags(__entry->validity_flags),
+		__entry->hpa, __get_str(region_name), &__entry->region_uuid
 	)
 );
 
@@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media,
 TRACE_EVENT(cxl_dram,
 
 	TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
-		 struct cxl_event_dram *rec),
+		 struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec),
 
-	TP_ARGS(cxlmd, log, rec),
+	TP_ARGS(cxlmd, log, cxlr, hpa, rec),
 
 	TP_STRUCT__entry(
 		CXL_EVT_TP_entry
@@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram,
 		__field(u32, nibble_mask)
 		__field(u32, row)
 		__array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE)
+		__field(u64, hpa)
+		__field_struct(uuid_t, region_uuid)
 		__field(u8, rank)	/* Out of order to pack trace record */
 		__field(u8, bank_group)	/* Out of order to pack trace record */
 		__field(u8, bank)	/* Out of order to pack trace record */
 		__field(u8, dpa_flags)	/* Out of order to pack trace record */
+		__string(region_name, cxlr ? dev_name(&cxlr->dev) : "")
 	),
 
 	TP_fast_assign(
@@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram,
 		__entry->column = get_unaligned_le16(rec->column);
 		memcpy(__entry->cor_mask, &rec->correction_mask,
 		       CXL_EVENT_DER_CORRECTION_MASK_SIZE);
+		__entry->hpa = hpa;
+		if (cxlr) {
+			__assign_str(region_name);
+			uuid_copy(&__entry->region_uuid, &cxlr->params.uuid);
+		} else {
+			__assign_str(region_name);
+			uuid_copy(&__entry->region_uuid, &uuid_null);
+		}
 	),
 
 	CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \
 		"transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \
 		"bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \
-		"validity_flags='%s'",
+		"validity_flags='%s' "					\
+		"hpa=%llx region=%s region_uuid=%pUb",
 		__entry->dpa, show_dpa_flags(__entry->dpa_flags),
 		show_event_desc_flags(__entry->descriptor),
 		show_mem_event_type(__entry->type),
@@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram,
 		__entry->bank_group, __entry->bank,
 		__entry->row, __entry->column,
 		__print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE),
-		show_dram_valid_flags(__entry->validity_flags)
+		show_dram_valid_flags(__entry->validity_flags),
+		__entry->hpa, __get_str(region_name), &__entry->region_uuid
 	)
 );
 
@@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module,
 #define cxl_poison_overflow(flags, time)				\
 	(flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0)
 
-u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
-
 TRACE_EVENT(cxl_poison,
 
 	TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
@@ -668,8 +692,8 @@ TRACE_EVENT(cxl_poison,
 	),
 
 	TP_fast_assign(
-		__assign_str(memdev, dev_name(&cxlmd->dev));
-		__assign_str(host, dev_name(cxlmd->dev.parent));
+		__assign_str(memdev);
+		__assign_str(host);
 		__entry->serial = cxlmd->cxlds->serial;
 		__entry->overflow_ts = cxl_poison_overflow(flags, overflow_ts);
 		__entry->dpa = cxl_poison_record_dpa(record);
@@ -678,12 +702,12 @@ TRACE_EVENT(cxl_poison,
 		__entry->trace_type = trace_type;
 		__entry->flags = flags;
 		if (cxlr) {
-			__assign_str(region, dev_name(&cxlr->dev));
+			__assign_str(region);
 			memcpy(__entry->uuid, &cxlr->params.uuid, 16);
 			__entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
 						     __entry->dpa);
 		} else {
-			__assign_str(region, "");
+			__assign_str(region);
 			memset(__entry->uuid, 0, 16);
 			__entry->hpa = ULLONG_MAX;
 		}
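The repeated one-argument __assign_str() conversions above track a v6.10 tracepoint change: the copy source is now taken from the matching __string() declaration in TP_STRUCT__entry, so repeating it at assign time (where the two could silently disagree) is no longer allowed. A sketch of the pairing on a hypothetical event, with the TRACE_INCLUDE/header-guard boilerplate a real trace header needs omitted:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
	TP_PROTO(const struct device *dev),
	TP_ARGS(dev),
	TP_STRUCT__entry(
		__string(name, dev_name(dev))	/* copy source declared here ... */
	),
	TP_fast_assign(
		__assign_str(name);		/* ... so no second argument here */
	),
	TP_printk("name=%s", __get_str(name))
);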
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 6f9270f2fa..a6613a6f89 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -12,6 +12,8 @@
 #include <linux/node.h>
 #include <linux/io.h>
 
+extern const struct nvdimm_security_ops *cxl_security_ops;
+
 /**
  * DOC: cxl objects
  *
@@ -781,6 +783,11 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
 struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
 int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
 int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
+static inline int cxl_root_decoder_autoremove(struct device *host,
+					      struct cxl_root_decoder *cxlrd)
+{
+	return cxl_decoder_autoremove(host, &cxlrd->cxlsd.cxld);
+}
 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
 
 /**
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 07e65a7605..af8169ccdb 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -395,9 +395,9 @@ enum cxl_devtype {
 
 /**
  * struct cxl_dpa_perf - DPA performance property entry
- * @dpa_range - range for DPA address
- * @coord - QoS performance data (i.e. latency, bandwidth)
- * @qos_class - QoS Class cookies
+ * @dpa_range: range for DPA address
+ * @coord: QoS performance data (i.e. latency, bandwidth)
+ * @qos_class: QoS Class cookies
  */
 struct cxl_dpa_perf {
 	struct range dpa_range;
@@ -464,13 +464,14 @@ struct cxl_dev_state {
 * @active_persistent_bytes: sum of hard + soft persistent
 * @next_volatile_bytes: volatile capacity change pending device reset
 * @next_persistent_bytes: persistent capacity change pending device reset
+ * @ram_perf: performance data entry matched to RAM partition
+ * @pmem_perf: performance data entry matched to PMEM partition
 * @event: event log driver state
 * @poison: poison driver state info
 * @security: security driver state info
 * @fw: firmware upload / activation state
+ * @mbox_wait: RCU wait for mbox send completely
 * @mbox_send: @dev specific transport for transmitting mailbox commands
- * @ram_perf: performance data entry matched to RAM partition
- * @pmem_perf: performance data entry matched to PMEM partition
 *
 * See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
 * details on capacity parameters.
@@ -527,6 +528,9 @@ enum cxl_opcode {
 	CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
 	CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
 	CXL_MBOX_OP_GET_LOG = 0x0401,
+	CXL_MBOX_OP_GET_LOG_CAPS = 0x0402,
+	CXL_MBOX_OP_CLEAR_LOG = 0x0403,
+	CXL_MBOX_OP_GET_SUP_LOG_SUBLIST = 0x0405,
 	CXL_MBOX_OP_IDENTIFY = 0x4000,
 	CXL_MBOX_OP_GET_PARTITION_INFO = 0x4100,
 	CXL_MBOX_OP_SET_PARTITION_INFO = 0x4101,
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 93992a1c8e..4da07727ab 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -13,7 +13,6 @@
 * "DVSEC" redundancies removed. When obvious, abbreviations may be used.
 */
 #define PCI_DVSEC_HEADER1_LENGTH_MASK	GENMASK(31, 20)
-#define PCI_DVSEC_VENDOR_ID_CXL		0x1E98
 
 /* CXL 2.0 8.1.3: PCIe DVSEC for CXL Device */
 #define CXL_DVSEC_PCIE_DEVICE					0
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 659f9d46b1..e53646e9f2 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -817,7 +817,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	cxlds->rcd = is_cxl_restricted(pdev);
 	cxlds->serial = pci_get_dsn(pdev);
 	cxlds->cxl_dvsec = pci_find_dvsec_capability(
-		pdev, PCI_DVSEC_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
+		pdev, PCI_VENDOR_ID_CXL, CXL_DVSEC_PCIE_DEVICE);
 	if (!cxlds->cxl_dvsec)
 		dev_warn(&pdev->dev,
 			 "Device DVSEC not present, skip CXL.mem init\n");
@@ -996,6 +996,75 @@ static struct pci_driver cxl_pci_driver = {
 	},
 };
 
-module_pci_driver(cxl_pci_driver);
+#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
+static void cxl_handle_cper_event(enum cxl_event_type ev_type,
+				  struct cxl_cper_event_rec *rec)
+{
+	struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
+	struct pci_dev *pdev __free(pci_dev_put) = NULL;
+	enum cxl_event_log_type log_type;
+	struct cxl_dev_state *cxlds;
+	unsigned int devfn;
+	u32 hdr_flags;
+
+	pr_debug("CPER event %d for device %u:%u:%u.%u\n", ev_type,
+		 device_id->segment_num, device_id->bus_num,
+		 device_id->device_num, device_id->func_num);
+
+	devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
+	pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
+					   device_id->bus_num, devfn);
+	if (!pdev)
+		return;
+
+	guard(device)(&pdev->dev);
+	if (pdev->driver != &cxl_pci_driver)
+		return;
+
+	cxlds = pci_get_drvdata(pdev);
+	if (!cxlds)
+		return;
+
+	/* Fabricate a log type */
+	hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
+	log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
+
+	cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
+			       &uuid_null, &rec->event);
+}
+
+static void cxl_cper_work_fn(struct work_struct *work)
+{
+	struct cxl_cper_work_data wd;
+
+	while (cxl_cper_kfifo_get(&wd))
+		cxl_handle_cper_event(wd.event_type, &wd.rec);
+}
+static DECLARE_WORK(cxl_cper_work, cxl_cper_work_fn);
+
+static int __init cxl_pci_driver_init(void)
+{
+	int rc;
+
+	rc = pci_register_driver(&cxl_pci_driver);
+	if (rc)
+		return rc;
+
+	rc = cxl_cper_register_work(&cxl_cper_work);
+	if (rc)
+		pci_unregister_driver(&cxl_pci_driver);
+
+	return rc;
+}
+
+static void __exit cxl_pci_driver_exit(void)
+{
+	cxl_cper_unregister_work(&cxl_cper_work);
+	cancel_work_sync(&cxl_cper_work);
+	pci_unregister_driver(&cxl_pci_driver);
+}
+
+module_init(cxl_pci_driver_init);
+module_exit(cxl_pci_driver_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_IMPORT_NS(CXL);
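The new init/exit pair above replaces module_pci_driver(): besides registering the PCI driver, it attaches a work item that drains firmware-reported CPER records from a kfifo filled on the GHES side (behind cxl_cper_register_work()). A hedged, minimal kernel-module sketch of that kfifo-plus-workqueue shape, with hypothetical demo names in place of the CXL ones:

#include <linux/module.h>
#include <linux/kfifo.h>
#include <linux/workqueue.h>

struct demo_rec {
	u32 id;
};

static DEFINE_KFIFO(demo_fifo, struct demo_rec, 16);

static void demo_work_fn(struct work_struct *work)
{
	struct demo_rec rec;

	/* Drain everything queued so far; kfifo_get() returns 0 when empty */
	while (kfifo_get(&demo_fifo, &rec))
		pr_info("handled record %u\n", rec.id);
}
static DECLARE_WORK(demo_work, demo_work_fn);

/* Producer side: enqueue a record and kick the consumer */
static void demo_enqueue(u32 id)
{
	struct demo_rec rec = { .id = id };

	if (kfifo_put(&demo_fifo, rec))
		schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	demo_enqueue(1);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Mirror the driver's teardown order: flush the consumer on unload */
	cancel_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");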
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 7cb8994f88..2ecdaee630 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -11,8 +11,6 @@
 #include "cxlmem.h"
 #include "cxl.h"
 
-extern const struct nvdimm_security_ops *cxl_security_ops;
-
 static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
 
 static void clear_exclusive(void *mds)