Diffstat (limited to 'drivers/cxl/core')
-rw-r--r-- | drivers/cxl/core/cdat.c   | 152
-rw-r--r-- | drivers/cxl/core/core.h   |  14
-rw-r--r-- | drivers/cxl/core/hdm.c    |  13
-rw-r--r-- | drivers/cxl/core/mbox.c   |  87
-rw-r--r-- | drivers/cxl/core/memdev.c |  44
-rw-r--r-- | drivers/cxl/core/pci.c    |  35
-rw-r--r-- | drivers/cxl/core/port.c   | 127
-rw-r--r-- | drivers/cxl/core/region.c | 177
-rw-r--r-- | drivers/cxl/core/regs.c   |   7
-rw-r--r-- | drivers/cxl/core/trace.c  |  91
-rw-r--r-- | drivers/cxl/core/trace.h  |  50
11 files changed, 468 insertions, 329 deletions
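
A recurring change in the cdat.c hunks below is that the old check_mul_overflow()-and-warn handling of DSLBIS/SSLBIS entries is replaced by a cdat_normalize() helper that drops invalid or overflowing values and converts latency entries from picoseconds to nanoseconds at parse time. The following is a minimal user-space sketch of that helper for reference only; the ACPI_HMAT_* constants, DIV_ROUND_UP(), and the main() harness with its sample numbers are stand-ins for the kernel definitions and are not part of the patch.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's ACPI HMAT data-type values and helper macro */
#define ACPI_HMAT_ACCESS_LATENCY   0
#define ACPI_HMAT_READ_LATENCY     1
#define ACPI_HMAT_WRITE_LATENCY    2
#define ACPI_HMAT_READ_BANDWIDTH   4
#define DIV_ROUND_UP(n, d)         (((n) + (d) - 1) / (d))

static uint32_t cdat_normalize(uint16_t entry, uint64_t base, uint8_t type)
{
	uint32_t value;

	/* 0xffff and 0 are "no/invalid value" markers in CDAT entries */
	if (entry == 0xffff || !entry)
		return 0;
	/* Drop entries whose entry * base product would overflow u32 */
	if (base > (UINT_MAX / entry))
		return 0;

	value = entry * base;
	/* Latency fields are in picoseconds; report nanoseconds */
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		value = DIV_ROUND_UP(value, 1000);
		break;
	default:
		break;
	}
	return value;
}

int main(void)
{
	/* Hypothetical DSLBIS entry: base unit 100 ps, entry 250 -> 25 ns */
	printf("latency: %u ns\n",
	       cdat_normalize(250, 100, ACPI_HMAT_READ_LATENCY));
	/* Bandwidth entries keep the raw entry * base product */
	printf("bandwidth: %u\n",
	       cdat_normalize(16, 256, ACPI_HMAT_READ_BANDWIDTH));
	return 0;
}

Because the same helper serves both DSLBIS (device-scoped) and SSLBIS (switch-scoped) entries, the per-call-site overflow warnings disappear from the hunks below.
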
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c index eddbbe21450c..bb83867d9fec 100644 --- a/drivers/cxl/core/cdat.c +++ b/drivers/cxl/core/cdat.c @@ -14,12 +14,42 @@ struct dsmas_entry { struct range dpa_range; u8 handle; - struct access_coordinate coord; + struct access_coordinate coord[ACCESS_COORDINATE_MAX]; int entries; int qos_class; }; +static u32 cdat_normalize(u16 entry, u64 base, u8 type) +{ + u32 value; + + /* + * Check for invalid and overflow values + */ + if (entry == 0xffff || !entry) + return 0; + else if (base > (UINT_MAX / (entry))) + return 0; + + /* + * CDAT fields follow the format of HMAT fields. See table 5 Device + * Scoped Latency and Bandwidth Information Structure in Coherent Device + * Attribute Table (CDAT) Specification v1.01. + */ + value = entry * base; + switch (type) { + case ACPI_HMAT_ACCESS_LATENCY: + case ACPI_HMAT_READ_LATENCY: + case ACPI_HMAT_WRITE_LATENCY: + value = DIV_ROUND_UP(value, 1000); + break; + default: + break; + } + return value; +} + static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg, const unsigned long end) { @@ -58,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg, return 0; } -static void cxl_access_coordinate_set(struct access_coordinate *coord, - int access, unsigned int val) +static void __cxl_access_coordinate_set(struct access_coordinate *coord, + int access, unsigned int val) { switch (access) { case ACPI_HMAT_ACCESS_LATENCY: @@ -85,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord, } } +static void cxl_access_coordinate_set(struct access_coordinate *coord, + int access, unsigned int val) +{ + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) + __cxl_access_coordinate_set(&coord[i], access, val); +} + static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg, const unsigned long end) { @@ -97,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg, __le16 le_val; u64 val; u16 len; - int rc; len = le16_to_cpu((__force __le16)hdr->length); if (len != size || (unsigned long)hdr + len > end) { @@ -124,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg, le_base = (__force __le64)dslbis->entry_base_unit; le_val = (__force __le16)dslbis->entry[0]; - rc = check_mul_overflow(le64_to_cpu(le_base), - le16_to_cpu(le_val), &val); - if (rc) - pr_warn("DSLBIS value overflowed.\n"); + val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base), + dslbis->data_type); - cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val); + cxl_access_coordinate_set(dent->coord, dslbis->data_type, val); return 0; } @@ -163,25 +197,18 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port, static int cxl_port_perf_data_calculate(struct cxl_port *port, struct xarray *dsmas_xa) { - struct access_coordinate ep_c; - struct access_coordinate coord[ACCESS_COORDINATE_MAX]; + struct access_coordinate ep_c[ACCESS_COORDINATE_MAX]; struct dsmas_entry *dent; int valid_entries = 0; unsigned long index; int rc; - rc = cxl_endpoint_get_perf_coordinates(port, &ep_c); + rc = cxl_endpoint_get_perf_coordinates(port, ep_c); if (rc) { dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n"); return rc; } - rc = cxl_hb_get_perf_coordinates(port, coord); - if (rc) { - dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n"); - return rc; - } - struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port); if (!cxl_root) @@ -193,18 +220,10 
@@ static int cxl_port_perf_data_calculate(struct cxl_port *port, xa_for_each(dsmas_xa, index, dent) { int qos_class; - cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c); - /* - * Keeping the host bridge coordinates separate from the dsmas - * coordinates in order to allow calculation of access class - * 0 and 1 for region later. - */ - cxl_coordinates_combine(&coord[ACCESS_COORDINATE_CPU], - &coord[ACCESS_COORDINATE_CPU], - &dent->coord); + cxl_coordinates_combine(dent->coord, dent->coord, ep_c); dent->entries = 1; rc = cxl_root->ops->qos_class(cxl_root, - &coord[ACCESS_COORDINATE_CPU], + &dent->coord[ACCESS_COORDINATE_CPU], 1, &qos_class); if (rc != 1) continue; @@ -222,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port, static void update_perf_entry(struct device *dev, struct dsmas_entry *dent, struct cxl_dpa_perf *dpa_perf) { + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) + dpa_perf->coord[i] = dent->coord[i]; dpa_perf->dpa_range = dent->dpa_range; - dpa_perf->coord = dent->coord; dpa_perf->qos_class = dent->qos_class; dev_dbg(dev, "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n", dent->dpa_range.start, dpa_perf->qos_class, - dent->coord.read_bandwidth, dent->coord.write_bandwidth, - dent->coord.read_latency, dent->coord.write_latency); + dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth, + dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth, + dent->coord[ACCESS_COORDINATE_CPU].read_latency, + dent->coord[ACCESS_COORDINATE_CPU].write_latency); } static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds, @@ -461,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg, le_base = (__force __le64)tbl->sslbis_header.entry_base_unit; le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth; - - if (check_mul_overflow(le64_to_cpu(le_base), - le16_to_cpu(le_val), &val)) - dev_warn(dev, "SSLBIS value overflowed!\n"); + val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base), + sslbis->data_type); xa_for_each(&port->dports, index, dport) { if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT || - dsp_id == dport->port_id) - cxl_access_coordinate_set(&dport->sw_coord, + dsp_id == dport->port_id) { + cxl_access_coordinate_set(dport->coord, sslbis->data_type, val); + } } } @@ -493,6 +514,21 @@ void cxl_switch_parse_cdat(struct cxl_port *port) } EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL); +static void __cxl_coordinates_combine(struct access_coordinate *out, + struct access_coordinate *c1, + struct access_coordinate *c2) +{ + if (c1->write_bandwidth && c2->write_bandwidth) + out->write_bandwidth = min(c1->write_bandwidth, + c2->write_bandwidth); + out->write_latency = c1->write_latency + c2->write_latency; + + if (c1->read_bandwidth && c2->read_bandwidth) + out->read_bandwidth = min(c1->read_bandwidth, + c2->read_bandwidth); + out->read_latency = c1->read_latency + c2->read_latency; +} + /** * cxl_coordinates_combine - Combine the two input coordinates * @@ -504,15 +540,8 @@ void cxl_coordinates_combine(struct access_coordinate *out, struct access_coordinate *c1, struct access_coordinate *c2) { - if (c1->write_bandwidth && c2->write_bandwidth) - out->write_bandwidth = min(c1->write_bandwidth, - c2->write_bandwidth); - out->write_latency = c1->write_latency + c2->write_latency; - - if (c1->read_bandwidth && c2->read_bandwidth) - out->read_bandwidth = min(c1->read_bandwidth, - c2->read_bandwidth); - out->read_latency = c1->read_latency + c2->read_latency; + for (int i = 0; i < 
ACCESS_COORDINATE_MAX; i++) + __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]); } MODULE_IMPORT_NS(CXL); @@ -521,17 +550,13 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr, struct cxl_endpoint_decoder *cxled) { struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); - struct cxl_port *port = cxlmd->endpoint; struct cxl_dev_state *cxlds = cxlmd->cxlds; struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds); - struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX]; - struct access_coordinate coord; struct range dpa = { .start = cxled->dpa_res->start, .end = cxled->dpa_res->end, }; struct cxl_dpa_perf *perf; - int rc; switch (cxlr->mode) { case CXL_DECODER_RAM: @@ -549,35 +574,16 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr, if (!range_contains(&perf->dpa_range, &dpa)) return; - rc = cxl_hb_get_perf_coordinates(port, hb_coord); - if (rc) { - dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n"); - return; - } - for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { - /* Pickup the host bridge coords */ - cxl_coordinates_combine(&coord, &hb_coord[i], &perf->coord); - /* Get total bandwidth and the worst latency for the cxl region */ cxlr->coord[i].read_latency = max_t(unsigned int, cxlr->coord[i].read_latency, - coord.read_latency); + perf->coord[i].read_latency); cxlr->coord[i].write_latency = max_t(unsigned int, cxlr->coord[i].write_latency, - coord.write_latency); - cxlr->coord[i].read_bandwidth += coord.read_bandwidth; - cxlr->coord[i].write_bandwidth += coord.write_bandwidth; - - /* - * Convert latency to nanosec from picosec to be consistent - * with the resulting latency coordinates computed by the - * HMAT_REPORTING code. - */ - cxlr->coord[i].read_latency = - DIV_ROUND_UP(cxlr->coord[i].read_latency, 1000); - cxlr->coord[i].write_latency = - DIV_ROUND_UP(cxlr->coord[i].write_latency, 1000); + perf->coord[i].write_latency); + cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth; + cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth; } } diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index bc5a95665aa0..625394486459 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -27,7 +27,21 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); int cxl_region_init(void); void cxl_region_exit(void); int cxl_get_poison_by_endpoint(struct cxl_port *port); +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa); +u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, + u64 dpa); + #else +static inline u64 +cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, u64 dpa) +{ + return ULLONG_MAX; +} +static inline +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) +{ + return NULL; +} static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) { return 0; diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 7d97790b893d..784843fa2a22 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -319,8 +319,8 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, else if (resource_contains(&cxlds->ram_res, res)) cxled->mode = CXL_DECODER_RAM; else { - dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id, - cxled->cxld.id, cxled->dpa_res); + dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n", + port->id, cxled->cxld.id, cxled->dpa_res); cxled->mode = CXL_DECODER_MIXED; } @@ -519,8 +519,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long 
size) if (size > avail) { dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size, - cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem", - &avail); + cxl_decoder_mode_name(cxled->mode), &avail); rc = -ENOSPC; goto out; } @@ -888,8 +887,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, } rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), &cxld->interleave_granularity); - if (rc) + if (rc) { + dev_warn(&port->dev, + "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n", + port->id, cxld->id, ctrl); return rc; + } dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 9adda4795eb7..2626f3fff201 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -56,6 +56,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(GET_LSA, 0x8, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(GET_HEALTH_INFO, 0, 0x12, 0), CXL_CMD(GET_LOG, 0x18, CXL_VARIABLE_PAYLOAD, CXL_CMD_FLAG_FORCE_ENABLE), + CXL_CMD(GET_LOG_CAPS, 0x10, 0x4, 0), + CXL_CMD(CLEAR_LOG, 0x10, 0, 0), + CXL_CMD(GET_SUP_LOG_SUBLIST, 0x2, CXL_VARIABLE_PAYLOAD, 0), CXL_CMD(SET_PARTITION_INFO, 0x0a, 0, 0), CXL_CMD(SET_LSA, CXL_VARIABLE_PAYLOAD, 0, 0), CXL_CMD(GET_ALERT_CONFIG, 0, 0x10, 0), @@ -331,6 +334,15 @@ static bool cxl_payload_from_user_allowed(u16 opcode, void *payload_in) return false; break; } + case CXL_MBOX_OP_CLEAR_LOG: { + const uuid_t *uuid = (uuid_t *)payload_in; + + /* + * Restrict the ‘Clear log’ action to only apply to + * Vendor debug logs. + */ + return uuid_equal(uuid, &DEFINE_CXL_VENDOR_DEBUG_UUID); + } default: break; } @@ -842,14 +854,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, enum cxl_event_type event_type, const uuid_t *uuid, union cxl_event *evt) { - if (event_type == CXL_CPER_EVENT_GEN_MEDIA) - trace_cxl_general_media(cxlmd, type, &evt->gen_media); - else if (event_type == CXL_CPER_EVENT_DRAM) - trace_cxl_dram(cxlmd, type, &evt->dram); - else if (event_type == CXL_CPER_EVENT_MEM_MODULE) + if (event_type == CXL_CPER_EVENT_MEM_MODULE) { trace_cxl_memory_module(cxlmd, type, &evt->mem_module); - else + return; + } + if (event_type == CXL_CPER_EVENT_GENERIC) { trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); + return; + } + + if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { + u64 dpa, hpa = ULLONG_MAX; + struct cxl_region *cxlr; + + /* + * These trace points are annotated with HPA and region + * translations. Take topology mutation locks and lookup + * { HPA, REGION } from { DPA, MEMDEV } in the event record. 
+ */ + guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_dpa_rwsem); + + dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK; + cxlr = cxl_dpa_to_region(cxlmd, dpa); + if (cxlr) + hpa = cxl_trace_hpa(cxlr, cxlmd, dpa); + + if (event_type == CXL_CPER_EVENT_GEN_MEDIA) + trace_cxl_general_media(cxlmd, type, cxlr, hpa, + &evt->gen_media); + else if (event_type == CXL_CPER_EVENT_DRAM) + trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram); + } } EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL); @@ -915,7 +951,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds, payload->handles[i++] = gen->hdr.handle; dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log, - le16_to_cpu(payload->handles[i])); + le16_to_cpu(payload->handles[i - 1])); if (i == max_handles) { payload->nr_recs = i; @@ -946,24 +982,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds, struct cxl_memdev *cxlmd = mds->cxlds.cxlmd; struct device *dev = mds->cxlds.dev; struct cxl_get_event_payload *payload; - struct cxl_mbox_cmd mbox_cmd; u8 log_type = type; u16 nr_rec; mutex_lock(&mds->event.log_lock); payload = mds->event.buf; - mbox_cmd = (struct cxl_mbox_cmd) { - .opcode = CXL_MBOX_OP_GET_EVENT_RECORD, - .payload_in = &log_type, - .size_in = sizeof(log_type), - .payload_out = payload, - .size_out = mds->payload_size, - .min_out = struct_size(payload, records, 0), - }; - do { int rc, i; + struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_EVENT_RECORD, + .payload_in = &log_type, + .size_in = sizeof(log_type), + .payload_out = payload, + .size_out = mds->payload_size, + .min_out = struct_size(payload, records, 0), + }; rc = cxl_internal_send_cmd(mds, &mbox_cmd); if (rc) { @@ -1296,7 +1330,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_mbox_poison_out *po; struct cxl_mbox_poison_in pi; - struct cxl_mbox_cmd mbox_cmd; int nr_records = 0; int rc; @@ -1308,16 +1341,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, pi.offset = cpu_to_le64(offset); pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT); - mbox_cmd = (struct cxl_mbox_cmd) { - .opcode = CXL_MBOX_OP_GET_POISON, - .size_in = sizeof(pi), - .payload_in = &pi, - .size_out = mds->payload_size, - .payload_out = po, - .min_out = struct_size(po, record, 0), - }; - do { + struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){ + .opcode = CXL_MBOX_OP_GET_POISON, + .size_in = sizeof(pi), + .payload_in = &pi, + .size_out = mds->payload_size, + .payload_out = po, + .min_out = struct_size(po, record, 0), + }; + rc = cxl_internal_send_cmd(mds, &mbox_cmd); if (rc) break; diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index d4e259f3a7e9..0277726afd04 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -251,50 +251,6 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) } EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); -struct cxl_dpa_to_region_context { - struct cxl_region *cxlr; - u64 dpa; -}; - -static int __cxl_dpa_to_region(struct device *dev, void *arg) -{ - struct cxl_dpa_to_region_context *ctx = arg; - struct cxl_endpoint_decoder *cxled; - u64 dpa = ctx->dpa; - - if (!is_endpoint_decoder(dev)) - return 0; - - cxled = to_cxl_endpoint_decoder(dev); - if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) - return 0; - - if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) - return 0; - - dev_dbg(dev, "dpa:0x%llx mapped in 
region:%s\n", dpa, - dev_name(&cxled->cxld.region->dev)); - - ctx->cxlr = cxled->cxld.region; - - return 1; -} - -static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) -{ - struct cxl_dpa_to_region_context ctx; - struct cxl_port *port; - - ctx = (struct cxl_dpa_to_region_context) { - .dpa = dpa, - }; - port = cxlmd->endpoint; - if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) - device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); - - return ctx.cxlr; -} - static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) { struct cxl_dev_state *cxlds = cxlmd->cxlds; diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 0df09bd79408..8567dd11eaac 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -525,7 +525,7 @@ static int cxl_cdat_get_length(struct device *dev, __le32 response[2]; int rc; - rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL, + rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), &response, sizeof(response)); @@ -555,7 +555,7 @@ static int cxl_cdat_read_table(struct device *dev, __le32 request = CDAT_DOE_REQ(entry_handle); int rc; - rc = pci_doe(doe_mb, PCI_DVSEC_VENDOR_ID_CXL, + rc = pci_doe(doe_mb, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS, &request, sizeof(request), rsp, sizeof(*rsp) + remaining); @@ -640,7 +640,7 @@ void read_cdat_data(struct cxl_port *port) if (!pdev) return; - doe_mb = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL, + doe_mb = pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_CXL, CXL_DOE_PROTOCOL_TABLE_ACCESS); if (!doe_mb) { dev_dbg(dev, "No CDAT mailbox\n"); @@ -1045,3 +1045,32 @@ long cxl_pci_get_latency(struct pci_dev *pdev) return cxl_flit_size(pdev) * MEGA / bw; } + +static int __cxl_endpoint_decoder_reset_detected(struct device *dev, void *data) +{ + struct cxl_port *port = data; + struct cxl_decoder *cxld; + struct cxl_hdm *cxlhdm; + void __iomem *hdm; + u32 ctrl; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) + return 0; + + cxlhdm = dev_get_drvdata(&port->dev); + hdm = cxlhdm->regs.hdm_decoder; + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); + + return !FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl); +} + +bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port) +{ + return device_for_each_child(&port->dev, port, + __cxl_endpoint_decoder_reset_detected); +} +EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_reset_detected, CXL); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index 2b0cab556072..887ed6e358fb 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -2133,36 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd) } EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL); -/** - * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator - * and host bridge - * - * @port: endpoint cxl_port - * @coord: output access coordinates - * - * Return: errno on failure, 0 on success. 
- */ -int cxl_hb_get_perf_coordinates(struct cxl_port *port, - struct access_coordinate *coord) +static void add_latency(struct access_coordinate *c, long latency) { - struct cxl_port *iter = port; - struct cxl_dport *dport; + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + c[i].write_latency += latency; + c[i].read_latency += latency; + } +} - if (!is_cxl_endpoint(port)) - return -EINVAL; +static bool coordinates_valid(struct access_coordinate *c) +{ + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + if (c[i].read_bandwidth && c[i].write_bandwidth && + c[i].read_latency && c[i].write_latency) + continue; + return false; + } - dport = iter->parent_dport; - while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) { - iter = to_cxl_port(iter->dev.parent); - dport = iter->parent_dport; + return true; +} + +static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw) +{ + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) { + c[i].write_bandwidth = min(c[i].write_bandwidth, bw); + c[i].read_bandwidth = min(c[i].read_bandwidth, bw); } +} - coord[ACCESS_COORDINATE_LOCAL] = - dport->hb_coord[ACCESS_COORDINATE_LOCAL]; - coord[ACCESS_COORDINATE_CPU] = - dport->hb_coord[ACCESS_COORDINATE_CPU]; +static void set_access_coordinates(struct access_coordinate *out, + struct access_coordinate *in) +{ + for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) + out[i] = in[i]; +} - return 0; +static bool parent_port_is_cxl_root(struct cxl_port *port) +{ + return is_cxl_root(to_cxl_port(port->dev.parent)); } /** @@ -2176,47 +2184,76 @@ int cxl_hb_get_perf_coordinates(struct cxl_port *port, int cxl_endpoint_get_perf_coordinates(struct cxl_port *port, struct access_coordinate *coord) { - struct access_coordinate c = { - .read_bandwidth = UINT_MAX, - .write_bandwidth = UINT_MAX, + struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev); + struct access_coordinate c[] = { + { + .read_bandwidth = UINT_MAX, + .write_bandwidth = UINT_MAX, + }, + { + .read_bandwidth = UINT_MAX, + .write_bandwidth = UINT_MAX, + }, }; struct cxl_port *iter = port; struct cxl_dport *dport; struct pci_dev *pdev; + struct device *dev; unsigned int bw; + bool is_cxl_root; if (!is_cxl_endpoint(port)) return -EINVAL; - dport = iter->parent_dport; - /* - * Exit the loop when the parent port of the current port is cxl root. - * The iterative loop starts at the endpoint and gathers the - * latency of the CXL link from the current iter to the next downstream - * port each iteration. If the parent is cxl root then there is - * nothing to gather. + * Skip calculation for RCD. Expectation is HMAT already covers RCD case + * since RCH does not support hotplug. */ - while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) { - cxl_coordinates_combine(&c, &c, &dport->sw_coord); - c.write_latency += dport->link_latency; - c.read_latency += dport->link_latency; + if (cxlmd->cxlds->rcd) + return 0; - iter = to_cxl_port(iter->dev.parent); + /* + * Exit the loop when the parent port of the current iter port is cxl + * root. The iterative loop starts at the endpoint and gathers the + * latency of the CXL link from the current device/port to the connected + * downstream port each iteration. + */ + do { dport = iter->parent_dport; - } + iter = to_cxl_port(iter->dev.parent); + is_cxl_root = parent_port_is_cxl_root(iter); + + /* + * There's no valid access_coordinate for a root port since RPs do not + * have CDAT and therefore needs to be skipped. 
+ */ + if (!is_cxl_root) { + if (!coordinates_valid(dport->coord)) + return -EINVAL; + cxl_coordinates_combine(c, c, dport->coord); + } + add_latency(c, dport->link_latency); + } while (!is_cxl_root); + + dport = iter->parent_dport; + /* Retrieve HB coords */ + if (!coordinates_valid(dport->coord)) + return -EINVAL; + cxl_coordinates_combine(c, c, dport->coord); + + dev = port->uport_dev->parent; + if (!dev_is_pci(dev)) + return -ENODEV; /* Get the calculated PCI paths bandwidth */ - pdev = to_pci_dev(port->uport_dev->parent); + pdev = to_pci_dev(dev); bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL); if (bw == 0) return -ENXIO; bw /= BITS_PER_BYTE; - c.write_bandwidth = min(c.write_bandwidth, bw); - c.read_bandwidth = min(c.read_bandwidth, bw); - - *coord = c; + set_min_bandwidth(c, bw); + set_access_coordinates(coord, c); return 0; } diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 5c186e0a39b9..00a9f0eef8dd 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -2679,28 +2679,158 @@ int cxl_get_poison_by_endpoint(struct cxl_port *port) return rc; } +struct cxl_dpa_to_region_context { + struct cxl_region *cxlr; + u64 dpa; +}; + +static int __cxl_dpa_to_region(struct device *dev, void *arg) +{ + struct cxl_dpa_to_region_context *ctx = arg; + struct cxl_endpoint_decoder *cxled; + u64 dpa = ctx->dpa; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxled = to_cxl_endpoint_decoder(dev); + if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) + return 0; + + if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) + return 0; + + dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, + dev_name(&cxled->cxld.region->dev)); + + ctx->cxlr = cxled->cxld.region; + + return 1; +} + +struct cxl_region *cxl_dpa_to_region(const struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dpa_to_region_context ctx; + struct cxl_port *port; + + ctx = (struct cxl_dpa_to_region_context) { + .dpa = dpa, + }; + port = cxlmd->endpoint; + if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port)) + device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); + + return ctx.cxlr; +} + +static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) +{ + struct cxl_region_params *p = &cxlr->params; + int gran = p->interleave_granularity; + int ways = p->interleave_ways; + u64 offset; + + /* Is the hpa within this region at all */ + if (hpa < p->res->start || hpa > p->res->end) { + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in region\n", hpa); + return false; + } + + /* Is the hpa in an expected chunk for its pos(-ition) */ + offset = hpa - p->res->start; + offset = do_div(offset, gran * ways); + if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) + return true; + + dev_dbg(&cxlr->dev, + "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); + + return false; +} + +static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; + struct cxl_region_params *p = &cxlr->params; + int pos = cxled->pos; + u16 eig = 0; + u8 eiw = 0; + + ways_to_eiw(p->interleave_ways, &eiw); + granularity_to_eig(p->interleave_granularity, &eig); + + /* + * The device position in the region interleave set was removed + * from the offset at HPA->DPA translation. To reconstruct the + * HPA, place the 'pos' in the offset. 
+ * + * The placement of 'pos' in the HPA is determined by interleave + * ways and granularity and is defined in the CXL Spec 3.0 Section + * 8.2.4.19.13 Implementation Note: Device Decode Logic + */ + + /* Remove the dpa base */ + dpa_offset = dpa - cxl_dpa_resource_start(cxled); + + mask_upper = GENMASK_ULL(51, eig + 8); + + if (eiw < 8) { + hpa_offset = (dpa_offset & mask_upper) << eiw; + hpa_offset |= pos << (eig + 8); + } else { + bits_upper = (dpa_offset & mask_upper) >> (eig + 8); + bits_upper = bits_upper * 3; + hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); + } + + /* The lower bits remain unchanged */ + hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); + + /* Apply the hpa_offset to the region base address */ + hpa = hpa_offset + p->res->start; + + if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) + return ULLONG_MAX; + + return hpa; +} + +u64 cxl_trace_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd, + u64 dpa) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled = NULL; + + for (int i = 0; i < p->nr_targets; i++) { + cxled = p->targets[i]; + if (cxlmd == cxled_to_memdev(cxled)) + break; + } + if (!cxled || cxlmd != cxled_to_memdev(cxled)) + return ULLONG_MAX; + + return cxl_dpa_to_hpa(dpa, cxlr, cxled); +} + static struct lock_class_key cxl_pmem_region_key; -static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) +static int cxl_pmem_region_alloc(struct cxl_region *cxlr) { struct cxl_region_params *p = &cxlr->params; struct cxl_nvdimm_bridge *cxl_nvb; - struct cxl_pmem_region *cxlr_pmem; struct device *dev; int i; - down_read(&cxl_region_rwsem); - if (p->state != CXL_CONFIG_COMMIT) { - cxlr_pmem = ERR_PTR(-ENXIO); - goto out; - } + guard(rwsem_read)(&cxl_region_rwsem); + if (p->state != CXL_CONFIG_COMMIT) + return -ENXIO; - cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), - GFP_KERNEL); - if (!cxlr_pmem) { - cxlr_pmem = ERR_PTR(-ENOMEM); - goto out; - } + struct cxl_pmem_region *cxlr_pmem __free(kfree) = + kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), GFP_KERNEL); + if (!cxlr_pmem) + return -ENOMEM; cxlr_pmem->hpa_range.start = p->res->start; cxlr_pmem->hpa_range.end = p->res->end; @@ -2718,10 +2848,8 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) */ if (i == 0) { cxl_nvb = cxl_find_nvdimm_bridge(cxlmd); - if (!cxl_nvb) { - cxlr_pmem = ERR_PTR(-ENODEV); - goto out; - } + if (!cxl_nvb) + return -ENODEV; cxlr->cxl_nvb = cxl_nvb; } m->cxlmd = cxlmd; @@ -2732,18 +2860,16 @@ static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) } dev = &cxlr_pmem->dev; - cxlr_pmem->cxlr = cxlr; - cxlr->cxlr_pmem = cxlr_pmem; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); device_set_pm_not_required(dev); dev->parent = &cxlr->dev; dev->bus = &cxl_bus_type; dev->type = &cxl_pmem_region_type; -out: - up_read(&cxl_region_rwsem); + cxlr_pmem->cxlr = cxlr; + cxlr->cxlr_pmem = no_free_ptr(cxlr_pmem); - return cxlr_pmem; + return 0; } static void cxl_dax_region_release(struct device *dev) @@ -2860,9 +2986,10 @@ static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) struct device *dev; int rc; - cxlr_pmem = cxl_pmem_region_alloc(cxlr); - if (IS_ERR(cxlr_pmem)) - return PTR_ERR(cxlr_pmem); + rc = cxl_pmem_region_alloc(cxlr); + if (rc) + return rc; + cxlr_pmem = cxlr->cxlr_pmem; cxl_nvb = cxlr->cxl_nvb; dev = &cxlr_pmem->dev; diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c index 
372786f80955..e1082e749c69 100644 --- a/drivers/cxl/core/regs.c +++ b/drivers/cxl/core/regs.c @@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL); static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi, struct cxl_register_map *map) { + u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo); int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo); u64 offset = ((u64)reg_hi << 32) | (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK); @@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi, if (offset > pci_resource_len(pdev, bar)) { dev_warn(&pdev->dev, "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar, - &pdev->resource[bar], &offset, map->reg_type); + &pdev->resource[bar], &offset, reg_type); return false; } - map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo); + map->reg_type = reg_type; map->resource = pci_resource_start(pdev, bar) + offset; map->max_size = pci_resource_len(pdev, bar) - offset; return true; @@ -313,7 +314,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type, .resource = CXL_RESOURCE_NONE, }; - regloc = pci_find_dvsec_capability(pdev, PCI_DVSEC_VENDOR_ID_CXL, + regloc = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_CXL, CXL_DVSEC_REG_LOCATOR); if (!regloc) return -ENXIO; diff --git a/drivers/cxl/core/trace.c b/drivers/cxl/core/trace.c index d0403dc3c8ab..7f2a9dd0d0e3 100644 --- a/drivers/cxl/core/trace.c +++ b/drivers/cxl/core/trace.c @@ -6,94 +6,3 @@ #define CREATE_TRACE_POINTS #include "trace.h" - -static bool cxl_is_hpa_in_range(u64 hpa, struct cxl_region *cxlr, int pos) -{ - struct cxl_region_params *p = &cxlr->params; - int gran = p->interleave_granularity; - int ways = p->interleave_ways; - u64 offset; - - /* Is the hpa within this region at all */ - if (hpa < p->res->start || hpa > p->res->end) { - dev_dbg(&cxlr->dev, - "Addr trans fail: hpa 0x%llx not in region\n", hpa); - return false; - } - - /* Is the hpa in an expected chunk for its pos(-ition) */ - offset = hpa - p->res->start; - offset = do_div(offset, gran * ways); - if ((offset >= pos * gran) && (offset < (pos + 1) * gran)) - return true; - - dev_dbg(&cxlr->dev, - "Addr trans fail: hpa 0x%llx not in expected chunk\n", hpa); - - return false; -} - -static u64 cxl_dpa_to_hpa(u64 dpa, struct cxl_region *cxlr, - struct cxl_endpoint_decoder *cxled) -{ - u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa; - struct cxl_region_params *p = &cxlr->params; - int pos = cxled->pos; - u16 eig = 0; - u8 eiw = 0; - - ways_to_eiw(p->interleave_ways, &eiw); - granularity_to_eig(p->interleave_granularity, &eig); - - /* - * The device position in the region interleave set was removed - * from the offset at HPA->DPA translation. To reconstruct the - * HPA, place the 'pos' in the offset. 
- * - * The placement of 'pos' in the HPA is determined by interleave - * ways and granularity and is defined in the CXL Spec 3.0 Section - * 8.2.4.19.13 Implementation Note: Device Decode Logic - */ - - /* Remove the dpa base */ - dpa_offset = dpa - cxl_dpa_resource_start(cxled); - - mask_upper = GENMASK_ULL(51, eig + 8); - - if (eiw < 8) { - hpa_offset = (dpa_offset & mask_upper) << eiw; - hpa_offset |= pos << (eig + 8); - } else { - bits_upper = (dpa_offset & mask_upper) >> (eig + 8); - bits_upper = bits_upper * 3; - hpa_offset = ((bits_upper << (eiw - 8)) + pos) << (eig + 8); - } - - /* The lower bits remain unchanged */ - hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0); - - /* Apply the hpa_offset to the region base address */ - hpa = hpa_offset + p->res->start; - - if (!cxl_is_hpa_in_range(hpa, cxlr, cxled->pos)) - return ULLONG_MAX; - - return hpa; -} - -u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *cxlmd, - u64 dpa) -{ - struct cxl_region_params *p = &cxlr->params; - struct cxl_endpoint_decoder *cxled = NULL; - - for (int i = 0; i < p->nr_targets; i++) { - cxled = p->targets[i]; - if (cxlmd == cxled_to_memdev(cxled)) - break; - } - if (!cxled || cxlmd != cxled_to_memdev(cxled)) - return ULLONG_MAX; - - return cxl_dpa_to_hpa(dpa, cxlr, cxled); -} diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index e5f13260fc52..07a0394b1d99 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -253,8 +253,8 @@ TRACE_EVENT(cxl_generic_event, * DRAM Event Record * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 */ -#define CXL_DPA_FLAGS_MASK 0x3F -#define CXL_DPA_MASK (~CXL_DPA_FLAGS_MASK) +#define CXL_DPA_FLAGS_MASK GENMASK(1, 0) +#define CXL_DPA_MASK GENMASK_ULL(63, 6) #define CXL_DPA_VOLATILE BIT(0) #define CXL_DPA_NOT_REPAIRABLE BIT(1) @@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event, TRACE_EVENT(cxl_general_media, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, - struct cxl_event_gen_media *rec), + struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media, __field(u8, channel) __field(u32, device) __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) - __field(u16, validity_flags) /* Following are out of order to pack trace record */ + __field(u64, hpa) + __field_struct(uuid_t, region_uuid) + __field(u16, validity_flags) __field(u8, rank) __field(u8, dpa_flags) + __string(region_name, cxlr ? 
dev_name(&cxlr->dev) : "") ), TP_fast_assign( @@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media, memcpy(__entry->comp_id, &rec->component_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE); __entry->validity_flags = get_unaligned_le16(&rec->validity_flags); + __entry->hpa = hpa; + if (cxlr) { + __assign_str(region_name, dev_name(&cxlr->dev)); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name, ""); + uuid_copy(&__entry->region_uuid, &uuid_null); + } ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \ "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \ - "device=%x comp_id=%s validity_flags='%s'", + "device=%x comp_id=%s validity_flags='%s' " \ + "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), show_mem_event_type(__entry->type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->device, __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), - show_valid_flags(__entry->validity_flags) + show_valid_flags(__entry->validity_flags), + __entry->hpa, __get_str(region_name), &__entry->region_uuid ) ); @@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media, TRACE_EVENT(cxl_dram, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, - struct cxl_event_dram *rec), + struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram, __field(u32, nibble_mask) __field(u32, row) __array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE) + __field(u64, hpa) + __field_struct(uuid_t, region_uuid) __field(u8, rank) /* Out of order to pack trace record */ __field(u8, bank_group) /* Out of order to pack trace record */ __field(u8, bank) /* Out of order to pack trace record */ __field(u8, dpa_flags) /* Out of order to pack trace record */ + __string(region_name, cxlr ? dev_name(&cxlr->dev) : "") ), TP_fast_assign( @@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram, __entry->column = get_unaligned_le16(rec->column); memcpy(__entry->cor_mask, &rec->correction_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE); + __entry->hpa = hpa; + if (cxlr) { + __assign_str(region_name, dev_name(&cxlr->dev)); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name, ""); + uuid_copy(&__entry->region_uuid, &uuid_null); + } ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \ "transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \ "bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \ - "validity_flags='%s'", + "validity_flags='%s' " \ + "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), show_mem_event_type(__entry->type), @@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram, __entry->bank_group, __entry->bank, __entry->row, __entry->column, __print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE), - show_dram_valid_flags(__entry->validity_flags) + show_dram_valid_flags(__entry->validity_flags), + __entry->hpa, __get_str(region_name), &__entry->region_uuid ) ); @@ -642,8 +668,6 @@ TRACE_EVENT(cxl_memory_module, #define cxl_poison_overflow(flags, time) \ (flags & CXL_POISON_FLAG_OVERFLOW ? le64_to_cpu(time) : 0) -u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa); - TRACE_EVENT(cxl_poison, TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr, |