 drivers/cxl/Kconfig          |  1 +
 drivers/cxl/core/hdm.c       | 13 ++++++++-----
 drivers/cxl/cxl.h            |  2 ++
 drivers/cxl/pmem.c           |  2 --
 include/linux/cxl-event.h    |  3 +++
 tools/testing/cxl/test/mem.c | 19 +++++++++++++------
 6 files changed, 29 insertions(+), 11 deletions(-)
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index 5f3c9c5529b9..99b5c25be079 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -6,6 +6,7 @@ menuconfig CXL_BUS
select FW_UPLOAD
select PCI_DOE
select FIRMWARE_TABLE
+ select NUMA_KEEP_MEMINFO if (NUMA && X86)
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
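For context, NUMA_KEEP_MEMINFO keeps the boot-time NUMA memory map around after init so that physical-address-to-node lookups such as phys_to_target_node() keep working on x86; the CXL core depends on that kind of lookup when it assigns nodes to hot-added device memory, hence the conditional select above. A minimal sketch of such a lookup, assuming only the generic phys_to_target_node()/NUMA_NO_NODE interfaces (the helper name is made up for illustration):

#include <linux/types.h>
#include <linux/numa.h>

/* Illustrative only: map a host physical address to a NUMA node,
 * falling back to node 0 when no mapping was retained. */
static int example_hpa_to_nid(u64 hpa)
{
	int nid = phys_to_target_node(hpa);

	return nid == NUMA_NO_NODE ? 0 : nid;
}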
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index 7d97790b893d..784843fa2a22 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -319,8 +319,8 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
else if (resource_contains(&cxlds->ram_res, res))
cxled->mode = CXL_DECODER_RAM;
else {
- dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id,
- cxled->cxld.id, cxled->dpa_res);
+ dev_warn(dev, "decoder%d.%d: %pr mixed mode not supported\n",
+ port->id, cxled->cxld.id, cxled->dpa_res);
cxled->mode = CXL_DECODER_MIXED;
}
@@ -519,8 +519,7 @@ int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
if (size > avail) {
dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size,
- cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem",
- &avail);
+ cxl_decoder_mode_name(cxled->mode), &avail);
rc = -ENOSPC;
goto out;
}
@@ -888,8 +887,12 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
}
rc = eig_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl),
&cxld->interleave_granularity);
- if (rc)
+ if (rc) {
+ dev_warn(&port->dev,
+ "decoder%d.%d: Invalid interleave granularity (ctrl: %#x)\n",
+ port->id, cxld->id, ctrl);
return rc;
+ }
dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n",
port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end,
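In the cxl_dpa_alloc() hunk above, the error message now reports the decoder's mode through cxl_decoder_mode_name(cxled->mode) instead of a hard-coded ram/pmem ternary, which keeps the message accurate for modes other than those two. The helper lives in drivers/cxl/cxl.h; a mode-name lookup of roughly this shape (sketch only, the real enum values and table may differ):

/* Sketch of an enum-to-string table for decoder modes. */
static inline const char *example_decoder_mode_name(enum cxl_decoder_mode mode)
{
	static const char * const names[] = {
		[CXL_DECODER_NONE] = "none",
		[CXL_DECODER_RAM] = "ram",
		[CXL_DECODER_PMEM] = "pmem",
		[CXL_DECODER_MIXED] = "mixed",
	};

	if (mode >= CXL_DECODER_NONE && mode <= CXL_DECODER_MIXED)
		return names[mode];
	return "mixed";
}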
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 036d17db68e0..2a09db5f72ee 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -12,6 +12,8 @@
#include <linux/node.h>
#include <linux/io.h>
+extern const struct nvdimm_security_ops *cxl_security_ops;
+
/**
* DOC: cxl objects
*
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index 7cb8994f8809..2ecdaee63021 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -11,8 +11,6 @@
#include "cxlmem.h"
#include "cxl.h"
-extern const struct nvdimm_security_ops *cxl_security_ops;
-
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
static void clear_exclusive(void *mds)
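The extern declaration of cxl_security_ops that pmem.c used to carry locally now lives in cxl.h (previous hunk), so the file that defines the ops table and every file that uses it share one prototype that the compiler can check against the definition. A standalone illustration of the pattern, with made-up names and the header, definer, and user collapsed into one snippet:

/* "shared header" part: the one declaration everyone includes */
struct example_ops {
	int (*freeze)(void);
};
extern const struct example_ops *example_security_ops;

/* "defining file" part: the definition is checked against the
 * shared declaration at compile time */
static int example_freeze(void)
{
	return 0;
}

static const struct example_ops example_ops_table = {
	.freeze = example_freeze,
};
const struct example_ops *example_security_ops = &example_ops_table;

/* "using file" part: no local re-declaration needed */
static int example_user(void)
{
	return example_security_ops->freeze();
}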
diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h
index 03fa6d50d46f..812ed16ffc2f 100644
--- a/include/linux/cxl-event.h
+++ b/include/linux/cxl-event.h
@@ -3,6 +3,9 @@
#ifndef _LINUX_CXL_EVENT_H
#define _LINUX_CXL_EVENT_H
+#include <linux/types.h>
+#include <linux/uuid.h>
+
/*
* Common Event Record Format
* CXL rev 3.0 section 8.2.9.2.1; Table 8-42
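The new <linux/types.h> and <linux/uuid.h> includes make the header self-contained: the common event record format it goes on to describe is built from fixed-width u8/__le* fields and a uuid_t. Roughly the shape involved (abbreviated sketch, not the exact Table 8-42 layout from the header):

#include <linux/types.h>
#include <linux/uuid.h>

struct example_event_record_hdr {
	u8 length;		/* u8, __le16, __le64 come from <linux/types.h> */
	u8 flags[3];
	__le16 handle;
	__le16 related_handle;
	__le64 timestamp;
} __packed;

struct example_event_generic {
	uuid_t id;		/* uuid_t comes from <linux/uuid.h> */
	struct example_event_record_hdr hdr;
	u8 data[0x50];
} __packed;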
diff --git a/tools/testing/cxl/test/mem.c b/tools/testing/cxl/test/mem.c
index 35ee41e435ab..6584443144de 100644
--- a/tools/testing/cxl/test/mem.c
+++ b/tools/testing/cxl/test/mem.c
@@ -127,7 +127,7 @@ static struct {
#define CXL_TEST_EVENT_CNT_MAX 15
/* Set a number of events to return at a time for simulation. */
-#define CXL_TEST_EVENT_CNT 3
+#define CXL_TEST_EVENT_RET_MAX 4
struct mock_event_log {
u16 clear_idx;
@@ -222,6 +222,12 @@ static void mes_add_event(struct mock_event_store *mes,
log->nr_events++;
}
+/*
+ * Vary the number of events returned to simulate events occurring while the
+ * logs are being read.
+ */
+static int ret_limit = 0;
+
static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
struct cxl_get_event_payload *pl;
@@ -233,14 +239,18 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
if (cmd->size_in != sizeof(log_type))
return -EINVAL;
- if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
+ ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
+ if (!ret_limit)
+ ret_limit = 1;
+
+ if (cmd->size_out < struct_size(pl, records, ret_limit))
return -EINVAL;
log_type = *((u8 *)cmd->payload_in);
if (log_type >= CXL_EVENT_TYPE_MAX)
return -EINVAL;
- memset(cmd->payload_out, 0, cmd->size_out);
+ memset(cmd->payload_out, 0, struct_size(pl, records, 0));
log = event_find_log(dev, log_type);
if (!log || event_log_empty(log))
@@ -248,7 +258,7 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
pl = cmd->payload_out;
- for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
+ for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
memcpy(&pl->records[i], event_get_current(log),
sizeof(pl->records[i]));
pl->records[i].event.generic.hdr.handle =
@@ -256,6 +266,7 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
log->cur_idx++;
}
+ cmd->size_out = struct_size(pl, records, i);
pl->record_count = cpu_to_le16(i);
if (!event_log_empty(log))
pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
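With CXL_TEST_EVENT_RET_MAX set to 4, the (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX step followed by the !ret_limit fixup makes successive Get Event Records calls hand back 1, 2, 3, 1, 2, 3, ... records, so the reader sees the count change between calls as if new events kept arriving mid-scan; the handler then trims cmd->size_out to struct_size(pl, records, i) to match the records actually copied and clears only the fixed payload header rather than the whole output buffer. A standalone sketch of just the cycling arithmetic (user-space C, not kernel code):

#include <stdio.h>

#define CXL_TEST_EVENT_RET_MAX 4

int main(void)
{
	int ret_limit = 0;

	/* Same update as mock_get_event(): yields 1, 2, 3, 1, 2, 3, ... */
	for (int call = 0; call < 8; call++) {
		ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
		if (!ret_limit)
			ret_limit = 1;
		printf("call %d: return up to %d records\n", call, ret_limit);
	}
	return 0;
}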