| author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-10 11:07:26 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-08-10 11:07:26 -0700 |
| commit | c235698355fa94df7073b51befda7d4be00a0e23 (patch) | |
| tree | 3cb9d2fa40862f2484eda9c148b4326505a8534d | /drivers/cxl/core |
| parent | 5e2e7383b57fa03ec2b00c82bb7f49a4a707c1f7 (diff) | |
| parent | 1cd8a2537eb07751d405ab7e2223f20338a90506 (diff) | |
Merge tag 'cxl-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl
Pull cxl updates from Dan Williams:
"Compute Express Link (CXL) updates for 6.0:
- Introduce a 'struct cxl_region' object with support for
provisioning and assembling persistent memory regions.
 - Introduce alloc_free_mem_region() to accompany the existing
   request_free_mem_region() as a method to allocate physical memory
   capacity out of an existing resource (see the sketch after this
   message).
- Export insert_resource_expand_to_fit() for the CXL subsystem to
late-publish CXL platform windows in iomem_resource.
- Add a polled mode PCI DOE (Data Object Exchange) driver service and
use it in cxl_pci to retrieve the CDAT (Coherent Device Attribute
Table)"
* tag 'cxl-for-6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (74 commits)
cxl/hdm: Fix skip allocations vs multiple pmem allocations
cxl/region: Disallow region granularity != window granularity
cxl/region: Fix x1 interleave to greater than x1 interleave routing
cxl/region: Move HPA setup to cxl_region_attach()
cxl/region: Fix decoder interleave programming
Documentation: cxl: remove dangling kernel-doc reference
cxl/region: describe targets and nr_targets members of cxl_region_params
cxl/regions: add padding for cxl_rr_ep_add nested lists
cxl/region: Fix IS_ERR() vs NULL check
cxl/region: Fix region reference target accounting
cxl/region: Fix region commit uninitialized variable warning
cxl/region: Fix port setup uninitialized variable warnings
cxl/region: Stop initializing interleave granularity
cxl/hdm: Fix DPA reservation vs cxl_endpoint_decoder lifetime
cxl/acpi: Minimize granularity for x1 interleaves
cxl/region: Delete 'region' attribute from root decoders
cxl/acpi: Autoload driver for 'cxl_acpi' test devices
cxl/region: decrement ->nr_targets on error in cxl_region_attach()
cxl/region: prevent underflow in ways_to_cxl()
cxl/region: uninitialized variable in alloc_hpa()
...
Diffstat (limited to 'drivers/cxl/core')
| -rw-r--r-- | drivers/cxl/core/Makefile | 1 |
| -rw-r--r-- | drivers/cxl/core/core.h | 51 |
| -rw-r--r-- | drivers/cxl/core/hdm.c | 691 |
| -rw-r--r-- | drivers/cxl/core/mbox.c | 95 |
| -rw-r--r-- | drivers/cxl/core/memdev.c | 4 |
| -rw-r--r-- | drivers/cxl/core/pci.c | 181 |
| -rw-r--r-- | drivers/cxl/core/pmem.c | 4 |
| -rw-r--r-- | drivers/cxl/core/port.c | 738 |
| -rw-r--r-- | drivers/cxl/core/region.c | 1896 |
9 files changed, 3343 insertions, 318 deletions
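Before the hdm.c changes below, a quick aside on the interleave encoding they manipulate: the HDM decoder control register stores interleave ways (eiw) and granularity (eig) in CXL-encoded form, and this series replaces the open-coded to_interleave_ways()/to_interleave_granularity() helpers with cxl_to_ways()/cxl_to_granularity() plus the reverse conversions used by cxld_set_interleave(). The sketch below restates only the decode direction, derived from the removed helpers in the diff; it is an illustration, not the merged code (which reports errors through an int return and writes the result via a pointer).

```c
/*
 * Decode CXL HDM decoder interleave fields (illustrative only).
 * Granularity: bytes = 256 << eig.
 * Ways: eiw 0..4 encode powers of two (1, 2, 4, 8, 16);
 *       eiw 8..10 encode the 3-way variants (3, 6, 12).
 */
static int eig_to_granularity(unsigned int eig)
{
	return 256 << eig;
}

static int eiw_to_ways(unsigned int eiw)
{
	switch (eiw) {
	case 0 ... 4:
		return 1 << eiw;
	case 8 ... 10:
		return 3 << (eiw - 8);
	default:
		return 0;	/* reserved encoding */
	}
}
```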
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile index 9d35085d25af..79c7257f4107 100644 --- a/drivers/cxl/core/Makefile +++ b/drivers/cxl/core/Makefile @@ -10,3 +10,4 @@ cxl_core-y += memdev.o cxl_core-y += mbox.o cxl_core-y += pci.o cxl_core-y += hdm.o +cxl_core-$(CONFIG_CXL_REGION) += region.o diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index 1a50c0fc399c..1d8f87be283f 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -9,6 +9,36 @@ extern const struct device_type cxl_nvdimm_type; extern struct attribute_group cxl_base_attribute_group; +#ifdef CONFIG_CXL_REGION +extern struct device_attribute dev_attr_create_pmem_region; +extern struct device_attribute dev_attr_delete_region; +extern struct device_attribute dev_attr_region; +extern const struct device_type cxl_pmem_region_type; +extern const struct device_type cxl_region_type; +void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); +#define CXL_REGION_ATTR(x) (&dev_attr_##x.attr) +#define CXL_REGION_TYPE(x) (&cxl_region_type) +#define SET_CXL_REGION_ATTR(x) (&dev_attr_##x.attr), +#define CXL_PMEM_REGION_TYPE(x) (&cxl_pmem_region_type) +int cxl_region_init(void); +void cxl_region_exit(void); +#else +static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) +{ +} +static inline int cxl_region_init(void) +{ + return 0; +} +static inline void cxl_region_exit(void) +{ +} +#define CXL_REGION_ATTR(x) NULL +#define CXL_REGION_TYPE(x) NULL +#define SET_CXL_REGION_ATTR(x) +#define CXL_PMEM_REGION_TYPE(x) NULL +#endif + struct cxl_send_command; struct cxl_mem_query_commands; int cxl_query_cmd(struct cxl_memdev *cxlmd, @@ -17,9 +47,28 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s); void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr, resource_size_t length); +struct dentry *cxl_debugfs_create_dir(const char *dir); +int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, + enum cxl_decoder_mode mode); +int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size); +int cxl_dpa_free(struct cxl_endpoint_decoder *cxled); +resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled); +resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled); +extern struct rw_semaphore cxl_dpa_rwsem; + +bool is_switch_decoder(struct device *dev); +struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev); +static inline struct cxl_ep *cxl_ep_load(struct cxl_port *port, + struct cxl_memdev *cxlmd) +{ + if (!port) + return NULL; + + return xa_load(&port->endpoints, (unsigned long)&cxlmd->dev); +} + int cxl_memdev_init(void); void cxl_memdev_exit(void); void cxl_mbox_init(void); -void cxl_mbox_exit(void); #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index bfc8ee876278..d1d2caea5c62 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ #include <linux/io-64-nonatomic-hi-lo.h> +#include <linux/seq_file.h> #include <linux/device.h> #include <linux/delay.h> @@ -16,6 +17,8 @@ * for enumerating these registers and capabilities. 
*/ +DECLARE_RWSEM(cxl_dpa_rwsem); + static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, int *target_map) { @@ -46,20 +49,22 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, */ int devm_cxl_add_passthrough_decoder(struct cxl_port *port) { - struct cxl_decoder *cxld; - struct cxl_dport *dport; + struct cxl_switch_decoder *cxlsd; + struct cxl_dport *dport = NULL; int single_port_map[1]; + unsigned long index; - cxld = cxl_switch_decoder_alloc(port, 1); - if (IS_ERR(cxld)) - return PTR_ERR(cxld); + cxlsd = cxl_switch_decoder_alloc(port, 1); + if (IS_ERR(cxlsd)) + return PTR_ERR(cxlsd); device_lock_assert(&port->dev); - dport = list_first_entry(&port->dports, typeof(*dport), list); + xa_for_each(&port->dports, index, dport) + break; single_port_map[0] = dport->port_id; - return add_hdm_decoder(port, cxld, single_port_map); + return add_hdm_decoder(port, &cxlsd->cxld, single_port_map); } EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, CXL); @@ -124,47 +129,577 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port) return ERR_PTR(-ENXIO); } + dev_set_drvdata(dev, cxlhdm); + return cxlhdm; } EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, CXL); -static int to_interleave_granularity(u32 ctrl) +static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth) +{ + unsigned long long start = r->start, end = r->end; + + seq_printf(file, "%*s%08llx-%08llx : %s\n", depth * 2, "", start, end, + r->name); +} + +void cxl_dpa_debug(struct seq_file *file, struct cxl_dev_state *cxlds) +{ + struct resource *p1, *p2; + + down_read(&cxl_dpa_rwsem); + for (p1 = cxlds->dpa_res.child; p1; p1 = p1->sibling) { + __cxl_dpa_debug(file, p1, 0); + for (p2 = p1->child; p2; p2 = p2->sibling) + __cxl_dpa_debug(file, p2, 1); + } + up_read(&cxl_dpa_rwsem); +} +EXPORT_SYMBOL_NS_GPL(cxl_dpa_debug, CXL); + +/* + * Must be called in a context that synchronizes against this decoder's + * port ->remove() callback (like an endpoint decoder sysfs attribute) + */ +static void __cxl_dpa_release(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_port *port = cxled_to_port(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct resource *res = cxled->dpa_res; + resource_size_t skip_start; + + lockdep_assert_held_write(&cxl_dpa_rwsem); + + /* save @skip_start, before @res is released */ + skip_start = res->start - cxled->skip; + __release_region(&cxlds->dpa_res, res->start, resource_size(res)); + if (cxled->skip) + __release_region(&cxlds->dpa_res, skip_start, cxled->skip); + cxled->skip = 0; + cxled->dpa_res = NULL; + put_device(&cxled->cxld.dev); + port->hdm_end--; +} + +static void cxl_dpa_release(void *cxled) +{ + down_write(&cxl_dpa_rwsem); + __cxl_dpa_release(cxled); + up_write(&cxl_dpa_rwsem); +} + +/* + * Must be called from context that will not race port device + * unregistration, like decoder sysfs attribute methods + */ +static void devm_cxl_dpa_release(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_port *port = cxled_to_port(cxled); + + lockdep_assert_held_write(&cxl_dpa_rwsem); + devm_remove_action(&port->dev, cxl_dpa_release, cxled); + __cxl_dpa_release(cxled); +} + +static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, + resource_size_t base, resource_size_t len, + resource_size_t skipped) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_port *port = cxled_to_port(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct device *dev = &port->dev; + 
struct resource *res; + + lockdep_assert_held_write(&cxl_dpa_rwsem); + + if (!len) + goto success; + + if (cxled->dpa_res) { + dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n", + port->id, cxled->cxld.id, cxled->dpa_res); + return -EBUSY; + } + + if (port->hdm_end + 1 != cxled->cxld.id) { + /* + * Assumes alloc and commit order is always in hardware instance + * order per expectations from 8.2.5.12.20 Committing Decoder + * Programming that enforce decoder[m] committed before + * decoder[m+1] commit start. + */ + dev_dbg(dev, "decoder%d.%d: expected decoder%d.%d\n", port->id, + cxled->cxld.id, port->id, port->hdm_end + 1); + return -EBUSY; + } + + if (skipped) { + res = __request_region(&cxlds->dpa_res, base - skipped, skipped, + dev_name(&cxled->cxld.dev), 0); + if (!res) { + dev_dbg(dev, + "decoder%d.%d: failed to reserve skipped space\n", + port->id, cxled->cxld.id); + return -EBUSY; + } + } + res = __request_region(&cxlds->dpa_res, base, len, + dev_name(&cxled->cxld.dev), 0); + if (!res) { + dev_dbg(dev, "decoder%d.%d: failed to reserve allocation\n", + port->id, cxled->cxld.id); + if (skipped) + __release_region(&cxlds->dpa_res, base - skipped, + skipped); + return -EBUSY; + } + cxled->dpa_res = res; + cxled->skip = skipped; + + if (resource_contains(&cxlds->pmem_res, res)) + cxled->mode = CXL_DECODER_PMEM; + else if (resource_contains(&cxlds->ram_res, res)) + cxled->mode = CXL_DECODER_RAM; + else { + dev_dbg(dev, "decoder%d.%d: %pr mixed\n", port->id, + cxled->cxld.id, cxled->dpa_res); + cxled->mode = CXL_DECODER_MIXED; + } + +success: + port->hdm_end++; + get_device(&cxled->cxld.dev); + return 0; +} + +static int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, + resource_size_t base, resource_size_t len, + resource_size_t skipped) { - int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl); + struct cxl_port *port = cxled_to_port(cxled); + int rc; + + down_write(&cxl_dpa_rwsem); + rc = __cxl_dpa_reserve(cxled, base, len, skipped); + up_write(&cxl_dpa_rwsem); + + if (rc) + return rc; - return 256 << val; + return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); } -static int to_interleave_ways(u32 ctrl) +resource_size_t cxl_dpa_size(struct cxl_endpoint_decoder *cxled) { - int val = FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl); + resource_size_t size = 0; + + down_read(&cxl_dpa_rwsem); + if (cxled->dpa_res) + size = resource_size(cxled->dpa_res); + up_read(&cxl_dpa_rwsem); - switch (val) { - case 0 ... 4: - return 1 << val; - case 8 ... 
10: - return 3 << (val - 8); + return size; +} + +resource_size_t cxl_dpa_resource_start(struct cxl_endpoint_decoder *cxled) +{ + resource_size_t base = -1; + + down_read(&cxl_dpa_rwsem); + if (cxled->dpa_res) + base = cxled->dpa_res->start; + up_read(&cxl_dpa_rwsem); + + return base; +} + +int cxl_dpa_free(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_port *port = cxled_to_port(cxled); + struct device *dev = &cxled->cxld.dev; + int rc; + + down_write(&cxl_dpa_rwsem); + if (!cxled->dpa_res) { + rc = 0; + goto out; + } + if (cxled->cxld.region) { + dev_dbg(dev, "decoder assigned to: %s\n", + dev_name(&cxled->cxld.region->dev)); + rc = -EBUSY; + goto out; + } + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { + dev_dbg(dev, "decoder enabled\n"); + rc = -EBUSY; + goto out; + } + if (cxled->cxld.id != port->hdm_end) { + dev_dbg(dev, "expected decoder%d.%d\n", port->id, + port->hdm_end); + rc = -EBUSY; + goto out; + } + devm_cxl_dpa_release(cxled); + rc = 0; +out: + up_write(&cxl_dpa_rwsem); + return rc; +} + +int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled, + enum cxl_decoder_mode mode) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct device *dev = &cxled->cxld.dev; + int rc; + + switch (mode) { + case CXL_DECODER_RAM: + case CXL_DECODER_PMEM: + break; default: + dev_dbg(dev, "unsupported mode: %d\n", mode); + return -EINVAL; + } + + down_write(&cxl_dpa_rwsem); + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { + rc = -EBUSY; + goto out; + } + + /* + * Only allow modes that are supported by the current partition + * configuration + */ + if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) { + dev_dbg(dev, "no available pmem capacity\n"); + rc = -ENXIO; + goto out; + } + if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) { + dev_dbg(dev, "no available ram capacity\n"); + rc = -ENXIO; + goto out; + } + + cxled->mode = mode; + rc = 0; +out: + up_write(&cxl_dpa_rwsem); + + return rc; +} + +int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + resource_size_t free_ram_start, free_pmem_start; + struct cxl_port *port = cxled_to_port(cxled); + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct device *dev = &cxled->cxld.dev; + resource_size_t start, avail, skip; + struct resource *p, *last; + int rc; + + down_write(&cxl_dpa_rwsem); + if (cxled->cxld.region) { + dev_dbg(dev, "decoder attached to %s\n", + dev_name(&cxled->cxld.region->dev)); + rc = -EBUSY; + goto out; + } + + if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) { + dev_dbg(dev, "decoder enabled\n"); + rc = -EBUSY; + goto out; + } + + for (p = cxlds->ram_res.child, last = NULL; p; p = p->sibling) + last = p; + if (last) + free_ram_start = last->end + 1; + else + free_ram_start = cxlds->ram_res.start; + + for (p = cxlds->pmem_res.child, last = NULL; p; p = p->sibling) + last = p; + if (last) + free_pmem_start = last->end + 1; + else + free_pmem_start = cxlds->pmem_res.start; + + if (cxled->mode == CXL_DECODER_RAM) { + start = free_ram_start; + avail = cxlds->ram_res.end - start + 1; + skip = 0; + } else if (cxled->mode == CXL_DECODER_PMEM) { + resource_size_t skip_start, skip_end; + + start = free_pmem_start; + avail = cxlds->pmem_res.end - start + 1; + skip_start = free_ram_start; + + /* + * If some pmem is already allocated, then that allocation + * already handled the skip. 
+ */ + if (cxlds->pmem_res.child && + skip_start == cxlds->pmem_res.child->start) + skip_end = skip_start - 1; + else + skip_end = start - 1; + skip = skip_end - skip_start + 1; + } else { + dev_dbg(dev, "mode not set\n"); + rc = -EINVAL; + goto out; + } + + if (size > avail) { + dev_dbg(dev, "%pa exceeds available %s capacity: %pa\n", &size, + cxled->mode == CXL_DECODER_RAM ? "ram" : "pmem", + &avail); + rc = -ENOSPC; + goto out; + } + + rc = __cxl_dpa_reserve(cxled, start, size, skip); +out: + up_write(&cxl_dpa_rwsem); + + if (rc) + return rc; + + return devm_add_action_or_reset(&port->dev, cxl_dpa_release, cxled); +} + +static void cxld_set_interleave(struct cxl_decoder *cxld, u32 *ctrl) +{ + u16 eig; + u8 eiw; + + /* + * Input validation ensures these warns never fire, but otherwise + * suppress unititalized variable usage warnings. + */ + if (WARN_ONCE(ways_to_cxl(cxld->interleave_ways, &eiw), + "invalid interleave_ways: %d\n", cxld->interleave_ways)) + return; + if (WARN_ONCE(granularity_to_cxl(cxld->interleave_granularity, &eig), + "invalid interleave_granularity: %d\n", + cxld->interleave_granularity)) + return; + + u32p_replace_bits(ctrl, eig, CXL_HDM_DECODER0_CTRL_IG_MASK); + u32p_replace_bits(ctrl, eiw, CXL_HDM_DECODER0_CTRL_IW_MASK); + *ctrl |= CXL_HDM_DECODER0_CTRL_COMMIT; +} + +static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl) +{ + u32p_replace_bits(ctrl, !!(cxld->target_type == 3), + CXL_HDM_DECODER0_CTRL_TYPE); +} + +static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt) +{ + struct cxl_dport **t = &cxlsd->target[0]; + int ways = cxlsd->cxld.interleave_ways; + + if (dev_WARN_ONCE(&cxlsd->cxld.dev, + ways > 8 || ways > cxlsd->nr_targets, + "ways: %d overflows targets: %d\n", ways, + cxlsd->nr_targets)) + return -ENXIO; + + *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id); + if (ways > 1) + *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id); + if (ways > 2) + *tgt |= FIELD_PREP(GENMASK(23, 16), t[2]->port_id); + if (ways > 3) + *tgt |= FIELD_PREP(GENMASK(31, 24), t[3]->port_id); + if (ways > 4) + *tgt |= FIELD_PREP(GENMASK_ULL(39, 32), t[4]->port_id); + if (ways > 5) + *tgt |= FIELD_PREP(GENMASK_ULL(47, 40), t[5]->port_id); + if (ways > 6) + *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id); + if (ways > 7) + *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id); + + return 0; +} + +/* + * Per CXL 2.0 8.2.5.12.20 Committing Decoder Programming, hardware must set + * committed or error within 10ms, but just be generous with 20ms to account for + * clock skew and other marginal behavior + */ +#define COMMIT_TIMEOUT_MS 20 +static int cxld_await_commit(void __iomem *hdm, int id) +{ + u32 ctrl; + int i; + + for (i = 0; i < COMMIT_TIMEOUT_MS; i++) { + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMIT_ERROR, ctrl)) { + ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); + return -EIO; + } + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl)) + return 0; + fsleep(1000); + } + + return -ETIMEDOUT; +} + +static int cxl_decoder_commit(struct cxl_decoder *cxld) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + void __iomem *hdm = cxlhdm->regs.hdm_decoder; + int id = cxld->id, rc; + u64 base, size; + u32 ctrl; + + if (cxld->flags & CXL_DECODER_F_ENABLE) + return 0; + + if (port->commit_end + 1 != id) { + dev_dbg(&port->dev, + "%s: out of order commit, expected decoder%d.%d\n", + 
dev_name(&cxld->dev), port->id, port->commit_end + 1); + return -EBUSY; + } + + down_read(&cxl_dpa_rwsem); + /* common decoder settings */ + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id)); + cxld_set_interleave(cxld, &ctrl); + cxld_set_type(cxld, &ctrl); + base = cxld->hpa_range.start; + size = range_len(&cxld->hpa_range); + + writel(upper_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); + writel(lower_32_bits(base), hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); + writel(upper_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); + writel(lower_32_bits(size), hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); + + if (is_switch_decoder(&cxld->dev)) { + struct cxl_switch_decoder *cxlsd = + to_cxl_switch_decoder(&cxld->dev); + void __iomem *tl_hi = hdm + CXL_HDM_DECODER0_TL_HIGH(id); + void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id); + u64 targets; + + rc = cxlsd_set_targets(cxlsd, &targets); + if (rc) { + dev_dbg(&port->dev, "%s: target configuration error\n", + dev_name(&cxld->dev)); + goto err; + } + + writel(upper_32_bits(targets), tl_hi); + writel(lower_32_bits(targets), tl_lo); + } else { + struct cxl_endpoint_decoder *cxled = + to_cxl_endpoint_decoder(&cxld->dev); + void __iomem *sk_hi = hdm + CXL_HDM_DECODER0_SKIP_HIGH(id); + void __iomem *sk_lo = hdm + CXL_HDM_DECODER0_SKIP_LOW(id); + + writel(upper_32_bits(cxled->skip), sk_hi); + writel(lower_32_bits(cxled->skip), sk_lo); + } + + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); + up_read(&cxl_dpa_rwsem); + + port->commit_end++; + rc = cxld_await_commit(hdm, cxld->id); +err: + if (rc) { + dev_dbg(&port->dev, "%s: error %d committing decoder\n", + dev_name(&cxld->dev), rc); + cxld->reset(cxld); + return rc; + } + cxld->flags |= CXL_DECODER_F_ENABLE; + + return 0; +} + +static int cxl_decoder_reset(struct cxl_decoder *cxld) +{ + struct cxl_port *port = to_cxl_port(cxld->dev.parent); + struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); + void __iomem *hdm = cxlhdm->regs.hdm_decoder; + int id = cxld->id; + u32 ctrl; + + if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0) return 0; + + if (port->commit_end != id) { + dev_dbg(&port->dev, + "%s: out of order reset, expected decoder%d.%d\n", + dev_name(&cxld->dev), port->id, port->commit_end); + return -EBUSY; } + + down_read(&cxl_dpa_rwsem); + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); + ctrl &= ~CXL_HDM_DECODER0_CTRL_COMMIT; + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(id)); + + writel(0, hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(id)); + writel(0, hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(id)); + writel(0, hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(id)); + writel(0, hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(id)); + up_read(&cxl_dpa_rwsem); + + port->commit_end--; + cxld->flags &= ~CXL_DECODER_F_ENABLE; + + return 0; } static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, - int *target_map, void __iomem *hdm, int which) + int *target_map, void __iomem *hdm, int which, + u64 *dpa_base) { - u64 size, base; + struct cxl_endpoint_decoder *cxled = NULL; + u64 size, base, skip, dpa_size; + bool committed; + u32 remainder; + int i, rc; u32 ctrl; - int i; union { u64 value; unsigned char target_id[8]; } target_list; + if (is_endpoint_decoder(&cxld->dev)) + cxled = to_cxl_endpoint_decoder(&cxld->dev); + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which)); + committed = !!(ctrl & 
CXL_HDM_DECODER0_CTRL_COMMITTED); + cxld->commit = cxl_decoder_commit; + cxld->reset = cxl_decoder_reset; - if (!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED)) + if (!committed) size = 0; if (base == U64_MAX || size == U64_MAX) { dev_warn(&port->dev, "decoder%d.%d: Invalid resource range\n", @@ -172,39 +707,77 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, return -ENXIO; } - cxld->decoder_range = (struct range) { + cxld->hpa_range = (struct range) { .start = base, .end = base + size - 1, }; - /* switch decoders are always enabled if committed */ - if (ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED) { + /* decoders are enabled if committed */ + if (committed) { cxld->flags |= CXL_DECODER_F_ENABLE; if (ctrl & CXL_HDM_DECODER0_CTRL_LOCK) cxld->flags |= CXL_DECODER_F_LOCK; + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl)) + cxld->target_type = CXL_DECODER_EXPANDER; + else + cxld->target_type = CXL_DECODER_ACCELERATOR; + if (cxld->id != port->commit_end + 1) { + dev_warn(&port->dev, + "decoder%d.%d: Committed out of order\n", + port->id, cxld->id); + return -ENXIO; + } + port->commit_end = cxld->id; + } else { + /* unless / until type-2 drivers arrive, assume type-3 */ + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl) == 0) { + ctrl |= CXL_HDM_DECODER0_CTRL_TYPE; + writel(ctrl, hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); + } + cxld->target_type = CXL_DECODER_EXPANDER; } - cxld->interleave_ways = to_interleave_ways(ctrl); - if (!cxld->interleave_ways) { + rc = cxl_to_ways(FIELD_GET(CXL_HDM_DECODER0_CTRL_IW_MASK, ctrl), + &cxld->interleave_ways); + if (rc) { dev_warn(&port->dev, "decoder%d.%d: Invalid interleave ways (ctrl: %#x)\n", port->id, cxld->id, ctrl); - return -ENXIO; + return rc; } - cxld->interleave_granularity = to_interleave_granularity(ctrl); + rc = cxl_to_granularity(FIELD_GET(CXL_HDM_DECODER0_CTRL_IG_MASK, ctrl), + &cxld->interleave_granularity); + if (rc) + return rc; - if (FIELD_GET(CXL_HDM_DECODER0_CTRL_TYPE, ctrl)) - cxld->target_type = CXL_DECODER_EXPANDER; - else - cxld->target_type = CXL_DECODER_ACCELERATOR; + if (!cxled) { + target_list.value = + ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which)); + for (i = 0; i < cxld->interleave_ways; i++) + target_map[i] = target_list.target_id[i]; - if (is_endpoint_decoder(&cxld->dev)) return 0; + } - target_list.value = - ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which)); - for (i = 0; i < cxld->interleave_ways; i++) - target_map[i] = target_list.target_id[i]; + if (!committed) + return 0; + dpa_size = div_u64_rem(size, cxld->interleave_ways, &remainder); + if (remainder) { + dev_err(&port->dev, + "decoder%d.%d: invalid committed configuration size: %#llx ways: %d\n", + port->id, cxld->id, size, cxld->interleave_ways); + return -ENXIO; + } + skip = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SKIP_LOW(which)); + rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip); + if (rc) { + dev_err(&port->dev, + "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", + port->id, cxld->id, *dpa_base, + *dpa_base + dpa_size + skip - 1, rc); + return rc; + } + *dpa_base += dpa_size + skip; return 0; } @@ -216,7 +789,8 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) { void __iomem *hdm = cxlhdm->regs.hdm_decoder; struct cxl_port *port = cxlhdm->port; - int i, committed, failed; + int i, committed; + u64 dpa_base = 0; u32 ctrl; /* @@ -236,27 +810,37 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) if (committed != cxlhdm->decoder_count) msleep(20); - for (i = 0, failed = 0; i < 
cxlhdm->decoder_count; i++) { + for (i = 0; i < cxlhdm->decoder_count; i++) { int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 }; int rc, target_count = cxlhdm->target_count; struct cxl_decoder *cxld; - if (is_cxl_endpoint(port)) - cxld = cxl_endpoint_decoder_alloc(port); - else - cxld = cxl_switch_decoder_alloc(port, target_count); - if (IS_ERR(cxld)) { - dev_warn(&port->dev, - "Failed to allocate the decoder\n"); - return PTR_ERR(cxld); + if (is_cxl_endpoint(port)) { + struct cxl_endpoint_decoder *cxled; + + cxled = cxl_endpoint_decoder_alloc(port); + if (IS_ERR(cxled)) { + dev_warn(&port->dev, + "Failed to allocate the decoder\n"); + return PTR_ERR(cxled); + } + cxld = &cxled->cxld; + } else { + struct cxl_switch_decoder *cxlsd; + + cxlsd = cxl_switch_decoder_alloc(port, target_count); + if (IS_ERR(cxlsd)) { + dev_warn(&port->dev, + "Failed to allocate the decoder\n"); + return PTR_ERR(cxlsd); + } + cxld = &cxlsd->cxld; } - rc = init_hdm_decoder(port, cxld, target_map, - cxlhdm->regs.hdm_decoder, i); + rc = init_hdm_decoder(port, cxld, target_map, hdm, i, &dpa_base); if (rc) { put_device(&cxld->dev); - failed++; - continue; + return rc; } rc = add_hdm_decoder(port, cxld, target_map); if (rc) { @@ -266,11 +850,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm) } } - if (failed == cxlhdm->decoder_count) { - dev_err(&port->dev, "No valid decoders found\n"); - return -ENXIO; - } - return 0; } EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, CXL); diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index cbf23beebebe..16176b9278b4 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -718,12 +718,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL); */ static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds) { - struct cxl_mbox_get_partition_info { - __le64 active_volatile_cap; - __le64 active_persistent_cap; - __le64 next_volatile_cap; - __le64 next_persistent_cap; - } __packed pi; + struct cxl_mbox_get_partition_info pi; int rc; rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0, @@ -773,15 +768,6 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) cxlds->partition_align_bytes = le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER; - dev_dbg(cxlds->dev, - "Identify Memory Device\n" - " total_bytes = %#llx\n" - " volatile_only_bytes = %#llx\n" - " persistent_only_bytes = %#llx\n" - " partition_align_bytes = %#llx\n", - cxlds->total_bytes, cxlds->volatile_only_bytes, - cxlds->persistent_only_bytes, cxlds->partition_align_bytes); - cxlds->lsa_size = le32_to_cpu(id.lsa_size); memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision)); @@ -789,42 +775,63 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) } EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); -int cxl_mem_create_range_info(struct cxl_dev_state *cxlds) +static int add_dpa_res(struct device *dev, struct resource *parent, + struct resource *res, resource_size_t start, + resource_size_t size, const char *type) { int rc; - if (cxlds->partition_align_bytes == 0) { - cxlds->ram_range.start = 0; - cxlds->ram_range.end = cxlds->volatile_only_bytes - 1; - cxlds->pmem_range.start = cxlds->volatile_only_bytes; - cxlds->pmem_range.end = cxlds->volatile_only_bytes + - cxlds->persistent_only_bytes - 1; + res->name = type; + res->start = start; + res->end = start + size - 1; + res->flags = IORESOURCE_MEM; + if (resource_size(res) == 0) { + dev_dbg(dev, "DPA(%s): no capacity\n", res->name); return 0; } - - rc = cxl_mem_get_partition_info(cxlds); + rc 
= request_resource(parent, res); if (rc) { - dev_err(cxlds->dev, "Failed to query partition information\n"); + dev_err(dev, "DPA(%s): failed to track %pr (%d)\n", res->name, + res, rc); return rc; } - dev_dbg(cxlds->dev, - "Get Partition Info\n" - " active_volatile_bytes = %#llx\n" - " active_persistent_bytes = %#llx\n" - " next_volatile_bytes = %#llx\n" - " next_persistent_bytes = %#llx\n", - cxlds->active_volatile_bytes, cxlds->active_persistent_bytes, - cxlds->next_volatile_bytes, cxlds->next_persistent_bytes); + dev_dbg(dev, "DPA(%s): %pr\n", res->name, res); + + return 0; +} - cxlds->ram_range.start = 0; - cxlds->ram_range.end = cxlds->active_volatile_bytes - 1; +int cxl_mem_create_range_info(struct cxl_dev_state *cxlds) +{ + struct device *dev = cxlds->dev; + int rc; - cxlds->pmem_range.start = cxlds->active_volatile_bytes; - cxlds->pmem_range.end = - cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1; + cxlds->dpa_res = + (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes); - return 0; + if (cxlds->partition_align_bytes == 0) { + rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, + cxlds->volatile_only_bytes, "ram"); + if (rc) + return rc; + return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, + cxlds->volatile_only_bytes, + cxlds->persistent_only_bytes, "pmem"); + } + + rc = cxl_mem_get_partition_info(cxlds); + if (rc) { + dev_err(dev, "Failed to query partition information\n"); + return rc; + } + + rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0, + cxlds->active_volatile_bytes, "ram"); + if (rc) + return rc; + return add_dpa_res(dev, &cxlds->dpa_res, &cxlds->pmem_res, + cxlds->active_volatile_bytes, + cxlds->active_persistent_bytes, "pmem"); } EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL); @@ -845,19 +852,11 @@ struct cxl_dev_state *cxl_dev_state_create(struct device *dev) } EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL); -static struct dentry *cxl_debugfs; - void __init cxl_mbox_init(void) { struct dentry *mbox_debugfs; - cxl_debugfs = debugfs_create_dir("cxl", NULL); - mbox_debugfs = debugfs_create_dir("mbox", cxl_debugfs); + mbox_debugfs = cxl_debugfs_create_dir("mbox"); debugfs_create_bool("raw_allow_all", 0600, mbox_debugfs, &cxl_raw_allow_all); } - -void cxl_mbox_exit(void) -{ - debugfs_remove_recursive(cxl_debugfs); -} diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index f7cdcd33504a..20ce488a7754 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -68,7 +68,7 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - unsigned long long len = range_len(&cxlds->ram_range); + unsigned long long len = resource_size(&cxlds->ram_res); return sysfs_emit(buf, "%#llx\n", len); } @@ -81,7 +81,7 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr, { struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_dev_state *cxlds = cxlmd->cxlds; - unsigned long long len = range_len(&cxlds->pmem_range); + unsigned long long len = resource_size(&cxlds->pmem_res); return sysfs_emit(buf, "%#llx\n", len); } diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index c4c99ff7b55e..9240df53ed87 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -4,6 +4,7 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/pci.h> +#include <linux/pci-doe.h> #include <cxlpci.h> #include <cxlmem.h> #include <cxl.h> @@ -225,7 +226,6 @@ 
static int dvsec_range_allowed(struct device *dev, void *arg) { struct range *dev_range = arg; struct cxl_decoder *cxld; - struct range root_range; if (!is_root_decoder(dev)) return 0; @@ -237,12 +237,7 @@ static int dvsec_range_allowed(struct device *dev, void *arg) if (!(cxld->flags & CXL_DECODER_F_RAM)) return 0; - root_range = (struct range) { - .start = cxld->platform_res.start, - .end = cxld->platform_res.end, - }; - - return range_contains(&root_range, dev_range); + return range_contains(&cxld->hpa_range, dev_range); } static void disable_hdm(void *_cxlhdm) @@ -458,3 +453,175 @@ hdm_init: return 0; } EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL); + +#define CXL_DOE_TABLE_ACCESS_REQ_CODE 0x000000ff +#define CXL_DOE_TABLE_ACCESS_REQ_CODE_READ 0 +#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE 0x0000ff00 +#define CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA 0 +#define CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE 0xffff0000 +#define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff +#define CXL_DOE_PROTOCOL_TABLE_ACCESS 2 + +static struct pci_doe_mb *find_cdat_doe(struct device *uport) +{ + struct cxl_memdev *cxlmd; + struct cxl_dev_state *cxlds; + unsigned long index; + void *entry; + + cxlmd = to_cxl_memdev(uport); + cxlds = cxlmd->cxlds; + + xa_for_each(&cxlds->doe_mbs, index, entry) { + struct pci_doe_mb *cur = entry; + + if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL, + CXL_DOE_PROTOCOL_TABLE_ACCESS)) + return cur; + } + + return NULL; +} + +#define CDAT_DOE_REQ(entry_handle) \ + (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \ + CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \ + FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \ + CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \ + FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle))) + +static void cxl_doe_task_complete(struct pci_doe_task *task) +{ + complete(task->private); +} + +struct cdat_doe_task { + u32 request_pl; + u32 response_pl[32]; + struct completion c; + struct pci_doe_task task; +}; + +#define DECLARE_CDAT_DOE_TASK(req, cdt) \ +struct cdat_doe_task cdt = { \ + .c = COMPLETION_INITIALIZER_ONSTACK(cdt.c), \ + .request_pl = req, \ + .task = { \ + .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \ + .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \ + .request_pl = &cdt.request_pl, \ + .request_pl_sz = sizeof(cdt.request_pl), \ + .response_pl = cdt.response_pl, \ + .response_pl_sz = sizeof(cdt.response_pl), \ + .complete = cxl_doe_task_complete, \ + .private = &cdt.c, \ + } \ +} + +static int cxl_cdat_get_length(struct device *dev, + struct pci_doe_mb *cdat_doe, + size_t *length) +{ + DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t); + int rc; + + rc = pci_doe_submit_task(cdat_doe, &t.task); + if (rc < 0) { + dev_err(dev, "DOE submit failed: %d", rc); + return rc; + } + wait_for_completion(&t.c); + if (t.task.rv < sizeof(u32)) + return -EIO; + + *length = t.response_pl[1]; + dev_dbg(dev, "CDAT length %zu\n", *length); + + return 0; +} + +static int cxl_cdat_read_table(struct device *dev, + struct pci_doe_mb *cdat_doe, + struct cxl_cdat *cdat) +{ + size_t length = cdat->length; + u32 *data = cdat->table; + int entry_handle = 0; + + do { + DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t); + size_t entry_dw; + u32 *entry; + int rc; + + rc = pci_doe_submit_task(cdat_doe, &t.task); + if (rc < 0) { + dev_err(dev, "DOE submit failed: %d", rc); + return rc; + } + wait_for_completion(&t.c); + /* 1 DW header + 1 DW data min */ + if (t.task.rv < (2 * sizeof(u32))) + return -EIO; + + /* Get the CXL table access header entry handle */ + entry_handle = 
FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, + t.response_pl[0]); + entry = t.response_pl + 1; + entry_dw = t.task.rv / sizeof(u32); + /* Skip Header */ + entry_dw -= 1; + entry_dw = min(length / sizeof(u32), entry_dw); + /* Prevent length < 1 DW from causing a buffer overflow */ + if (entry_dw) { + memcpy(data, entry, entry_dw * sizeof(u32)); + length -= entry_dw * sizeof(u32); + data += entry_dw; + } + } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); + + return 0; +} + +/** + * read_cdat_data - Read the CDAT data on this port + * @port: Port to read data from + * + * This call will sleep waiting for responses from the DOE mailbox. + */ +void read_cdat_data(struct cxl_port *port) +{ + struct pci_doe_mb *cdat_doe; + struct device *dev = &port->dev; + struct device *uport = port->uport; + size_t cdat_length; + int rc; + + cdat_doe = find_cdat_doe(uport); + if (!cdat_doe) { + dev_dbg(dev, "No CDAT mailbox\n"); + return; + } + + port->cdat_available = true; + + if (cxl_cdat_get_length(dev, cdat_doe, &cdat_length)) { + dev_dbg(dev, "No CDAT length\n"); + return; + } + + port->cdat.table = devm_kzalloc(dev, cdat_length, GFP_KERNEL); + if (!port->cdat.table) + return; + + port->cdat.length = cdat_length; + rc = cxl_cdat_read_table(dev, cdat_doe, &port->cdat); + if (rc) { + /* Don't leave table data allocated on error */ + devm_kfree(dev, port->cdat.table); + port->cdat.table = NULL; + port->cdat.length = 0; + dev_err(dev, "CDAT data read error\n"); + } +} +EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL); diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index bec7cfb54ebf..1d12a8206444 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -62,9 +62,9 @@ static int match_nvdimm_bridge(struct device *dev, void *data) return is_cxl_nvdimm_bridge(dev); } -struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd) +struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *start) { - struct cxl_port *port = find_cxl_root(&cxl_nvd->dev); + struct cxl_port *port = find_cxl_root(start); struct device *dev; if (!port) diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index dbce99bdffab..bffde862de0b 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. All rights reserved. 
*/ #include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/memregion.h> #include <linux/workqueue.h> +#include <linux/debugfs.h> #include <linux/device.h> #include <linux/module.h> #include <linux/pci.h> @@ -42,6 +44,8 @@ static int cxl_device_id(struct device *dev) return CXL_DEVICE_NVDIMM_BRIDGE; if (dev->type == &cxl_nvdimm_type) return CXL_DEVICE_NVDIMM; + if (dev->type == CXL_PMEM_REGION_TYPE()) + return CXL_DEVICE_PMEM_REGION; if (is_cxl_port(dev)) { if (is_cxl_root(to_cxl_port(dev))) return CXL_DEVICE_ROOT; @@ -49,6 +53,8 @@ static int cxl_device_id(struct device *dev) } if (is_cxl_memdev(dev)) return CXL_DEVICE_MEMORY_EXPANDER; + if (dev->type == CXL_REGION_TYPE()) + return CXL_DEVICE_REGION; return 0; } @@ -73,14 +79,8 @@ static ssize_t start_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_decoder *cxld = to_cxl_decoder(dev); - u64 start; - if (is_root_decoder(dev)) - start = cxld->platform_res.start; - else - start = cxld->decoder_range.start; - - return sysfs_emit(buf, "%#llx\n", start); + return sysfs_emit(buf, "%#llx\n", cxld->hpa_range.start); } static DEVICE_ATTR_ADMIN_RO(start); @@ -88,14 +88,8 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_decoder *cxld = to_cxl_decoder(dev); - u64 size; - - if (is_root_decoder(dev)) - size = resource_size(&cxld->platform_res); - else - size = range_len(&cxld->decoder_range); - return sysfs_emit(buf, "%#llx\n", size); + return sysfs_emit(buf, "%#llx\n", range_len(&cxld->hpa_range)); } static DEVICE_ATTR_RO(size); @@ -131,20 +125,21 @@ static ssize_t target_type_show(struct device *dev, } static DEVICE_ATTR_RO(target_type); -static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf) +static ssize_t emit_target_list(struct cxl_switch_decoder *cxlsd, char *buf) { + struct cxl_decoder *cxld = &cxlsd->cxld; ssize_t offset = 0; int i, rc = 0; for (i = 0; i < cxld->interleave_ways; i++) { - struct cxl_dport *dport = cxld->target[i]; + struct cxl_dport *dport = cxlsd->target[i]; struct cxl_dport *next = NULL; if (!dport) break; if (i + 1 < cxld->interleave_ways) - next = cxld->target[i + 1]; + next = cxlsd->target[i + 1]; rc = sysfs_emit_at(buf, offset, "%d%s", dport->port_id, next ? 
"," : ""); if (rc < 0) @@ -158,15 +153,15 @@ static ssize_t emit_target_list(struct cxl_decoder *cxld, char *buf) static ssize_t target_list_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct cxl_decoder *cxld = to_cxl_decoder(dev); + struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); ssize_t offset; unsigned int seq; int rc; do { - seq = read_seqbegin(&cxld->target_lock); - rc = emit_target_list(cxld, buf); - } while (read_seqretry(&cxld->target_lock, seq)); + seq = read_seqbegin(&cxlsd->target_lock); + rc = emit_target_list(cxlsd, buf); + } while (read_seqretry(&cxlsd->target_lock, seq)); if (rc < 0) return rc; @@ -180,10 +175,121 @@ static ssize_t target_list_show(struct device *dev, } static DEVICE_ATTR_RO(target_list); +static ssize_t mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + + switch (cxled->mode) { + case CXL_DECODER_RAM: + return sysfs_emit(buf, "ram\n"); + case CXL_DECODER_PMEM: + return sysfs_emit(buf, "pmem\n"); + case CXL_DECODER_NONE: + return sysfs_emit(buf, "none\n"); + case CXL_DECODER_MIXED: + default: + return sysfs_emit(buf, "mixed\n"); + } +} + +static ssize_t mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + enum cxl_decoder_mode mode; + ssize_t rc; + + if (sysfs_streq(buf, "pmem")) + mode = CXL_DECODER_PMEM; + else if (sysfs_streq(buf, "ram")) + mode = CXL_DECODER_RAM; + else + return -EINVAL; + + rc = cxl_dpa_set_mode(cxled, mode); + if (rc) + return rc; + + return len; +} +static DEVICE_ATTR_RW(mode); + +static ssize_t dpa_resource_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + u64 base = cxl_dpa_resource_start(cxled); + + return sysfs_emit(buf, "%#llx\n", base); +} +static DEVICE_ATTR_RO(dpa_resource); + +static ssize_t dpa_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + resource_size_t size = cxl_dpa_size(cxled); + + return sysfs_emit(buf, "%pa\n", &size); +} + +static ssize_t dpa_size_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + unsigned long long size; + ssize_t rc; + + rc = kstrtoull(buf, 0, &size); + if (rc) + return rc; + + if (!IS_ALIGNED(size, SZ_256M)) + return -EINVAL; + + rc = cxl_dpa_free(cxled); + if (rc) + return rc; + + if (size == 0) + return len; + + rc = cxl_dpa_alloc(cxled, size); + if (rc) + return rc; + + return len; +} +static DEVICE_ATTR_RW(dpa_size); + +static ssize_t interleave_granularity_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + return sysfs_emit(buf, "%d\n", cxld->interleave_granularity); +} + +static DEVICE_ATTR_RO(interleave_granularity); + +static ssize_t interleave_ways_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + + return sysfs_emit(buf, "%d\n", cxld->interleave_ways); +} + +static DEVICE_ATTR_RO(interleave_ways); + static struct attribute *cxl_decoder_base_attrs[] = { &dev_attr_start.attr, &dev_attr_size.attr, &dev_attr_locked.attr, + &dev_attr_interleave_granularity.attr, + &dev_attr_interleave_ways.attr, 
NULL, }; @@ -197,11 +303,35 @@ static struct attribute *cxl_decoder_root_attrs[] = { &dev_attr_cap_type2.attr, &dev_attr_cap_type3.attr, &dev_attr_target_list.attr, + SET_CXL_REGION_ATTR(create_pmem_region) + SET_CXL_REGION_ATTR(delete_region) NULL, }; +static bool can_create_pmem(struct cxl_root_decoder *cxlrd) +{ + unsigned long flags = CXL_DECODER_F_TYPE3 | CXL_DECODER_F_PMEM; + + return (cxlrd->cxlsd.cxld.flags & flags) == flags; +} + +static umode_t cxl_root_decoder_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); + + if (a == CXL_REGION_ATTR(create_pmem_region) && !can_create_pmem(cxlrd)) + return 0; + + if (a == CXL_REGION_ATTR(delete_region) && !can_create_pmem(cxlrd)) + return 0; + + return a->mode; +} + static struct attribute_group cxl_decoder_root_attribute_group = { .attrs = cxl_decoder_root_attrs, + .is_visible = cxl_root_decoder_visible, }; static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { @@ -214,6 +344,7 @@ static const struct attribute_group *cxl_decoder_root_attribute_groups[] = { static struct attribute *cxl_decoder_switch_attrs[] = { &dev_attr_target_type.attr, &dev_attr_target_list.attr, + SET_CXL_REGION_ATTR(region) NULL, }; @@ -230,6 +361,10 @@ static const struct attribute_group *cxl_decoder_switch_attribute_groups[] = { static struct attribute *cxl_decoder_endpoint_attrs[] = { &dev_attr_target_type.attr, + &dev_attr_mode.attr, + &dev_attr_dpa_size.attr, + &dev_attr_dpa_resource.attr, + SET_CXL_REGION_ATTR(region) NULL, }; @@ -244,31 +379,64 @@ static const struct attribute_group *cxl_decoder_endpoint_attribute_groups[] = { NULL, }; -static void cxl_decoder_release(struct device *dev) +static void __cxl_decoder_release(struct cxl_decoder *cxld) { - struct cxl_decoder *cxld = to_cxl_decoder(dev); - struct cxl_port *port = to_cxl_port(dev->parent); + struct cxl_port *port = to_cxl_port(cxld->dev.parent); ida_free(&port->decoder_ida, cxld->id); - kfree(cxld); put_device(&port->dev); } +static void cxl_endpoint_decoder_release(struct device *dev) +{ + struct cxl_endpoint_decoder *cxled = to_cxl_endpoint_decoder(dev); + + __cxl_decoder_release(&cxled->cxld); + kfree(cxled); +} + +static void cxl_switch_decoder_release(struct device *dev) +{ + struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); + + __cxl_decoder_release(&cxlsd->cxld); + kfree(cxlsd); +} + +struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_root_decoder(dev), + "not a cxl_root_decoder device\n")) + return NULL; + return container_of(dev, struct cxl_root_decoder, cxlsd.cxld.dev); +} +EXPORT_SYMBOL_NS_GPL(to_cxl_root_decoder, CXL); + +static void cxl_root_decoder_release(struct device *dev) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); + + if (atomic_read(&cxlrd->region_id) >= 0) + memregion_free(atomic_read(&cxlrd->region_id)); + __cxl_decoder_release(&cxlrd->cxlsd.cxld); + kfree(cxlrd); +} + static const struct device_type cxl_decoder_endpoint_type = { .name = "cxl_decoder_endpoint", - .release = cxl_decoder_release, + .release = cxl_endpoint_decoder_release, .groups = cxl_decoder_endpoint_attribute_groups, }; static const struct device_type cxl_decoder_switch_type = { .name = "cxl_decoder_switch", - .release = cxl_decoder_release, + .release = cxl_switch_decoder_release, .groups = cxl_decoder_switch_attribute_groups, }; static const struct device_type cxl_decoder_root_type = { .name = 
"cxl_decoder_root", - .release = cxl_decoder_release, + .release = cxl_root_decoder_release, .groups = cxl_decoder_root_attribute_groups, }; @@ -283,39 +451,63 @@ bool is_root_decoder(struct device *dev) } EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL); -bool is_cxl_decoder(struct device *dev) +bool is_switch_decoder(struct device *dev) { - return dev->type && dev->type->release == cxl_decoder_release; + return is_root_decoder(dev) || dev->type == &cxl_decoder_switch_type; } -EXPORT_SYMBOL_NS_GPL(is_cxl_decoder, CXL); struct cxl_decoder *to_cxl_decoder(struct device *dev) { - if (dev_WARN_ONCE(dev, dev->type->release != cxl_decoder_release, + if (dev_WARN_ONCE(dev, + !is_switch_decoder(dev) && !is_endpoint_decoder(dev), "not a cxl_decoder device\n")) return NULL; return container_of(dev, struct cxl_decoder, dev); } EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL); +struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_endpoint_decoder(dev), + "not a cxl_endpoint_decoder device\n")) + return NULL; + return container_of(dev, struct cxl_endpoint_decoder, cxld.dev); +} +EXPORT_SYMBOL_NS_GPL(to_cxl_endpoint_decoder, CXL); + +struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_switch_decoder(dev), + "not a cxl_switch_decoder device\n")) + return NULL; + return container_of(dev, struct cxl_switch_decoder, cxld.dev); +} + static void cxl_ep_release(struct cxl_ep *ep) { - if (!ep) - return; - list_del(&ep->list); put_device(ep->ep); kfree(ep); } +static void cxl_ep_remove(struct cxl_port *port, struct cxl_ep *ep) +{ + if (!ep) + return; + xa_erase(&port->endpoints, (unsigned long) ep->ep); + cxl_ep_release(ep); +} + static void cxl_port_release(struct device *dev) { struct cxl_port *port = to_cxl_port(dev); - struct cxl_ep *ep, *_e; + unsigned long index; + struct cxl_ep *ep; - device_lock(dev); - list_for_each_entry_safe(ep, _e, &port->endpoints, list) - cxl_ep_release(ep); - device_unlock(dev); + xa_for_each(&port->endpoints, index, ep) + cxl_ep_remove(port, ep); + xa_destroy(&port->endpoints); + xa_destroy(&port->dports); + xa_destroy(&port->regions); ida_free(&cxl_port_ida, port->id); kfree(port); } @@ -370,7 +562,7 @@ static void unregister_port(void *_port) lock_dev = &parent->dev; device_lock_assert(lock_dev); - port->uport = NULL; + port->dead = true; device_unregister(&port->dev); } @@ -395,7 +587,7 @@ static struct lock_class_key cxl_port_key; static struct cxl_port *cxl_port_alloc(struct device *uport, resource_size_t component_reg_phys, - struct cxl_port *parent_port) + struct cxl_dport *parent_dport) { struct cxl_port *port; struct device *dev; @@ -409,6 +601,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, if (rc < 0) goto err; port->id = rc; + port->uport = uport; /* * The top-level cxl_port "cxl_root" does not have a cxl_port as @@ -417,17 +610,37 @@ static struct cxl_port *cxl_port_alloc(struct device *uport, * description. 
*/ dev = &port->dev; - if (parent_port) { + if (parent_dport) { + struct cxl_port *parent_port = parent_dport->port; + struct cxl_port *iter; + dev->parent = &parent_port->dev; port->depth = parent_port->depth + 1; + port->parent_dport = parent_dport; + + /* + * walk to the host bridge, or the first ancestor that knows + * the host bridge + */ + iter = port; + while (!iter->host_bridge && + !is_cxl_root(to_cxl_port(iter->dev.parent))) + iter = to_cxl_port(iter->dev.parent); + if (iter->host_bridge) + port->host_bridge = iter->host_bridge; + else + port->host_bridge = iter->uport; + dev_dbg(uport, "host-bridge: %s\n", dev_name(port->host_bridge)); } else dev->parent = uport; - port->uport = uport; port->component_reg_phys = component_reg_phys; ida_init(&port->decoder_ida); - INIT_LIST_HEAD(&port->dports); - INIT_LIST_HEAD(&port->endpoints); + port->hdm_end = -1; + port->commit_end = -1; + xa_init(&port->dports); + xa_init(&port->endpoints); + xa_init(&port->regions); device_initialize(dev); lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth); @@ -447,24 +660,24 @@ err: * @host: host device for devm operations * @uport: "physical" device implementing this upstream port * @component_reg_phys: (optional) for configurable cxl_port instances - * @parent_port: next hop up in the CXL memory decode hierarchy + * @parent_dport: next hop up in the CXL memory decode hierarchy */ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport, resource_size_t component_reg_phys, - struct cxl_port *parent_port) + struct cxl_dport *parent_dport) { struct cxl_port *port; struct device *dev; int rc; - port = cxl_port_alloc(uport, component_reg_phys, parent_port); + port = cxl_port_alloc(uport, component_reg_phys, parent_dport); if (IS_ERR(port)) return port; dev = &port->dev; if (is_cxl_memdev(uport)) rc = dev_set_name(dev, "endpoint%d", port->id); - else if (parent_port) + else if (parent_dport) rc = dev_set_name(dev, "port%d", port->id); else rc = dev_set_name(dev, "root%d", port->id); @@ -556,17 +769,13 @@ static int match_root_child(struct device *dev, const void *match) return 0; port = to_cxl_port(dev); - device_lock(dev); - list_for_each_entry(dport, &port->dports, list) { - iter = match; - while (iter) { - if (iter == dport->dport) - goto out; - iter = iter->parent; - } + iter = match; + while (iter) { + dport = cxl_find_dport_by_dev(port, iter); + if (dport) + break; + iter = iter->parent; } -out: - device_unlock(dev); return !!iter; } @@ -590,9 +799,10 @@ EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL); static struct cxl_dport *find_dport(struct cxl_port *port, int id) { struct cxl_dport *dport; + unsigned long index; device_lock_assert(&port->dev); - list_for_each_entry (dport, &port->dports, list) + xa_for_each(&port->dports, index, dport) if (dport->port_id == id) return dport; return NULL; @@ -604,15 +814,15 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new) device_lock_assert(&port->dev); dup = find_dport(port, new->port_id); - if (dup) + if (dup) { dev_err(&port->dev, "unable to add dport%d-%s non-unique port id (%s)\n", new->port_id, dev_name(new->dport), dev_name(dup->dport)); - else - list_add_tail(&new->list, &port->dports); - - return dup ? 
-EEXIST : 0; + return -EBUSY; + } + return xa_insert(&port->dports, (unsigned long)new->dport, new, + GFP_KERNEL); } /* @@ -639,10 +849,8 @@ static void cxl_dport_remove(void *data) struct cxl_dport *dport = data; struct cxl_port *port = dport->port; + xa_erase(&port->dports, (unsigned long) dport->dport); put_device(dport->dport); - cond_cxl_root_lock(port); - list_del(&dport->list); - cond_cxl_root_unlock(port); } static void cxl_dport_unlink(void *data) @@ -694,7 +902,6 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, if (!dport) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&dport->list); dport->dport = dport_dev; dport->port_id = port_id; dport->component_reg_phys = component_reg_phys; @@ -723,44 +930,33 @@ struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port, } EXPORT_SYMBOL_NS_GPL(devm_cxl_add_dport, CXL); -static struct cxl_ep *find_ep(struct cxl_port *port, struct device *ep_dev) -{ - struct cxl_ep *ep; - - device_lock_assert(&port->dev); - list_for_each_entry(ep, &port->endpoints, list) - if (ep->ep == ep_dev) - return ep; - return NULL; -} - -static int add_ep(struct cxl_port *port, struct cxl_ep *new) +static int add_ep(struct cxl_ep *new) { - struct cxl_ep *dup; + struct cxl_port *port = new->dport->port; + int rc; device_lock(&port->dev); if (port->dead) { device_unlock(&port->dev); return -ENXIO; } - dup = find_ep(port, new->ep); - if (!dup) - list_add_tail(&new->list, &port->endpoints); + rc = xa_insert(&port->endpoints, (unsigned long)new->ep, new, + GFP_KERNEL); device_unlock(&port->dev); - return dup ? -EEXIST : 0; + return rc; } /** * cxl_add_ep - register an endpoint's interest in a port - * @port: a port in the endpoint's topology ancestry + * @dport: the dport that routes to @ep_dev * @ep_dev: device representing the endpoint * * Intermediate CXL ports are scanned based on the arrival of endpoints. * When those endpoints depart the port can be destroyed once all * endpoints that care about that port have been removed. 
*/ -static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev) +static int cxl_add_ep(struct cxl_dport *dport, struct device *ep_dev) { struct cxl_ep *ep; int rc; @@ -769,10 +965,10 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev) if (!ep) return -ENOMEM; - INIT_LIST_HEAD(&ep->list); ep->ep = get_device(ep_dev); + ep->dport = dport; - rc = add_ep(port, ep); + rc = add_ep(ep); if (rc) cxl_ep_release(ep); return rc; @@ -781,11 +977,13 @@ static int cxl_add_ep(struct cxl_port *port, struct device *ep_dev) struct cxl_find_port_ctx { const struct device *dport_dev; const struct cxl_port *parent_port; + struct cxl_dport **dport; }; static int match_port_by_dport(struct device *dev, const void *data) { const struct cxl_find_port_ctx *ctx = data; + struct cxl_dport *dport; struct cxl_port *port; if (!is_cxl_port(dev)) @@ -794,7 +992,10 @@ static int match_port_by_dport(struct device *dev, const void *data) return 0; port = to_cxl_port(dev); - return cxl_find_dport_by_dev(port, ctx->dport_dev) != NULL; + dport = cxl_find_dport_by_dev(port, ctx->dport_dev); + if (ctx->dport) + *ctx->dport = dport; + return dport != NULL; } static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx) @@ -810,24 +1011,32 @@ static struct cxl_port *__find_cxl_port(struct cxl_find_port_ctx *ctx) return NULL; } -static struct cxl_port *find_cxl_port(struct device *dport_dev) +static struct cxl_port *find_cxl_port(struct device *dport_dev, + struct cxl_dport **dport) { struct cxl_find_port_ctx ctx = { .dport_dev = dport_dev, + .dport = dport, }; + struct cxl_port *port; - return __find_cxl_port(&ctx); + port = __find_cxl_port(&ctx); + return port; } static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port, - struct device *dport_dev) + struct device *dport_dev, + struct cxl_dport **dport) { struct cxl_find_port_ctx ctx = { .dport_dev = dport_dev, .parent_port = parent_port, + .dport = dport, }; + struct cxl_port *port; - return __find_cxl_port(&ctx); + port = __find_cxl_port(&ctx); + return port; } /* @@ -851,13 +1060,13 @@ static void delete_endpoint(void *data) struct cxl_port *parent_port; struct device *parent; - parent_port = cxl_mem_find_port(cxlmd); + parent_port = cxl_mem_find_port(cxlmd, NULL); if (!parent_port) goto out; parent = &parent_port->dev; device_lock(parent); - if (parent->driver && endpoint->uport) { + if (parent->driver && !endpoint->dead) { devm_release_action(parent, cxl_unlink_uport, endpoint); devm_release_action(parent, unregister_port, endpoint); } @@ -883,21 +1092,70 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, CXL); * for a port to be unregistered is when all memdevs beneath that port have gone * through ->remove(). This "bottom-up" removal selectively removes individual * child ports manually. This depends on devm_cxl_add_port() to not change is - * devm action registration order. + * devm action registration order, and for dports to have already been + * destroyed by reap_dports(). 
*/ -static void delete_switch_port(struct cxl_port *port, struct list_head *dports) +static void delete_switch_port(struct cxl_port *port) +{ + devm_release_action(port->dev.parent, cxl_unlink_uport, port); + devm_release_action(port->dev.parent, unregister_port, port); +} + +static void reap_dports(struct cxl_port *port) { - struct cxl_dport *dport, *_d; + struct cxl_dport *dport; + unsigned long index; + + device_lock_assert(&port->dev); - list_for_each_entry_safe(dport, _d, dports, list) { + xa_for_each(&port->dports, index, dport) { devm_release_action(&port->dev, cxl_dport_unlink, dport); devm_release_action(&port->dev, cxl_dport_remove, dport); devm_kfree(&port->dev, dport); } - devm_release_action(port->dev.parent, cxl_unlink_uport, port); - devm_release_action(port->dev.parent, unregister_port, port); } +int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd, + struct cxl_dport *parent_dport) +{ + struct cxl_port *parent_port = parent_dport->port; + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_port *endpoint, *iter, *down; + int rc; + + /* + * Now that the path to the root is established record all the + * intervening ports in the chain. + */ + for (iter = parent_port, down = NULL; !is_cxl_root(iter); + down = iter, iter = to_cxl_port(iter->dev.parent)) { + struct cxl_ep *ep; + + ep = cxl_ep_load(iter, cxlmd); + ep->next = down; + } + + endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev, + cxlds->component_reg_phys, parent_dport); + if (IS_ERR(endpoint)) + return PTR_ERR(endpoint); + + dev_dbg(&cxlmd->dev, "add: %s\n", dev_name(&endpoint->dev)); + + rc = cxl_endpoint_autoremove(cxlmd, endpoint); + if (rc) + return rc; + + if (!endpoint->dev.driver) { + dev_err(&cxlmd->dev, "%s failed probe\n", + dev_name(&endpoint->dev)); + return -ENXIO; + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(devm_cxl_add_endpoint, CXL); + static void cxl_detach_ep(void *data) { struct cxl_memdev *cxlmd = data; @@ -906,13 +1164,13 @@ static void cxl_detach_ep(void *data) for (iter = &cxlmd->dev; iter; iter = grandparent(iter)) { struct device *dport_dev = grandparent(iter); struct cxl_port *port, *parent_port; - LIST_HEAD(reap_dports); struct cxl_ep *ep; + bool died = false; if (!dport_dev) break; - port = find_cxl_port(dport_dev); + port = find_cxl_port(dport_dev, NULL); if (!port) continue; @@ -936,26 +1194,27 @@ static void cxl_detach_ep(void *data) } device_lock(&port->dev); - ep = find_ep(port, &cxlmd->dev); + ep = cxl_ep_load(port, cxlmd); dev_dbg(&cxlmd->dev, "disconnect %s from %s\n", ep ? dev_name(ep->ep) : "", dev_name(&port->dev)); - cxl_ep_release(ep); - if (ep && !port->dead && list_empty(&port->endpoints) && + cxl_ep_remove(port, ep); + if (ep && !port->dead && xa_empty(&port->endpoints) && !is_cxl_root(parent_port)) { /* * This was the last ep attached to a dynamically * enumerated port. Block new cxl_add_ep() and garbage * collect the port. 
*/ + died = true; port->dead = true; - list_splice_init(&port->dports, &reap_dports); + reap_dports(port); } device_unlock(&port->dev); - if (!list_empty(&reap_dports)) { + if (died) { dev_dbg(&cxlmd->dev, "delete %s\n", dev_name(&port->dev)); - delete_switch_port(port, &reap_dports); + delete_switch_port(port); } put_device(&port->dev); device_unlock(&parent_port->dev); @@ -986,6 +1245,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, { struct device *dparent = grandparent(dport_dev); struct cxl_port *port, *parent_port = NULL; + struct cxl_dport *dport, *parent_dport; resource_size_t component_reg_phys; int rc; @@ -1000,7 +1260,7 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, return -ENXIO; } - parent_port = find_cxl_port(dparent); + parent_port = find_cxl_port(dparent, &parent_dport); if (!parent_port) { /* iterate to create this parent_port */ return -EAGAIN; @@ -1015,13 +1275,14 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd, goto out; } - port = find_cxl_port_at(parent_port, dport_dev); + port = find_cxl_port_at(parent_port, dport_dev, &dport); if (!port) { component_reg_phys = find_component_registers(uport_dev); port = devm_cxl_add_port(&parent_port->dev, uport_dev, - component_reg_phys, parent_port); + component_reg_phys, parent_dport); + /* retry find to pick up the new dport information */ if (!IS_ERR(port)) - get_device(&port->dev); + port = find_cxl_port_at(parent_port, dport_dev, &dport); } out: device_unlock(&parent_port->dev); @@ -1031,8 +1292,8 @@ out: else { dev_dbg(&cxlmd->dev, "add to new port %s:%s\n", dev_name(&port->dev), dev_name(port->uport)); - rc = cxl_add_ep(port, &cxlmd->dev); - if (rc == -EEXIST) { + rc = cxl_add_ep(dport, &cxlmd->dev); + if (rc == -EBUSY) { /* * "can't" happen, but this error code means * something to the caller, so translate it. @@ -1065,6 +1326,7 @@ retry: for (iter = dev; iter; iter = grandparent(iter)) { struct device *dport_dev = grandparent(iter); struct device *uport_dev; + struct cxl_dport *dport; struct cxl_port *port; if (!dport_dev) @@ -1080,12 +1342,12 @@ retry: dev_dbg(dev, "scan: iter: %s dport_dev: %s parent: %s\n", dev_name(iter), dev_name(dport_dev), dev_name(uport_dev)); - port = find_cxl_port(dport_dev); + port = find_cxl_port(dport_dev, &dport); if (port) { dev_dbg(&cxlmd->dev, "found already registered port %s:%s\n", dev_name(&port->dev), dev_name(port->uport)); - rc = cxl_add_ep(port, &cxlmd->dev); + rc = cxl_add_ep(dport, &cxlmd->dev); /* * If the endpoint already exists in the port's list, @@ -1094,7 +1356,7 @@ retry: * the parent_port lock as the current port may be being * reaped. 
*/ - if (rc && rc != -EEXIST) { + if (rc && rc != -EBUSY) { put_device(&port->dev); return rc; } @@ -1124,30 +1386,14 @@ retry: } EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_ports, CXL); -struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd) +struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd, + struct cxl_dport **dport) { - return find_cxl_port(grandparent(&cxlmd->dev)); + return find_cxl_port(grandparent(&cxlmd->dev), dport); } EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL); -struct cxl_dport *cxl_find_dport_by_dev(struct cxl_port *port, - const struct device *dev) -{ - struct cxl_dport *dport; - - device_lock(&port->dev); - list_for_each_entry(dport, &port->dports, list) - if (dport->dport == dev) { - device_unlock(&port->dev); - return dport; - } - - device_unlock(&port->dev); - return NULL; -} -EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL); - -static int decoder_populate_targets(struct cxl_decoder *cxld, +static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd, struct cxl_port *port, int *target_map) { int i, rc = 0; @@ -1157,88 +1403,92 @@ static int decoder_populate_targets(struct cxl_decoder *cxld, device_lock_assert(&port->dev); - if (list_empty(&port->dports)) + if (xa_empty(&port->dports)) return -EINVAL; - write_seqlock(&cxld->target_lock); - for (i = 0; i < cxld->nr_targets; i++) { + write_seqlock(&cxlsd->target_lock); + for (i = 0; i < cxlsd->nr_targets; i++) { struct cxl_dport *dport = find_dport(port, target_map[i]); if (!dport) { rc = -ENXIO; break; } - cxld->target[i] = dport; + cxlsd->target[i] = dport; } - write_sequnlock(&cxld->target_lock); + write_sequnlock(&cxlsd->target_lock); return rc; } +static struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos) +{ + struct cxl_switch_decoder *cxlsd = &cxlrd->cxlsd; + struct cxl_decoder *cxld = &cxlsd->cxld; + int iw; + + iw = cxld->interleave_ways; + if (dev_WARN_ONCE(&cxld->dev, iw != cxlsd->nr_targets, + "misconfigured root decoder\n")) + return NULL; + + return cxlrd->cxlsd.target[pos % iw]; +} + static struct lock_class_key cxl_decoder_key; /** - * cxl_decoder_alloc - Allocate a new CXL decoder + * cxl_decoder_init - Common decoder setup / initialization * @port: owning port of this decoder - * @nr_targets: downstream targets accessible by this decoder. All upstream - * ports and root ports must have at least 1 target. Endpoint - * devices will have 0 targets. Callers wishing to register an - * endpoint device should specify 0. - * - * A port should contain one or more decoders. Each of those decoders enable - * some address space for CXL.mem utilization. A decoder is expected to be - * configured by the caller before registering. + * @cxld: common decoder properties to initialize * - * Return: A new cxl decoder to be registered by cxl_decoder_add(). The decoder - * is initialized to be a "passthrough" decoder. + * A port may contain one or more decoders. Each of those decoders + * enable some address space for CXL.mem utilization. 
A decoder is + * expected to be configured by the caller before registering via + * cxl_decoder_add() */ -static struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, - unsigned int nr_targets) +static int cxl_decoder_init(struct cxl_port *port, struct cxl_decoder *cxld) { - struct cxl_decoder *cxld; struct device *dev; - int rc = 0; - - if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) - return ERR_PTR(-EINVAL); - - cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL); - if (!cxld) - return ERR_PTR(-ENOMEM); + int rc; rc = ida_alloc(&port->decoder_ida, GFP_KERNEL); if (rc < 0) - goto err; + return rc; /* need parent to stick around to release the id */ get_device(&port->dev); cxld->id = rc; - cxld->nr_targets = nr_targets; - seqlock_init(&cxld->target_lock); dev = &cxld->dev; device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_decoder_key); device_set_pm_not_required(dev); dev->parent = &port->dev; dev->bus = &cxl_bus_type; - if (is_cxl_root(port)) - cxld->dev.type = &cxl_decoder_root_type; - else if (is_cxl_endpoint(port)) - cxld->dev.type = &cxl_decoder_endpoint_type; - else - cxld->dev.type = &cxl_decoder_switch_type; /* Pre initialize an "empty" decoder */ cxld->interleave_ways = 1; cxld->interleave_granularity = PAGE_SIZE; cxld->target_type = CXL_DECODER_EXPANDER; - cxld->platform_res = (struct resource)DEFINE_RES_MEM(0, 0); + cxld->hpa_range = (struct range) { + .start = 0, + .end = -1, + }; - return cxld; -err: - kfree(cxld); - return ERR_PTR(rc); + return 0; +} + +static int cxl_switch_decoder_init(struct cxl_port *port, + struct cxl_switch_decoder *cxlsd, + int nr_targets) +{ + if (nr_targets > CXL_DECODER_MAX_INTERLEAVE) + return -EINVAL; + + cxlsd->nr_targets = nr_targets; + seqlock_init(&cxlsd->target_lock); + return cxl_decoder_init(port, &cxlsd->cxld); } /** @@ -1251,13 +1501,46 @@ err: * firmware description of CXL resources into a CXL standard decode * topology. */ -struct cxl_decoder *cxl_root_decoder_alloc(struct cxl_port *port, - unsigned int nr_targets) +struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets) { + struct cxl_root_decoder *cxlrd; + struct cxl_switch_decoder *cxlsd; + struct cxl_decoder *cxld; + int rc; + if (!is_cxl_root(port)) return ERR_PTR(-EINVAL); - return cxl_decoder_alloc(port, nr_targets); + cxlrd = kzalloc(struct_size(cxlrd, cxlsd.target, nr_targets), + GFP_KERNEL); + if (!cxlrd) + return ERR_PTR(-ENOMEM); + + cxlsd = &cxlrd->cxlsd; + rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); + if (rc) { + kfree(cxlrd); + return ERR_PTR(rc); + } + + cxlrd->calc_hb = cxl_hb_modulo; + + cxld = &cxlsd->cxld; + cxld->dev.type = &cxl_decoder_root_type; + /* + * cxl_root_decoder_release() special cases negative ids to + * detect memregion_alloc() failures. + */ + atomic_set(&cxlrd->region_id, -1); + rc = memregion_alloc(GFP_KERNEL); + if (rc < 0) { + put_device(&cxld->dev); + return ERR_PTR(rc); + } + + atomic_set(&cxlrd->region_id, rc); + return cxlrd; } EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL); @@ -1272,13 +1555,29 @@ EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL); * that sit between Switch Upstream Ports / Switch Downstream Ports and * Host Bridges / Root Ports. 
*/ -struct cxl_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, - unsigned int nr_targets) +struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port, + unsigned int nr_targets) { + struct cxl_switch_decoder *cxlsd; + struct cxl_decoder *cxld; + int rc; + if (is_cxl_root(port) || is_cxl_endpoint(port)) return ERR_PTR(-EINVAL); - return cxl_decoder_alloc(port, nr_targets); + cxlsd = kzalloc(struct_size(cxlsd, target, nr_targets), GFP_KERNEL); + if (!cxlsd) + return ERR_PTR(-ENOMEM); + + rc = cxl_switch_decoder_init(port, cxlsd, nr_targets); + if (rc) { + kfree(cxlsd); + return ERR_PTR(rc); + } + + cxld = &cxlsd->cxld; + cxld->dev.type = &cxl_decoder_switch_type; + return cxlsd; } EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); @@ -1288,18 +1587,35 @@ EXPORT_SYMBOL_NS_GPL(cxl_switch_decoder_alloc, CXL); * * Return: A new cxl decoder to be registered by cxl_decoder_add() */ -struct cxl_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) +struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port) { + struct cxl_endpoint_decoder *cxled; + struct cxl_decoder *cxld; + int rc; + if (!is_cxl_endpoint(port)) return ERR_PTR(-EINVAL); - return cxl_decoder_alloc(port, 0); + cxled = kzalloc(sizeof(*cxled), GFP_KERNEL); + if (!cxled) + return ERR_PTR(-ENOMEM); + + cxled->pos = -1; + cxld = &cxled->cxld; + rc = cxl_decoder_init(port, cxld); + if (rc) { + kfree(cxled); + return ERR_PTR(rc); + } + + cxld->dev.type = &cxl_decoder_endpoint_type; + return cxled; } EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, CXL); /** * cxl_decoder_add_locked - Add a decoder with targets - * @cxld: The cxl decoder allocated by cxl_decoder_alloc() + * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() * @target_map: A list of downstream ports that this decoder can direct memory * traffic to. These numbers should correspond with the port number * in the PCIe Link Capabilities structure. @@ -1335,7 +1651,9 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) port = to_cxl_port(cxld->dev.parent); if (!is_endpoint_decoder(dev)) { - rc = decoder_populate_targets(cxld, port, target_map); + struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev); + + rc = decoder_populate_targets(cxlsd, port, target_map); if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) { dev_err(&port->dev, "Failed to populate active decoder targets\n"); @@ -1347,20 +1665,13 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map) if (rc) return rc; - /* - * Platform decoder resources should show up with a reasonable name. All - * other resources are just sub ranges within the main decoder resource. - */ - if (is_root_decoder(dev)) - cxld->platform_res.name = dev_name(dev); - return device_add(dev); } EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, CXL); /** * cxl_decoder_add - Add a decoder with targets - * @cxld: The cxl decoder allocated by cxl_decoder_alloc() + * @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc() * @target_map: A list of downstream ports that this decoder can direct memory * traffic to. These numbers should correspond with the port number * in the PCIe Link Capabilities structure. 
@@ -1394,6 +1705,13 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL); static void cxld_unregister(void *dev) { + struct cxl_endpoint_decoder *cxled; + + if (is_endpoint_decoder(dev)) { + cxled = to_cxl_endpoint_decoder(dev); + cxl_decoder_kill_region(cxled); + } + device_unregister(dev); } @@ -1521,10 +1839,20 @@ struct bus_type cxl_bus_type = { }; EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL); +static struct dentry *cxl_debugfs; + +struct dentry *cxl_debugfs_create_dir(const char *dir) +{ + return debugfs_create_dir(dir, cxl_debugfs); +} +EXPORT_SYMBOL_NS_GPL(cxl_debugfs_create_dir, CXL); + static __init int cxl_core_init(void) { int rc; + cxl_debugfs = debugfs_create_dir("cxl", NULL); + cxl_mbox_init(); rc = cxl_memdev_init(); @@ -1541,22 +1869,28 @@ static __init int cxl_core_init(void) if (rc) goto err_bus; + rc = cxl_region_init(); + if (rc) + goto err_region; + return 0; +err_region: + bus_unregister(&cxl_bus_type); err_bus: destroy_workqueue(cxl_bus_wq); err_wq: cxl_memdev_exit(); - cxl_mbox_exit(); return rc; } static void cxl_core_exit(void) { + cxl_region_exit(); bus_unregister(&cxl_bus_type); destroy_workqueue(cxl_bus_wq); cxl_memdev_exit(); - cxl_mbox_exit(); + debugfs_remove_recursive(cxl_debugfs); } module_init(cxl_core_init); diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c new file mode 100644 index 000000000000..401148016978 --- /dev/null +++ b/drivers/cxl/core/region.c @@ -0,0 +1,1896 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2022 Intel Corporation. All rights reserved. */ +#include <linux/memregion.h> +#include <linux/genalloc.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/uuid.h> +#include <linux/idr.h> +#include <cxlmem.h> +#include <cxl.h> +#include "core.h" + +/** + * DOC: cxl core region + * + * CXL Regions represent mapped memory capacity in system physical address + * space. Whereas the CXL Root Decoders identify the bounds of potential CXL + * Memory ranges, Regions represent the active mapped capacity by the HDM + * Decoder Capability structures throughout the Host Bridges, Switches, and + * Endpoints in the topology. + * + * Region configuration has ordering constraints. UUID may be set at any time + * but is only visible for persistent regions. + * 1. Interleave granularity + * 2. Interleave size + * 3. Decoder targets + */ + +/* + * All changes to the interleave configuration occur with this lock held + * for write. 
+ */ +static DECLARE_RWSEM(cxl_region_rwsem); + +static struct cxl_region *to_cxl_region(struct device *dev); + +static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + rc = sysfs_emit(buf, "%pUb\n", &p->uuid); + up_read(&cxl_region_rwsem); + + return rc; +} + +static int is_dup(struct device *match, void *data) +{ + struct cxl_region_params *p; + struct cxl_region *cxlr; + uuid_t *uuid = data; + + if (!is_cxl_region(match)) + return 0; + + lockdep_assert_held(&cxl_region_rwsem); + cxlr = to_cxl_region(match); + p = &cxlr->params; + + if (uuid_equal(&p->uuid, uuid)) { + dev_dbg(match, "already has uuid: %pUb\n", uuid); + return -EBUSY; + } + + return 0; +} + +static ssize_t uuid_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + uuid_t temp; + ssize_t rc; + + if (len != UUID_STRING_LEN + 1) + return -EINVAL; + + rc = uuid_parse(buf, &temp); + if (rc) + return rc; + + if (uuid_is_null(&temp)) + return -EINVAL; + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + return rc; + + if (uuid_equal(&p->uuid, &temp)) + goto out; + + rc = -EBUSY; + if (p->state >= CXL_CONFIG_ACTIVE) + goto out; + + rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup); + if (rc < 0) + goto out; + + uuid_copy(&p->uuid, &temp); +out: + up_write(&cxl_region_rwsem); + + if (rc) + return rc; + return len; +} +static DEVICE_ATTR_RW(uuid); + +static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port, + struct cxl_region *cxlr) +{ + return xa_load(&port->regions, (unsigned long)cxlr); +} + +static int cxl_region_decode_reset(struct cxl_region *cxlr, int count) +{ + struct cxl_region_params *p = &cxlr->params; + int i; + + for (i = count - 1; i >= 0; i--) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_port *iter = cxled_to_port(cxled); + struct cxl_ep *ep; + int rc; + + while (!is_cxl_root(to_cxl_port(iter->dev.parent))) + iter = to_cxl_port(iter->dev.parent); + + for (ep = cxl_ep_load(iter, cxlmd); iter; + iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { + struct cxl_region_ref *cxl_rr; + struct cxl_decoder *cxld; + + cxl_rr = cxl_rr_load(iter, cxlr); + cxld = cxl_rr->decoder; + rc = cxld->reset(cxld); + if (rc) + return rc; + } + + rc = cxled->cxld.reset(&cxled->cxld); + if (rc) + return rc; + } + + return 0; +} + +static int cxl_region_decode_commit(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + int i, rc = 0; + + for (i = 0; i < p->nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_region_ref *cxl_rr; + struct cxl_decoder *cxld; + struct cxl_port *iter; + struct cxl_ep *ep; + + /* commit bottom up */ + for (iter = cxled_to_port(cxled); !is_cxl_root(iter); + iter = to_cxl_port(iter->dev.parent)) { + cxl_rr = cxl_rr_load(iter, cxlr); + cxld = cxl_rr->decoder; + rc = cxld->commit(cxld); + if (rc) + break; + } + + if (rc) { + /* programming @iter failed, teardown */ + for (ep = cxl_ep_load(iter, cxlmd); ep && iter; + iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { + cxl_rr = cxl_rr_load(iter, cxlr); + cxld = cxl_rr->decoder; + cxld->reset(cxld); + } + + 
cxled->cxld.reset(&cxled->cxld); + goto err; + } + } + + return 0; + +err: + /* undo the targets that were successfully committed */ + cxl_region_decode_reset(cxlr, i); + return rc; +} + +static ssize_t commit_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + bool commit; + ssize_t rc; + + rc = kstrtobool(buf, &commit); + if (rc) + return rc; + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + return rc; + + /* Already in the requested state? */ + if (commit && p->state >= CXL_CONFIG_COMMIT) + goto out; + if (!commit && p->state < CXL_CONFIG_COMMIT) + goto out; + + /* Not ready to commit? */ + if (commit && p->state < CXL_CONFIG_ACTIVE) { + rc = -ENXIO; + goto out; + } + + if (commit) + rc = cxl_region_decode_commit(cxlr); + else { + p->state = CXL_CONFIG_RESET_PENDING; + up_write(&cxl_region_rwsem); + device_release_driver(&cxlr->dev); + down_write(&cxl_region_rwsem); + + /* + * The lock was dropped, so need to revalidate that the reset is + * still pending. + */ + if (p->state == CXL_CONFIG_RESET_PENDING) + rc = cxl_region_decode_reset(cxlr, p->interleave_ways); + } + + if (rc) + goto out; + + if (commit) + p->state = CXL_CONFIG_COMMIT; + else if (p->state == CXL_CONFIG_RESET_PENDING) + p->state = CXL_CONFIG_ACTIVE; + +out: + up_write(&cxl_region_rwsem); + + if (rc) + return rc; + return len; +} + +static ssize_t commit_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT); + up_read(&cxl_region_rwsem); + + return rc; +} +static DEVICE_ATTR_RW(commit); + +static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a, + int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct cxl_region *cxlr = to_cxl_region(dev); + + if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM) + return 0; + return a->mode; +} + +static ssize_t interleave_ways_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + rc = sysfs_emit(buf, "%d\n", p->interleave_ways); + up_read(&cxl_region_rwsem); + + return rc; +} + +static const struct attribute_group *get_cxl_region_target_group(void); + +static ssize_t interleave_ways_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); + struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + unsigned int val, save; + int rc; + u8 iw; + + rc = kstrtouint(buf, 0, &val); + if (rc) + return rc; + + rc = ways_to_cxl(val, &iw); + if (rc) + return rc; + + /* + * Even for x3, x9, and x12 interleaves the region interleave must be a + * power of 2 multiple of the host bridge interleave. 
+ */ + if (!is_power_of_2(val / cxld->interleave_ways) || + (val % cxld->interleave_ways)) { + dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val); + return -EINVAL; + } + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + return rc; + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { + rc = -EBUSY; + goto out; + } + + save = p->interleave_ways; + p->interleave_ways = val; + rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group()); + if (rc) + p->interleave_ways = save; +out: + up_write(&cxl_region_rwsem); + if (rc) + return rc; + return len; +} +static DEVICE_ATTR_RW(interleave_ways); + +static ssize_t interleave_granularity_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + rc = sysfs_emit(buf, "%d\n", p->interleave_granularity); + up_read(&cxl_region_rwsem); + + return rc; +} + +static ssize_t interleave_granularity_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); + struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + int rc, val; + u16 ig; + + rc = kstrtoint(buf, 0, &val); + if (rc) + return rc; + + rc = granularity_to_cxl(val, &ig); + if (rc) + return rc; + + /* + * When the host-bridge is interleaved, disallow region granularity != + * root granularity. Regions with a granularity less than the root + * interleave result in needing multiple endpoints to support a single + * slot in the interleave (possible to suport in the future). Regions + * with a granularity greater than the root interleave result in invalid + * DPA translations (invalid to support). + */ + if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity) + return -EINVAL; + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + return rc; + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) { + rc = -EBUSY; + goto out; + } + + p->interleave_granularity = val; +out: + up_write(&cxl_region_rwsem); + if (rc) + return rc; + return len; +} +static DEVICE_ATTR_RW(interleave_granularity); + +static ssize_t resource_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + u64 resource = -1ULL; + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + if (p->res) + resource = p->res->start; + rc = sysfs_emit(buf, "%#llx\n", resource); + up_read(&cxl_region_rwsem); + + return rc; +} +static DEVICE_ATTR_RO(resource); + +static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); + struct cxl_region_params *p = &cxlr->params; + struct resource *res; + u32 remainder = 0; + + lockdep_assert_held_write(&cxl_region_rwsem); + + /* Nothing to do... 
*/ + if (p->res && resource_size(p->res) == size) + return 0; + + /* To change size the old size must be freed first */ + if (p->res) + return -EBUSY; + + if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) + return -EBUSY; + + /* ways, granularity and uuid (if PMEM) need to be set before HPA */ + if (!p->interleave_ways || !p->interleave_granularity || + (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid))) + return -ENXIO; + + div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder); + if (remainder) + return -EINVAL; + + res = alloc_free_mem_region(cxlrd->res, size, SZ_256M, + dev_name(&cxlr->dev)); + if (IS_ERR(res)) { + dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n", + PTR_ERR(res)); + return PTR_ERR(res); + } + + p->res = res; + p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; + + return 0; +} + +static void cxl_region_iomem_release(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + + if (device_is_registered(&cxlr->dev)) + lockdep_assert_held_write(&cxl_region_rwsem); + if (p->res) { + remove_resource(p->res); + kfree(p->res); + p->res = NULL; + } +} + +static int free_hpa(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + + lockdep_assert_held_write(&cxl_region_rwsem); + + if (!p->res) + return 0; + + if (p->state >= CXL_CONFIG_ACTIVE) + return -EBUSY; + + cxl_region_iomem_release(cxlr); + p->state = CXL_CONFIG_IDLE; + return 0; +} + +static ssize_t size_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + u64 val; + int rc; + + rc = kstrtou64(buf, 0, &val); + if (rc) + return rc; + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + return rc; + + if (val) + rc = alloc_hpa(cxlr, val); + else + rc = free_hpa(cxlr); + up_write(&cxl_region_rwsem); + + if (rc) + return rc; + + return len; +} + +static ssize_t size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + u64 size = 0; + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + if (p->res) + size = resource_size(p->res); + rc = sysfs_emit(buf, "%#llx\n", size); + up_read(&cxl_region_rwsem); + + return rc; +} +static DEVICE_ATTR_RW(size); + +static struct attribute *cxl_region_attrs[] = { + &dev_attr_uuid.attr, + &dev_attr_commit.attr, + &dev_attr_interleave_ways.attr, + &dev_attr_interleave_granularity.attr, + &dev_attr_resource.attr, + &dev_attr_size.attr, + NULL, +}; + +static const struct attribute_group cxl_region_group = { + .attrs = cxl_region_attrs, + .is_visible = cxl_region_visible, +}; + +static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled; + int rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + + if (pos >= p->interleave_ways) { + dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, + p->interleave_ways); + rc = -ENXIO; + goto out; + } + + cxled = p->targets[pos]; + if (!cxled) + rc = sysfs_emit(buf, "\n"); + else + rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev)); +out: + up_read(&cxl_region_rwsem); + + return rc; +} + +static int match_free_decoder(struct device *dev, void *data) +{ + struct cxl_decoder *cxld; + int *id = data; + + if (!is_switch_decoder(dev)) + return 0; + + cxld = to_cxl_decoder(dev); + + /* enforce ordered allocation */ + if (cxld->id != *id) + return 
0; + + if (!cxld->region) + return 1; + + (*id)++; + + return 0; +} + +static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port, + struct cxl_region *cxlr) +{ + struct device *dev; + int id = 0; + + dev = device_find_child(&port->dev, &id, match_free_decoder); + if (!dev) + return NULL; + /* + * This decoder is pinned registered as long as the endpoint decoder is + * registered, and endpoint decoder unregistration holds the + * cxl_region_rwsem over unregister events, so no need to hold on to + * this extra reference. + */ + put_device(dev); + return to_cxl_decoder(dev); +} + +static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port, + struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_region_ref *cxl_rr, *iter; + unsigned long index; + int rc; + + xa_for_each(&port->regions, index, iter) { + struct cxl_region_params *ip = &iter->region->params; + + if (ip->res->start > p->res->start) { + dev_dbg(&cxlr->dev, + "%s: HPA order violation %s:%pr vs %pr\n", + dev_name(&port->dev), + dev_name(&iter->region->dev), ip->res, p->res); + return ERR_PTR(-EBUSY); + } + } + + cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL); + if (!cxl_rr) + return ERR_PTR(-ENOMEM); + cxl_rr->port = port; + cxl_rr->region = cxlr; + cxl_rr->nr_targets = 1; + xa_init(&cxl_rr->endpoints); + + rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL); + if (rc) { + dev_dbg(&cxlr->dev, + "%s: failed to track region reference: %d\n", + dev_name(&port->dev), rc); + kfree(cxl_rr); + return ERR_PTR(rc); + } + + return cxl_rr; +} + +static void free_region_ref(struct cxl_region_ref *cxl_rr) +{ + struct cxl_port *port = cxl_rr->port; + struct cxl_region *cxlr = cxl_rr->region; + struct cxl_decoder *cxld = cxl_rr->decoder; + + dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); + if (cxld->region == cxlr) { + cxld->region = NULL; + put_device(&cxlr->dev); + } + + xa_erase(&port->regions, (unsigned long)cxlr); + xa_destroy(&cxl_rr->endpoints); + kfree(cxl_rr); +} + +static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr, + struct cxl_endpoint_decoder *cxled) +{ + int rc; + struct cxl_port *port = cxl_rr->port; + struct cxl_region *cxlr = cxl_rr->region; + struct cxl_decoder *cxld = cxl_rr->decoder; + struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled)); + + if (ep) { + rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep, + GFP_KERNEL); + if (rc) + return rc; + } + cxl_rr->nr_eps++; + + if (!cxld->region) { + cxld->region = cxlr; + get_device(&cxlr->dev); + } + + return 0; +} + +/** + * cxl_port_attach_region() - track a region's interest in a port by endpoint + * @port: port to add a new region reference 'struct cxl_region_ref' + * @cxlr: region to attach to @port + * @cxled: endpoint decoder used to create or further pin a region reference + * @pos: interleave position of @cxled in @cxlr + * + * The attach event is an opportunity to validate CXL decode setup + * constraints and record metadata needed for programming HDM decoders, + * in particular decoder target lists. + * + * The steps are: + * + * - validate that there are no other regions with a higher HPA already + * associated with @port + * - establish a region reference if one is not already present + * + * - additionally allocate a decoder instance that will host @cxlr on + * @port + * + * - pin the region reference by the endpoint + * - account for how many entries in @port's target list are needed to + * cover all of the added endpoints. 
+ */ +static int cxl_port_attach_region(struct cxl_port *port, + struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, int pos) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_ep *ep = cxl_ep_load(port, cxlmd); + struct cxl_region_ref *cxl_rr; + bool nr_targets_inc = false; + struct cxl_decoder *cxld; + unsigned long index; + int rc = -EBUSY; + + lockdep_assert_held_write(&cxl_region_rwsem); + + cxl_rr = cxl_rr_load(port, cxlr); + if (cxl_rr) { + struct cxl_ep *ep_iter; + int found = 0; + + /* + * Walk the existing endpoints that have been attached to + * @cxlr at @port and see if they share the same 'next' port + * in the downstream direction. I.e. endpoints that share common + * upstream switch. + */ + xa_for_each(&cxl_rr->endpoints, index, ep_iter) { + if (ep_iter == ep) + continue; + if (ep_iter->next == ep->next) { + found++; + break; + } + } + + /* + * New target port, or @port is an endpoint port that always + * accounts its own local decode as a target. + */ + if (!found || !ep->next) { + cxl_rr->nr_targets++; + nr_targets_inc = true; + } + + /* + * The decoder for @cxlr was allocated when the region was first + * attached to @port. + */ + cxld = cxl_rr->decoder; + } else { + cxl_rr = alloc_region_ref(port, cxlr); + if (IS_ERR(cxl_rr)) { + dev_dbg(&cxlr->dev, + "%s: failed to allocate region reference\n", + dev_name(&port->dev)); + return PTR_ERR(cxl_rr); + } + nr_targets_inc = true; + + if (port == cxled_to_port(cxled)) + cxld = &cxled->cxld; + else + cxld = cxl_region_find_decoder(port, cxlr); + if (!cxld) { + dev_dbg(&cxlr->dev, "%s: no decoder available\n", + dev_name(&port->dev)); + goto out_erase; + } + + if (cxld->region) { + dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", + dev_name(&port->dev), dev_name(&cxld->dev), + dev_name(&cxld->region->dev)); + rc = -EBUSY; + goto out_erase; + } + + cxl_rr->decoder = cxld; + } + + rc = cxl_rr_ep_add(cxl_rr, cxled); + if (rc) { + dev_dbg(&cxlr->dev, + "%s: failed to track endpoint %s:%s reference\n", + dev_name(&port->dev), dev_name(&cxlmd->dev), + dev_name(&cxld->dev)); + goto out_erase; + } + + dev_dbg(&cxlr->dev, + "%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n", + dev_name(port->uport), dev_name(&port->dev), + dev_name(&cxld->dev), dev_name(&cxlmd->dev), + dev_name(&cxled->cxld.dev), pos, + ep ? ep->next ? 
dev_name(ep->next->uport) : + dev_name(&cxlmd->dev) : + "none", + cxl_rr->nr_eps, cxl_rr->nr_targets); + + return 0; +out_erase: + if (nr_targets_inc) + cxl_rr->nr_targets--; + if (cxl_rr->nr_eps == 0) + free_region_ref(cxl_rr); + return rc; +} + +static void cxl_port_detach_region(struct cxl_port *port, + struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + struct cxl_region_ref *cxl_rr; + struct cxl_ep *ep = NULL; + + lockdep_assert_held_write(&cxl_region_rwsem); + + cxl_rr = cxl_rr_load(port, cxlr); + if (!cxl_rr) + return; + + /* + * Endpoint ports do not carry cxl_ep references, and they + * never target more than one endpoint by definition + */ + if (cxl_rr->decoder == &cxled->cxld) + cxl_rr->nr_eps--; + else + ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled); + if (ep) { + struct cxl_ep *ep_iter; + unsigned long index; + int found = 0; + + cxl_rr->nr_eps--; + xa_for_each(&cxl_rr->endpoints, index, ep_iter) { + if (ep_iter->next == ep->next) { + found++; + break; + } + } + if (!found) + cxl_rr->nr_targets--; + } + + if (cxl_rr->nr_eps == 0) + free_region_ref(cxl_rr); +} + +static int check_last_peer(struct cxl_endpoint_decoder *cxled, + struct cxl_ep *ep, struct cxl_region_ref *cxl_rr, + int distance) +{ + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_region *cxlr = cxl_rr->region; + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled_peer; + struct cxl_port *port = cxl_rr->port; + struct cxl_memdev *cxlmd_peer; + struct cxl_ep *ep_peer; + int pos = cxled->pos; + + /* + * If this position wants to share a dport with the last endpoint mapped + * then that endpoint, at index 'position - distance', must also be + * mapped by this dport. + */ + if (pos < distance) { + dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n", + dev_name(port->uport), dev_name(&port->dev), + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); + return -ENXIO; + } + cxled_peer = p->targets[pos - distance]; + cxlmd_peer = cxled_to_memdev(cxled_peer); + ep_peer = cxl_ep_load(port, cxlmd_peer); + if (ep->dport != ep_peer->dport) { + dev_dbg(&cxlr->dev, + "%s:%s: %s:%s pos %d mismatched peer %s:%s\n", + dev_name(port->uport), dev_name(&port->dev), + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos, + dev_name(&cxlmd_peer->dev), + dev_name(&cxled_peer->cxld.dev)); + return -ENXIO; + } + + return 0; +} + +static int cxl_port_setup_targets(struct cxl_port *port, + struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); + int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos; + struct cxl_port *parent_port = to_cxl_port(port->dev.parent); + struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr); + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_ep *ep = cxl_ep_load(port, cxlmd); + struct cxl_region_params *p = &cxlr->params; + struct cxl_decoder *cxld = cxl_rr->decoder; + struct cxl_switch_decoder *cxlsd; + u16 eig, peig; + u8 eiw, peiw; + + /* + * While root level decoders support x3, x6, x12, switch level + * decoders only support powers of 2 up to x16. 
+ */ + if (!is_power_of_2(cxl_rr->nr_targets)) { + dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n", + dev_name(port->uport), dev_name(&port->dev), + cxl_rr->nr_targets); + return -EINVAL; + } + + cxlsd = to_cxl_switch_decoder(&cxld->dev); + if (cxl_rr->nr_targets_set) { + int i, distance; + + distance = p->nr_targets / cxl_rr->nr_targets; + for (i = 0; i < cxl_rr->nr_targets_set; i++) + if (ep->dport == cxlsd->target[i]) { + rc = check_last_peer(cxled, ep, cxl_rr, + distance); + if (rc) + return rc; + goto out_target_set; + } + goto add_target; + } + + if (is_cxl_root(parent_port)) { + parent_ig = cxlrd->cxlsd.cxld.interleave_granularity; + parent_iw = cxlrd->cxlsd.cxld.interleave_ways; + /* + * For purposes of address bit routing, use power-of-2 math for + * switch ports. + */ + if (!is_power_of_2(parent_iw)) + parent_iw /= 3; + } else { + struct cxl_region_ref *parent_rr; + struct cxl_decoder *parent_cxld; + + parent_rr = cxl_rr_load(parent_port, cxlr); + parent_cxld = parent_rr->decoder; + parent_ig = parent_cxld->interleave_granularity; + parent_iw = parent_cxld->interleave_ways; + } + + rc = granularity_to_cxl(parent_ig, &peig); + if (rc) { + dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n", + dev_name(parent_port->uport), + dev_name(&parent_port->dev), parent_ig); + return rc; + } + + rc = ways_to_cxl(parent_iw, &peiw); + if (rc) { + dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n", + dev_name(parent_port->uport), + dev_name(&parent_port->dev), parent_iw); + return rc; + } + + iw = cxl_rr->nr_targets; + rc = ways_to_cxl(iw, &eiw); + if (rc) { + dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n", + dev_name(port->uport), dev_name(&port->dev), iw); + return rc; + } + + /* + * If @parent_port is masking address bits, pick the next unused address + * bit to route @port's targets. + */ + if (parent_iw > 1 && cxl_rr->nr_targets > 1) { + u32 address_bit = max(peig + peiw, eiw + peig); + + eig = address_bit - eiw + 1; + } else { + eiw = peiw; + eig = peig; + } + + rc = cxl_to_granularity(eig, &ig); + if (rc) { + dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n", + dev_name(port->uport), dev_name(&port->dev), + 256 << eig); + return rc; + } + + cxld->interleave_ways = iw; + cxld->interleave_granularity = ig; + cxld->hpa_range = (struct range) { + .start = p->res->start, + .end = p->res->end, + }; + dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport), + dev_name(&port->dev), iw, ig); +add_target: + if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) { + dev_dbg(&cxlr->dev, + "%s:%s: targets full trying to add %s:%s at %d\n", + dev_name(port->uport), dev_name(&port->dev), + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); + return -ENXIO; + } + cxlsd->target[cxl_rr->nr_targets_set] = ep->dport; + inc = 1; +out_target_set: + cxl_rr->nr_targets_set += inc; + dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n", + dev_name(port->uport), dev_name(&port->dev), + cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport), + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos); + + return 0; +} + +static void cxl_port_reset_targets(struct cxl_port *port, + struct cxl_region *cxlr) +{ + struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr); + struct cxl_decoder *cxld; + + /* + * After the last endpoint has been detached the entire cxl_rr may now + * be gone. 
+ */ + if (!cxl_rr) + return; + cxl_rr->nr_targets_set = 0; + + cxld = cxl_rr->decoder; + cxld->hpa_range = (struct range) { + .start = 0, + .end = -1, + }; +} + +static void cxl_region_teardown_targets(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled; + struct cxl_memdev *cxlmd; + struct cxl_port *iter; + struct cxl_ep *ep; + int i; + + for (i = 0; i < p->nr_targets; i++) { + cxled = p->targets[i]; + cxlmd = cxled_to_memdev(cxled); + + iter = cxled_to_port(cxled); + while (!is_cxl_root(to_cxl_port(iter->dev.parent))) + iter = to_cxl_port(iter->dev.parent); + + for (ep = cxl_ep_load(iter, cxlmd); iter; + iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) + cxl_port_reset_targets(iter, cxlr); + } +} + +static int cxl_region_setup_targets(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_endpoint_decoder *cxled; + struct cxl_memdev *cxlmd; + struct cxl_port *iter; + struct cxl_ep *ep; + int i, rc; + + for (i = 0; i < p->nr_targets; i++) { + cxled = p->targets[i]; + cxlmd = cxled_to_memdev(cxled); + + iter = cxled_to_port(cxled); + while (!is_cxl_root(to_cxl_port(iter->dev.parent))) + iter = to_cxl_port(iter->dev.parent); + + /* + * Descend the topology tree programming targets while + * looking for conflicts. + */ + for (ep = cxl_ep_load(iter, cxlmd); iter; + iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) { + rc = cxl_port_setup_targets(iter, cxlr, cxled); + if (rc) { + cxl_region_teardown_targets(cxlr); + return rc; + } + } + } + + return 0; +} + +static int cxl_region_attach(struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, int pos) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent); + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_port *ep_port, *root_port, *iter; + struct cxl_region_params *p = &cxlr->params; + struct cxl_dport *dport; + int i, rc = -ENXIO; + + if (cxled->mode == CXL_DECODER_DEAD) { + dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev)); + return -ENODEV; + } + + /* all full of members, or interleave config not established? 
*/ + if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) { + dev_dbg(&cxlr->dev, "region already active\n"); + return -EBUSY; + } else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) { + dev_dbg(&cxlr->dev, "interleave config missing\n"); + return -ENXIO; + } + + if (pos < 0 || pos >= p->interleave_ways) { + dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, + p->interleave_ways); + return -ENXIO; + } + + if (p->targets[pos] == cxled) + return 0; + + if (p->targets[pos]) { + struct cxl_endpoint_decoder *cxled_target = p->targets[pos]; + struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target); + + dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n", + pos, dev_name(&cxlmd_target->dev), + dev_name(&cxled_target->cxld.dev)); + return -EBUSY; + } + + for (i = 0; i < p->interleave_ways; i++) { + struct cxl_endpoint_decoder *cxled_target; + struct cxl_memdev *cxlmd_target; + + cxled_target = p->targets[pos]; + if (!cxled_target) + continue; + + cxlmd_target = cxled_to_memdev(cxled_target); + if (cxlmd_target == cxlmd) { + dev_dbg(&cxlr->dev, + "%s already specified at position %d via: %s\n", + dev_name(&cxlmd->dev), pos, + dev_name(&cxled_target->cxld.dev)); + return -EBUSY; + } + } + + ep_port = cxled_to_port(cxled); + root_port = cxlrd_to_port(cxlrd); + dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge); + if (!dport) { + dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), + dev_name(cxlr->dev.parent)); + return -ENXIO; + } + + if (cxlrd->calc_hb(cxlrd, pos) != dport) { + dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), + dev_name(&cxlrd->cxlsd.cxld.dev)); + return -ENXIO; + } + + if (cxled->cxld.target_type != cxlr->type) { + dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), + cxled->cxld.target_type, cxlr->type); + return -ENXIO; + } + + if (!cxled->dpa_res) { + dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev)); + return -ENXIO; + } + + if (resource_size(cxled->dpa_res) * p->interleave_ways != + resource_size(p->res)) { + dev_dbg(&cxlr->dev, + "%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), + (u64)resource_size(cxled->dpa_res), p->interleave_ways, + (u64)resource_size(p->res)); + return -EINVAL; + } + + for (iter = ep_port; !is_cxl_root(iter); + iter = to_cxl_port(iter->dev.parent)) { + rc = cxl_port_attach_region(iter, cxlr, cxled, pos); + if (rc) + goto err; + } + + p->targets[pos] = cxled; + cxled->pos = pos; + p->nr_targets++; + + if (p->nr_targets == p->interleave_ways) { + rc = cxl_region_setup_targets(cxlr); + if (rc) + goto err_decrement; + p->state = CXL_CONFIG_ACTIVE; + } + + cxled->cxld.interleave_ways = p->interleave_ways; + cxled->cxld.interleave_granularity = p->interleave_granularity; + cxled->cxld.hpa_range = (struct range) { + .start = p->res->start, + .end = p->res->end, + }; + + return 0; + +err_decrement: + p->nr_targets--; +err: + for (iter = ep_port; !is_cxl_root(iter); + iter = to_cxl_port(iter->dev.parent)) + cxl_port_detach_region(iter, cxlr, cxled); + return rc; +} + +static int cxl_region_detach(struct cxl_endpoint_decoder *cxled) +{ + struct cxl_port *iter, *ep_port = cxled_to_port(cxled); + struct cxl_region *cxlr = cxled->cxld.region; + struct cxl_region_params *p; + int rc = 0; + + lockdep_assert_held_write(&cxl_region_rwsem); + + if 
(!cxlr) + return 0; + + p = &cxlr->params; + get_device(&cxlr->dev); + + if (p->state > CXL_CONFIG_ACTIVE) { + /* + * TODO: tear down all impacted regions if a device is + * removed out of order + */ + rc = cxl_region_decode_reset(cxlr, p->interleave_ways); + if (rc) + goto out; + p->state = CXL_CONFIG_ACTIVE; + } + + for (iter = ep_port; !is_cxl_root(iter); + iter = to_cxl_port(iter->dev.parent)) + cxl_port_detach_region(iter, cxlr, cxled); + + if (cxled->pos < 0 || cxled->pos >= p->interleave_ways || + p->targets[cxled->pos] != cxled) { + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + + dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n", + dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), + cxled->pos); + goto out; + } + + if (p->state == CXL_CONFIG_ACTIVE) { + p->state = CXL_CONFIG_INTERLEAVE_ACTIVE; + cxl_region_teardown_targets(cxlr); + } + p->targets[cxled->pos] = NULL; + p->nr_targets--; + cxled->cxld.hpa_range = (struct range) { + .start = 0, + .end = -1, + }; + + /* notify the region driver that one of its targets has departed */ + up_write(&cxl_region_rwsem); + device_release_driver(&cxlr->dev); + down_write(&cxl_region_rwsem); +out: + put_device(&cxlr->dev); + return rc; +} + +void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) +{ + down_write(&cxl_region_rwsem); + cxled->mode = CXL_DECODER_DEAD; + cxl_region_detach(cxled); + up_write(&cxl_region_rwsem); +} + +static int attach_target(struct cxl_region *cxlr, const char *decoder, int pos) +{ + struct device *dev; + int rc; + + dev = bus_find_device_by_name(&cxl_bus_type, NULL, decoder); + if (!dev) + return -ENODEV; + + if (!is_endpoint_decoder(dev)) { + put_device(dev); + return -EINVAL; + } + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + goto out; + down_read(&cxl_dpa_rwsem); + rc = cxl_region_attach(cxlr, to_cxl_endpoint_decoder(dev), pos); + up_read(&cxl_dpa_rwsem); + up_write(&cxl_region_rwsem); +out: + put_device(dev); + return rc; +} + +static int detach_target(struct cxl_region *cxlr, int pos) +{ + struct cxl_region_params *p = &cxlr->params; + int rc; + + rc = down_write_killable(&cxl_region_rwsem); + if (rc) + return rc; + + if (pos >= p->interleave_ways) { + dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos, + p->interleave_ways); + rc = -ENXIO; + goto out; + } + + if (!p->targets[pos]) { + rc = 0; + goto out; + } + + rc = cxl_region_detach(p->targets[pos]); +out: + up_write(&cxl_region_rwsem); + return rc; +} + +static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos, + size_t len) +{ + int rc; + + if (sysfs_streq(buf, "\n")) + rc = detach_target(cxlr, pos); + else + rc = attach_target(cxlr, buf, pos); + + if (rc < 0) + return rc; + return len; +} + +#define TARGET_ATTR_RW(n) \ +static ssize_t target##n##_show( \ + struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + return show_targetN(to_cxl_region(dev), buf, (n)); \ +} \ +static ssize_t target##n##_store(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_targetN(to_cxl_region(dev), buf, (n), len); \ +} \ +static DEVICE_ATTR_RW(target##n) + +TARGET_ATTR_RW(0); +TARGET_ATTR_RW(1); +TARGET_ATTR_RW(2); +TARGET_ATTR_RW(3); +TARGET_ATTR_RW(4); +TARGET_ATTR_RW(5); +TARGET_ATTR_RW(6); +TARGET_ATTR_RW(7); +TARGET_ATTR_RW(8); +TARGET_ATTR_RW(9); +TARGET_ATTR_RW(10); +TARGET_ATTR_RW(11); +TARGET_ATTR_RW(12); +TARGET_ATTR_RW(13); +TARGET_ATTR_RW(14); +TARGET_ATTR_RW(15); + +static struct attribute *target_attrs[] = { + 
&dev_attr_target0.attr, + &dev_attr_target1.attr, + &dev_attr_target2.attr, + &dev_attr_target3.attr, + &dev_attr_target4.attr, + &dev_attr_target5.attr, + &dev_attr_target6.attr, + &dev_attr_target7.attr, + &dev_attr_target8.attr, + &dev_attr_target9.attr, + &dev_attr_target10.attr, + &dev_attr_target11.attr, + &dev_attr_target12.attr, + &dev_attr_target13.attr, + &dev_attr_target14.attr, + &dev_attr_target15.attr, + NULL, +}; + +static umode_t cxl_region_target_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + + if (n < p->interleave_ways) + return a->mode; + return 0; +} + +static const struct attribute_group cxl_region_target_group = { + .attrs = target_attrs, + .is_visible = cxl_region_target_visible, +}; + +static const struct attribute_group *get_cxl_region_target_group(void) +{ + return &cxl_region_target_group; +} + +static const struct attribute_group *region_groups[] = { + &cxl_base_attribute_group, + &cxl_region_group, + &cxl_region_target_group, + NULL, +}; + +static void cxl_region_release(struct device *dev) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + + memregion_free(cxlr->id); + kfree(cxlr); +} + +const struct device_type cxl_region_type = { + .name = "cxl_region", + .release = cxl_region_release, + .groups = region_groups +}; + +bool is_cxl_region(struct device *dev) +{ + return dev->type == &cxl_region_type; +} +EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL); + +static struct cxl_region *to_cxl_region(struct device *dev) +{ + if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type, + "not a cxl_region device\n")) + return NULL; + + return container_of(dev, struct cxl_region, dev); +} + +static void unregister_region(void *dev) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + + device_del(dev); + cxl_region_iomem_release(cxlr); + put_device(dev); +} + +static struct lock_class_key cxl_region_key; + +static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id) +{ + struct cxl_region *cxlr; + struct device *dev; + + cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL); + if (!cxlr) { + memregion_free(id); + return ERR_PTR(-ENOMEM); + } + + dev = &cxlr->dev; + device_initialize(dev); + lockdep_set_class(&dev->mutex, &cxl_region_key); + dev->parent = &cxlrd->cxlsd.cxld.dev; + device_set_pm_not_required(dev); + dev->bus = &cxl_bus_type; + dev->type = &cxl_region_type; + cxlr->id = id; + + return cxlr; +} + +/** + * devm_cxl_add_region - Adds a region to a decoder + * @cxlrd: root decoder + * @id: memregion id to create, or memregion_free() on failure + * @mode: mode for the endpoint decoders of this region + * @type: select whether this is an expander or accelerator (type-2 or type-3) + * + * This is the second step of region initialization. Regions exist within an + * address space which is mapped by a @cxlrd. + * + * Return: 0 if the region was added to the @cxlrd, else returns negative error + * code. The region will be named "regionZ" where Z is the unique region number. 
+ */ +static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd, + int id, + enum cxl_decoder_mode mode, + enum cxl_decoder_type type) +{ + struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent); + struct cxl_region *cxlr; + struct device *dev; + int rc; + + cxlr = cxl_region_alloc(cxlrd, id); + if (IS_ERR(cxlr)) + return cxlr; + cxlr->mode = mode; + cxlr->type = type; + + dev = &cxlr->dev; + rc = dev_set_name(dev, "region%d", id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + rc = devm_add_action_or_reset(port->uport, unregister_region, cxlr); + if (rc) + return ERR_PTR(rc); + + dev_dbg(port->uport, "%s: created %s\n", + dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev)); + return cxlr; + +err: + put_device(dev); + return ERR_PTR(rc); +} + +static ssize_t create_pmem_region_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); + + return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id)); +} + +static ssize_t create_pmem_region_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); + struct cxl_region *cxlr; + int id, rc; + + rc = sscanf(buf, "region%d\n", &id); + if (rc != 1) + return -EINVAL; + + rc = memregion_alloc(GFP_KERNEL); + if (rc < 0) + return rc; + + if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) { + memregion_free(rc); + return -EBUSY; + } + + cxlr = devm_cxl_add_region(cxlrd, id, CXL_DECODER_PMEM, + CXL_DECODER_EXPANDER); + if (IS_ERR(cxlr)) + return PTR_ERR(cxlr); + + return len; +} +DEVICE_ATTR_RW(create_pmem_region); + +static ssize_t region_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct cxl_decoder *cxld = to_cxl_decoder(dev); + ssize_t rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) + return rc; + + if (cxld->region) + rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev)); + else + rc = sysfs_emit(buf, "\n"); + up_read(&cxl_region_rwsem); + + return rc; +} +DEVICE_ATTR_RO(region); + +static struct cxl_region * +cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name) +{ + struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld; + struct device *region_dev; + + region_dev = device_find_child_by_name(&cxld->dev, name); + if (!region_dev) + return ERR_PTR(-ENODEV); + + return to_cxl_region(region_dev); +} + +static ssize_t delete_region_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev); + struct cxl_port *port = to_cxl_port(dev->parent); + struct cxl_region *cxlr; + + cxlr = cxl_find_region_by_name(cxlrd, buf); + if (IS_ERR(cxlr)) + return PTR_ERR(cxlr); + + devm_release_action(port->uport, unregister_region, cxlr); + put_device(&cxlr->dev); + + return len; +} +DEVICE_ATTR_WO(delete_region); + +static void cxl_pmem_region_release(struct device *dev) +{ + struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev); + int i; + + for (i = 0; i < cxlr_pmem->nr_mappings; i++) { + struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd; + + put_device(&cxlmd->dev); + } + + kfree(cxlr_pmem); +} + +static const struct attribute_group *cxl_pmem_region_attribute_groups[] = { + &cxl_base_attribute_group, + NULL, +}; + +const struct device_type cxl_pmem_region_type = { + .name = "cxl_pmem_region", + .release = cxl_pmem_region_release, + .groups = 
cxl_pmem_region_attribute_groups, +}; + +bool is_cxl_pmem_region(struct device *dev) +{ + return dev->type == &cxl_pmem_region_type; +} +EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL); + +struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev) +{ + if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev), + "not a cxl_pmem_region device\n")) + return NULL; + return container_of(dev, struct cxl_pmem_region, dev); +} +EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL); + +static struct lock_class_key cxl_pmem_region_key; + +static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr) +{ + struct cxl_region_params *p = &cxlr->params; + struct cxl_pmem_region *cxlr_pmem; + struct device *dev; + int i; + + down_read(&cxl_region_rwsem); + if (p->state != CXL_CONFIG_COMMIT) { + cxlr_pmem = ERR_PTR(-ENXIO); + goto out; + } + + cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets), + GFP_KERNEL); + if (!cxlr_pmem) { + cxlr_pmem = ERR_PTR(-ENOMEM); + goto out; + } + + cxlr_pmem->hpa_range.start = p->res->start; + cxlr_pmem->hpa_range.end = p->res->end; + + /* Snapshot the region configuration underneath the cxl_region_rwsem */ + cxlr_pmem->nr_mappings = p->nr_targets; + for (i = 0; i < p->nr_targets; i++) { + struct cxl_endpoint_decoder *cxled = p->targets[i]; + struct cxl_memdev *cxlmd = cxled_to_memdev(cxled); + struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; + + m->cxlmd = cxlmd; + get_device(&cxlmd->dev); + m->start = cxled->dpa_res->start; + m->size = resource_size(cxled->dpa_res); + m->position = i; + } + + dev = &cxlr_pmem->dev; + cxlr_pmem->cxlr = cxlr; + device_initialize(dev); + lockdep_set_class(&dev->mutex, &cxl_pmem_region_key); + device_set_pm_not_required(dev); + dev->parent = &cxlr->dev; + dev->bus = &cxl_bus_type; + dev->type = &cxl_pmem_region_type; +out: + up_read(&cxl_region_rwsem); + + return cxlr_pmem; +} + +static void cxlr_pmem_unregister(void *dev) +{ + device_unregister(dev); +} + +/** + * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge + * @cxlr: parent CXL region for this pmem region bridge device + * + * Return: 0 on success negative error code on failure. + */ +static int devm_cxl_add_pmem_region(struct cxl_region *cxlr) +{ + struct cxl_pmem_region *cxlr_pmem; + struct device *dev; + int rc; + + cxlr_pmem = cxl_pmem_region_alloc(cxlr); + if (IS_ERR(cxlr_pmem)) + return PTR_ERR(cxlr_pmem); + + dev = &cxlr_pmem->dev; + rc = dev_set_name(dev, "pmem_region%d", cxlr->id); + if (rc) + goto err; + + rc = device_add(dev); + if (rc) + goto err; + + dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent), + dev_name(dev)); + + return devm_add_action_or_reset(&cxlr->dev, cxlr_pmem_unregister, dev); + +err: + put_device(dev); + return rc; +} + +static int cxl_region_probe(struct device *dev) +{ + struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + int rc; + + rc = down_read_interruptible(&cxl_region_rwsem); + if (rc) { + dev_dbg(&cxlr->dev, "probe interrupted\n"); + return rc; + } + + if (p->state < CXL_CONFIG_COMMIT) { + dev_dbg(&cxlr->dev, "config state: %d\n", p->state); + rc = -ENXIO; + } + + /* + * From this point on any path that changes the region's state away from + * CXL_CONFIG_COMMIT is also responsible for releasing the driver. 
+ */
+	up_read(&cxl_region_rwsem);
+
+	switch (cxlr->mode) {
+	case CXL_DECODER_PMEM:
+		return devm_cxl_add_pmem_region(cxlr);
+	default:
+		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
+			cxlr->mode);
+		return -ENXIO;
+	}
+}
+
+static struct cxl_driver cxl_region_driver = {
+	.name = "cxl_region",
+	.probe = cxl_region_probe,
+	.id = CXL_DEVICE_REGION,
+};
+
+int cxl_region_init(void)
+{
+	return cxl_driver_register(&cxl_region_driver);
+}
+
+void cxl_region_exit(void)
+{
+	cxl_driver_unregister(&cxl_region_driver);
+}
+
+MODULE_IMPORT_NS(CXL);
+MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
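
The region lifecycle added above is driven entirely from sysfs: writing a name to create_pmem_region on a root decoder instantiates the region device (devm_cxl_add_region()), the region's target0..target15 attributes attach or detach endpoint decoders by name (attach_target()/detach_target()), and delete_region on the root decoder tears the region back down. The userspace sketch below is illustrative only and not part of the commit: the decoder names decoder0.0 and decoder2.0 are hypothetical, and the sizing attributes from cxl_region_group (defined earlier in region.c, outside this hunk) are only indicated by a comment, even though the targetN attributes stay hidden until interleave_ways is set (see cxl_region_target_visible()).

/*
 * Illustrative userspace sketch of the sysfs flow implemented above.
 * Assumes a root decoder "decoder0.0" and an endpoint decoder
 * "decoder2.0"; both names are hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define ROOT "/sys/bus/cxl/devices/decoder0.0"

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	char region[32] = "", path[128];
	int fd;

	/* create_pmem_region_show() reports the next free region name... */
	fd = open(ROOT "/create_pmem_region", O_RDONLY);
	if (fd < 0 || read(fd, region, sizeof(region) - 1) <= 0)
		return 1;
	close(fd);
	region[strcspn(region, "\n")] = '\0';

	/* ...and writing it back instantiates /sys/bus/cxl/devices/regionN */
	if (sysfs_write(ROOT "/create_pmem_region", region))
		return 1;

	/*
	 * Configure interleave_ways, interleave_granularity, uuid and size
	 * here (cxl_region_group attributes, not shown in this hunk); the
	 * targetN attributes are hidden until interleave_ways is set, so
	 * the attach below fails without this step.
	 */

	/* attach an endpoint decoder at position 0 (attach_target()) */
	snprintf(path, sizeof(path), "/sys/bus/cxl/devices/%s/target0", region);
	sysfs_write(path, "decoder2.0");

	/* an empty write detaches the position again (detach_target()) */
	sysfs_write(path, "\n");

	/* delete the region via its root decoder (delete_region_store()) */
	sysfs_write(ROOT "/delete_region", region);
	return 0;
}

Note the detach convention: store_targetN() treats a bare newline as "detach" via sysfs_streq(buf, "\n"), while cxl_decoder_kill_region() covers the involuntary case where an endpoint decoder departs underneath an assembled region.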