 arch/arm64/kvm/hyp/nvhe/ffa.c     |  10
 drivers/firmware/arm_ffa/bus.c    |  16
 drivers/firmware/arm_ffa/driver.c | 770
 include/linux/arm_ffa.h           |  79
 4 files changed, 840 insertions(+), 35 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 6e4dba9eadef..320f2eaa14a9 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -423,6 +423,7 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 	DECLARE_REG(u32, fraglen, ctxt, 2);
 	DECLARE_REG(u64, addr_mbz, ctxt, 3);
 	DECLARE_REG(u32, npages_mbz, ctxt, 4);
+	struct ffa_mem_region_attributes *ep_mem_access;
 	struct ffa_composite_mem_region *reg;
 	struct ffa_mem_region *buf;
 	u32 offset, nr_ranges;
@@ -452,7 +453,9 @@ static __always_inline void do_ffa_mem_xfer(const u64 func_id,
 	buf = hyp_buffers.tx;
 	memcpy(buf, host_buffers.tx, fraglen);
 
-	offset = buf->ep_mem_access[0].composite_off;
+	ep_mem_access = (void *)buf +
+			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
+	offset = ep_mem_access->composite_off;
 	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
 		ret = FFA_RET_INVALID_PARAMETERS;
 		goto out_unlock;
@@ -504,6 +507,7 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 	DECLARE_REG(u32, handle_lo, ctxt, 1);
 	DECLARE_REG(u32, handle_hi, ctxt, 2);
 	DECLARE_REG(u32, flags, ctxt, 3);
+	struct ffa_mem_region_attributes *ep_mem_access;
 	struct ffa_composite_mem_region *reg;
 	u32 offset, len, fraglen, fragoff;
 	struct ffa_mem_region *buf;
@@ -528,7 +532,9 @@ static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
 	len = res->a1;
 	fraglen = res->a2;
 
-	offset = buf->ep_mem_access[0].composite_off;
+	ep_mem_access = (void *)buf +
+			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
+	offset = ep_mem_access->composite_off;
 	/*
 	 * We can trust the SPMD to get this right, but let's at least
 	 * check that we end up with something that doesn't look _completely_
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 2b8bfcd010f5..1c7940ba5539 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -15,6 +15,8 @@
 
 #include "common.h"
 
+#define SCMI_UEVENT_MODALIAS_FMT	"arm_ffa:%04x:%pUb"
+
 static DEFINE_IDA(ffa_bus_id);
 
 static int ffa_device_match(struct device *dev, struct device_driver *drv)
@@ -63,10 +65,20 @@ static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *e
 {
 	const struct ffa_device *ffa_dev = to_ffa_dev(dev);
 
-	return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
+	return add_uevent_var(env, "MODALIAS=" SCMI_UEVENT_MODALIAS_FMT,
 			      ffa_dev->vm_id, &ffa_dev->uuid);
 }
 
+static ssize_t modalias_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct ffa_device *ffa_dev = to_ffa_dev(dev);
+
+	return sysfs_emit(buf, SCMI_UEVENT_MODALIAS_FMT, ffa_dev->vm_id,
+			  &ffa_dev->uuid);
+}
+static DEVICE_ATTR_RO(modalias);
+
 static ssize_t partition_id_show(struct device *dev,
 				 struct device_attribute *attr, char *buf)
 {
@@ -88,6 +100,7 @@ static DEVICE_ATTR_RO(uuid);
 static struct attribute *ffa_device_attributes_attrs[] = {
 	&dev_attr_partition_id.attr,
 	&dev_attr_uuid.attr,
+	&dev_attr_modalias.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(ffa_device_attributes);
@@ -193,6 +206,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
 	dev->release = ffa_release_device;
 	dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
 
+	ffa_dev->id = id;
 	ffa_dev->vm_id = vm_id;
 	ffa_dev->ops = ops;
 	uuid_copy(&ffa_dev->uuid, uuid);
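Both the uevent and the new sysfs attribute emit the same string, built from the 16-bit partition ID (%04x) and the UUID (%pUb). For illustration only — the partition ID and UUID below are made up:

/*
 * Hypothetical device, shown for the string format only:
 *
 *	$ cat /sys/bus/arm_ffa/devices/arm-ffa-1/modalias
 *	arm_ffa:8001:5b372861-6870-43d4-9d89-ab49b57d6e25
 */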
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 121f4fc903cd..07b72c679247 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -22,20 +22,28 @@
 #define DRIVER_NAME "ARM FF-A"
 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
 
+#include <linux/acpi.h>
 #include <linux/arm_ffa.h>
 #include <linux/bitfield.h>
+#include <linux/cpuhotplug.h>
 #include <linux/device.h>
+#include <linux/hashtable.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
 #include <linux/uuid.h>
+#include <linux/xarray.h>
 
 #include "common.h"
 
-#define FFA_DRIVER_VERSION	FFA_VERSION_1_0
+#define FFA_DRIVER_VERSION	FFA_VERSION_1_1
 #define FFA_MIN_VERSION		FFA_VERSION_1_0
 
 #define SENDER_ID_MASK		GENMASK(31, 16)
@@ -51,6 +59,8 @@
  */
 #define RXTX_BUFFER_SIZE	SZ_4K
 
+#define FFA_MAX_NOTIFICATIONS		64
+
 static ffa_fn *invoke_ffa_fn;
 
 static const int ffa_linux_errmap[] = {
@@ -64,6 +74,7 @@ static const int ffa_linux_errmap[] = {
 	-EACCES,	/* FFA_RET_DENIED */
 	-EAGAIN,	/* FFA_RET_RETRY */
 	-ECANCELED,	/* FFA_RET_ABORTED */
+	-ENODATA,	/* FFA_RET_NO_DATA */
 };
 
 static inline int ffa_to_linux_errno(int errno)
@@ -75,6 +86,10 @@ static inline int ffa_to_linux_errno(int errno)
 	return -EINVAL;
 }
 
+struct ffa_pcpu_irq {
+	struct ffa_drv_info *info;
+};
+
 struct ffa_drv_info {
 	u32 version;
 	u16 vm_id;
@@ -83,6 +98,17 @@ struct ffa_drv_info {
 	void *rx_buffer;
 	void *tx_buffer;
 	bool mem_ops_native;
+	bool bitmap_created;
+	unsigned int sched_recv_irq;
+	unsigned int cpuhp_state;
+	struct ffa_pcpu_irq __percpu *irq_pcpu;
+	struct workqueue_struct *notif_pcpu_wq;
+	struct work_struct notif_pcpu_work;
+	struct work_struct irq_work;
+	struct xarray partition_info;
+	unsigned int partition_count;
+	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
+	struct mutex notify_lock; /* lock to protect notifier hashtable */
 };
 
 static struct ffa_drv_info *drv_info;
@@ -397,7 +423,7 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
 	return num_pages;
 }
 
-static u8 ffa_memory_attributes_get(u32 func_id)
+static u16 ffa_memory_attributes_get(u32 func_id)
 {
 	/*
 	 * For the memory lend or donate operation, if the receiver is a PE or
@@ -416,38 +442,47 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
 {
 	int rc = 0;
 	bool first = true;
+	u32 composite_offset;
 	phys_addr_t addr = 0;
+	struct ffa_mem_region *mem_region = buffer;
 	struct ffa_composite_mem_region *composite;
 	struct ffa_mem_region_addr_range *constituents;
 	struct ffa_mem_region_attributes *ep_mem_access;
-	struct ffa_mem_region *mem_region = buffer;
 	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);
 
 	mem_region->tag = args->tag;
 	mem_region->flags = args->flags;
 	mem_region->sender_id = drv_info->vm_id;
 	mem_region->attributes = ffa_memory_attributes_get(func_id);
-	ep_mem_access = &mem_region->ep_mem_access[0];
+	ep_mem_access = buffer +
+			ffa_mem_desc_offset(buffer, 0, drv_info->version);
+	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
+					       drv_info->version);
 
 	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
 		ep_mem_access->receiver = args->attrs[idx].receiver;
 		ep_mem_access->attrs = args->attrs[idx].attrs;
-		ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+		ep_mem_access->composite_off = composite_offset;
 		ep_mem_access->flag = 0;
 		ep_mem_access->reserved = 0;
 	}
 	mem_region->handle = 0;
-	mem_region->reserved_0 = 0;
-	mem_region->reserved_1 = 0;
 	mem_region->ep_count = args->nattrs;
+	if (drv_info->version <= FFA_VERSION_1_0) {
+		mem_region->ep_mem_size = 0;
+	} else {
+		mem_region->ep_mem_size = sizeof(*ep_mem_access);
+		mem_region->ep_mem_offset = sizeof(*mem_region);
+		memset(mem_region->reserved, 0, 12);
+	}
 
-	composite = buffer + COMPOSITE_OFFSET(args->nattrs);
+	composite = buffer + composite_offset;
 	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
 	composite->addr_range_cnt = num_entries;
 	composite->reserved = 0;
 
-	length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
-	frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
+	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
+	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
 	if (frag_len > max_fragsize)
 		return -ENXIO;
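To make the version dependence concrete, here is how the offsets work out for a transaction with a single receiver; the sizes follow from the structure changes in include/linux/arm_ffa.h at the end of this patch (16-byte endpoint descriptors, 48-byte v1.1 header, v1.0 array starting at byte 32):

/*
 * Worked example, one receiver (args->nattrs == 1):
 *
 *	v1.0: ep_mem_access[0] at offset 32, composite_offset = 48,
 *	      first fragment = 48 + CONSTITUENTS_OFFSET(0) = 64 bytes
 *	v1.1: ep_mem_access[0] at offset 48, composite_offset = 64,
 *	      first fragment = 64 + CONSTITUENTS_OFFSET(0) = 80 bytes
 */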
@@ -554,6 +589,236 @@ static int ffa_features(u32 func_feat_id, u32 input_props,
 	return 0;
 }
 
+static int ffa_notification_bitmap_create(void)
+{
+	ffa_value_t ret;
+	u16 vcpu_count = nr_cpu_ids;
+
+	invoke_ffa_fn((ffa_value_t){
+		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
+		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
+		      }, &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	return 0;
+}
+
+static int ffa_notification_bitmap_destroy(void)
+{
+	ffa_value_t ret;
+
+	invoke_ffa_fn((ffa_value_t){
+		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
+		      .a1 = drv_info->vm_id,
+		      }, &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	return 0;
+}
+
+#define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
+#define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
+#define NOTIFICATION_BITMAP_HIGH(x)	\
+	((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
+#define NOTIFICATION_BITMAP_LOW(x)	\
+	((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
+#define PACK_NOTIFICATION_BITMAP(low, high)	\
+	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) |	\
+	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))
+
+#define RECEIVER_VCPU_MASK		GENMASK(31, 16)
+#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r)	\
+	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) |	\
+	 FIELD_PREP(RECEIVER_ID_MASK, (r)))
+
+#define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
+#define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
+#define ID_LIST_MASK_64				GENMASK(51, 12)
+#define ID_LIST_MASK_32				GENMASK(31, 12)
+#define MAX_IDS_64				20
+#define MAX_IDS_32				10
+
+#define PER_VCPU_NOTIFICATION_FLAG	BIT(0)
+#define SECURE_PARTITION_BITMAP		BIT(0)
+#define NON_SECURE_VM_BITMAP		BIT(1)
+#define SPM_FRAMEWORK_BITMAP		BIT(2)
+#define NS_HYP_FRAMEWORK_BITMAP		BIT(3)
+
+static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
+					u32 flags, bool is_bind)
+{
+	ffa_value_t ret;
+	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);
+
+	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;
+
+	invoke_ffa_fn((ffa_value_t){
+		  .a0 = func, .a1 = src_dst_ids, .a2 = flags,
+		  .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
+		  .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
+		  }, &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+	else if (ret.a0 != FFA_SUCCESS)
+		return -EINVAL;
+
+	return 0;
+}
+
+static
+int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
+{
+	ffa_value_t ret;
+	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);
+
+	invoke_ffa_fn((ffa_value_t) {
+		  .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
+		  .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
+		  .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
+		  }, &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+	else if (ret.a0 != FFA_SUCCESS)
+		return -EINVAL;
+
+	return 0;
+}
+
+struct ffa_notify_bitmaps {
+	u64 sp_map;
+	u64 vm_map;
+	u64 arch_map;
+};
+
+static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
+{
+	ffa_value_t ret;
+	u16 src_id = drv_info->vm_id;
+	u16 cpu_id = smp_processor_id();
+	u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);
+
+	invoke_ffa_fn((ffa_value_t){
+		  .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
+		  }, &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+	else if (ret.a0 != FFA_SUCCESS)
+		return -EINVAL; /* Something else went wrong. */
+
+	notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
+	notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
+	notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);
+
+	return 0;
+}
+
+struct ffa_dev_part_info {
+	ffa_sched_recv_cb callback;
+	void *cb_data;
+	rwlock_t rw_lock;
+};
+
+static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
+{
+	struct ffa_dev_part_info *partition;
+	ffa_sched_recv_cb callback;
+	void *cb_data;
+
+	partition = xa_load(&drv_info->partition_info, part_id);
+	read_lock(&partition->rw_lock);
+	callback = partition->callback;
+	cb_data = partition->cb_data;
+	read_unlock(&partition->rw_lock);
+
+	if (callback)
+		callback(vcpu, is_per_vcpu, cb_data);
+}
+
+static void ffa_notification_info_get(void)
+{
+	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
+	bool is_64b_resp;
+	ffa_value_t ret;
+	u64 id_list;
+
+	do {
+		invoke_ffa_fn((ffa_value_t){
+			  .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
+			  }, &ret);
+
+		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
+			if (ret.a2 != FFA_RET_NO_DATA)
+				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
+				       ret.a0, ret.a2);
+			return;
+		}
+
+		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);
+
+		ids_processed = 0;
+		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
+		if (is_64b_resp) {
+			max_ids = MAX_IDS_64;
+			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
+		} else {
+			max_ids = MAX_IDS_32;
+			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
+		}
+
+		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
+			ids_count[idx] = (id_list & 0x3) + 1;
+
+		/* Process IDs */
+		for (list = 0; list < lists_cnt; list++) {
+			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;
+
+			if (ids_processed >= max_ids - 1)
+				break;
+
+			part_id = packed_id_list[++ids_processed];
+
+			if (!ids_count[list]) { /* Global Notification */
+				__do_sched_recv_cb(part_id, 0, false);
+				continue;
+			}
+
+			/* Per vCPU Notification */
+			for (idx = 0; idx < ids_count[list]; idx++) {
+				if (ids_processed >= max_ids - 1)
+					break;
+
+				vcpu_id = packed_id_list[++ids_processed];
+
+				__do_sched_recv_cb(part_id, vcpu_id, true);
+			}
+		}
+	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
+}
+
+static int ffa_run(struct ffa_device *dev, u16 vcpu)
+{
+	ffa_value_t ret;
+	u32 target = dev->vm_id << 16 | vcpu;
+
+	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);
+
+	while (ret.a0 == FFA_INTERRUPT)
+		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
+			      &ret);
+
+	if (ret.a0 == FFA_ERROR)
+		return ffa_to_linux_errno((int)ret.a2);
+
+	return 0;
+}
+
 static void ffa_set_up_mem_ops_native_flag(void)
 {
 	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
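The bitmap macros above only split a 64-bit notification mask across two 32-bit SMC arguments and reassemble it from the return registers; a quick round-trip example:

/*
 * Example: notifications 3 and 35 pending.
 *
 *	u64 map = BIT(3) | BIT(35);		      /* 0x0000000800000008 */
 *	NOTIFICATION_BITMAP_LOW(map)  == 0x00000008  /* passed in a3 */
 *	NOTIFICATION_BITMAP_HIGH(map) == 0x00000008  /* passed in a4 */
 *	PACK_NOTIFICATION_BITMAP(0x00000008, 0x00000008) == map
 */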
@@ -587,17 +852,9 @@ static int ffa_partition_info_get(const char *uuid_str,
 	return 0;
 }
 
-static void _ffa_mode_32bit_set(struct ffa_device *dev)
-{
-	dev->mode_32bit = true;
-}
-
 static void ffa_mode_32bit_set(struct ffa_device *dev)
 {
-	if (drv_info->version > FFA_VERSION_1_0)
-		return;
-
-	_ffa_mode_32bit_set(dev);
+	dev->mode_32bit = true;
 }
 
 static int ffa_sync_send_receive(struct ffa_device *dev,
@@ -630,6 +887,231 @@ static int ffa_memory_lend(struct ffa_mem_ops_args *args)
 	return ffa_memory_ops(FFA_MEM_LEND, args);
 }
 
+#define FFA_SECURE_PARTITION_ID_FLAG	BIT(15)
+
+enum notify_type {
+	NON_SECURE_VM,
+	SECURE_PARTITION,
+	FRAMEWORK,
+};
+
+struct notifier_cb_info {
+	struct hlist_node hnode;
+	ffa_notifier_cb cb;
+	void *cb_data;
+	enum notify_type type;
+};
+
+static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
+				    void *cb_data, bool is_registration)
+{
+	struct ffa_dev_part_info *partition;
+	bool cb_valid;
+
+	partition = xa_load(&drv_info->partition_info, part_id);
+	write_lock(&partition->rw_lock);
+
+	cb_valid = !!partition->callback;
+	if (!(is_registration ^ cb_valid)) {
+		write_unlock(&partition->rw_lock);
+		return -EINVAL;
+	}
+
+	partition->callback = callback;
+	partition->cb_data = cb_data;
+
+	write_unlock(&partition->rw_lock);
+	return 0;
+}
+
+static int ffa_sched_recv_cb_register(struct ffa_device *dev,
+				      ffa_sched_recv_cb cb, void *cb_data)
+{
+	return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
+}
+
+static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
+{
+	return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
+}
+
+static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
+{
+	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
+}
+
+static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
+{
+	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
+}
+
+/* Should be called while the notify_lock is taken */
+static struct notifier_cb_info *
+notifier_hash_node_get(u16 notify_id, enum notify_type type)
+{
+	struct notifier_cb_info *node;
+
+	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
+		if (type == node->type)
+			return node;
+
+	return NULL;
+}
+
+static int
+update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
+		   void *cb_data, bool is_registration)
+{
+	struct notifier_cb_info *cb_info = NULL;
+	bool cb_found;
+
+	cb_info = notifier_hash_node_get(notify_id, type);
+	cb_found = !!cb_info;
+
+	if (!(is_registration ^ cb_found))
+		return -EINVAL;
+
+	if (is_registration) {
+		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
+		if (!cb_info)
+			return -ENOMEM;
+
+		cb_info->type = type;
+		cb_info->cb = cb;
+		cb_info->cb_data = cb_data;
+
+		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
+	} else {
+		hash_del(&cb_info->hnode);
+	}
+
+	return 0;
+}
+
+static enum notify_type ffa_notify_type_get(u16 vm_id)
+{
+	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
+		return SECURE_PARTITION;
+	else
+		return NON_SECURE_VM;
+}
+
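FF-A partition IDs carry the world in bit 15, which is all ffa_notify_type_get() needs to inspect:

/*
 * Examples (IDs hypothetical):
 *	ffa_notify_type_get(0x8001) == SECURE_PARTITION	/* bit 15 set */
 *	ffa_notify_type_get(0x0002) == NON_SECURE_VM	/* bit 15 clear */
 */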
+static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
+{
+	int rc;
+	enum notify_type type = ffa_notify_type_get(dev->vm_id);
+
+	if (notify_id >= FFA_MAX_NOTIFICATIONS)
+		return -EINVAL;
+
+	mutex_lock(&drv_info->notify_lock);
+
+	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
+	if (rc) {
+		pr_err("Could not unregister notification callback\n");
+		mutex_unlock(&drv_info->notify_lock);
+		return rc;
+	}
+
+	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+
+	mutex_unlock(&drv_info->notify_lock);
+
+	return rc;
+}
+
+static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
+			      ffa_notifier_cb cb, void *cb_data, int notify_id)
+{
+	int rc;
+	u32 flags = 0;
+	enum notify_type type = ffa_notify_type_get(dev->vm_id);
+
+	if (notify_id >= FFA_MAX_NOTIFICATIONS)
+		return -EINVAL;
+
+	mutex_lock(&drv_info->notify_lock);
+
+	if (is_per_vcpu)
+		flags = PER_VCPU_NOTIFICATION_FLAG;
+
+	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
+	if (rc) {
+		mutex_unlock(&drv_info->notify_lock);
+		return rc;
+	}
+
+	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
+	if (rc) {
+		pr_err("Failed to register callback for %d - %d\n",
+		       notify_id, rc);
+		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
+	}
+	mutex_unlock(&drv_info->notify_lock);
+
+	return rc;
+}
+
+static int ffa_notify_send(struct ffa_device *dev, int notify_id,
+			   bool is_per_vcpu, u16 vcpu)
+{
+	u32 flags = 0;
+
+	if (is_per_vcpu)
+		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);
+
+	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
+				    BIT(notify_id));
+}
+
+static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
+{
+	int notify_id;
+	struct notifier_cb_info *cb_info = NULL;
+
+	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
+	     notify_id++, bitmap >>= 1) {
+		if (!(bitmap & 1))
+			continue;
+
+		mutex_lock(&drv_info->notify_lock);
+		cb_info = notifier_hash_node_get(notify_id, type);
+		mutex_unlock(&drv_info->notify_lock);
+
+		if (cb_info && cb_info->cb)
+			cb_info->cb(notify_id, cb_info->cb_data);
+	}
+}
+
+static void notif_pcpu_irq_work_fn(struct work_struct *work)
+{
+	int rc;
+	struct ffa_notify_bitmaps bitmaps;
+
+	rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
+				  SPM_FRAMEWORK_BITMAP, &bitmaps);
+	if (rc) {
+		pr_err("Failed to retrieve notifications with %d!\n", rc);
+		return;
+	}
+
+	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
+	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
+	handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
+}
+
+static void
+ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
+{
+	struct ffa_drv_info *info = cb_data;
+
+	if (!is_per_vcpu)
+		notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
+	else
+		queue_work_on(vcpu, info->notif_pcpu_wq,
+			      &info->notif_pcpu_work);
+}
+
 static const struct ffa_info_ops ffa_drv_info_ops = {
 	.api_version_get = ffa_api_version_get,
 	.partition_info_get = ffa_partition_info_get,
@@ -646,10 +1128,24 @@ static const struct ffa_mem_ops ffa_drv_mem_ops = {
 	.memory_lend = ffa_memory_lend,
 };
 
+static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
+	.run = ffa_run,
+};
+
+static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
+	.sched_recv_cb_register = ffa_sched_recv_cb_register,
+	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
+	.notify_request = ffa_notify_request,
+	.notify_relinquish = ffa_notify_relinquish,
+	.notify_send = ffa_notify_send,
+};
+
 static const struct ffa_ops ffa_drv_ops = {
 	.info_ops = &ffa_drv_info_ops,
 	.msg_ops = &ffa_drv_msg_ops,
 	.mem_ops = &ffa_drv_mem_ops,
+	.cpu_ops = &ffa_drv_cpu_ops,
+	.notifier_ops = &ffa_drv_notifier_ops,
 };
 
 void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
@@ -680,6 +1176,7 @@ static void ffa_setup_partitions(void)
 	int count, idx;
 	uuid_t uuid;
 	struct ffa_device *ffa_dev;
+	struct ffa_dev_part_info *info;
 	struct ffa_partition_info *pbuf, *tpbuf;
 
 	count = ffa_partition_probe(&uuid_null, &pbuf);
@@ -688,6 +1185,7 @@ static void ffa_setup_partitions(void)
 		return;
 	}
 
+	xa_init(&drv_info->partition_info);
 	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
 		import_uuid(&uuid, (u8 *)tpbuf->uuid);
 
@@ -706,9 +1204,232 @@ static void ffa_setup_partitions(void)
 
 		if (drv_info->version > FFA_VERSION_1_0 &&
 		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
-			_ffa_mode_32bit_set(ffa_dev);
+			ffa_mode_32bit_set(ffa_dev);
+
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
+		if (!info) {
+			ffa_device_unregister(ffa_dev);
+			continue;
+		}
+		xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
 	}
+	drv_info->partition_count = count;
+
 	kfree(pbuf);
+
+	/* Allocate for the host */
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return;
+	xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
+	drv_info->partition_count++;
+}
+
+static void ffa_partitions_cleanup(void)
+{
+	struct ffa_dev_part_info **info;
+	int idx, count = drv_info->partition_count;
+
+	if (!count)
+		return;
+
+	info = kcalloc(count, sizeof(**info), GFP_KERNEL);
+	if (!info)
+		return;
+
+	xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
+		   count, XA_PRESENT);
+
+	for (idx = 0; idx < count; idx++)
+		kfree(info[idx]);
+
+	kfree(info);
+
+	drv_info->partition_count = 0;
+	xa_destroy(&drv_info->partition_info);
+}
+
+/* FFA FEATURE IDs */
+#define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
+#define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
+#define FFA_FEAT_MANAGED_EXIT_INT		(3)
+
+static irqreturn_t irq_handler(int irq, void *irq_data)
+{
+	struct ffa_pcpu_irq *pcpu = irq_data;
+	struct ffa_drv_info *info = pcpu->info;
+
+	queue_work(info->notif_pcpu_wq, &info->irq_work);
+
+	return IRQ_HANDLED;
+}
+
+static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
+{
+	ffa_notification_info_get();
+}
+
+static int ffa_sched_recv_irq_map(void)
+{
+	int ret, irq, sr_intid;
+
+	/* The returned sr_intid is assumed to be SGI donated to NS world */
+	ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
+	if (ret < 0) {
+		if (ret != -EOPNOTSUPP)
+			pr_err("Failed to retrieve scheduler Rx interrupt\n");
+		return ret;
+	}
+
+	if (acpi_disabled) {
+		struct of_phandle_args oirq = {};
+		struct device_node *gic;
+
+		/* Only GICv3 supported currently with the device tree */
+		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
+		if (!gic)
+			return -ENXIO;
+
+		oirq.np = gic;
+		oirq.args_count = 1;
+		oirq.args[0] = sr_intid;
+		irq = irq_create_of_mapping(&oirq);
+		of_node_put(gic);
+#ifdef CONFIG_ACPI
+	} else {
+		irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
+					ACPI_ACTIVE_HIGH);
+#endif
+	}
+
+	if (irq <= 0) {
+		pr_err("Failed to create IRQ mapping!\n");
+		return -ENODATA;
+	}
+
+	return irq;
+}
+
+static void ffa_sched_recv_irq_unmap(void)
+{
+	if (drv_info->sched_recv_irq)
+		irq_dispose_mapping(drv_info->sched_recv_irq);
+}
+
+static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
+{
+	enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
+	return 0;
+}
+
+static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
+{
+	disable_percpu_irq(drv_info->sched_recv_irq);
+	return 0;
+}
+
+static void ffa_uninit_pcpu_irq(void)
+{
+	if (drv_info->cpuhp_state)
+		cpuhp_remove_state(drv_info->cpuhp_state);
+
+	if (drv_info->notif_pcpu_wq)
+		destroy_workqueue(drv_info->notif_pcpu_wq);
+
+	if (drv_info->sched_recv_irq)
+		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);
+
+	if (drv_info->irq_pcpu)
+		free_percpu(drv_info->irq_pcpu);
+}
+
+static int ffa_init_pcpu_irq(unsigned int irq)
+{
+	struct ffa_pcpu_irq __percpu *irq_pcpu;
+	int ret, cpu;
+
+	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
+	if (!irq_pcpu)
+		return -ENOMEM;
+
+	for_each_present_cpu(cpu)
+		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;
+
+	drv_info->irq_pcpu = irq_pcpu;
+
+	ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
+	if (ret) {
+		pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
+		return ret;
+	}
+
+	INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
+	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
+	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
+	if (!drv_info->notif_pcpu_wq)
+		return -EINVAL;
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
+				ffa_cpuhp_pcpu_irq_enable,
+				ffa_cpuhp_pcpu_irq_disable);
+
+	if (ret < 0)
+		return ret;
+
+	drv_info->cpuhp_state = ret;
+	return 0;
+}
+
+static void ffa_notifications_cleanup(void)
+{
+	ffa_uninit_pcpu_irq();
+	ffa_sched_recv_irq_unmap();
+
+	if (drv_info->bitmap_created) {
+		ffa_notification_bitmap_destroy();
+		drv_info->bitmap_created = false;
+	}
+}
+
+static int ffa_notifications_setup(void)
+{
+	int ret, irq;
+
+	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
+	if (ret) {
+		pr_err("Notifications not supported, continuing with it ..\n");
+		return 0;
+	}
+
+	ret = ffa_notification_bitmap_create();
+	if (ret) {
+		pr_err("notification_bitmap_create error %d\n", ret);
+		return ret;
+	}
+	drv_info->bitmap_created = true;
+
+	irq = ffa_sched_recv_irq_map();
+	if (irq <= 0) {
+		ret = irq;
+		goto cleanup;
+	}
+
+	drv_info->sched_recv_irq = irq;
+
+	ret = ffa_init_pcpu_irq(irq);
+	if (ret)
+		goto cleanup;
+
+	hash_init(drv_info->notifier_hash);
+	mutex_init(&drv_info->notify_lock);
+
+	/* Register internal scheduling callback */
+	ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
+				       drv_info, true);
+	if (!ret)
+		return ret;
+cleanup:
+	ffa_notifications_cleanup();
+	return ret;
+}
 
 static int __init ffa_init(void)
@@ -766,7 +1487,13 @@ static int __init ffa_init(void)
 
 	ffa_set_up_mem_ops_native_flag();
 
+	ret = ffa_notifications_setup();
+	if (ret)
+		goto partitions_cleanup;
+
 	return 0;
+
+partitions_cleanup:
+	ffa_partitions_cleanup();
 free_pages:
 	if (drv_info->tx_buffer)
 		free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@@ -781,9 +1508,12 @@ subsys_initcall(ffa_init);
 
 static void __exit ffa_exit(void)
 {
+	ffa_notifications_cleanup();
+	ffa_partitions_cleanup();
 	ffa_rxtx_unmap(drv_info->vm_id);
 	free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
 	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
+	xa_destroy(&drv_info->partition_info);
 	kfree(drv_info);
 	arm_ffa_bus_exit();
 }
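Putting the pieces together: the donated SGI fires, irq_work runs ffa_notification_info_get(), and any registered schedule-receiver callback learns which partition and vCPU have pending notifications. A consumer is then expected to donate cycles via cpu_ops->run(); a hedged sketch (function names hypothetical; note the callback runs from the notification workqueue, not hard-IRQ context):

static void my_sched_recv(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
	struct ffa_device *ffa_dev = cb_data;

	/* Give the partition cycles to process its pending notification */
	ffa_dev->ops->cpu_ops->run(ffa_dev, is_per_vcpu ? vcpu : 0);
}

static int my_register(struct ffa_device *ffa_dev)
{
	return ffa_dev->ops->notifier_ops->sched_recv_cb_register(ffa_dev,
						my_sched_recv, ffa_dev);
}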
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index cc060da51bec..1abedb5b2e48 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -6,6 +6,7 @@
 #ifndef _LINUX_ARM_FFA_H
 #define _LINUX_ARM_FFA_H
 
+#include <linux/bitfield.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -20,6 +21,7 @@
 
 #define FFA_ERROR			FFA_SMC_32(0x60)
 #define FFA_SUCCESS			FFA_SMC_32(0x61)
+#define FFA_FN64_SUCCESS		FFA_SMC_64(0x61)
 #define FFA_INTERRUPT			FFA_SMC_32(0x62)
 #define FFA_VERSION			FFA_SMC_32(0x63)
 #define FFA_FEATURES			FFA_SMC_32(0x64)
@@ -54,6 +56,23 @@
 #define FFA_MEM_FRAG_RX			FFA_SMC_32(0x7A)
 #define FFA_MEM_FRAG_TX			FFA_SMC_32(0x7B)
 #define FFA_NORMAL_WORLD_RESUME		FFA_SMC_32(0x7C)
+#define FFA_NOTIFICATION_BITMAP_CREATE	FFA_SMC_32(0x7D)
+#define FFA_NOTIFICATION_BITMAP_DESTROY	FFA_SMC_32(0x7E)
+#define FFA_NOTIFICATION_BIND		FFA_SMC_32(0x7F)
+#define FFA_NOTIFICATION_UNBIND		FFA_SMC_32(0x80)
+#define FFA_NOTIFICATION_SET		FFA_SMC_32(0x81)
+#define FFA_NOTIFICATION_GET		FFA_SMC_32(0x82)
+#define FFA_NOTIFICATION_INFO_GET	FFA_SMC_32(0x83)
+#define FFA_FN64_NOTIFICATION_INFO_GET	FFA_SMC_64(0x83)
+#define FFA_RX_ACQUIRE			FFA_SMC_32(0x84)
+#define FFA_SPM_ID_GET			FFA_SMC_32(0x85)
+#define FFA_MSG_SEND2			FFA_SMC_32(0x86)
+#define FFA_SECONDARY_EP_REGISTER	FFA_SMC_32(0x87)
+#define FFA_FN64_SECONDARY_EP_REGISTER	FFA_SMC_64(0x87)
+#define FFA_MEM_PERM_GET		FFA_SMC_32(0x88)
+#define FFA_FN64_MEM_PERM_GET		FFA_SMC_64(0x88)
+#define FFA_MEM_PERM_SET		FFA_SMC_32(0x89)
+#define FFA_FN64_MEM_PERM_SET		FFA_SMC_64(0x89)
 
 /*
  * For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@@ -76,6 +95,7 @@
 #define FFA_RET_DENIED			(-6)
 #define FFA_RET_RETRY			(-7)
 #define FFA_RET_ABORTED			(-8)
+#define FFA_RET_NO_DATA			(-9)
 
 /* FFA version encoding */
 #define FFA_MAJOR_VERSION_MASK	GENMASK(30, 16)
@@ -86,6 +106,7 @@
 	(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
 	 FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
 #define FFA_VERSION_1_0		FFA_PACK_VERSION_INFO(1, 0)
+#define FFA_VERSION_1_1		FFA_PACK_VERSION_INFO(1, 1)
 
 /**
  * FF-A specification mentions explicitly about '4K pages'. This should
@@ -278,8 +299,8 @@ struct ffa_mem_region {
 #define FFA_MEM_NON_SHAREABLE	(0)
 #define FFA_MEM_OUTER_SHAREABLE	(2)
 #define FFA_MEM_INNER_SHAREABLE	(3)
-	u8 attributes;
-	u8 reserved_0;
+	/* Memory region attributes, upper byte MBZ pre v1.1 */
+	u16 attributes;
 	/*
 	 * Clear memory region contents after unmapping it from the sender and
 	 * before mapping it for any receiver.
@@ -317,27 +338,41 @@ struct ffa_mem_region {
 	 * memory region.
 	 */
 	u64 tag;
-	u32 reserved_1;
+	/* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
+	u32 ep_mem_size;
 	/*
	 * The number of `ffa_mem_region_attributes` entries included in this
 	 * transaction.
 	 */
 	u32 ep_count;
 	/*
-	 * An array of endpoint memory access descriptors.
-	 * Each one specifies a memory region offset, an endpoint and the
-	 * attributes with which this memory region should be mapped in that
-	 * endpoint's page table.
+	 * 16-byte aligned offset from the base address of this descriptor
+	 * to the first element of the endpoint memory access descriptor array
+	 * Valid only from v1.1
 	 */
-	struct ffa_mem_region_attributes ep_mem_access[];
+	u32 ep_mem_offset;
+	/* MBZ, valid only from v1.1 */
+	u32 reserved[3];
 };
 
-#define COMPOSITE_OFFSET(x)	\
-	(offsetof(struct ffa_mem_region, ep_mem_access[x]))
 #define CONSTITUENTS_OFFSET(x)	\
 	(offsetof(struct ffa_composite_mem_region, constituents[x]))
-#define COMPOSITE_CONSTITUENTS_OFFSET(x, y)	\
-	(COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+static inline u32
+ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
+{
+	u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+	/*
+	 * Earlier to v1.1, the endpoint memory descriptor array started at
+	 * offset 32(i.e. offset of ep_mem_offset in the current structure)
+	 */
+	if (ffa_version <= FFA_VERSION_1_0)
+		offset += offsetof(struct ffa_mem_region, ep_mem_offset);
+	else
+		offset += sizeof(struct ffa_mem_region);
+
+	return offset;
+}
 
 struct ffa_mem_ops_args {
 	bool use_txbuf;
@@ -367,10 +402,30 @@ struct ffa_mem_ops {
 	int (*memory_lend)(struct ffa_mem_ops_args *args);
 };
 
+struct ffa_cpu_ops {
+	int (*run)(struct ffa_device *dev, u16 vcpu);
+};
+
+typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
+typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+
+struct ffa_notifier_ops {
+	int (*sched_recv_cb_register)(struct ffa_device *dev,
+				      ffa_sched_recv_cb cb, void *cb_data);
+	int (*sched_recv_cb_unregister)(struct ffa_device *dev);
+	int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
+			      ffa_notifier_cb cb, void *cb_data, int notify_id);
+	int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+	int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
+			   u16 vcpu);
+};
+
 struct ffa_ops {
 	const struct ffa_info_ops *info_ops;
 	const struct ffa_msg_ops *msg_ops;
 	const struct ffa_mem_ops *mem_ops;
+	const struct ffa_cpu_ops *cpu_ops;
+	const struct ffa_notifier_ops *notifier_ops;
 };
 
 #endif /* _LINUX_ARM_FFA_H */
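Finally, a quick sanity check of ffa_mem_desc_offset() with the sizes defined above (16-byte struct ffa_mem_region_attributes, 48-byte struct ffa_mem_region, ep_mem_offset at byte 32):

/*
 *	ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0) == 32
 *	ffa_mem_desc_offset(buf, 2, FFA_VERSION_1_0) == 64
 *	ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_1) == 48
 *	ffa_mem_desc_offset(buf, 2, FFA_VERSION_1_1) == 80
 *
 * This is consistent with the nVHE hyp proxy at the top of the patch,
 * which always passes FFA_VERSION_1_0 when parsing the host's buffer
 * and so keeps the pre-v1.1 starting offset of 32.
 */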