diff options
| author | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
|---|---|---|
| committer | Dmitry Torokhov <[email protected]> | 2023-08-30 16:06:38 -0700 | 
| commit | 1ac731c529cd4d6adbce134754b51ff7d822b145 (patch) | |
| tree | 143ab3f35ca5f3b69f583c84e6964b17139c2ec1 /drivers/iommu/iommu.c | |
| parent | 07b4c950f27bef0362dc6ad7ee713aab61d58149 (diff) | |
| parent | 54116d442e001e1b6bd482122043b1870998a1f3 (diff) | |
Merge branch 'next' into for-linus
Prepare input updates for 6.6 merge window.
Diffstat (limited to 'drivers/iommu/iommu.c')
| -rw-r--r-- | drivers/iommu/iommu.c | 365 | 
1 file changed, 154 insertions, 211 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 10db680acaed..f1dcfa3f1a1b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -28,6 +28,7 @@  #include <linux/fsl/mc.h>  #include <linux/module.h>  #include <linux/cc_platform.h> +#include <linux/cdx/cdx_bus.h>  #include <trace/events/iommu.h>  #include <linux/sched/mm.h>  #include <linux/msi.h> @@ -87,9 +88,10 @@ static const char * const iommu_group_resv_type_string[] = {  static int iommu_bus_notifier(struct notifier_block *nb,  			      unsigned long action, void *data); +static void iommu_release_device(struct device *dev);  static int iommu_alloc_default_domain(struct iommu_group *group,  				      struct device *dev); -static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, +static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,  						 unsigned type);  static int __iommu_attach_device(struct iommu_domain *domain,  				 struct device *dev); @@ -129,6 +131,9 @@ static struct bus_type * const iommu_buses[] = {  #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS  	&host1x_context_device_bus_type,  #endif +#ifdef CONFIG_CDX_BUS +	&cdx_bus_type, +#endif  };  /* @@ -453,20 +458,86 @@ err_out:  } -void iommu_release_device(struct device *dev) +/* + * Remove a device from a group's device list and return the group device + * if successful. + */ +static struct group_device * +__iommu_group_remove_device(struct iommu_group *group, struct device *dev)  { +	struct group_device *device; + +	lockdep_assert_held(&group->mutex); +	list_for_each_entry(device, &group->devices, list) { +		if (device->dev == dev) { +			list_del(&device->list); +			return device; +		} +	} + +	return NULL; +} + +/* + * Release a device from its group and decrements the iommu group reference + * count. 
+ */ +static void __iommu_group_release_device(struct iommu_group *group, +					 struct group_device *grp_dev) +{ +	struct device *dev = grp_dev->dev; + +	sysfs_remove_link(group->devices_kobj, grp_dev->name); +	sysfs_remove_link(&dev->kobj, "iommu_group"); + +	trace_remove_device_from_group(group->id, dev); + +	kfree(grp_dev->name); +	kfree(grp_dev); +	dev->iommu_group = NULL; +	kobject_put(group->devices_kobj); +} + +static void iommu_release_device(struct device *dev) +{ +	struct iommu_group *group = dev->iommu_group; +	struct group_device *device;  	const struct iommu_ops *ops; -	if (!dev->iommu) +	if (!dev->iommu || !group)  		return;  	iommu_device_unlink(dev->iommu->iommu_dev, dev); +	mutex_lock(&group->mutex); +	device = __iommu_group_remove_device(group, dev); + +	/* +	 * If the group has become empty then ownership must have been released, +	 * and the current domain must be set back to NULL or the default +	 * domain. +	 */ +	if (list_empty(&group->devices)) +		WARN_ON(group->owner_cnt || +			group->domain != group->default_domain); + +	/* +	 * release_device() must stop using any attached domain on the device. +	 * If there are still other devices in the group they are not effected +	 * by this callback. +	 * +	 * The IOMMU driver must set the device to either an identity or +	 * blocking translation and stop using any domain pointer, as it is +	 * going to be freed. 
+	 */  	ops = dev_iommu_ops(dev);  	if (ops->release_device)  		ops->release_device(dev); +	mutex_unlock(&group->mutex); + +	if (device) +		__iommu_group_release_device(group, device); -	iommu_group_remove_device(dev);  	module_put(ops->owner);  	dev_iommu_free(dev);  } @@ -550,7 +621,7 @@ static void iommu_group_remove_file(struct iommu_group *group,  static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)  { -	return sprintf(buf, "%s\n", group->name); +	return sysfs_emit(buf, "%s\n", group->name);  }  /** @@ -663,52 +734,51 @@ static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,  {  	struct iommu_resv_region *region, *next;  	struct list_head group_resv_regions; -	char *str = buf; +	int offset = 0;  	INIT_LIST_HEAD(&group_resv_regions);  	iommu_get_group_resv_regions(group, &group_resv_regions);  	list_for_each_entry_safe(region, next, &group_resv_regions, list) { -		str += sprintf(str, "0x%016llx 0x%016llx %s\n", -			       (long long int)region->start, -			       (long long int)(region->start + -						region->length - 1), -			       iommu_group_resv_type_string[region->type]); +		offset += sysfs_emit_at(buf, offset, "0x%016llx 0x%016llx %s\n", +					(long long)region->start, +					(long long)(region->start + +						    region->length - 1), +					iommu_group_resv_type_string[region->type]);  		kfree(region);  	} -	return (str - buf); +	return offset;  }  static ssize_t iommu_group_show_type(struct iommu_group *group,  				     char *buf)  { -	char *type = "unknown\n"; +	char *type = "unknown";  	mutex_lock(&group->mutex);  	if (group->default_domain) {  		switch (group->default_domain->type) {  		case IOMMU_DOMAIN_BLOCKED: -			type = "blocked\n"; +			type = "blocked";  			break;  		case IOMMU_DOMAIN_IDENTITY: -			type = "identity\n"; +			type = "identity";  			break;  		case IOMMU_DOMAIN_UNMANAGED: -			type = "unmanaged\n"; +			type = "unmanaged";  			break;  		case IOMMU_DOMAIN_DMA: -			type = "DMA\n"; +			type = "DMA";  			
break;  		case IOMMU_DOMAIN_DMA_FQ: -			type = "DMA-FQ\n"; +			type = "DMA-FQ";  			break;  		}  	}  	mutex_unlock(&group->mutex); -	strcpy(buf, type); -	return strlen(type); +	return sysfs_emit(buf, "%s\n", type);  }  static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); @@ -739,7 +809,7 @@ static void iommu_group_release(struct kobject *kobj)  	kfree(group);  } -static struct kobj_type iommu_group_ktype = { +static const struct kobj_type iommu_group_ktype = {  	.sysfs_ops = &iommu_group_sysfs_ops,  	.release = iommu_group_release,  }; @@ -816,35 +886,6 @@ struct iommu_group *iommu_group_alloc(void)  }  EXPORT_SYMBOL_GPL(iommu_group_alloc); -struct iommu_group *iommu_group_get_by_id(int id) -{ -	struct kobject *group_kobj; -	struct iommu_group *group; -	const char *name; - -	if (!iommu_group_kset) -		return NULL; - -	name = kasprintf(GFP_KERNEL, "%d", id); -	if (!name) -		return NULL; - -	group_kobj = kset_find_obj(iommu_group_kset, name); -	kfree(name); - -	if (!group_kobj) -		return NULL; - -	group = container_of(group_kobj, struct iommu_group, kobj); -	BUG_ON(group->id != id); - -	kobject_get(group->devices_kobj); -	kobject_put(&group->kobj); - -	return group; -} -EXPORT_SYMBOL_GPL(iommu_group_get_by_id); -  /**   * iommu_group_get_iommudata - retrieve iommu_data registered for a group   * @group: the group @@ -1068,7 +1109,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);  void iommu_group_remove_device(struct device *dev)  {  	struct iommu_group *group = dev->iommu_group; -	struct group_device *tmp_device, *device = NULL; +	struct group_device *device;  	if (!group)  		return; @@ -1076,27 +1117,11 @@ void iommu_group_remove_device(struct device *dev)  	dev_info(dev, "Removing from iommu group %d\n", group->id);  	mutex_lock(&group->mutex); -	list_for_each_entry(tmp_device, &group->devices, list) { -		if (tmp_device->dev == dev) { -			device = tmp_device; -			list_del(&device->list); -			break; -		} -	} +	device = 
__iommu_group_remove_device(group, dev);  	mutex_unlock(&group->mutex); -	if (!device) -		return; - -	sysfs_remove_link(group->devices_kobj, device->name); -	sysfs_remove_link(&dev->kobj, "iommu_group"); - -	trace_remove_device_from_group(group->id, dev); - -	kfree(device->name); -	kfree(device); -	dev->iommu_group = NULL; -	kobject_put(group->devices_kobj); +	if (device) +		__iommu_group_release_device(group, device);  }  EXPORT_SYMBOL_GPL(iommu_group_remove_device); @@ -1631,7 +1656,7 @@ static int iommu_get_def_domain_type(struct device *dev)  	return 0;  } -static int iommu_group_alloc_default_domain(struct bus_type *bus, +static int iommu_group_alloc_default_domain(const struct bus_type *bus,  					    struct iommu_group *group,  					    unsigned int type)  { @@ -1777,7 +1802,7 @@ static int probe_get_default_domain_type(struct device *dev, void *data)  	return 0;  } -static void probe_alloc_default_domain(struct bus_type *bus, +static void probe_alloc_default_domain(const struct bus_type *bus,  				       struct iommu_group *group)  {  	struct __group_domain_type gtype; @@ -1832,7 +1857,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group)  					  iommu_do_create_direct_mappings);  } -int bus_iommu_probe(struct bus_type *bus) +int bus_iommu_probe(const struct bus_type *bus)  {  	struct iommu_group *group, *next;  	LIST_HEAD(group_list); @@ -1876,7 +1901,7 @@ int bus_iommu_probe(struct bus_type *bus)  	return ret;  } -bool iommu_present(struct bus_type *bus) +bool iommu_present(const struct bus_type *bus)  {  	return bus->iommu_ops != NULL;  } @@ -1951,7 +1976,7 @@ void iommu_set_fault_handler(struct iommu_domain *domain,  }  EXPORT_SYMBOL_GPL(iommu_set_fault_handler); -static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, +static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,  						 unsigned type)  {  	struct iommu_domain *domain; @@ -1964,8 +1989,13 @@ static struct iommu_domain 
*__iommu_domain_alloc(struct bus_type *bus,  		return NULL;  	domain->type = type; -	/* Assume all sizes by default; the driver may override this later */ -	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; +	/* +	 * If not already set, assume all sizes by default; the driver +	 * may override this later +	 */ +	if (!domain->pgsize_bitmap) +		domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; +  	if (!domain->ops)  		domain->ops = bus->iommu_ops->default_domain_ops; @@ -1976,7 +2006,7 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,  	return domain;  } -struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) +struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)  {  	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);  } @@ -2817,11 +2847,10 @@ int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)  EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);  /* - * Changes the default domain of an iommu group that has *only* one device + * Changes the default domain of an iommu group   *   * @group: The group for which the default domain should be changed - * @prev_dev: The device in the group (this is used to make sure that the device - *	 hasn't changed after the caller has called this function) + * @dev: The first device in the group   * @type: The type of the new default domain that gets associated with the group   *   * Returns 0 on success and error code on failure @@ -2832,124 +2861,63 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);   *    Please take a closer look if intended to use for other purposes.   
*/  static int iommu_change_dev_def_domain(struct iommu_group *group, -				       struct device *prev_dev, int type) +				       struct device *dev, int type)  { +	struct __group_domain_type gtype = {NULL, 0};  	struct iommu_domain *prev_dom; -	struct group_device *grp_dev; -	int ret, dev_def_dom; -	struct device *dev; - -	mutex_lock(&group->mutex); - -	if (group->default_domain != group->domain) { -		dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); -		ret = -EBUSY; -		goto out; -	} - -	/* -	 * iommu group wasn't locked while acquiring device lock in -	 * iommu_group_store_type(). So, make sure that the device count hasn't -	 * changed while acquiring device lock. -	 * -	 * Changing default domain of an iommu group with two or more devices -	 * isn't supported because there could be a potential deadlock. Consider -	 * the following scenario. T1 is trying to acquire device locks of all -	 * the devices in the group and before it could acquire all of them, -	 * there could be another thread T2 (from different sub-system and use -	 * case) that has already acquired some of the device locks and might be -	 * waiting for T1 to release other device locks. 
-	 */ -	if (iommu_group_device_count(group) != 1) { -		dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); -		ret = -EINVAL; -		goto out; -	} +	int ret; -	/* Since group has only one device */ -	grp_dev = list_first_entry(&group->devices, struct group_device, list); -	dev = grp_dev->dev; - -	if (prev_dev != dev) { -		dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); -		ret = -EBUSY; -		goto out; -	} +	lockdep_assert_held(&group->mutex);  	prev_dom = group->default_domain; -	if (!prev_dom) { -		ret = -EINVAL; -		goto out; -	} - -	dev_def_dom = iommu_get_def_domain_type(dev); +	__iommu_group_for_each_dev(group, >ype, +				   probe_get_default_domain_type);  	if (!type) {  		/*  		 * If the user hasn't requested any specific type of domain and  		 * if the device supports both the domains, then default to the  		 * domain the device was booted with  		 */ -		type = dev_def_dom ? : iommu_def_domain_type; -	} else if (dev_def_dom && type != dev_def_dom) { -		dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", +		type = gtype.type ? 
: iommu_def_domain_type; +	} else if (gtype.type && type != gtype.type) { +		dev_err_ratelimited(dev, "Device cannot be in %s domain\n",  				    iommu_domain_type_str(type)); -		ret = -EINVAL; -		goto out; +		return -EINVAL;  	}  	/*  	 * Switch to a new domain only if the requested domain type is different  	 * from the existing default domain type  	 */ -	if (prev_dom->type == type) { -		ret = 0; -		goto out; -	} +	if (prev_dom->type == type) +		return 0; -	/* We can bring up a flush queue without tearing down the domain */ -	if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) { -		ret = iommu_dma_init_fq(prev_dom); -		if (!ret) -			prev_dom->type = IOMMU_DOMAIN_DMA_FQ; -		goto out; -	} +	group->default_domain = NULL; +	group->domain = NULL;  	/* Sets group->default_domain to the newly allocated domain */  	ret = iommu_group_alloc_default_domain(dev->bus, group, type);  	if (ret) -		goto out; +		goto restore_old_domain; -	ret = iommu_create_device_direct_mappings(group, dev); +	ret = iommu_group_create_direct_mappings(group);  	if (ret)  		goto free_new_domain; -	ret = __iommu_attach_device(group->default_domain, dev); +	ret = __iommu_attach_group(group->default_domain, group);  	if (ret)  		goto free_new_domain; -	group->domain = group->default_domain; - -	/* -	 * Release the mutex here because ops->probe_finalize() call-back of -	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which -	 * in-turn might call back into IOMMU core code, where it tries to take -	 * group->mutex, resulting in a deadlock. -	 */ -	mutex_unlock(&group->mutex); - -	/* Make sure dma_ops is appropriatley set */ -	iommu_group_do_probe_finalize(dev, group->default_domain);  	iommu_domain_free(prev_dom); +  	return 0;  free_new_domain:  	iommu_domain_free(group->default_domain); +restore_old_domain:  	group->default_domain = prev_dom;  	group->domain = prev_dom; -out: -	mutex_unlock(&group->mutex); -  	return ret;  } @@ -2959,7 +2927,7 @@ out:   * transition. 
Return failure if this isn't met.   *   * We need to consider the race between this and the device release path. - * device_lock(dev) is used here to guarantee that the device release path + * group->mutex is used here to guarantee that the device release path   * will not be entered at the same time.   */  static ssize_t iommu_group_store_type(struct iommu_group *group, @@ -2986,67 +2954,42 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,  	else  		return -EINVAL; -	/* -	 * Lock/Unlock the group mutex here before device lock to -	 * 1. Make sure that the iommu group has only one device (this is a -	 *    prerequisite for step 2) -	 * 2. Get struct *dev which is needed to lock device -	 */  	mutex_lock(&group->mutex); -	if (iommu_group_device_count(group) != 1) { +	/* We can bring up a flush queue without tearing down the domain. */ +	if (req_type == IOMMU_DOMAIN_DMA_FQ && +	    group->default_domain->type == IOMMU_DOMAIN_DMA) { +		ret = iommu_dma_init_fq(group->default_domain); +		if (!ret) +			group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;  		mutex_unlock(&group->mutex); -		pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); -		return -EINVAL; + +		return ret ?: count; +	} + +	/* Otherwise, ensure that device exists and no driver is bound. */ +	if (list_empty(&group->devices) || group->owner_cnt) { +		mutex_unlock(&group->mutex); +		return -EPERM;  	} -	/* Since group has only one device */  	grp_dev = list_first_entry(&group->devices, struct group_device, list);  	dev = grp_dev->dev; -	get_device(dev); + +	ret = iommu_change_dev_def_domain(group, dev, req_type);  	/* -	 * Don't hold the group mutex because taking group mutex first and then -	 * the device lock could potentially cause a deadlock as below. Assume -	 * two threads T1 and T2. 
T1 is trying to change default domain of an -	 * iommu group and T2 is trying to hot unplug a device or release [1] VF -	 * of a PCIe device which is in the same iommu group. T1 takes group -	 * mutex and before it could take device lock assume T2 has taken device -	 * lock and is yet to take group mutex. Now, both the threads will be -	 * waiting for the other thread to release lock. Below, lock order was -	 * suggested. -	 * device_lock(dev); -	 *	mutex_lock(&group->mutex); -	 *		iommu_change_dev_def_domain(); -	 *	mutex_unlock(&group->mutex); -	 * device_unlock(dev); -	 * -	 * [1] Typical device release path -	 * device_lock() from device/driver core code -	 *  -> bus_notifier() -	 *   -> iommu_bus_notifier() -	 *    -> iommu_release_device() -	 *     -> ops->release_device() vendor driver calls back iommu core code -	 *      -> mutex_lock() from iommu core code +	 * Release the mutex here because ops->probe_finalize() call-back of +	 * some vendor IOMMU drivers calls arm_iommu_attach_device() which +	 * in-turn might call back into IOMMU core code, where it tries to take +	 * group->mutex, resulting in a deadlock.  	 */  	mutex_unlock(&group->mutex); -	/* Check if the device in the group still has a driver bound to it */ -	device_lock(dev); -	if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ && -	    group->default_domain->type == IOMMU_DOMAIN_DMA)) { -		pr_err_ratelimited("Device is still bound to driver\n"); -		ret = -EBUSY; -		goto out; -	} - -	ret = iommu_change_dev_def_domain(group, dev, req_type); -	ret = ret ?: count; - -out: -	device_unlock(dev); -	put_device(dev); +	/* Make sure dma_ops is appropriatley set */ +	if (!ret) +		__iommu_group_dma_finalize(group); -	return ret; +	return ret ?: count;  }  static bool iommu_is_default_domain(struct iommu_group *group)  |