Diffstat (limited to 'drivers/devfreq/devfreq.c')
-rw-r--r--   drivers/devfreq/devfreq.c   921
1 files changed, 642 insertions, 279 deletions
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index b146d76f04cf..53766f39aadd 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -27,21 +27,17 @@
 #include <linux/hrtimer.h>
 #include "governor.h"
 
-struct class *devfreq_class;
+static struct class *devfreq_class;
 
 /*
- * devfreq_work periodically monitors every registered device.
- * The minimum polling interval is one jiffy. The polling interval is
- * determined by the minimum polling period among all polling devfreq
- * devices. The resolution of polling interval is one jiffy.
+ * devfreq core provides delayed work based load monitoring helper
+ * functions. Governors can use these or can implement their own
+ * monitoring mechanism.
  */
-static bool polling;
 static struct workqueue_struct *devfreq_wq;
-static struct delayed_work devfreq_work;
-
-/* wait removing if this is to be removed */
-static struct devfreq *wait_remove_device;
 
+/* The list of all device-devfreq governors */
+static LIST_HEAD(devfreq_governor_list);
 /* The list of all device-devfreq */
 static LIST_HEAD(devfreq_list);
 static DEFINE_MUTEX(devfreq_list_lock);
@@ -73,6 +69,79 @@ static struct devfreq *find_device_devfreq(struct device *dev)
 }
 
 /**
+ * devfreq_get_freq_level() - Lookup freq_table for the frequency
+ * @devfreq:	the devfreq instance
+ * @freq:	the target frequency
+ */
+static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+/**
+ * devfreq_update_status() - Update statistics of devfreq behavior
+ * @devfreq:	the devfreq instance
+ * @freq:	the update target frequency
+ */
+static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
+{
+	int lev, prev_lev;
+	unsigned long cur_time;
+
+	lev = devfreq_get_freq_level(devfreq, freq);
+	if (lev < 0)
+		return lev;
+
+	cur_time = jiffies;
+	devfreq->time_in_state[lev] +=
+			 cur_time - devfreq->last_stat_updated;
+	if (freq != devfreq->previous_freq) {
+		prev_lev = devfreq_get_freq_level(devfreq,
+						devfreq->previous_freq);
+		devfreq->trans_table[(prev_lev *
+				devfreq->profile->max_state) + lev]++;
+		devfreq->total_trans++;
+	}
+	devfreq->last_stat_updated = cur_time;
+
+	return 0;
+}
+
+/**
+ * find_devfreq_governor() - find devfreq governor from name
+ * @name:	name of the governor
+ *
+ * Search the list of devfreq governors and return the matched
+ * governor's pointer. devfreq_list_lock should be held by the caller.
+ */
+static struct devfreq_governor *find_devfreq_governor(const char *name)
+{
+	struct devfreq_governor *tmp_governor;
+
+	if (unlikely(IS_ERR_OR_NULL(name))) {
+		pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	WARN(!mutex_is_locked(&devfreq_list_lock),
+	     "devfreq_list_lock must be locked.");
+
+	list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
+		if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
+			return tmp_governor;
+	}
+
+	return ERR_PTR(-ENODEV);
+}
+
+/* Load monitoring helper functions for governors use */
+
+/**
  * update_devfreq() - Reevaluate the device and configure frequency.
  * @devfreq:	the devfreq instance.
 *
@@ -90,6 +159,9 @@ int update_devfreq(struct devfreq *devfreq)
 		return -EINVAL;
 	}
 
+	if (!devfreq->governor)
+		return -EINVAL;
+
 	/* Reevaluate the proper frequency */
 	err = devfreq->governor->get_target_freq(devfreq, &freq);
 	if (err)
@@ -116,16 +188,173 @@ int update_devfreq(struct devfreq *devfreq)
 	if (err)
 		return err;
 
+	if (devfreq->profile->freq_table)
+		if (devfreq_update_status(devfreq, freq))
+			dev_err(&devfreq->dev,
+				"Couldn't update frequency transition information.\n");
+
 	devfreq->previous_freq = freq;
 	return err;
 }
+EXPORT_SYMBOL(update_devfreq);
+
+/**
+ * devfreq_monitor() - Periodically poll devfreq objects.
+ * @work:	the work struct used to run devfreq_monitor periodically.
+ *
+ */
+static void devfreq_monitor(struct work_struct *work)
+{
+	int err;
+	struct devfreq *devfreq = container_of(work,
+					struct devfreq, work.work);
+
+	mutex_lock(&devfreq->lock);
+	err = update_devfreq(devfreq);
+	if (err)
+		dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
+
+	queue_delayed_work(devfreq_wq, &devfreq->work,
+				msecs_to_jiffies(devfreq->profile->polling_ms));
+	mutex_unlock(&devfreq->lock);
+}
+
+/**
+ * devfreq_monitor_start() - Start load monitoring of devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function for starting devfreq device load monitoing. By
+ * default delayed work based monitoring is supported. Function
+ * to be called from governor in response to DEVFREQ_GOV_START
+ * event when device is added to devfreq framework.
+ */
+void devfreq_monitor_start(struct devfreq *devfreq)
+{
+	INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
+	if (devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+}
+EXPORT_SYMBOL(devfreq_monitor_start);
+
+/**
+ * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to stop devfreq device load monitoing. Function
+ * to be called from governor in response to DEVFREQ_GOV_STOP
+ * event when device is removed from devfreq framework.
+ */
+void devfreq_monitor_stop(struct devfreq *devfreq)
+{
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_stop);
+
+/**
+ * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
+ * @devfreq:	the devfreq instance.
+ *
+ * Helper function to suspend devfreq device load monitoing. Function
+ * to be called from governor in response to DEVFREQ_GOV_SUSPEND
+ * event or when polling interval is set to zero.
+ *
+ * Note: Though this function is same as devfreq_monitor_stop(),
+ * intentionally kept separate to provide hooks for collecting
+ * transition statistics.
+ */
+void devfreq_monitor_suspend(struct devfreq *devfreq)
+{
+	mutex_lock(&devfreq->lock);
+	if (devfreq->stop_polling) {
+		mutex_unlock(&devfreq->lock);
+		return;
+	}
+
+	devfreq->stop_polling = true;
+	mutex_unlock(&devfreq->lock);
+	cancel_delayed_work_sync(&devfreq->work);
+}
+EXPORT_SYMBOL(devfreq_monitor_suspend);
+
+/**
+ * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
+ * @devfreq:    the devfreq instance.
+ *
+ * Helper function to resume devfreq device load monitoing. Function
+ * to be called from governor in response to DEVFREQ_GOV_RESUME
+ * event or when polling interval is set to non-zero.
+ */
+void devfreq_monitor_resume(struct devfreq *devfreq)
+{
+	mutex_lock(&devfreq->lock);
+	if (!devfreq->stop_polling)
+		goto out;
+
+	if (!delayed_work_pending(&devfreq->work) &&
+			devfreq->profile->polling_ms)
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+	devfreq->stop_polling = false;
+
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_monitor_resume);
+
+/**
+ * devfreq_interval_update() - Update device devfreq monitoring interval
+ * @devfreq:    the devfreq instance.
+ * @delay:      new polling interval to be set.
+ *
+ * Helper function to set new load monitoring polling interval. Function
+ * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
+ */
+void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
+{
+	unsigned int cur_delay = devfreq->profile->polling_ms;
+	unsigned int new_delay = *delay;
+
+	mutex_lock(&devfreq->lock);
+	devfreq->profile->polling_ms = new_delay;
+
+	if (devfreq->stop_polling)
+		goto out;
+
+	/* if new delay is zero, stop polling */
+	if (!new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		return;
+	}
+
+	/* if current delay is zero, start polling with new delay */
+	if (!cur_delay) {
+		queue_delayed_work(devfreq_wq, &devfreq->work,
+			msecs_to_jiffies(devfreq->profile->polling_ms));
+		goto out;
+	}
+
+	/* if current delay is greater than new delay, restart polling */
+	if (cur_delay > new_delay) {
+		mutex_unlock(&devfreq->lock);
+		cancel_delayed_work_sync(&devfreq->work);
+		mutex_lock(&devfreq->lock);
+		if (!devfreq->stop_polling)
+			queue_delayed_work(devfreq_wq, &devfreq->work,
+			      msecs_to_jiffies(devfreq->profile->polling_ms));
+	}
+out:
+	mutex_unlock(&devfreq->lock);
+}
+EXPORT_SYMBOL(devfreq_interval_update);
 
 /**
  * devfreq_notifier_call() - Notify that the device frequency requirements
  *			   has been changed out of devfreq framework.
- * @nb		the notifier_block (supposed to be devfreq->nb)
- * @type	not used
- * @devp	not used
+ * @nb:		the notifier_block (supposed to be devfreq->nb)
+ * @type:	not used
+ * @devp:	not used
  *
  * Called by a notifier that uses devfreq->nb.
  */
@@ -143,59 +372,34 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
 }
 
 /**
- * _remove_devfreq() - Remove devfreq from the device.
+ * _remove_devfreq() - Remove devfreq from the list and release its resources.
  * @devfreq:	the devfreq struct
  * @skip:	skip calling device_unregister().
- *
- * Note that the caller should lock devfreq->lock before calling
- * this. _remove_devfreq() will unlock it and free devfreq
- * internally. devfreq_list_lock should be locked by the caller
- * as well (not relased at return)
- *
- * Lock usage:
- * devfreq->lock: locked before call.
- *		  unlocked at return (and freed)
- * devfreq_list_lock: locked before call.
- *		      kept locked at return.
- *		      if devfreq is centrally polled.
- *
- * Freed memory:
- * devfreq
  */
 static void _remove_devfreq(struct devfreq *devfreq, bool skip)
 {
-	if (!mutex_is_locked(&devfreq->lock)) {
-		WARN(true, "devfreq->lock must be locked by the caller.\n");
-		return;
-	}
-	if (!devfreq->governor->no_central_polling &&
-	    !mutex_is_locked(&devfreq_list_lock)) {
-		WARN(true, "devfreq_list_lock must be locked by the caller.\n");
+	mutex_lock(&devfreq_list_lock);
+	if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
+		mutex_unlock(&devfreq_list_lock);
+		dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
 		return;
 	}
+	list_del(&devfreq->node);
+	mutex_unlock(&devfreq_list_lock);
 
-	if (devfreq->being_removed)
-		return;
-
-	devfreq->being_removed = true;
+	if (devfreq->governor)
+		devfreq->governor->event_handler(devfreq,
+						 DEVFREQ_GOV_STOP, NULL);
 
 	if (devfreq->profile->exit)
 		devfreq->profile->exit(devfreq->dev.parent);
 
-	if (devfreq->governor->exit)
-		devfreq->governor->exit(devfreq);
-
 	if (!skip && get_device(&devfreq->dev)) {
 		device_unregister(&devfreq->dev);
 		put_device(&devfreq->dev);
 	}
 
-	if (!devfreq->governor->no_central_polling)
-		list_del(&devfreq->node);
-
-	mutex_unlock(&devfreq->lock);
 	mutex_destroy(&devfreq->lock);
-
 	kfree(devfreq);
 }
 
@@ -210,163 +414,39 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip)
 static void devfreq_dev_release(struct device *dev)
 {
 	struct devfreq *devfreq = to_devfreq(dev);
 
-	bool central_polling = !devfreq->governor->no_central_polling;
-
-	/*
-	 * If devfreq_dev_release() was called by device_unregister() of
-	 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
-	 * being_removed is already set. This also partially checks the case
-	 * where devfreq_dev_release() is called from a thread other than
-	 * the one called _remove_devfreq(); however, this case is
-	 * dealt completely with another following being_removed check.
-	 *
-	 * Because being_removed is never being
-	 * unset, we do not need to worry about race conditions on
-	 * being_removed.
-	 */
-	if (devfreq->being_removed)
-		return;
-
-	if (central_polling)
-		mutex_lock(&devfreq_list_lock);
-
-	mutex_lock(&devfreq->lock);
-	/*
-	 * Check being_removed flag again for the case where
-	 * devfreq_dev_release() was called in a thread other than the one
-	 * possibly called _remove_devfreq().
-	 */
-	if (devfreq->being_removed) {
-		mutex_unlock(&devfreq->lock);
-		goto out;
-	}
-
-	/* devfreq->lock is unlocked and removed in _removed_devfreq() */
 	_remove_devfreq(devfreq, true);
-
-out:
-	if (central_polling)
-		mutex_unlock(&devfreq_list_lock);
-}
-
-/**
- * devfreq_monitor() - Periodically poll devfreq objects.
- * @work: the work struct used to run devfreq_monitor periodically.
- *
- */
-static void devfreq_monitor(struct work_struct *work)
-{
-	static unsigned long last_polled_at;
-	struct devfreq *devfreq, *tmp;
-	int error;
-	unsigned long jiffies_passed;
-	unsigned long next_jiffies = ULONG_MAX, now = jiffies;
-	struct device *dev;
-
-	/* Initially last_polled_at = 0, polling every device at bootup */
-	jiffies_passed = now - last_polled_at;
-	last_polled_at = now;
-	if (jiffies_passed == 0)
-		jiffies_passed = 1;
-
-	mutex_lock(&devfreq_list_lock);
-	list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
-		mutex_lock(&devfreq->lock);
-		dev = devfreq->dev.parent;
-
-		/* Do not remove tmp for a while */
-		wait_remove_device = tmp;
-
-		if (devfreq->governor->no_central_polling ||
-		    devfreq->next_polling == 0) {
-			mutex_unlock(&devfreq->lock);
-			continue;
-		}
-		mutex_unlock(&devfreq_list_lock);
-
-		/*
-		 * Reduce more next_polling if devfreq_wq took an extra
-		 * delay. (i.e., CPU has been idled.)
-		 */
-		if (devfreq->next_polling <= jiffies_passed) {
-			error = update_devfreq(devfreq);
-
-			/* Remove a devfreq with an error. */
-			if (error && error != -EAGAIN) {
-
-				dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
-					error, devfreq->governor->name);
-
-				/*
-				 * Unlock devfreq before locking the list
-				 * in order to avoid deadlock with
-				 * find_device_devfreq or others
-				 */
-				mutex_unlock(&devfreq->lock);
-				mutex_lock(&devfreq_list_lock);
-				/* Check if devfreq is already removed */
-				if (IS_ERR(find_device_devfreq(dev)))
-					continue;
-				mutex_lock(&devfreq->lock);
-				/* This unlocks devfreq->lock and free it */
-				_remove_devfreq(devfreq, false);
-				continue;
-			}
-			devfreq->next_polling = devfreq->polling_jiffies;
-		} else {
-			devfreq->next_polling -= jiffies_passed;
-		}
-
-		if (devfreq->next_polling)
-			next_jiffies = (next_jiffies > devfreq->next_polling) ?
-					devfreq->next_polling : next_jiffies;
-
-		mutex_unlock(&devfreq->lock);
-		mutex_lock(&devfreq_list_lock);
-	}
-	wait_remove_device = NULL;
-	mutex_unlock(&devfreq_list_lock);
-
-	if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
-		polling = true;
-		queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
-	} else {
-		polling = false;
-	}
 }
 
 /**
  * devfreq_add_device() - Add devfreq feature to the device
  * @dev:	the device to add devfreq feature.
  * @profile:	device-specific profile to run devfreq.
- * @governor:	the policy to choose frequency.
+ * @governor_name:	name of the policy to choose frequency.
  * @data:	private data for the governor. The devfreq framework does not
  *		touch this value.
  */
 struct devfreq *devfreq_add_device(struct device *dev,
 				   struct devfreq_dev_profile *profile,
-				   const struct devfreq_governor *governor,
+				   const char *governor_name,
 				   void *data)
 {
 	struct devfreq *devfreq;
+	struct devfreq_governor *governor;
 	int err = 0;
 
-	if (!dev || !profile || !governor) {
+	if (!dev || !profile || !governor_name) {
 		dev_err(dev, "%s: Invalid parameters.\n", __func__);
 		return ERR_PTR(-EINVAL);
 	}
 
-
-	if (!governor->no_central_polling) {
-		mutex_lock(&devfreq_list_lock);
-		devfreq = find_device_devfreq(dev);
-		mutex_unlock(&devfreq_list_lock);
-		if (!IS_ERR(devfreq)) {
-			dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
-			err = -EINVAL;
-			goto err_out;
-		}
+	mutex_lock(&devfreq_list_lock);
+	devfreq = find_device_devfreq(dev);
+	mutex_unlock(&devfreq_list_lock);
+	if (!IS_ERR(devfreq)) {
+		dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
+		err = -EINVAL;
+		goto err_out;
 	}
 
 	devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
@@ -383,92 +463,316 @@ struct devfreq *devfreq_add_device(struct device *dev,
 	devfreq->dev.class = devfreq_class;
 	devfreq->dev.release = devfreq_dev_release;
 	devfreq->profile = profile;
-	devfreq->governor = governor;
+	strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 	devfreq->previous_freq = profile->initial_freq;
 	devfreq->data = data;
-	devfreq->next_polling = devfreq->polling_jiffies
-			      = msecs_to_jiffies(devfreq->profile->polling_ms);
 	devfreq->nb.notifier_call = devfreq_notifier_call;
 
+	devfreq->trans_table =	devm_kzalloc(dev, sizeof(unsigned int) *
+						devfreq->profile->max_state *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+						devfreq->profile->max_state,
+						GFP_KERNEL);
+	devfreq->last_stat_updated = jiffies;
+
 	dev_set_name(&devfreq->dev, dev_name(dev));
 	err = device_register(&devfreq->dev);
 	if (err) {
 		put_device(&devfreq->dev);
+		mutex_unlock(&devfreq->lock);
 		goto err_dev;
 	}
 
-	if (governor->init)
-		err = governor->init(devfreq);
-	if (err)
-		goto err_init;
-
 	mutex_unlock(&devfreq->lock);
 
-	if (governor->no_central_polling)
-		goto out;
-
 	mutex_lock(&devfreq_list_lock);
-
 	list_add(&devfreq->node, &devfreq_list);
 
-	if (devfreq_wq && devfreq->next_polling && !polling) {
-		polling = true;
-		queue_delayed_work(devfreq_wq, &devfreq_work,
-				   devfreq->next_polling);
-	}
+	governor = find_devfreq_governor(devfreq->governor_name);
+	if (!IS_ERR(governor))
+		devfreq->governor = governor;
+	if (devfreq->governor)
+		err = devfreq->governor->event_handler(devfreq,
+					DEVFREQ_GOV_START, NULL);
 	mutex_unlock(&devfreq_list_lock);
-out:
+	if (err) {
+		dev_err(dev, "%s: Unable to start governor for the device\n",
+			__func__);
+		goto err_init;
+	}
+
 	return devfreq;
 
 err_init:
+	list_del(&devfreq->node);
 	device_unregister(&devfreq->dev);
 err_dev:
-	mutex_unlock(&devfreq->lock);
 	kfree(devfreq);
 err_out:
 	return ERR_PTR(err);
 }
+EXPORT_SYMBOL(devfreq_add_device);
 
 /**
  * devfreq_remove_device() - Remove devfreq feature from a device.
- * @devfreq	the devfreq instance to be removed
+ * @devfreq:	the devfreq instance to be removed
  */
 int devfreq_remove_device(struct devfreq *devfreq)
 {
-	bool central_polling;
+	if (!devfreq)
+		return -EINVAL;
+
+	_remove_devfreq(devfreq, false);
+	return 0;
+}
+EXPORT_SYMBOL(devfreq_remove_device);
+
+/**
+ * devfreq_suspend_device() - Suspend devfreq of a device.
+ * @devfreq: the devfreq instance to be suspended
+ */
+int devfreq_suspend_device(struct devfreq *devfreq)
+{
 	if (!devfreq)
 		return -EINVAL;
 
-	central_polling = !devfreq->governor->no_central_polling;
+	if (!devfreq->governor)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+				DEVFREQ_GOV_SUSPEND, NULL);
+}
+EXPORT_SYMBOL(devfreq_suspend_device);
+
+/**
+ * devfreq_resume_device() - Resume devfreq of a device.
+ * @devfreq: the devfreq instance to be resumed
+ */
+int devfreq_resume_device(struct devfreq *devfreq)
+{
+	if (!devfreq)
+		return -EINVAL;
+
+	if (!devfreq->governor)
+		return 0;
+
+	return devfreq->governor->event_handler(devfreq,
+				DEVFREQ_GOV_RESUME, NULL);
+}
+EXPORT_SYMBOL(devfreq_resume_device);
+
+/**
+ * devfreq_add_governor() - Add devfreq governor
+ * @governor:	the devfreq governor to be added
+ */
+int devfreq_add_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (!IS_ERR(g)) {
+		pr_err("%s: governor %s already registered\n", __func__,
+		       g->name);
+		err = -EINVAL;
+		goto err_out;
+	}
 
-	if (central_polling) {
-		mutex_lock(&devfreq_list_lock);
-		while (wait_remove_device == devfreq) {
-			mutex_unlock(&devfreq_list_lock);
-			schedule();
-			mutex_lock(&devfreq_list_lock);
+	list_add(&governor->node, &devfreq_governor_list);
+
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret = 0;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* The following should never occur */
+			if (devfreq->governor) {
+				dev_warn(dev,
+					 "%s: Governor %s already present\n",
+					 __func__, devfreq->governor->name);
+				ret = devfreq->governor->event_handler(devfreq,
+							DEVFREQ_GOV_STOP, NULL);
+				if (ret) {
+					dev_warn(dev,
+						 "%s: Governor %s stop = %d\n",
+						 __func__,
+						 devfreq->governor->name, ret);
+				}
+				/* Fall through */
+			}
+			devfreq->governor = governor;
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_START, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s start=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
 		}
 	}
 
-	mutex_lock(&devfreq->lock);
-	_remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
+err_out:
+	mutex_unlock(&devfreq_list_lock);
 
-	if (central_polling)
-		mutex_unlock(&devfreq_list_lock);
+	return err;
+}
+EXPORT_SYMBOL(devfreq_add_governor);
 
-	return 0;
+/**
+ * devfreq_remove_device() - Remove devfreq feature from a device.
+ * @governor:	the devfreq governor to be removed
+ */
+int devfreq_remove_governor(struct devfreq_governor *governor)
+{
+	struct devfreq_governor *g;
+	struct devfreq *devfreq;
+	int err = 0;
+
+	if (!governor) {
+		pr_err("%s: Invalid parameters.\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devfreq_list_lock);
+	g = find_devfreq_governor(governor->name);
+	if (IS_ERR(g)) {
+		pr_err("%s: governor %s not registered\n", __func__,
+		       governor->name);
+		err = PTR_ERR(g);
+		goto err_out;
+	}
+	list_for_each_entry(devfreq, &devfreq_list, node) {
+		int ret;
+		struct device *dev = devfreq->dev.parent;
+
+		if (!strncmp(devfreq->governor_name, governor->name,
+			     DEVFREQ_NAME_LEN)) {
+			/* we should have a devfreq governor! */
+			if (!devfreq->governor) {
+				dev_warn(dev, "%s: Governor %s NOT present\n",
+					 __func__, governor->name);
+				continue;
+				/* Fall through */
+			}
+			ret = devfreq->governor->event_handler(devfreq,
+						DEVFREQ_GOV_STOP, NULL);
+			if (ret) {
+				dev_warn(dev, "%s: Governor %s stop=%d\n",
+					 __func__, devfreq->governor->name,
+					 ret);
+			}
+			devfreq->governor = NULL;
+		}
+	}
+
+	list_del(&governor->node);
+err_out:
+	mutex_unlock(&devfreq_list_lock);
+
+	return err;
 }
+EXPORT_SYMBOL(devfreq_remove_governor);
 
 static ssize_t show_governor(struct device *dev,
 			     struct device_attribute *attr, char *buf)
 {
+	if (!to_devfreq(dev)->governor)
+		return -EINVAL;
+
 	return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
 }
 
+static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct devfreq *df = to_devfreq(dev);
+	int ret;
+	char str_governor[DEVFREQ_NAME_LEN + 1];
+	struct devfreq_governor *governor;
+
+	ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
+	if (ret != 1)
+		return -EINVAL;
+
+	mutex_lock(&devfreq_list_lock);
+	governor = find_devfreq_governor(str_governor);
+	if (IS_ERR(governor)) {
+		ret = PTR_ERR(governor);
+		goto out;
+	}
+	if (df->governor == governor)
+		goto out;
+
+	if (df->governor) {
+		ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+		if (ret) {
+			dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
+				 __func__, df->governor->name, ret);
+			goto out;
+		}
+	}
+	df->governor = governor;
+	strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
+	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+	if (ret)
+		dev_warn(dev, "%s: Governor %s not started(%d)\n",
+			 __func__, df->governor->name, ret);
+out:
+	mutex_unlock(&devfreq_list_lock);
+
+	if (!ret)
+		ret = count;
+	return ret;
+}
+static ssize_t show_available_governors(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct devfreq_governor *tmp_governor;
+	ssize_t count = 0;
+
+	mutex_lock(&devfreq_list_lock);
+	list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
+		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+				   "%s ", tmp_governor->name);
+	mutex_unlock(&devfreq_list_lock);
+
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+
 static ssize_t show_freq(struct device *dev,
 			 struct device_attribute *attr, char *buf)
 {
+	unsigned long freq;
+	struct devfreq *devfreq = to_devfreq(dev);
+
+	if (devfreq->profile->get_cur_freq &&
+		!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
+			return sprintf(buf, "%lu\n", freq);
+
+	return sprintf(buf, "%lu\n", devfreq->previous_freq);
+}
+
+static ssize_t show_target_freq(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
 	return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
 }
 
@@ -486,39 +790,19 @@ static ssize_t store_polling_interval(struct device *dev,
 	unsigned int value;
 	int ret;
 
+	if (!df->governor)
+		return -EINVAL;
+
 	ret = sscanf(buf, "%u", &value);
 	if (ret != 1)
-		goto out;
-
-	mutex_lock(&df->lock);
-	df->profile->polling_ms = value;
-	df->next_polling = df->polling_jiffies
-			 = msecs_to_jiffies(value);
-	mutex_unlock(&df->lock);
+		return -EINVAL;
 
+	df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
 	ret = count;
 
-	if (df->governor->no_central_polling)
-		goto out;
-
-	mutex_lock(&devfreq_list_lock);
-	if (df->next_polling > 0 && !polling) {
-		polling = true;
-		queue_delayed_work(devfreq_wq, &devfreq_work,
-				   df->next_polling);
-	}
-	mutex_unlock(&devfreq_list_lock);
-out:
 	return ret;
 }
 
-static ssize_t show_central_polling(struct device *dev,
-				    struct device_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%d\n",
-		       !to_devfreq(dev)->governor->no_central_polling);
-}
-
 static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
 			      const char *buf, size_t count)
 {
@@ -529,7 +813,7 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
 
 	ret = sscanf(buf, "%lu", &value);
 	if (ret != 1)
-		goto out;
+		return -EINVAL;
 
 	mutex_lock(&df->lock);
 	max = df->max_freq;
@@ -543,7 +827,6 @@ static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
 	ret = count;
 unlock:
 	mutex_unlock(&df->lock);
-out:
 	return ret;
 }
 
@@ -563,7 +846,7 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
 
 	ret = sscanf(buf, "%lu", &value);
 	if (ret != 1)
-		goto out;
+		return -EINVAL;
 
 	mutex_lock(&df->lock);
 	min = df->min_freq;
@@ -577,7 +860,6 @@ static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
 	ret = count;
 unlock:
 	mutex_unlock(&df->lock);
-out:
 	return ret;
 }
 
@@ -587,34 +869,92 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
 }
 
+static ssize_t show_available_freqs(struct device *d,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct devfreq *df = to_devfreq(d);
+	struct device *dev = df->dev.parent;
+	struct opp *opp;
+	ssize_t count = 0;
+	unsigned long freq = 0;
+
+	rcu_read_lock();
+	do {
+		opp = opp_find_freq_ceil(dev, &freq);
+		if (IS_ERR(opp))
+			break;
+
+		count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
+				   "%lu ", freq);
+		freq++;
+	} while (1);
+	rcu_read_unlock();
+
+	/* Truncate the trailing space */
+	if (count)
+		count--;
+
+	count += sprintf(&buf[count], "\n");
+
+	return count;
+}
+
+static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
+				char *buf)
+{
+	struct devfreq *devfreq = to_devfreq(dev);
+	ssize_t len;
+	int i, j, err;
+	unsigned int max_state = devfreq->profile->max_state;
+
+	err = devfreq_update_status(devfreq, devfreq->previous_freq);
+	if (err)
+		return 0;
+
+	len = sprintf(buf, "   From  :   To\n");
+	len += sprintf(buf + len, "         :");
+	for (i = 0; i < max_state; i++)
+		len += sprintf(buf + len, "%8u",
+				devfreq->profile->freq_table[i]);
+
+	len += sprintf(buf + len, "   time(ms)\n");
+
+	for (i = 0; i < max_state; i++) {
+		if (devfreq->profile->freq_table[i]
+					== devfreq->previous_freq) {
+			len += sprintf(buf + len, "*");
+		} else {
+			len += sprintf(buf + len, " ");
+		}
+		len += sprintf(buf + len, "%8u:",
+				devfreq->profile->freq_table[i]);
+		for (j = 0; j < max_state; j++)
+			len += sprintf(buf + len, "%8u",
+				devfreq->trans_table[(i * max_state) + j]);
+		len += sprintf(buf + len, "%10u\n",
+			jiffies_to_msecs(devfreq->time_in_state[i]));
+	}
+
+	len += sprintf(buf + len, "Total transition : %u\n",
+					devfreq->total_trans);
+	return len;
+}
+
 static struct device_attribute devfreq_attrs[] = {
-	__ATTR(governor, S_IRUGO, show_governor, NULL),
+	__ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
+	__ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
 	__ATTR(cur_freq, S_IRUGO, show_freq, NULL),
-	__ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
+	__ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
+	__ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
 	__ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
 	       store_polling_interval),
 	__ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
 	__ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
+	__ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
 	{ },
 };
 
-/**
- * devfreq_start_polling() - Initialize data structure for devfreq framework and
- *			   start polling registered devfreq devices.
- */
-static int __init devfreq_start_polling(void)
-{
-	mutex_lock(&devfreq_list_lock);
-	polling = false;
-	devfreq_wq = create_freezable_workqueue("devfreq_wq");
-	INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
-	mutex_unlock(&devfreq_list_lock);
-
-	devfreq_monitor(&devfreq_work.work);
-	return 0;
-}
-late_initcall(devfreq_start_polling);
-
 static int __init devfreq_init(void)
 {
 	devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -622,7 +962,15 @@ static int __init devfreq_init(void)
 		pr_err("%s: couldn't create class\n", __FILE__);
 		return PTR_ERR(devfreq_class);
 	}
+
+	devfreq_wq = create_freezable_workqueue("devfreq_wq");
+	if (IS_ERR(devfreq_wq)) {
+		class_destroy(devfreq_class);
+		pr_err("%s: couldn't create workqueue\n", __FILE__);
+		return PTR_ERR(devfreq_wq);
+	}
 	devfreq_class->dev_attrs = devfreq_attrs;
+
 	return 0;
 }
 subsys_initcall(devfreq_init);
@@ -630,6 +978,7 @@ subsys_initcall(devfreq_init);
 
 static void __exit devfreq_exit(void)
 {
 	class_destroy(devfreq_class);
+	destroy_workqueue(devfreq_wq);
 }
 module_exit(devfreq_exit);
@@ -641,9 +990,9 @@ module_exit(devfreq_exit);
 /**
  * devfreq_recommended_opp() - Helper function to get proper OPP for the
  *			     freq value given to target callback.
- * @dev		The devfreq user device. (parent of devfreq)
- * @freq	The frequency given to target function
- * @flags	Flags handed from devfreq framework.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @freq:	The frequency given to target function
+ * @flags:	Flags handed from devfreq framework.
 *
 */
 struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
@@ -656,14 +1005,14 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
 		opp = opp_find_freq_floor(dev, freq);
 
 		/* If not available, use the closest opp */
-		if (opp == ERR_PTR(-ENODEV))
+		if (opp == ERR_PTR(-ERANGE))
 			opp = opp_find_freq_ceil(dev, freq);
 	} else {
 		/* The freq is an lower bound. opp should be higher */
 		opp = opp_find_freq_ceil(dev, freq);
 
 		/* If not available, use the closest opp */
-		if (opp == ERR_PTR(-ENODEV))
+		if (opp == ERR_PTR(-ERANGE))
 			opp = opp_find_freq_floor(dev, freq);
 	}
 
@@ -674,35 +1023,49 @@ struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
  * devfreq_register_opp_notifier() - Helper function to get devfreq notified
  *				   for any changes in the OPP availability
  *				   changes
- * @dev		The devfreq user device. (parent of devfreq)
- * @devfreq	The devfreq object.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
 */
 int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
 {
-	struct srcu_notifier_head *nh = opp_get_notifier(dev);
+	struct srcu_notifier_head *nh;
+	int ret = 0;
 
+	rcu_read_lock();
+	nh = opp_get_notifier(dev);
 	if (IS_ERR(nh))
-		return PTR_ERR(nh);
-	return srcu_notifier_chain_register(nh, &devfreq->nb);
+		ret = PTR_ERR(nh);
+	rcu_read_unlock();
+	if (!ret)
+		ret = srcu_notifier_chain_register(nh, &devfreq->nb);
+
+	return ret;
 }
 
 /**
  * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
  *				     notified for any changes in the OPP
  *				     availability changes anymore.
- * @dev		The devfreq user device. (parent of devfreq)
- * @devfreq	The devfreq object.
+ * @dev:	The devfreq user device. (parent of devfreq)
+ * @devfreq:	The devfreq object.
 *
 * At exit() callback of devfreq_dev_profile, this must be included if
 * devfreq_recommended_opp is used.
 */
 int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
 {
-	struct srcu_notifier_head *nh = opp_get_notifier(dev);
+	struct srcu_notifier_head *nh;
+	int ret = 0;
 
+	rcu_read_lock();
+	nh = opp_get_notifier(dev);
 	if (IS_ERR(nh))
-		return PTR_ERR(nh);
-	return srcu_notifier_chain_unregister(nh, &devfreq->nb);
+		ret = PTR_ERR(nh);
+	rcu_read_unlock();
+	if (!ret)
+		ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
+
+	return ret;
 }
 
 MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
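For context, the rework above changes the driver-facing API: devfreq_add_device() now takes a governor *name* string, and the core matches it against governors registered on devfreq_governor_list. The following driver-side sketch is not part of the diff; the my_* names, the 200 MHz initial frequency, and the choice of the "simple_ondemand" governor are illustrative assumptions only.

#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* program the hardware clock/voltage for *freq here (assumed) */
	return 0;
}

static int my_get_dev_status(struct device *dev,
			     struct devfreq_dev_status *stat)
{
	/* fill stat->busy_time, stat->total_time, stat->current_frequency */
	return 0;
}

static struct devfreq_dev_profile my_profile = {
	.initial_freq	= 200000000,	/* assumed 200 MHz */
	.polling_ms	= 100,		/* sampled by the core's monitor helper */
	.target		= my_target,
	.get_dev_status	= my_get_dev_status,
};

static int my_probe_devfreq(struct device *dev)
{
	struct devfreq *df;

	/* governor is now selected by name instead of a struct pointer */
	df = devfreq_add_device(dev, &my_profile, "simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	return 0;
}

If no governor with that name has been registered yet, devfreq->governor stays NULL and the device simply waits until devfreq_add_governor() later attaches one, as the list walk in devfreq_add_governor() above shows.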
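On the governor side, the new devfreq_add_governor()/devfreq_remove_governor() pair and the event_handler() callback replace the old init/exit hooks and central polling. A minimal governor sketch is shown below; it is not from the patch, the name "demo" and its trivial keep-current-frequency policy are assumptions, and it presumes being built inside drivers/devfreq/ so the private "governor.h" (which declares the DEVFREQ_GOV_* events and the devfreq_monitor_* helpers) is reachable.

#include <linux/devfreq.h>
#include <linux/module.h>
#include "governor.h"	/* DEVFREQ_GOV_* events, devfreq_monitor_* helpers */

static int demo_get_target_freq(struct devfreq *df, unsigned long *freq)
{
	/* trivial policy for illustration: stay at the current frequency */
	*freq = df->previous_freq;
	return 0;
}

static int demo_event_handler(struct devfreq *devfreq,
			      unsigned int event, void *data)
{
	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);	/* begin periodic polling */
		break;
	case DEVFREQ_GOV_STOP:
		devfreq_monitor_stop(devfreq);
		break;
	case DEVFREQ_GOV_INTERVAL:
		devfreq_interval_update(devfreq, (unsigned int *)data);
		break;
	case DEVFREQ_GOV_SUSPEND:
		devfreq_monitor_suspend(devfreq);
		break;
	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		break;
	}
	return 0;
}

static struct devfreq_governor demo_governor = {
	.name		 = "demo",
	.get_target_freq = demo_get_target_freq,
	.event_handler	 = demo_event_handler,
};

static int __init demo_governor_init(void)
{
	return devfreq_add_governor(&demo_governor);
}
module_init(demo_governor_init);

static void __exit demo_governor_exit(void)
{
	int ret = devfreq_remove_governor(&demo_governor);

	if (ret)
		pr_err("%s: failed to remove governor (%d)\n", __func__, ret);
}
module_exit(demo_governor_exit);
MODULE_LICENSE("GPL");

Using the devfreq_monitor_* helpers is optional: a governor may implement its own monitoring (for example, interrupt driven) and still reacts to the same DEVFREQ_GOV_* events from the core.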