Commit 8e677ce8 authored by Alexander Clouter, committed by Dave Jones

[CPUFREQ] conservative: fixup governor to function more like ondemand logic



As conservative is based on ondemand, the two codebases occasionally need to be
resynced.  This patch, although ugly, does this.

Signed-off-by: Alexander Clouter <alex@digriz.org.uk>
Signed-off-by: Dave Jones <davej@redhat.com>
parent f407a08b
drivers/cpufreq/cpufreq_conservative.c: +188 −140
@@ -13,22 +13,17 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ctype.h>
 #include <linux/cpufreq.h>
-#include <linux/sysctl.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/sysfs.h>
 #include <linux/cpu.h>
-#include <linux/kmod.h>
-#include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
-#include <linux/percpu.h>
 #include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -43,8 +38,8 @@
  * latency of the processor. The governor will work on any processor with
  * transition latency <= 10mS, using appropriate sampling
  * rate.
- * For CPUs with transition latency > 10mS (mostly drivers
- * with CPUFREQ_ETERNAL), this governor will not work.
+ * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work.
  * All times here are in uS.
  */
 static unsigned int def_sampling_rate;
@@ -75,12 +70,15 @@ static unsigned int minimum_sampling_rate(void)
 static void do_dbs_timer(struct work_struct *work);
 
 struct cpu_dbs_info_s {
+	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_wall;
+	cputime64_t prev_cpu_nice;
 	struct cpufreq_policy *cur_policy;
-	unsigned int prev_cpu_idle_up;
-	unsigned int prev_cpu_idle_down;
-	unsigned int enable;
+	struct delayed_work work;
 	unsigned int down_skip;
 	unsigned int requested_freq;
+	int cpu;
+	unsigned int enable:1;
};
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
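The per-CPU state above replaces the old prev_cpu_idle_up/prev_cpu_idle_down tick counters with absolute (idle, wall) snapshots, matching ondemand; load over a sampling window is then derived from the delta between two snapshots. A minimal user-space sketch of that arithmetic, with purely illustrative numbers rather than kernel API:

#include <stdio.h>

struct snapshot { unsigned long long idle, wall; };

/* load across the window bounded by two snapshots, in percent */
static unsigned int load_between(struct snapshot prev, struct snapshot cur)
{
	unsigned long long wall = cur.wall - prev.wall;
	unsigned long long idle = cur.idle - prev.idle;

	if (wall == 0 || wall < idle)	/* guard against bogus deltas */
		return 0;
	return (unsigned int)(100 * (wall - idle) / wall);
}

int main(void)
{
	struct snapshot prev = { .idle = 700, .wall = 1000 };
	struct snapshot cur  = { .idle = 760, .wall = 1100 };

	/* 100 wall ticks elapsed, 60 of them idle -> 40% load */
	printf("load = %u%%\n", load_between(prev, cur));
	return 0;
}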


@@ -95,18 +93,17 @@ static unsigned int dbs_enable;	/* number of CPUs using this policy */
  * is recursive for the same process. -Venki
  */
 static DEFINE_MUTEX(dbs_mutex);
-static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
 
-struct dbs_tuners {
+static struct workqueue_struct	*kconservative_wq;
+
+static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
 	unsigned int down_threshold;
 	unsigned int ignore_nice;
 	unsigned int freq_step;
-};
-
-static struct dbs_tuners dbs_tuners_ins = {
+} dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
 	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
@@ -114,18 +111,37 @@ static struct dbs_tuners dbs_tuners_ins = {
 	.freq_step = 5,
 };
 
-static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+							cputime64_t *wall)
 {
-	unsigned int add_nice = 0, ret;
+	cputime64_t idle_time;
+	cputime64_t cur_wall_time;
+	cputime64_t busy_time;
 
-	if (dbs_tuners_ins.ignore_nice)
-		add_nice = kstat_cpu(cpu).cpustat.nice;
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+			kstat_cpu(cpu).cpustat.system);
 
-	ret = kstat_cpu(cpu).cpustat.idle +
-		kstat_cpu(cpu).cpustat.iowait +
-		add_nice;
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
 
-	return ret;
+	idle_time = cputime64_sub(cur_wall_time, busy_time);
+	if (wall)
+		*wall = cur_wall_time;
+
+	return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+	u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+	if (idle_time == -1ULL)
+		return get_cpu_idle_time_jiffy(cpu, wall);
+
+	return idle_time;
 }
 
 /* keep track of frequency transitions */
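The rewritten get_cpu_idle_time() prefers the tickless (NO_HZ) idle counter and falls back to the jiffy-based estimate only when get_cpu_idle_time_us() returns -1ULL; the fallback counts everything that is not user/system/irq/softirq/steal/nice time as idle. A user-space analogue of that bookkeeping, reading /proc/stat (field order per proc(5); a sketch, not the kernel routine):

#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, sys, idle, iowait, irq, softirq, steal;
	FILE *f = fopen("/proc/stat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu %llu %llu %llu",
		   &user, &nice, &sys, &idle, &iowait, &irq,
		   &softirq, &steal) != 8) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* wall = all fields; busy = everything except idle and iowait */
	unsigned long long wall = user + nice + sys + idle + iowait +
				  irq + softirq + steal;
	unsigned long long busy = user + nice + sys + irq + softirq + steal;

	printf("idle ~= %llu of %llu jiffies\n", wall - busy, wall);
	return 0;
}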
@@ -213,6 +229,7 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
+
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
@@ -230,11 +247,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
-	if (ret != 1) {
-		mutex_unlock(&dbs_mutex);
+	if (ret != 1)
 		return -EINVAL;
-	}
 
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_rate = max(input, minimum_sampling_rate());
 	mutex_unlock(&dbs_mutex);
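The reordering above keeps the failure path lock-free: parse and validate the input first, then take dbs_mutex only to publish the accepted value. The same shape in a self-contained pthreads sketch (names here are illustrative, not the driver's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int sampling_rate;

static int store_rate(const char *buf)
{
	unsigned int input;

	if (sscanf(buf, "%u", &input) != 1)	/* reject before locking */
		return -1;

	pthread_mutex_lock(&lock);		/* lock only to publish */
	sampling_rate = input;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return store_rate("50000") ? 1 : 0;
}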


@@ -269,7 +285,9 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	ret = sscanf(buf, "%u", &input);
 
 	mutex_lock(&dbs_mutex);
-	if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
+	/* cannot be lower than 11 otherwise freq will not fall */
+	if (ret != 1 || input < 11 || input > 100 ||
+			input >= dbs_tuners_ins.up_threshold) {
 		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
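The new lower bound of 11 follows from the decrease test introduced later in this patch, load < dbs_tuners_ins.down_threshold - 10: at down_threshold == 10 the right-hand side is 0, which an unsigned load can never be below, and smaller values would wrap around in the unsigned subtraction. A quick check of the boundary:

#include <assert.h>

int main(void)
{
	unsigned int load = 0;
	unsigned int down_threshold = 11;	/* smallest accepted value */

	assert(load < down_threshold - 10);	/* 0 < 1: freq can fall */

	down_threshold = 10;			/* now rejected by sysfs */
	assert(!(load < down_threshold - 10));	/* 0 < 0 never holds */
	return 0;
}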
@@ -302,12 +320,14 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	}
 	dbs_tuners_ins.ignore_nice = input;
 
-	/* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
-		struct cpu_dbs_info_s *j_dbs_info;
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
-		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
-		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+		struct cpu_dbs_info_s *dbs_info;
+		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&dbs_info->prev_cpu_wall);
+		if (dbs_tuners_ins.ignore_nice)
+			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 	}
 	mutex_unlock(&dbs_mutex);
 
@@ -319,7 +339,6 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 {
 	unsigned int input;
 	int ret;
-
 	ret = sscanf(buf, "%u", &input);
 
 	if (ret != 1)
@@ -367,55 +386,78 @@ static struct attribute_group dbs_attr_group = {
 
 /************************** sysfs end ************************/
 
-static void dbs_check_cpu(int cpu)
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int tmp_idle_ticks, total_idle_ticks;
+	unsigned int load = 0;
 	unsigned int freq_target;
-	unsigned int freq_down_sampling_rate;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
-	struct cpufreq_policy *policy;
 
-	if (!this_dbs_info->enable)
-		return;
+	struct cpufreq_policy *policy;
+	unsigned int j;
 
 	policy = this_dbs_info->cur_policy;
 
 	/*
-	 * The default safe range is 20% to 80%
-	 * Every sampling_rate, we check
-	 *	- If current idle time is less than 20%, then we try to
-	 *	  increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check
-	 *	- If current idle time is more than 80%, then we try to
-	 *	  decrease frequency
+	 * Every sampling_rate, we check, if current idle time is less
+	 * than 20% (default), then we try to increase frequency
+	 * Every sampling_rate*sampling_down_factor, we check, if current
+	 * idle time is more than 80%, then we try to decrease frequency
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
-	 * 5% (default) of max_frequency
+	 * 5% (default) of maximum frequency
 	 */
 
-	/* Check for frequency increase */
-	idle_ticks = UINT_MAX;
+	/* Get Absolute Load */
+	for_each_cpu(j, policy->cpus) {
+		struct cpu_dbs_info_s *j_dbs_info;
+		cputime64_t cur_wall_time, cur_idle_time;
+		unsigned int idle_time, wall_time;
 
-	/* Check for frequency increase */
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	tmp_idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_up;
-	this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+
+		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+				j_dbs_info->prev_cpu_wall);
+		j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-	if (tmp_idle_ticks < idle_ticks)
-		idle_ticks = tmp_idle_ticks;
+		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+				j_dbs_info->prev_cpu_idle);
+		j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+		if (dbs_tuners_ins.ignore_nice) {
+			cputime64_t cur_nice;
+			unsigned long cur_nice_jiffies;
+
+			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
+					 j_dbs_info->prev_cpu_nice);
+			/*
+			 * Assumption: nice time between sampling periods will
+			 * be less than 2^32 jiffies for 32 bit sys
+			 */
+			cur_nice_jiffies = (unsigned long)
+					cputime64_to_jiffies64(cur_nice);
+
+			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+			idle_time += jiffies_to_usecs(cur_nice_jiffies);
+		}
+
+		if (unlikely(!wall_time || wall_time < idle_time))
+			continue;
+
+		load = 100 * (wall_time - idle_time) / wall_time;
+	}
+
+	/*
+	 * break out if we 'cannot' reduce the speed as the user might
+	 * want freq_step to be zero
+	 */
+	if (dbs_tuners_ins.freq_step == 0)
+		return;
 
-	if (idle_ticks < up_idle_ticks) {
+	/* Check for frequency increase */
+	if (load > dbs_tuners_ins.up_threshold) {
 		this_dbs_info->down_skip = 0;
-		this_dbs_info->prev_cpu_idle_down =
-			this_dbs_info->prev_cpu_idle_up;
 
 		/* if we are already at full speed then break out early */
 		if (this_dbs_info->requested_freq == policy->max)
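Note how the ignore_nice branch converts the nice-time delta to microseconds and folds it into idle_time before the division, so CPU time spent at positive nice no longer props the frequency up. Worked numbers for an illustrative 100-tick window:

#include <stdio.h>

int main(void)
{
	unsigned int wall_time = 100, idle_time = 60, nice_time = 20;

	/* default: niced time counts as busy */
	printf("load = %u%%\n", 100 * (wall_time - idle_time) / wall_time);

	/* ignore_nice: niced time is folded into idle first */
	idle_time += nice_time;
	printf("load = %u%%\n", 100 * (wall_time - idle_time) / wall_time);
	return 0;	/* prints 40% then 20% */
}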
@@ -436,49 +478,24 @@ static void dbs_check_cpu(int cpu)
 		return;
 	}
 
-	/* Check for frequency decrease */
-	this_dbs_info->down_skip++;
-	if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
-		return;
-
-	/* Check for frequency decrease */
-	total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
-	tmp_idle_ticks = total_idle_ticks -
-		this_dbs_info->prev_cpu_idle_down;
-	this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
-
-	if (tmp_idle_ticks < idle_ticks)
-		idle_ticks = tmp_idle_ticks;
-
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
-	this_dbs_info->down_skip = 0;
-
-	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
-		dbs_tuners_ins.sampling_down_factor;
-	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		usecs_to_jiffies(freq_down_sampling_rate);
-
-	if (idle_ticks > down_idle_ticks) {
-		/*
-		 * if we are already at the lowest speed then break out early
-		 * or if we 'cannot' reduce the speed as the user might want
-		 * freq_target to be zero
-		 */
-		if (this_dbs_info->requested_freq == policy->min
-				|| dbs_tuners_ins.freq_step == 0)
-			return;
-
+	/*
+	 * The optimal frequency is the frequency that is the lowest that
+	 * can support the current CPU usage without triggering the up
+	 * policy. To be safe, we focus 10 points under the threshold.
+	 */
+	if (load < (dbs_tuners_ins.down_threshold - 10)) {
 		freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
 
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_target == 0))
-			freq_target = 5;
-
 		this_dbs_info->requested_freq -= freq_target;
 		if (this_dbs_info->requested_freq < policy->min)
 			this_dbs_info->requested_freq = policy->min;
 
+		/*
+		 * if we cannot reduce the frequency anymore, break out early
+		 */
+		if (policy->cur == policy->min)
+			return;
+
 		__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
 				CPUFREQ_RELATION_H);
 		return;
@@ -487,27 +504,45 @@
 
 static void do_dbs_timer(struct work_struct *work)
 {
-	int i;
-	mutex_lock(&dbs_mutex);
-	for_each_online_cpu(i)
-		dbs_check_cpu(i);
-	schedule_delayed_work(&dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	mutex_unlock(&dbs_mutex);
+	struct cpu_dbs_info_s *dbs_info =
+		container_of(work, struct cpu_dbs_info_s, work.work);
+	unsigned int cpu = dbs_info->cpu;
+
+	/* We want all CPUs to do sampling nearly on same jiffy */
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+	delay -= jiffies % delay;
+
+	if (lock_policy_rwsem_write(cpu) < 0)
+		return;
+
+	if (!dbs_info->enable) {
+		unlock_policy_rwsem_write(cpu);
+		return;
+	}
+
+	dbs_check_cpu(dbs_info);
+
+	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+	unlock_policy_rwsem_write(cpu);
 }
 
-static inline void dbs_timer_init(void)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-	init_timer_deferrable(&dbs_work.timer);
-	schedule_delayed_work(&dbs_work,
-			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	return;
+	/* We want all CPUs to do sampling nearly on same jiffy */
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	delay -= jiffies % delay;
+
+	dbs_info->enable = 1;
+	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
+				delay);
 }
 
-static inline void dbs_timer_exit(void)
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-	cancel_delayed_work(&dbs_work);
-	return;
+	dbs_info->enable = 0;
+	cancel_delayed_work(&dbs_info->work);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
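The delay -= jiffies % delay trim in both do_dbs_timer() and dbs_timer_init() phase-aligns every CPU's deferrable work item to the same multiple of the sampling period, which is what lets all CPUs sample on nearly the same jiffy. Illustrative arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 1234;	/* pretend current tick count */
	int delay = 100;		/* sampling period in jiffies */

	delay -= jiffies % delay;	/* 100 - 34 = 66 */

	/* every CPU doing this lands on the shared 1300 boundary */
	printf("next sample at jiffy %lu\n", jiffies + delay);
	return 0;
}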
@@ -541,11 +576,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
-			j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
-			j_dbs_info->prev_cpu_idle_down
-				= j_dbs_info->prev_cpu_idle_up;
+			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+						&j_dbs_info->prev_cpu_wall);
+			if (dbs_tuners_ins.ignore_nice) {
+				j_dbs_info->prev_cpu_nice =
+						kstat_cpu(j).cpustat.nice;
+			}
 		}
-		this_dbs_info->enable = 1;
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
 
@@ -567,30 +604,30 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 
-			dbs_timer_init();
 			cpufreq_register_notifier(
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
+		dbs_timer_init(this_dbs_info);
 
 		mutex_unlock(&dbs_mutex);
+
 		break;
 
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&dbs_mutex);
-		this_dbs_info->enable = 0;
+		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
 
 		/*
 		 * Stop the timerschedule work, when this governor
 		 * is used for first time
 		 */
-		if (dbs_enable == 0) {
-			dbs_timer_exit();
+		if (dbs_enable == 0)
 			cpufreq_unregister_notifier(
 					&dbs_cpufreq_notifier_block,
 					CPUFREQ_TRANSITION_NOTIFIER);
-		}
 
 		mutex_unlock(&dbs_mutex);
 
@@ -607,6 +644,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+
 		break;
 	}
 	return 0;
@@ -624,15 +662,25 @@ struct cpufreq_governor cpufreq_gov_conservative = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	return cpufreq_register_governor(&cpufreq_gov_conservative);
+	int err;
+
+	kconservative_wq = create_workqueue("kconservative");
+	if (!kconservative_wq) {
+		printk(KERN_ERR "Creation of kconservative failed\n");
+		return -EFAULT;
+	}
+
+	err = cpufreq_register_governor(&cpufreq_gov_conservative);
+	if (err)
+		destroy_workqueue(kconservative_wq);
+
+	return err;
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
-	/* Make sure that the scheduled work is indeed not running */
-	flush_scheduled_work();
-
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
+	destroy_workqueue(kconservative_wq);
 }
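The init/exit pair follows the usual acquire-in-order, release-in-reverse unwind: the workqueue must exist before the governor can be selected, and a failed registration destroys it again so nothing leaks. A stripped-down user-space sketch of the same shape (all names here are illustrative):

#include <errno.h>
#include <stdlib.h>

static char *queue;		/* stand-in for kconservative_wq */

static int register_thing(void)	/* stand-in for governor registration */
{
	return 0;		/* pretend registration succeeds */
}

static int example_init(void)
{
	queue = malloc(64);	/* step 1: create the helper resource */
	if (!queue)
		return -ENOMEM;

	if (register_thing() < 0) {
		free(queue);	/* failure: undo step 1 */
		return -EINVAL;
	}
	return 0;
}

static void example_exit(void)
{
	free(queue);		/* release in reverse order */
}

int main(void)
{
	if (example_init())
		return 1;
	example_exit();
	return 0;
}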