Commit 06812058 authored by Morten Rasmussen, committed by Morten Rasmussen

arm: Frequency invariant scheduler load-tracking support



Implements an arch-specific function to provide the scheduler with a
frequency scaling correction factor for more accurate load-tracking. The
factor is:

	(current_freq(cpu) << SCHED_CAPACITY_SHIFT) / max_freq(cpu)

This implementation only provides frequency invariance. No
micro-architecture invariance yet.
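
As a rough illustration (editor's note, not part of the patch): with
SCHED_CAPACITY_SHIFT = 10 the factor lies in [0..SCHED_CAPACITY_SCALE],
i.e. [0..1024]. A CPU running at 600 MHz out of a 1200 MHz maximum reports

	(600000 << 10) / 1200000 = 512

so the scheduler weights load tracked at that frequency at half capacity.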

Cc: Russell King <linux@arm.linux.org.uk>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
parent 62a935b2

@@ -24,6 +24,13 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
+#define arch_scale_freq_capacity arm_arch_scale_freq_capacity
+struct sched_domain;
+extern
+unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
+DECLARE_PER_CPU(atomic_long_t, cpu_freq_capacity);
+
 #else
 
 static inline void init_cpu_topology(void) { }
@@ -672,12 +672,34 @@ static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
 static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
 static unsigned long global_l_p_j_ref;
 static unsigned long global_l_p_j_ref_freq;
 
+static DEFINE_PER_CPU(atomic_long_t, cpu_max_freq);
+DEFINE_PER_CPU(atomic_long_t, cpu_freq_capacity);
+
+/*
+ * Scheduler load-tracking scale-invariance
+ *
+ * Provides the scheduler with a scale-invariance correction factor that
+ * compensates for frequency scaling through arch_scale_freq_capacity()
+ * (implemented in topology.c).
+ */
+static inline
+void scale_freq_capacity(int cpu, unsigned long curr, unsigned long max)
+{
+        unsigned long capacity;
+
+        if (!max)
+                return;
+        capacity = (curr << SCHED_CAPACITY_SHIFT) / max;
+        atomic_long_set(&per_cpu(cpu_freq_capacity, cpu), capacity);
+}
+
 static int cpufreq_callback(struct notifier_block *nb,
                             unsigned long val, void *data)
 {
         struct cpufreq_freqs *freq = data;
         int cpu = freq->cpu;
+        unsigned long max = atomic_long_read(&per_cpu(cpu_max_freq, cpu));
 
         if (freq->flags & CPUFREQ_CONST_LOOPS)
                 return NOTIFY_OK;
@@ -702,6 +724,9 @@ static int cpufreq_callback(struct notifier_block *nb,
                         per_cpu(l_p_j_ref_freq, cpu),
                         freq->new);
         }
+
+        scale_freq_capacity(cpu, freq->new, max);
+
         return NOTIFY_OK;
 }
 
@@ -709,11 +734,38 @@ static struct notifier_block cpufreq_notifier = {
         .notifier_call = cpufreq_callback,
 };
 
+static int cpufreq_policy_callback(struct notifier_block *nb,
+                                   unsigned long val, void *data)
+{
+        struct cpufreq_policy *policy = data;
+        int i;
+
+        if (val != CPUFREQ_NOTIFY)
+                return NOTIFY_OK;
+
+        for_each_cpu(i, policy->cpus) {
+                scale_freq_capacity(i, policy->cur, policy->max);
+                atomic_long_set(&per_cpu(cpu_max_freq, i), policy->max);
+        }
+
+        return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_policy_notifier = {
+        .notifier_call = cpufreq_policy_callback,
+};
+
 static int __init register_cpufreq_notifier(void)
 {
-        return cpufreq_register_notifier(&cpufreq_notifier,
+        int ret;
+        ret = cpufreq_register_notifier(&cpufreq_notifier,
                                          CPUFREQ_TRANSITION_NOTIFIER);
+        if (ret)
+                return ret;
+
+        return cpufreq_register_notifier(&cpufreq_policy_notifier,
+                                         CPUFREQ_POLICY_NOTIFIER);
 }
 core_initcall(register_cpufreq_notifier);
 
 #endif
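
Worked example (editor's note, illustrative numbers, not from the patch):
assume a cpufreq policy with policy->max = 1200000 kHz. The CPUFREQ_NOTIFY
handler above stores cpu_max_freq = 1200000 for each CPU in the policy and,
with policy->cur also at 1200000 kHz, a capacity of 1024
(SCHED_CAPACITY_SCALE). A later transition to freq->new = 900000 kHz makes
cpufreq_callback() update the factor to

	(900000 << SCHED_CAPACITY_SHIFT) / 1200000 = 768

i.e. the CPU is reported at 3/4 of its maximum capacity until the next
frequency change. Until the first CPUFREQ_NOTIFY arrives, cpu_max_freq is 0,
scale_freq_capacity() bails out, and cpu_freq_capacity stays 0, which is why
arm_arch_scale_freq_capacity() below falls back to SCHED_CAPACITY_SCALE.
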
@@ -169,6 +169,23 @@ static void update_cpu_capacity(unsigned int cpu)
                 cpu, arch_scale_cpu_capacity(NULL, cpu));
 }
 
+/*
+ * Scheduler load-tracking scale-invariance
+ *
+ * Provides the scheduler with a scale-invariance correction factor that
+ * compensates for frequency scaling (arch_scale_freq_capacity()). The scaling
+ * factor is updated in smp.c
+ */
+unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+        unsigned long curr = atomic_long_read(&per_cpu(cpu_freq_capacity, cpu));
+
+        if (!curr)
+                return SCHED_CAPACITY_SCALE;
+
+        return curr;
+}
+
 #else
 static inline void parse_dt_topology(void) {}
 static inline void update_cpu_capacity(unsigned int cpuid) {}
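
Editor's note, not part of the patch: a minimal sketch of how a load-tracking
path could apply the factor returned here. The helper name freq_scaled_delta()
is made up for illustration; the real consumer is the scheduler's per-entity
load-tracking code, for which this patch only provides the arch hook.

	/*
	 * Illustrative only: scale a load-tracking time delta by the current
	 * frequency capacity, so time spent at a low frequency contributes
	 * proportionally less tracked load.
	 */
	static unsigned long freq_scaled_delta(unsigned long delta, int cpu)
	{
		/*
		 * arch_scale_freq_capacity resolves to
		 * arm_arch_scale_freq_capacity() via the #define in the
		 * topology.h hunk above and returns a value in
		 * [0..SCHED_CAPACITY_SCALE].
		 */
		return (delta * arch_scale_freq_capacity(NULL, cpu))
				>> SCHED_CAPACITY_SHIFT;
	}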