Commit 1a72d01b authored by Morten Rasmussen

arm64: Set SD_ASYM_CPUCAPACITY topology flag for asymmetric cpu capacity systems



In addition to adjusting the cpu capacities, asymmetric cpu capacity
systems also have to set the SD_ASYM_CPUCAPACITY topology flag to inform
the scheduler that it has to look harder when load balancing. This patch
sets the topology flag at the DIE level for such systems, based on the
cpu capacities provided through DT or sysfs.

cc: Catalin Marinas <catalin.marinas@arm.com>
cc: Will Deacon <will.deacon@arm.com>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
parent 20784cde
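
Not part of the patch: the snippet below is a stand-alone user-space model of the asymmetry rule applied here. A system counts as capacity-asymmetric as soon as any CPU's normalized capacity differs from the others, and only then is SD_ASYM_CPUCAPACITY worth reporting at the DIE level. The capacity values are made up for illustration.

/* Stand-alone sketch (not kernel code): models the "are all capacities
 * equal?" check that decides whether asym_cpucap should be set. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long cpu_scale[] = { 1024, 1024, 448, 448 }; /* illustrative big.LITTLE values */
#define NR_CPUS (sizeof(cpu_scale) / sizeof(cpu_scale[0]))

static bool detect_asym_cpucap(void)
{
	/* Asymmetric as soon as any CPU deviates from CPU0's capacity. */
	for (unsigned int cpu = 1; cpu < NR_CPUS; cpu++)
		if (cpu_scale[cpu] != cpu_scale[0])
			return true;
	return false;
}

int main(void)
{
	printf("asym_cpucap = %s\n", detect_asym_cpucap() ? "true" : "false");
	return 0;
}
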
@@ -21,12 +21,15 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/cpuset.h>
#include <asm/cputype.h>
#include <asm/topology.h>
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);
static bool asym_cpucap;
static bool update_flags;
unsigned long scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
@@ -38,6 +41,14 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
	per_cpu(cpu_scale, cpu) = capacity;
}

static void update_sched_flags(void)
{
	update_flags = true;
	rebuild_sched_domains();
	update_flags = false;
	pr_debug("cpu_capacity: Rebuilt sched_domain hierarchy.\n");
}
#ifdef CONFIG_PROC_SYSCTL
#include <asm/cpu.h>
#include <linux/string.h>
@@ -67,6 +78,7 @@ static ssize_t store_cpu_capacity(struct device *dev,
	if (count) {
		char *p = (char *) buf;
		bool asym = false;

		ret = kstrtoul(p, 0, &new_capacity);
		if (ret)
@@ -77,6 +89,16 @@ static ssize_t store_cpu_capacity(struct device *dev,
		mutex_lock(&cpu_scale_mutex);
		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
			set_capacity_scale(i, new_capacity);

		for_each_possible_cpu(i) {
			if (per_cpu(cpu_scale, i) != new_capacity)
				asym = true;
		}

		if (asym != asym_cpucap) {
			asym_cpucap = asym;
			update_sched_flags();
		}

		mutex_unlock(&cpu_scale_mutex);
	}
@@ -153,6 +175,7 @@ static void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;
	bool asym = false;

	if (!raw_capacity || cap_parsing_failed)
		return;
@@ -167,8 +190,13 @@ static void normalize_cpu_capacity(void)
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, arch_scale_cpu_capacity(NULL, cpu));
		if (capacity < capacity_scale)
			asym = true;
	}
	mutex_unlock(&cpu_scale_mutex);

	if (asym != asym_cpucap)
		asym_cpucap = asym;
}
#ifdef CONFIG_CPU_FREQ
@@ -182,6 +210,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
{
	struct cpufreq_policy *policy = data;
	int cpu;
	bool asym;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;
@@ -200,7 +229,10 @@ init_cpu_capacity_callback(struct notifier_block *nb,
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			asym = asym_cpucap;
			normalize_cpu_capacity();
			if (asym != asym_cpucap)
				update_sched_flags();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
@@ -431,6 +463,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
	return &cpu_topology[cpu].core_sibling;
}

static int cpu_cpu_flags(void)
{
	return asym_cpucap ? SD_ASYM_CPUCAPACITY : 0;
}
static void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -512,6 +549,19 @@ static void __init reset_cpu_topology(void)
	}
}
static struct sched_domain_topology_level arm64_topology[] = {
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, cpu_cpu_flags, SD_INIT_NAME(DIE) },
	{ NULL, }
};
int arch_update_cpu_topology(void)
{
	return update_flags ? 1 : 0;
}
void __init init_cpu_topology(void)
{
	reset_cpu_topology();
@@ -522,4 +572,6 @@ void __init init_cpu_topology(void)
	 */
	if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
	else
		set_sched_topology(arm64_topology);
}
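
A note on the update_flags handshake above: rebuild_sched_domains() only regenerates the scheduling domains when arch_update_cpu_topology() reports a change, which is why update_sched_flags() toggles update_flags around the call. The sysfs path can be exercised from user space; the helper below is only an illustration, and the /sys/devices/system/cpu/cpuN/cpu_capacity path is an assumption based on the store_cpu_capacity() handler touched by this patch, not something this commit defines.

/* Illustrative user-space helper (not part of the patch): write a new
 * capacity value for one CPU; the sysfs attribute path is assumed. */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64];
	FILE *f;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <cpu> <capacity 0..1024>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%s/cpu_capacity", argv[1]);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	/* The kernel side re-evaluates asymmetry and rebuilds the sched
	 * domains only if the asymmetric/symmetric state actually flips. */
	fprintf(f, "%s\n", argv[2]);
	return fclose(f) ? 1 : 0;
}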