Commit 8b0ec53c authored by Vincent Donnefort, committed by Ionela Voinescu
Browse files

sched_tp: Add util_est trace events


Signed-off-by: Vincent Donnefort <vincent.donnefort@arm.com>
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
parent 157af54e
......@@ -141,6 +141,65 @@ TRACE_EVENT(sched_overutilized,
__entry->overutilized, __entry->span)
);
/*
 * sched_util_est_se - util_est sample for a single sched_entity.
 *
 * Records the entity's util_est.enqueued and util_est.ewma estimates plus
 * its current util_avg, tagged with the CPU, the cfs_rq cgroup path and
 * (for task entities) comm/pid.
 *
 * Note: strscpy() replaces the deprecated strlcpy(), which was removed
 * from the kernel in v6.8; strscpy() also guarantees NUL-termination.
 */
TRACE_EVENT(sched_util_est_se,

	TP_PROTO(int cpu, char *path, char *comm, int pid,
		 const struct sched_avg *avg),

	TP_ARGS(cpu, path, comm, pid, avg),

	TP_STRUCT__entry(
		__field(	int,		cpu			)
		__array(	char,		path,	PATH_SIZE	)
		__array(	char,		comm,	TASK_COMM_LEN	)
		__field(	int,		pid			)
		__field(	unsigned int,	enqueued		)
		__field(	unsigned int,	ewma			)
		__field(	unsigned long,	util			)
	),

	TP_fast_assign(
		__entry->cpu		= cpu;
		strscpy(__entry->path, path, PATH_SIZE);
		strscpy(__entry->comm, comm, TASK_COMM_LEN);
		__entry->pid		= pid;
		__entry->enqueued	= avg->util_est.enqueued;
		__entry->ewma		= avg->util_est.ewma;
		__entry->util		= avg->util_avg;
	),

	TP_printk("cpu=%d path=%s comm=%s pid=%d enqueued=%u ewma=%u util=%lu",
		  __entry->cpu, __entry->path, __entry->comm, __entry->pid,
		  __entry->enqueued, __entry->ewma, __entry->util)
);
/*
 * sched_util_est_cfs - util_est sample for a whole cfs_rq.
 *
 * Records the runqueue's util_est.enqueued and util_est.ewma estimates plus
 * its current util_avg, tagged with the CPU and the cfs_rq cgroup path.
 *
 * Note: strscpy() replaces the deprecated strlcpy(), which was removed
 * from the kernel in v6.8; strscpy() also guarantees NUL-termination.
 */
TRACE_EVENT(sched_util_est_cfs,

	TP_PROTO(int cpu, char *path, const struct sched_avg *avg),

	TP_ARGS(cpu, path, avg),

	TP_STRUCT__entry(
		__field(	int,		cpu			)
		__array(	char,		path,	PATH_SIZE	)
		__field(	unsigned int,	enqueued		)
		__field(	unsigned int,	ewma			)
		__field(	unsigned long,	util			)
	),

	TP_fast_assign(
		__entry->cpu		= cpu;
		strscpy(__entry->path, path, PATH_SIZE);
		__entry->enqueued	= avg->util_est.enqueued;
		__entry->ewma		= avg->util_est.ewma;
		__entry->util		= avg->util_avg;
	),

	TP_printk("cpu=%d path=%s enqueued=%u ewma=%u util=%lu",
		  __entry->cpu, __entry->path, __entry->enqueued,
		  __entry->ewma, __entry->util)
);
#ifdef CONFIG_UCLAMP_TASK
struct rq;
......
......@@ -27,19 +27,47 @@ static inline struct cfs_rq *get_se_cfs_rq(struct sched_entity *se)
#endif
}
static void sched_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
static inline void _trace_cfs(struct cfs_rq *cfs_rq,
void (*trace_event)(int, char*,
const struct sched_avg*))
{
if (trace_sched_pelt_cfs_enabled()) {
const struct sched_avg *avg;
char path[PATH_SIZE];
int cpu;
const struct sched_avg *avg;
char path[PATH_SIZE];
int cpu;
avg = sched_trace_cfs_rq_avg(cfs_rq);
sched_trace_cfs_rq_path(cfs_rq, path, PATH_SIZE);
cpu = sched_trace_cfs_rq_cpu(cfs_rq);
avg = sched_trace_cfs_rq_avg(cfs_rq);
sched_trace_cfs_rq_path(cfs_rq, path, PATH_SIZE);
cpu = sched_trace_cfs_rq_cpu(cfs_rq);
trace_sched_pelt_cfs(cpu, path, avg);
}
trace_event(cpu, path, avg);
}
/*
 * Gather the identifying fields of a sched_entity (CPU, cfs_rq path and,
 * for plain tasks, comm/pid) and hand them to @trace_event along with the
 * entity's PELT averages.
 */
static inline void _trace_se(struct sched_entity *se,
			     void (*trace_event)(int, char*, char*, int,
						 const struct sched_avg*))
{
	void *gcfs_rq = get_group_cfs_rq(se);
	void *cfs_rq = get_se_cfs_rq(se);
	struct task_struct *task;
	char buf[PATH_SIZE];
	char *name = "(null)";
	pid_t tid = -1;
	int cpu;

	sched_trace_cfs_rq_path(gcfs_rq, buf, PATH_SIZE);
	cpu = sched_trace_cfs_rq_cpu(cfs_rq);

	/* Only non-group entities are embedded in a task_struct. */
	if (!gcfs_rq) {
		task = container_of(se, struct task_struct, se);
		name = task->comm;
		tid = task->pid;
	}

	trace_event(cpu, buf, name, tid, &se->avg);
}
static void sched_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
if (trace_sched_pelt_cfs_enabled())
_trace_cfs(cfs_rq, trace_sched_pelt_cfs);
if (trace_uclamp_util_cfs_enabled()) {
unsigned int cpu = sched_trace_cfs_rq_cpu(cfs_rq);
......@@ -91,22 +119,7 @@ static void sched_pelt_irq(void *data, struct rq *rq)
static void sched_pelt_se(void *data, struct sched_entity *se)
{
if (trace_sched_pelt_se_enabled()) {
void *gcfs_rq = get_group_cfs_rq(se);
void *cfs_rq = get_se_cfs_rq(se);
struct task_struct *p;
char path[PATH_SIZE];
char *comm;
pid_t pid;
int cpu;
sched_trace_cfs_rq_path(gcfs_rq, path, PATH_SIZE);
cpu = sched_trace_cfs_rq_cpu(cfs_rq);
p = gcfs_rq ? NULL : container_of(se, struct task_struct, se);
comm = p ? p->comm : "(null)";
pid = p ? p->pid : -1;
trace_sched_pelt_se(cpu, path, comm, pid, &se->avg);
_trace_se(se, trace_sched_pelt_se);
}
if (trace_uclamp_util_se_enabled()) {
......@@ -129,6 +142,18 @@ static void sched_overutilized(void *data, struct root_domain *rd, bool overutil
}
}
/* Tracepoint probe: emit a cfs_rq util_est sample when the event is enabled. */
static void sched_util_est_cfs(void *data, struct cfs_rq *cfs_rq)
{
	if (!trace_sched_util_est_cfs_enabled())
		return;

	_trace_cfs(cfs_rq, trace_sched_util_est_cfs);
}
/* Tracepoint probe: emit a sched_entity util_est sample when the event is enabled. */
static void sched_util_est_se(void *data, struct sched_entity *se)
{
	if (!trace_sched_util_est_se_enabled())
		return;

	_trace_se(se, trace_sched_util_est_se);
}
static int sched_tp_init(void)
{
register_trace_pelt_cfs_tp(sched_pelt_cfs, NULL);
......@@ -137,6 +162,8 @@ static int sched_tp_init(void)
register_trace_pelt_irq_tp(sched_pelt_irq, NULL);
register_trace_pelt_se_tp(sched_pelt_se, NULL);
register_trace_sched_overutilized_tp(sched_overutilized, NULL);
register_trace_sched_util_est_cfs_tp(sched_util_est_cfs, NULL);
register_trace_sched_util_est_se_tp(sched_util_est_se, NULL);
return 0;
}
......@@ -149,6 +176,8 @@ static void sched_tp_finish(void)
unregister_trace_pelt_irq_tp(sched_pelt_irq, NULL);
unregister_trace_pelt_se_tp(sched_pelt_se, NULL);
unregister_trace_sched_overutilized_tp(sched_overutilized, NULL);
unregister_trace_sched_util_est_cfs_tp(sched_util_est_cfs, NULL);
unregister_trace_sched_util_est_se_tp(sched_util_est_se, NULL);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment