Commit 0c42668b authored by Qais Yousef, committed by Ionela Voinescu

sched: add a module to convert tp into events



The module is always compiled as built-in, except for !CONFIG_SMP kernels,
where the targeted tracepoints don't exist or don't make sense.

It creates a set of sched events in tracefs that are required to run
Lisa tests.
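
The new events are: sched_pelt_cfs, sched_pelt_rt, sched_pelt_dl,
sched_pelt_irq, sched_pelt_se and sched_overutilized.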
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
parent c1158a98
kernel/sched/Makefile
@@ -26,6 +26,9 @@ obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle.o fair.o rt.o deadline.o
obj-y += wait.o wait_bit.o swait.o completion.o
obj-$(CONFIG_SMP) += sched_tp.o
CFLAGS_sched_tp.o := -I$(src)
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
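
The CFLAGS_sched_tp.o := -I$(src) line puts the source directory on the
include path, so the TRACE_INCLUDE_PATH . / #include "sched_events.h" pair
in the trace header below still resolves when building with a separate
output directory (O=).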
kernel/sched/sched_events.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
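/*
 * sched_events.h - trace event definitions for the bare scheduler
 * tracepoints (PELT signals and the overutilized flag), so that they
 * show up in tracefs with a proper format.
 */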
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
#if !defined(_SCHED_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
#define _SCHED_EVENTS_H
#define PATH_SIZE 64
#define SPAN_SIZE (NR_CPUS/4) /* hex digits needed to print a cpumask span */
#include <linux/tracepoint.h>
TRACE_EVENT(sched_pelt_cfs,
TP_PROTO(int cpu, char *path, const struct sched_avg *avg),
TP_ARGS(cpu, path, avg),
TP_STRUCT__entry(
__field( int, cpu )
__array( char, path, PATH_SIZE )
__field( unsigned long, load )
__field( unsigned long, rbl_load )
__field( unsigned long, util )
),
TP_fast_assign(
__entry->cpu = cpu;
strlcpy(__entry->path, path, PATH_SIZE);
__entry->load = avg->load_avg;
__entry->rbl_load = avg->runnable_load_avg;
__entry->util = avg->util_avg;
),
TP_printk("cpu=%d path=%s load=%lu rbl_load=%lu util=%lu",
__entry->cpu, __entry->path, __entry->load,
__entry->rbl_load, __entry->util)
);
DECLARE_EVENT_CLASS(sched_pelt_rq_template,
TP_PROTO(int cpu, const struct sched_avg *avg),
TP_ARGS(cpu, avg),
TP_STRUCT__entry(
__field( int, cpu )
__field( unsigned long, load )
__field( unsigned long, rbl_load )
__field( unsigned long, util )
),
TP_fast_assign(
__entry->cpu = cpu;
__entry->load = avg->load_avg;
__entry->rbl_load = avg->runnable_load_avg;
__entry->util = avg->util_avg;
),
TP_printk("cpu=%d load=%lu rbl_load=%lu util=%lu",
__entry->cpu, __entry->load,
__entry->rbl_load, __entry->util)
);
DEFINE_EVENT(sched_pelt_rq_template, sched_pelt_rt,
TP_PROTO(int cpu, const struct sched_avg *avg),
TP_ARGS(cpu, avg));
DEFINE_EVENT(sched_pelt_rq_template, sched_pelt_dl,
TP_PROTO(int cpu, const struct sched_avg *avg),
TP_ARGS(cpu, avg));
DEFINE_EVENT(sched_pelt_rq_template, sched_pelt_irq,
TP_PROTO(int cpu, const struct sched_avg *avg),
TP_ARGS(cpu, avg));
TRACE_EVENT(sched_pelt_se,
TP_PROTO(int cpu, char *path, char *comm, int pid, const struct sched_avg *avg),
TP_ARGS(cpu, path, comm, pid, avg),
TP_STRUCT__entry(
__field( int, cpu )
__array( char, path, PATH_SIZE )
__array( char, comm, TASK_COMM_LEN )
__field( int, pid )
__field( unsigned long, load )
__field( unsigned long, rbl_load )
__field( unsigned long, util )
),
TP_fast_assign(
__entry->cpu = cpu;
strlcpy(__entry->path, path, PATH_SIZE);
strlcpy(__entry->comm, comm, TASK_COMM_LEN);
__entry->pid = pid;
__entry->load = avg->load_avg;
__entry->rbl_load = avg->runnable_load_avg;
__entry->util = avg->util_avg;
),
TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu",
__entry->cpu, __entry->path, __entry->comm, __entry->pid,
__entry->load, __entry->rbl_load, __entry->util)
);
TRACE_EVENT(sched_overutilized,
TP_PROTO(int overutilized, char *span),
TP_ARGS(overutilized, span),
TP_STRUCT__entry(
__field( int, overutilized )
__array( char, span, SPAN_SIZE )
),
TP_fast_assign(
__entry->overutilized = overutilized;
strlcpy(__entry->span, span, SPAN_SIZE);
),
TP_printk("overutilized=%d span=0x%s",
__entry->overutilized, __entry->span)
);
#endif /* _SCHED_EVENTS_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE sched_events
#include <trace/define_trace.h>
kernel/sched/sched_tp.c (new file)
/* SPDX-License-Identifier: GPL-2.0 */
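/*
 * sched_tp.c - probes attached to the bare scheduler tracepoints; each one
 * re-emits the payload through the corresponding event in sched_events.h.
 */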
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>
#define CREATE_TRACE_POINTS
#include "sched_events.h"
static inline struct cfs_rq *get_group_cfs_rq(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
return se->my_q;
#else
return NULL;
#endif
}
static inline struct cfs_rq *get_se_cfs_rq(struct sched_entity *se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
return se->cfs_rq;
#else
return NULL;
#endif
}
static void sched_pelt_cfs(void *data, struct cfs_rq *cfs_rq)
{
if (trace_sched_pelt_cfs_enabled()) {
const struct sched_avg *avg;
char path[PATH_SIZE];
int cpu;
avg = sched_trace_cfs_rq_avg(cfs_rq);
sched_trace_cfs_rq_path(cfs_rq, path, PATH_SIZE);
cpu = sched_trace_cfs_rq_cpu(cfs_rq);
trace_sched_pelt_cfs(cpu, path, avg);
}
}
static void sched_pelt_rt(void *data, struct rq *rq)
{
if (trace_sched_pelt_rt_enabled()) {
const struct sched_avg *avg = sched_trace_rq_avg_rt(rq);
int cpu = sched_trace_rq_cpu(rq);
if (!avg)
return;
trace_sched_pelt_rt(cpu, avg);
}
}
static void sched_pelt_dl(void *data, struct rq *rq)
{
if (trace_sched_pelt_dl_enabled()) {
const struct sched_avg *avg = sched_trace_rq_avg_dl(rq);
int cpu = sched_trace_rq_cpu(rq);
if (!avg)
return;
trace_sched_pelt_dl(cpu, avg);
}
}
static void sched_pelt_irq(void *data, struct rq *rq)
{
if (trace_sched_pelt_irq_enabled()) {
const struct sched_avg *avg = sched_trace_rq_avg_irq(rq);
int cpu = sched_trace_rq_cpu(rq);
if (!avg)
return;
trace_sched_pelt_irq(cpu, avg);
}
}
static void sched_pelt_se(void *data, struct sched_entity *se)
{
if (trace_sched_pelt_se_enabled()) {
void *gcfs_rq = get_group_cfs_rq(se);
void *cfs_rq = get_se_cfs_rq(se);
struct task_struct *p;
char path[PATH_SIZE];
char *comm;
pid_t pid;
int cpu;
sched_trace_cfs_rq_path(gcfs_rq, path, PATH_SIZE);
cpu = sched_trace_cfs_rq_cpu(cfs_rq);
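/* A group entity owns a cfs_rq; a task entity is embedded in a task_struct. */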
p = gcfs_rq ? NULL : container_of(se, struct task_struct, se);
comm = p ? p->comm : "(null)";
pid = p ? p->pid : -1;
trace_sched_pelt_se(cpu, path, comm, pid, &se->avg);
}
}
static void sched_overutilized(void *data, struct root_domain *rd, bool overutilized)
{
if (trace_sched_overutilized_enabled()) {
char span[SPAN_SIZE];
cpumap_print_to_pagebuf(false, span, sched_trace_rd_span(rd));
trace_sched_overutilized(overutilized, span);
}
}
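/*
 * Register a probe for each bare tracepoint. Since this code is always
 * built-in, the probes stay attached for the lifetime of the kernel.
 */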
static int sched_tp_init(void)
{
register_trace_pelt_cfs_tp(sched_pelt_cfs, NULL);
register_trace_pelt_rt_tp(sched_pelt_rt, NULL);
register_trace_pelt_dl_tp(sched_pelt_dl, NULL);
register_trace_pelt_irq_tp(sched_pelt_irq, NULL);
register_trace_pelt_se_tp(sched_pelt_se, NULL);
register_trace_sched_overutilized_tp(sched_overutilized, NULL);
return 0;
}
static void sched_tp_finish(void)
{
unregister_trace_pelt_cfs_tp(sched_pelt_cfs, NULL);
unregister_trace_pelt_rt_tp(sched_pelt_rt, NULL);
unregister_trace_pelt_dl_tp(sched_pelt_dl, NULL);
unregister_trace_pelt_irq_tp(sched_pelt_irq, NULL);
unregister_trace_pelt_se_tp(sched_pelt_se, NULL);
unregister_trace_sched_overutilized_tp(sched_overutilized, NULL);
}
module_init(sched_tp_init);
module_exit(sched_tp_finish);
MODULE_LICENSE("GPL");
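
For a quick sanity check, the new events can be driven like any other trace
event. A minimal userspace sketch, assuming tracefs is mounted at
/sys/kernel/tracing (the mount point varies; /sys/kernel/debug/tracing is
also common) and that it runs as root:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Enable sched_pelt_cfs, then print one buffer's worth of trace output. */
int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/kernel/tracing/events/sched/sched_pelt_cfs/enable",
		  O_WRONLY);
	if (fd < 0) {
		perror("enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);

	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("trace_pipe");
		return 1;
	}
	n = read(fd, buf, sizeof(buf) - 1);	/* blocks until events arrive */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}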