Diffstat (limited to 'include/trace/events/sched.h')
-rw-r--r-- | include/trace/events/sched.h | 641 |
1 file changed, 640 insertions(+), 1 deletion(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 18197e0bb510..30145bcd1a32 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -241,7 +241,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
 DEFINE_EVENT(sched_process_template, sched_process_free,
 	     TP_PROTO(struct task_struct *p),
 	     TP_ARGS(p));
- 
+
 /*
  * Tracepoint for a task exiting:
  */
@@ -396,6 +396,30 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+	TP_PROTO(struct task_struct *tsk),
+
+	TP_ARGS(tsk),
+
+	TP_STRUCT__entry(
+		__field( pid_t,	pid	)
+		__field( void*,	caller	)
+		__field( bool,	io_wait	)
+	),
+
+	TP_fast_assign(
+		__entry->pid	= tsk->pid;
+		__entry->caller	= (void *)get_wchan(tsk);
+		__entry->io_wait = tsk->in_iowait;
+	),
+
+	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
@@ -587,6 +611,621 @@ TRACE_EVENT(sched_wake_idle_without_ipi,
 
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+#ifdef CONFIG_SMP
+#ifdef CREATE_TRACE_POINTS
+static inline
+int __trace_sched_cpu(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct rq *rq = cfs_rq ? cfs_rq->rq : NULL;
+#else
+	struct rq *rq = cfs_rq ? container_of(cfs_rq, struct rq, cfs) : NULL;
+#endif
+	return rq ? cpu_of(rq)
+		  : task_cpu((container_of(se, struct task_struct, se)));
+}
+
+static inline
+int __trace_sched_path(struct cfs_rq *cfs_rq, char *path, int len)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	int l = path ? len : 0;
+
+	if (cfs_rq && task_group_is_autogroup(cfs_rq->tg))
+		return autogroup_path(cfs_rq->tg, path, l) + 1;
+	else if (cfs_rq && cfs_rq->tg->css.cgroup)
+		return cgroup_path(cfs_rq->tg->css.cgroup, path, l) + 1;
+#endif
+	if (path)
+		strcpy(path, "(null)");
+
+	return strlen("(null)");
+}
+
+static inline
+struct cfs_rq *__trace_sched_group_cfs_rq(struct sched_entity *se)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	return se->my_q;
+#else
+	return NULL;
+#endif
+}
+#endif /* CREATE_TRACE_POINTS */
+
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int walt_ravg_window;
+extern bool walt_disabled;
+
+#define walt_util(util_var, demand_sum) {\
+	u64 sum = demand_sum << SCHED_CAPACITY_SHIFT;\
+	do_div(sum, walt_ravg_window);\
+	util_var = (typeof(util_var))sum;\
+	}
+#endif
+
+/*
+ * Tracepoint for cfs_rq load tracking:
+ */
+TRACE_EVENT(sched_load_cfs_rq,
+
+	TP_PROTO(struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cfs_rq),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu			)
+		__dynamic_array(char,	path,
+				__trace_sched_path(cfs_rq, NULL, 0) )
+		__field( unsigned long,	load			)
+		__field( unsigned long,	util			)
+		__field( unsigned long,	util_pelt		)
+		__field( unsigned long,	util_walt		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= __trace_sched_cpu(cfs_rq, NULL);
+		__trace_sched_path(cfs_rq, __get_dynamic_array(path),
+				   __get_dynamic_array_len(path));
+		__entry->load	= cfs_rq->runnable_load_avg;
+		__entry->util	= cfs_rq->avg.util_avg;
+		__entry->util_pelt = cfs_rq->avg.util_avg;
+		__entry->util_walt = 0;
+#ifdef CONFIG_SCHED_WALT
+		if (&cfs_rq->rq->cfs == cfs_rq) {
+			walt_util(__entry->util_walt,
+				  cfs_rq->rq->prev_runnable_sum);
+			if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+				__entry->util = __entry->util_walt;
+		}
+#endif
+	),
+
+	TP_printk("cpu=%d path=%s load=%lu util=%lu util_pelt=%lu util_walt=%lu",
+		  __entry->cpu, __get_str(path), __entry->load, __entry->util,
+		  __entry->util_pelt, __entry->util_walt)
+);
+
+/*
+ * Tracepoint for rt_rq load tracking:
+ */
+struct rt_rq;
+
+TRACE_EVENT(sched_load_rt_rq,
+
+	TP_PROTO(int cpu, struct rt_rq *rt_rq),
+
+	TP_ARGS(cpu, rt_rq),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu	)
+		__field( unsigned long,	util	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->util	= rt_rq->avg.util_avg;
+	),
+
+	TP_printk("cpu=%d util=%lu", __entry->cpu,
+		  __entry->util)
+);
+
+/*
+ * Tracepoint for sched_entity load tracking:
+ */
+TRACE_EVENT(sched_load_se,
+
+	TP_PROTO(struct sched_entity *se),
+
+	TP_ARGS(se),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu				)
+		__dynamic_array(char,	path,
+			__trace_sched_path(__trace_sched_group_cfs_rq(se), NULL, 0) )
+		__array( char,		comm,	TASK_COMM_LEN		)
+		__field( pid_t,		pid				)
+		__field( unsigned long,	load				)
+		__field( unsigned long,	util				)
+		__field( unsigned long,	util_pelt			)
+		__field( unsigned long,	util_walt			)
+	),
+
+	TP_fast_assign(
+		struct cfs_rq *gcfs_rq = __trace_sched_group_cfs_rq(se);
+		struct task_struct *p = gcfs_rq ? NULL
+				: container_of(se, struct task_struct, se);
+
+		__entry->cpu = __trace_sched_cpu(gcfs_rq, se);
+		__trace_sched_path(gcfs_rq, __get_dynamic_array(path),
+				   __get_dynamic_array_len(path));
+		memcpy(__entry->comm, p ? p->comm : "(null)",
+		       p ? TASK_COMM_LEN : sizeof("(null)"));
+		__entry->pid = p ? p->pid : -1;
+		__entry->load = se->avg.load_avg;
+		__entry->util = se->avg.util_avg;
+		__entry->util_pelt = __entry->util;
+		__entry->util_walt = 0;
+#ifdef CONFIG_SCHED_WALT
+		if (!se->my_q) {
+			struct task_struct *p = container_of(se, struct task_struct, se);
+			walt_util(__entry->util_walt, p->ravg.demand);
+			if (!walt_disabled && sysctl_sched_use_walt_task_util)
+				__entry->util = __entry->util_walt;
+		}
+#endif
+	),
+
+	TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu util=%lu util_pelt=%lu util_walt=%lu",
+		  __entry->cpu, __get_str(path), __entry->comm,
+		  __entry->pid, __entry->load, __entry->util,
+		  __entry->util_pelt, __entry->util_walt)
+);
+
+/*
+ * Tracepoint for task_group load tracking:
+ */
+#ifdef CONFIG_FAIR_GROUP_SCHED
+TRACE_EVENT(sched_load_tg,
+
+	TP_PROTO(struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cfs_rq),
+
+	TP_STRUCT__entry(
+		__field( int,	cpu				)
+		__dynamic_array(char,	path,
+				__trace_sched_path(cfs_rq, NULL, 0) )
+		__field( long,	load				)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cfs_rq->rq->cpu;
+		__trace_sched_path(cfs_rq, __get_dynamic_array(path),
+				   __get_dynamic_array_len(path));
+		__entry->load	= atomic_long_read(&cfs_rq->tg->load_avg);
+	),
+
+	TP_printk("cpu=%d path=%s load=%ld", __entry->cpu, __get_str(path),
+		  __entry->load)
+);
+#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+/*
+ * Tracepoint for accounting CPU boosted utilization
+ */
+TRACE_EVENT(sched_boost_cpu,
+
+	TP_PROTO(int cpu, unsigned long util, long margin),
+
+	TP_ARGS(cpu, util, margin),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu	)
+		__field( unsigned long,	util	)
+		__field( long,		margin	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->util	= util;
+		__entry->margin	= margin;
+	),
+
+	TP_printk("cpu=%d util=%lu margin=%ld",
+		  __entry->cpu,
+		  __entry->util,
+		  __entry->margin)
+);
+
+/*
+ * Tracepoint for schedtune_tasks_update
+ */
+TRACE_EVENT(sched_tune_tasks_update,
+
+	TP_PROTO(struct task_struct *tsk, int cpu, int tasks, int idx,
+		 int boost, int max_boost, u64 group_ts),
+
+	TP_ARGS(tsk, cpu, tasks, idx, boost, max_boost, group_ts),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid		)
+		__field( int,	cpu		)
+		__field( int,	tasks		)
+		__field( int,	idx		)
+		__field( int,	boost		)
+		__field( int,	max_boost	)
+		__field( u64,	group_ts	)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->cpu		= cpu;
+		__entry->tasks		= tasks;
+		__entry->idx		= idx;
+		__entry->boost		= boost;
+		__entry->max_boost	= max_boost;
+		__entry->group_ts	= group_ts;
+	),
+
+	TP_printk("pid=%d comm=%s "
+		  "cpu=%d tasks=%d idx=%d boost=%d max_boost=%d timeout=%llu",
+		  __entry->pid, __entry->comm,
+		  __entry->cpu, __entry->tasks, __entry->idx,
+		  __entry->boost, __entry->max_boost,
+		  __entry->group_ts)
+);
+
+/*
+ * Tracepoint for schedtune_boostgroup_update
+ */
+TRACE_EVENT(sched_tune_boostgroup_update,
+
+	TP_PROTO(int cpu, int variation, int max_boost),
+
+	TP_ARGS(cpu, variation, max_boost),
+
+	TP_STRUCT__entry(
+		__field( int,	cpu		)
+		__field( int,	variation	)
+		__field( int,	max_boost	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->variation	= variation;
+		__entry->max_boost	= max_boost;
+	),
+
+	TP_printk("cpu=%d variation=%d max_boost=%d",
+		  __entry->cpu, __entry->variation, __entry->max_boost)
+);
+
+/*
+ * Tracepoint for accounting task boosted utilization
+ */
+TRACE_EVENT(sched_boost_task,
+
+	TP_PROTO(struct task_struct *tsk, unsigned long util, long margin),
+
+	TP_ARGS(tsk, util, margin),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,		pid	)
+		__field( unsigned long,	util	)
+		__field( long,		margin	)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid	= tsk->pid;
+		__entry->util	= util;
+		__entry->margin	= margin;
+	),
+
+	TP_printk("comm=%s pid=%d util=%lu margin=%ld",
+		  __entry->comm, __entry->pid,
+		  __entry->util,
+		  __entry->margin)
+);
+
+/*
+ * Tracepoint for system overutilized flag
+ */
+struct sched_domain;
+TRACE_EVENT_CONDITION(sched_overutilized,
+
+	TP_PROTO(struct sched_domain *sd, bool was_overutilized, bool overutilized),
+
+	TP_ARGS(sd, was_overutilized, overutilized),
+
+	TP_CONDITION(overutilized != was_overutilized),
+
+	TP_STRUCT__entry(
+		__field( bool,	overutilized	)
+		__array( char,	cpulist, 32	)
+	),
+
+	TP_fast_assign(
+		__entry->overutilized = overutilized;
+		scnprintf(__entry->cpulist, sizeof(__entry->cpulist), "%*pbl", cpumask_pr_args(sched_domain_span(sd)));
+	),
+
+	TP_printk("overutilized=%d sd_span=%s",
+		  __entry->overutilized ? 1 : 0, __entry->cpulist)
+);
+
+/*
+ * Tracepoint for find_best_target
+ */
+TRACE_EVENT(sched_find_best_target,
+
+	TP_PROTO(struct task_struct *tsk, bool prefer_idle,
+		 unsigned long min_util, int start_cpu,
+		 int best_idle, int best_active, int target),
+
+	TP_ARGS(tsk, prefer_idle, min_util, start_cpu,
+		best_idle, best_active, target),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( unsigned long,	min_util	)
+		__field( bool,	prefer_idle		)
+		__field( int,	start_cpu		)
+		__field( int,	best_idle		)
+		__field( int,	best_active		)
+		__field( int,	target			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->min_util	= min_util;
+		__entry->prefer_idle	= prefer_idle;
+		__entry->start_cpu	= start_cpu;
+		__entry->best_idle	= best_idle;
+		__entry->best_active	= best_active;
+		__entry->target		= target;
+	),
+
+	TP_printk("pid=%d comm=%s prefer_idle=%d start_cpu=%d "
+		  "best_idle=%d best_active=%d target=%d",
+		  __entry->pid, __entry->comm,
+		  __entry->prefer_idle, __entry->start_cpu,
+		  __entry->best_idle, __entry->best_active,
+		  __entry->target)
+);
+
+/*
+ * Tracepoint for tasks' estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_task,
+
+	TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+
+	TP_ARGS(tsk, avg),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,		pid		)
+		__field( int,		cpu		)
+		__field( unsigned int,	util_avg	)
+		__field( unsigned int,	est_enqueued	)
+		__field( unsigned int,	est_ewma	)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
+		__entry->pid		= tsk->pid;
+		__entry->cpu		= task_cpu(tsk);
+		__entry->util_avg	= avg->util_avg;
+		__entry->est_enqueued	= avg->util_est.enqueued;
+		__entry->est_ewma	= avg->util_est.ewma;
+	),
+
+	TP_printk("comm=%s pid=%d cpu=%d util_avg=%u util_est_ewma=%u util_est_enqueued=%u",
+		  __entry->comm,
+		  __entry->pid,
+		  __entry->cpu,
+		  __entry->util_avg,
+		  __entry->est_ewma,
+		  __entry->est_enqueued)
+);
+
+/*
+ * Tracepoint for root cfs_rq's estimated utilization.
+ */
+TRACE_EVENT(sched_util_est_cpu,
+
+	TP_PROTO(int cpu, struct cfs_rq *cfs_rq),
+
+	TP_ARGS(cpu, cfs_rq),
+
+	TP_STRUCT__entry(
+		__field( int,		cpu			)
+		__field( unsigned int,	util_avg		)
+		__field( unsigned int,	util_est_enqueued	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= cpu;
+		__entry->util_avg		= cfs_rq->avg.util_avg;
+		__entry->util_est_enqueued	= cfs_rq->avg.util_est.enqueued;
+	),
+
+	TP_printk("cpu=%d util_avg=%u util_est_enqueued=%u",
+		  __entry->cpu,
+		  __entry->util_avg,
+		  __entry->util_est_enqueued)
+);
+
+#ifdef CONFIG_SCHED_WALT
+struct rq;
+
+TRACE_EVENT(walt_update_task_ravg,
+
+	TP_PROTO(struct task_struct *p, struct rq *rq, int evt,
+		 u64 wallclock, u64 irqtime),
+
+	TP_ARGS(p, rq, evt, wallclock, irqtime),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( pid_t,	cur_pid			)
+		__field( u64,	wallclock		)
+		__field( u64,	mark_start		)
+		__field( u64,	delta_m			)
+		__field( u64,	win_start		)
+		__field( u64,	delta			)
+		__field( u64,	irqtime			)
+		__array( char,	evt, 16			)
+		__field( unsigned int,	demand		)
+		__field( unsigned int,	sum		)
+		__field( int,	cpu			)
+		__field( u64,	cs			)
+		__field( u64,	ps			)
+		__field( u32,	curr_window		)
+		__field( u32,	prev_window		)
+		__field( u64,	nt_cs			)
+		__field( u64,	nt_ps			)
+		__field( u32,	active_windows		)
+	),
+
+	TP_fast_assign(
+		static const char *walt_event_names[] = {
+			"PUT_PREV_TASK",
+			"PICK_NEXT_TASK",
+			"TASK_WAKE",
+			"TASK_MIGRATE",
+			"TASK_UPDATE",
+			"IRQ_UPDATE"
+		};
+		__entry->wallclock	= wallclock;
+		__entry->win_start	= rq->window_start;
+		__entry->delta		= (wallclock - rq->window_start);
+		strcpy(__entry->evt, walt_event_names[evt]);
+		__entry->cpu		= rq->cpu;
+		__entry->cur_pid	= rq->curr->pid;
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->mark_start	= p->ravg.mark_start;
+		__entry->delta_m	= (wallclock - p->ravg.mark_start);
+		__entry->demand		= p->ravg.demand;
+		__entry->sum		= p->ravg.sum;
+		__entry->irqtime	= irqtime;
+		__entry->cs		= rq->curr_runnable_sum;
+		__entry->ps		= rq->prev_runnable_sum;
+		__entry->curr_window	= p->ravg.curr_window;
+		__entry->prev_window	= p->ravg.prev_window;
+		__entry->nt_cs		= rq->nt_curr_runnable_sum;
+		__entry->nt_ps		= rq->nt_prev_runnable_sum;
+		__entry->active_windows	= p->ravg.active_windows;
+	),
+
+	TP_printk("wallclock=%llu window_start=%llu delta=%llu event=%s cpu=%d cur_pid=%d pid=%d comm=%s"
+		  " mark_start=%llu delta=%llu demand=%u sum=%u irqtime=%llu"
+		  " curr_runnable_sum=%llu prev_runnable_sum=%llu cur_window=%u"
+		  " prev_window=%u nt_curr_runnable_sum=%llu nt_prev_runnable_sum=%llu active_windows=%u",
+		  __entry->wallclock, __entry->win_start, __entry->delta,
+		  __entry->evt, __entry->cpu, __entry->cur_pid,
+		  __entry->pid, __entry->comm, __entry->mark_start,
+		  __entry->delta_m, __entry->demand,
+		  __entry->sum, __entry->irqtime,
+		  __entry->cs, __entry->ps,
+		  __entry->curr_window, __entry->prev_window,
+		  __entry->nt_cs, __entry->nt_ps,
+		  __entry->active_windows)
+);
+
+TRACE_EVENT(walt_update_history,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+		 int evt),
+
+	TP_ARGS(rq, p, runtime, samples, evt),
+
+	TP_STRUCT__entry(
+		__array( char,	comm,	TASK_COMM_LEN	)
+		__field( pid_t,	pid			)
+		__field( unsigned int,	runtime		)
+		__field( int,	samples			)
+		__field( int,	evt			)
+		__field( u64,	demand			)
+		__field( unsigned int,	walt_avg	)
+		__field( unsigned int,	pelt_avg	)
+		__array( u32,	hist, RAVG_HIST_SIZE_MAX)
+		__field( int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->runtime	= runtime;
+		__entry->samples	= samples;
+		__entry->evt		= evt;
+		__entry->demand		= p->ravg.demand;
+		walt_util(__entry->walt_avg, __entry->demand);
+		__entry->pelt_avg	= p->se.avg.util_avg;
+		memcpy(__entry->hist, p->ravg.sum_history,
+		       RAVG_HIST_SIZE_MAX * sizeof(u32));
+		__entry->cpu		= rq->cpu;
+	),
+
+	TP_printk("pid=%d comm=%s runtime=%u samples=%d event=%d demand=%llu ravg_window=%u"
+		  " walt=%u pelt=%u hist0=%u hist1=%u hist2=%u hist3=%u hist4=%u cpu=%d",
+		  __entry->pid, __entry->comm,
+		  __entry->runtime, __entry->samples, __entry->evt,
+		  __entry->demand,
+		  walt_ravg_window,
+		  __entry->walt_avg,
+		  __entry->pelt_avg,
+		  __entry->hist[0], __entry->hist[1],
+		  __entry->hist[2], __entry->hist[3],
+		  __entry->hist[4], __entry->cpu)
+);
+
+TRACE_EVENT(walt_migration_update_sum,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p),
+
+	TP_ARGS(rq, p),
+
+	TP_STRUCT__entry(
+		__field( int,	cpu	)
+		__field( int,	pid	)
+		__field( u64,	cs	)
+		__field( u64,	ps	)
+		__field( s64,	nt_cs	)
+		__field( s64,	nt_ps	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu_of(rq);
+		__entry->cs		= rq->curr_runnable_sum;
+		__entry->ps		= rq->prev_runnable_sum;
+		__entry->nt_cs		= (s64)rq->nt_curr_runnable_sum;
+		__entry->nt_ps		= (s64)rq->nt_prev_runnable_sum;
+		__entry->pid		= p->pid;
+	),
+
+	TP_printk("cpu=%d curr_runnable_sum=%llu prev_runnable_sum=%llu nt_curr_runnable_sum=%lld nt_prev_runnable_sum=%lld pid=%d",
+		  __entry->cpu, __entry->cs, __entry->ps,
+		  __entry->nt_cs, __entry->nt_ps, __entry->pid)
+);
+#endif /* CONFIG_SCHED_WALT */
+#endif /* CONFIG_SMP */
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
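
The sched_blocked_reason event added above records which kernel function a task was blocked in (via get_wchan(), printed with %pS) and whether the sleep was an iowait sleep. Once the patch is applied, the event can be consumed through the standard tracefs interface; below is a minimal userspace sketch, assuming tracefs is mounted at /sys/kernel/tracing (the same tree is also exposed under /sys/kernel/debug/tracing). The write_str() helper and the filtering loop are illustrative, not part of the patch.

#include <stdio.h>
#include <string.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[4096];
	FILE *pipe;

	if (write_str("/sys/kernel/tracing/events/sched/sched_blocked_reason/enable", "1"))
		return 1;

	pipe = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!pipe)
		return 1;

	/* Each record ends with "pid=%d iowait=%d caller=%pS",
	 * per the TP_printk() in the event definition above. */
	while (fgets(line, sizeof(line), pipe))
		if (strstr(line, "sched_blocked_reason"))
			fputs(line, stdout);

	fclose(pipe);
	return 0;
}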
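The walt_util() macro introduced under CONFIG_SCHED_WALT scales a WALT demand sum into the kernel's 0..1024 capacity range: util = (demand << SCHED_CAPACITY_SHIFT) / walt_ravg_window, where SCHED_CAPACITY_SHIFT is 10. A standalone sketch of the same arithmetic follows; the 20 ms window and 5 ms demand are assumed example values, not taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10	/* 1024 == one fully busy CPU */

/* Same arithmetic as the walt_util() macro in the patch. */
static uint64_t walt_util(uint64_t demand_sum, uint64_t window_ns)
{
	return (demand_sum << SCHED_CAPACITY_SHIFT) / window_ns;
}

int main(void)
{
	uint64_t window_ns = 20000000;	/* 20 ms window: assumed example */
	uint64_t demand_ns = 5000000;	/* task busy for 5 ms of the window */

	/* 5 ms out of 20 ms of a 1024-capacity CPU -> util = 256 */
	printf("util = %llu\n",
	       (unsigned long long)walt_util(demand_ns, window_ns));
	return 0;
}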
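Note that sched_overutilized is declared with TRACE_EVENT_CONDITION, so a record is emitted only when the overutilized flag actually toggles, not on every evaluation of the scheduler path. The sketch below demonstrates that edge-triggered behaviour with a plain function standing in for the tracepoint; all names in it are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the tracepoint: prints only when the flag flips,
 * mirroring TP_CONDITION(overutilized != was_overutilized). */
static void trace_overutilized(bool was, bool now)
{
	if (now != was)
		printf("overutilized=%d\n", now ? 1 : 0);
}

int main(void)
{
	bool samples[] = { false, false, true, true, false };
	bool prev = false;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		/* fires only at i == 2 (0 -> 1) and i == 4 (1 -> 0) */
		trace_overutilized(prev, samples[i]);
		prev = samples[i];
	}
	return 0;
}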