From: Steven Rostedt on
On Wed, 2010-03-10 at 08:51 +0100, Jiri Olsa wrote:

> @@ -993,14 +991,6 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
> if (ret != TRACE_TYPE_HANDLED)
> return ret;
> break;
> - default:
> - event = ftrace_find_event(ent->type);
> - if (!event)
> - return TRACE_TYPE_UNHANDLED;
> -
> - ret = event->trace(iter, sym_flags);
> - if (ret != TRACE_TYPE_HANDLED)
> - return ret;
> }
>

This has the same effect as the problem with the first patch:

was:

2) | _raw_spin_unlock_irqrestore() {
2) | /* lock_release: ffffffff816969a8 random_read_wait.lock */
2) 6.022 us | }
2) + 27.632 us | }
2) 2.939 us | kill_fasync();
2) | _raw_spin_unlock_irqrestore() {
2) | /* lock_release: ffffffff816967f0 &input_pool.lock */
2) 5.471 us | }
2) + 56.740 us | }
2) + 88.357 us | }
2) + 94.026 us | }


is now:

2) | _raw_spin_unlock_irqrestore() {
<idle>-0 [002] 411.798721: lock_release: ffffffff816969a8 random_read_wait.lock
2) 6.022 us | }
2) + 27.632 us | }
2) 2.939 us | kill_fasync();
2) | _raw_spin_unlock_irqrestore() {
<idle>-0 [002] 411.798741: lock_release: ffffffff816967f0 &input_pool.lock
2) 5.471 us | }
2) + 56.740 us | }
2) + 88.357 us | }
2) + 94.026 us | }


Please, before you apply any patch do the following:

(assuming debugfs is at /debug)

# echo 1 > /debug/tracing/events/enable
# echo function_graph > /debug/tracing/current_tracer
# cat /debug/tracing/trace

Examine what you see.

Then apply all your patches, and repeat the above. Make sure the
formatting is the same.

Thanks,

-- Steve


> /* Strip ending newline */
> @@ -1066,8 +1056,11 @@ print_graph_function(struct trace_iterator *iter)
> trace_assign_type(field, entry);
> return print_graph_return(&field->ret, s, entry, iter);
> }
> - default:
> + case TRACE_BPRINT:
> + case TRACE_PRINT:
> return print_graph_comment(s, entry, iter);
> + default:
> + return TRACE_TYPE_UNHANDLED;
> }
>
> return TRACE_TYPE_HANDLED;
> @@ -1101,7 +1094,7 @@ static void print_lat_header(struct seq_file *s)
> seq_printf(s, "#%.*s|||| / \n", size, spaces);
> }
>
> -static void print_graph_headers(struct seq_file *s)
> +void print_graph_headers(struct seq_file *s)
> {
> int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
>
> @@ -1137,7 +1130,7 @@ static void print_graph_headers(struct seq_file *s)
> seq_printf(s, " | | | |\n");
> }
>
> -static void graph_trace_open(struct trace_iterator *iter)
> +void graph_trace_open(struct trace_iterator *iter)
> {
> /* pid and depth on the last trace processed */
> struct fgraph_data *data;
> @@ -1172,7 +1165,7 @@ static void graph_trace_open(struct trace_iterator *iter)
> pr_warning("function graph tracer: not enough memory\n");
> }
>
> -static void graph_trace_close(struct trace_iterator *iter)
> +void graph_trace_close(struct trace_iterator *iter)
> {
> struct fgraph_data *data = iter->private;
>
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index 2974bc7..d0c6d6c 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -34,6 +34,9 @@ static int trace_type __read_mostly;
>
> static int save_lat_flag;
>
> +static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
> +static int start_irqsoff_tracer(struct trace_array *tr, int graph);
> +
> #ifdef CONFIG_PREEMPT_TRACER
> static inline int
> preempt_trace(void)
> @@ -55,6 +58,23 @@ irq_trace(void)
> # define irq_trace() (0)
> #endif
>
> +#define TRACE_DISPLAY_GRAPH 1
> +
> +static struct tracer_opt trace_opts[] = {
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> + /* display latency trace as call graph */
> + { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
> +#endif
> + { } /* Empty entry */
> +};
> +
> +static struct tracer_flags tracer_flags = {
> + .val = 0,
> + .opts = trace_opts,
> +};
> +
> +#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
> +
> /*
> * Sequence count - we record it when starting a measurement and
> * skip the latency if the sequence has changed - some other section
> @@ -108,6 +128,109 @@ static struct ftrace_ops trace_ops __read_mostly =
> };
> #endif /* CONFIG_FUNCTION_TRACER */
>
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> +static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
> +{
> + int cpu;
> +
> + if (!(bit & TRACE_DISPLAY_GRAPH))
> + return -EINVAL;
> +
> + if (!(is_graph() ^ set))
> + return 0;
> +
> + stop_irqsoff_tracer(irqsoff_trace, !set);
> +
> + for_each_possible_cpu(cpu)
> + per_cpu(tracing_cpu, cpu) = 0;
> +
> + tracing_max_latency = 0;
> + tracing_reset_online_cpus(irqsoff_trace);
> +
> + return start_irqsoff_tracer(irqsoff_trace, set);
> +}
> +
> +static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
> +{
> + struct trace_array *tr = irqsoff_trace;
> + struct trace_array_cpu *data;
> + unsigned long flags;
> + long disabled;
> + int ret;
> + int cpu;
> + int pc;
> +
> + cpu = raw_smp_processor_id();
> + if (likely(!per_cpu(tracing_cpu, cpu)))
> + return 0;
> +
> + local_save_flags(flags);
> + /* slight chance to get a false positive on tracing_cpu */
> + if (!irqs_disabled_flags(flags))
> + return 0;
> +
> + data = tr->data[cpu];
> + disabled = atomic_inc_return(&data->disabled);
> +
> + if (likely(disabled == 1)) {
> + pc = preempt_count();
> + ret = __trace_graph_entry(tr, trace, flags, pc);
> + } else
> + ret = 0;
> +
> + atomic_dec(&data->disabled);
> + return ret;
> +}
> +
> +static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
> +{
> + struct trace_array *tr = irqsoff_trace;
> + struct trace_array_cpu *data;
> + unsigned long flags;
> + long disabled;
> + int cpu;
> + int pc;
> +
> + cpu = raw_smp_processor_id();
> + if (likely(!per_cpu(tracing_cpu, cpu)))
> + return;
> +
> + local_save_flags(flags);
> + /* slight chance to get a false positive on tracing_cpu */
> + if (!irqs_disabled_flags(flags))
> + return;
> +
> + data = tr->data[cpu];
> + disabled = atomic_inc_return(&data->disabled);
> +
> + if (likely(disabled == 1)) {
> + pc = preempt_count();
> + __trace_graph_return(tr, trace, flags, pc);
> + }
> +
> + atomic_dec(&data->disabled);
> +}
> +
> +static void irqsoff_trace_open(struct trace_iterator *iter)
> +{
> + if (is_graph())
> + graph_trace_open(iter);
> +
> +}
> +
> +static void irqsoff_trace_close(struct trace_iterator *iter)
> +{
> + if (iter->private)
> + graph_trace_close(iter);
> +}
> +
> +#else
> +static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
> +{
> + return -EINVAL;
> +}
> +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
> +
> /*
> * Should this new latency be reported/recorded?
> */
> @@ -347,19 +470,36 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
> }
> #endif /* CONFIG_PREEMPT_TRACER */
>
> -static void start_irqsoff_tracer(struct trace_array *tr)
> +static int start_irqsoff_tracer(struct trace_array *tr, int graph)
> {
> - register_ftrace_function(&trace_ops);
> - if (tracing_is_enabled())
> + int ret = 0;
> +
> + if (!graph)
> + ret = register_ftrace_function(&trace_ops);
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> + else
> + ret = register_ftrace_graph(&irqsoff_graph_return,
> + &irqsoff_graph_entry);
> +#endif
> +
> + if (!ret && tracing_is_enabled())
> tracer_enabled = 1;
> else
> tracer_enabled = 0;
> +
> + return ret;
> }
>
> -static void stop_irqsoff_tracer(struct trace_array *tr)
> +static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
> {
> tracer_enabled = 0;
> - unregister_ftrace_function(&trace_ops);
> +
> + if (!graph)
> + unregister_ftrace_function(&trace_ops);
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> + else
> + unregister_ftrace_graph();
> +#endif
> }
>
> static void __irqsoff_tracer_init(struct trace_array *tr)
> @@ -372,12 +512,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
> /* make sure that the tracer is visible */
> smp_wmb();
> tracing_reset_online_cpus(tr);
> - start_irqsoff_tracer(tr);
> +
> + if (start_irqsoff_tracer(tr, is_graph()))
> + printk(KERN_ERR "failed to start irqsoff tracer\n");
> }
>
> static void irqsoff_tracer_reset(struct trace_array *tr)
> {
> - stop_irqsoff_tracer(tr);
> + stop_irqsoff_tracer(tr, is_graph());
>
> if (!save_lat_flag)
> trace_flags &= ~TRACE_ITER_LATENCY_FMT;
> @@ -409,9 +551,15 @@ static struct tracer irqsoff_tracer __read_mostly =
> .start = irqsoff_tracer_start,
> .stop = irqsoff_tracer_stop,
> .print_max = 1,
> + .flags = &tracer_flags,
> + .set_flag = irqsoff_set_flag,
> #ifdef CONFIG_FTRACE_SELFTEST
> .selftest = trace_selftest_startup_irqsoff,
> #endif
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> + .open = irqsoff_trace_open,
> + .close = irqsoff_trace_close,
> +#endif
> };
> # define register_irqsoff(trace) register_tracer(&trace)
> #else


From: Jiri Olsa on
On Tue, Mar 23, 2010 at 09:19:16PM -0400, Steven Rostedt wrote:
> On Wed, 2010-03-10 at 08:51 +0100, Jiri Olsa wrote:
>
> [...]
>
> Then apply all your patches, and repeat the above. Make sure the
> formatting is the same.
hi,

shoot, looks like I forgot about comments... sending new version soon :)

thanks,
jirka

From: Frederic Weisbecker on
On Thu, Apr 01, 2010 at 05:37:08PM +0200, Jiri Olsa wrote:
> +static int start_irqsoff_tracer(struct trace_array *tr, int graph)
> {
> - register_ftrace_function(&trace_ops);
> - if (tracing_is_enabled())
> + int ret = 0;
> +
> + if (!graph)
> + ret = register_ftrace_function(&trace_ops);
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> + else
> + ret = register_ftrace_graph(&irqsoff_graph_return,
> + &irqsoff_graph_entry);
> +#endif



Rather, please define static inline stubs for register_ftrace_graph
in linux/ftrace.h for the !CONFIG_FUNCTION_GRAPH_TRACER case.
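
Something like this minimal sketch (the trace_func_graph_ret_t and
trace_func_graph_ent_t typedefs already exist in linux/ftrace.h; the
stub's -1 return value is only illustrative):

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);
extern void unregister_ftrace_graph(void);
#else
/* stubs so callers need no #ifdef of their own */
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }
#endif

That would let both #ifdef blocks in start/stop_irqsoff_tracer() go away.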



> +
> + if (!ret && tracing_is_enabled())
> tracer_enabled = 1;
> else
> tracer_enabled = 0;
> +
> + return ret;
> }
>
> -static void stop_irqsoff_tracer(struct trace_array *tr)
> +static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
> {
> tracer_enabled = 0;
> - unregister_ftrace_function(&trace_ops);
> +
> + if (!graph)
> + unregister_ftrace_function(&trace_ops);
> +#ifdef CONFIG_FUNCTION_GRAPH_TRACER
> + else
> + unregister_ftrace_graph();
> +#endif



Same here.

Thanks.

From: Frederic Weisbecker on
On Fri, Apr 02, 2010 at 07:01:22PM +0200, Jiri Olsa wrote:
> +#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
> + TRACE_GRAPH_PRINT_PROC)
> +
> +static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
> +{
> + u32 flags = GRAPH_TRACER_FLAGS;
> +
> + if (trace_flags & TRACE_ITER_LATENCY_FMT)
> + flags |= TRACE_GRAPH_PRINT_DURATION;
> + else
> + flags |= TRACE_GRAPH_PRINT_ABS_TIME;



Why not have ABS_TIME in any case?

And more importantly, you probably want to add TRACE_ITER_LATENCY_FMT
as well to get the contexts (need-resched, hardirq/softirq, preempt-depth,
lock-depth)



> + /*
> + * In graph mode call the graph tracer output function,
> + * otherwise go with the TRACE_FN event handler
> + */
> + if (is_graph())
> + return print_graph_function_flags(iter, flags);
> +
> + return TRACE_TYPE_UNHANDLED;
> +}
> +
> +static void irqsoff_print_header(struct seq_file *s)
> +{
> + if (is_graph()) {
> + struct trace_iterator *iter = s->private;
> + u32 flags = GRAPH_TRACER_FLAGS;
> +
> + if (trace_flags & TRACE_ITER_LATENCY_FMT) {
> + /* print nothing if the buffers are empty */
> + if (trace_empty(iter))
> + return;
> +
> + print_trace_header(s, iter);
> + flags |= TRACE_GRAPH_PRINT_DURATION;
> + } else
> + flags |= TRACE_GRAPH_PRINT_ABS_TIME;
> +
> + print_graph_headers_flags(s, flags);



Same here.


Other than these few comments, the whole set looks good!

Thanks!

From: Jiri Olsa on
On Tue, Apr 13, 2010 at 01:33:37AM +0200, Frederic Weisbecker wrote:
> On Fri, Apr 02, 2010 at 07:01:22PM +0200, Jiri Olsa wrote:
> > [...]
>
> Why not have ABS_TIME in any case?
>
> And more importantly, you probably want to add TRACE_ITER_LATENCY_FMT
> as well to get the contexts (need-resched, hardirq/softirq, preempt-depth,
> lock-depth)

hi,

latency info
The latency info is governed by the global 'trace_flags'; the graph
tracer follows that as well.
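
(For reference, this is roughly how trace_irqsoff.c handles the flag at
init time with this patchset applied; the save_lat_flag lines are the
existing code, the tail matches the hunk quoted earlier:)

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	/* remember whether the user had the latency format enabled */
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	/* and force it on while the irqsoff tracer is active */
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}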


absolute time
The idea was to follow the current irqsoff function trace format. Though
it cannot be exactly the same, it can print the same info.

By default I added TRACE_GRAPH_PRINT_CPU and TRACE_GRAPH_PRINT_PROC,
since they are always there in the current irqsoff trace.

The absolute time is then printed only when the latency info
is switched off.

AFAICS the 'time' field in the output (with latency enabled) actually
represents the duration (computed/printed in the trace_print_lat_context
function). Hence I added TRACE_GRAPH_PRINT_DURATION to the graph output
for the latency case.

So the answer to your question is: the absolute time is not part
of the output, since it's not part of the original trace output.
My opinion is that the absolute time is not that important
for this tracer; the duration is.
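
For completeness, the graph mode is then toggled like any other tracer
option (a usage sketch, assuming debugfs at /debug as in Steven's example):

# echo irqsoff > /debug/tracing/current_tracer
# echo display-graph > /debug/tracing/trace_options
# cat /debug/tracing/trace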


latency output OFF:

current tracer:
           TASK-PID    CPU#    TIMESTAMP  FUNCTION


graph tracer:
     TIME        CPU  TASK/PID        FUNCTION CALLS


latency output ON:

current tracer:
                  _------=> CPU#
                 / _-----=> irqs-off
                | / _----=> need-resched
                || / _---=> hardirq/softirq
                ||| / _--=> preempt-depth
                |||| /_--=> lock-depth
                |||||/     delay
  cmd     pid   |||||| time  |   caller
     \   /      ||||||   \   |   /



graph tracer:
                   _-----=> irqs-off
                  / _----=> need-resched
                 | / _---=> hardirq/softirq
                 || / _--=> preempt-depth
                 ||| / _-=> lock-depth
                 |||| /
 CPU  TASK/PID   |||||  DURATION            FUNCTION CALLS


The non-latency case also covers the 'verbose' option output.




thanks for your comments,
hopefully I haven't missed anything.. ;)

jirka