From: Frederic Weisbecker on
On Thu, Jul 01, 2010 at 05:36:01PM +0200, Frederic Weisbecker wrote:
> +static struct perf_callchain_entry *get_callchain_entry(int *rctx)
> +{
> + struct perf_callchain_entry_cpus *cpu_entries;
> +
> + *rctx = get_recursion_context(&__get_cpu_var(callchain_recursion));
> + if (*rctx == -1)
> + return NULL;
> +
> + cpu_entries = rcu_dereference(callchain_entries[*rctx]);
> + if (!cpu_entries)
> + return NULL;
> +
> + return this_cpu_ptr(cpu_entries->entries);
> +}
> +
> +static void
> +put_callchain_entry(int rctx)
> +{
> + put_recursion_context(&__get_cpu_var(callchain_recursion), rctx);
> +}
> +
> +static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
> +{
> + int rctx;
> + struct perf_callchain_entry *entry;
> +
> +
> + entry = get_callchain_entry(&rctx);
> + if (!entry)
> + goto exit_put;


I realize this should only call put_callchain_entry() when rctx != -1
(there is nothing to release if get_recursion_context() failed), but you
get the idea...



> +
> + entry->nr = 0;
> +
> + if (!user_mode(regs)) {
> + perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
> + perf_callchain_kernel(entry, regs);
> + if (current->mm)
> + regs = task_pt_regs(current);
> + else
> + regs = NULL;
> + }
> +
> + if (regs) {
> + perf_callchain_store(entry, PERF_CONTEXT_USER);
> + perf_callchain_user(entry, regs);
> + }
> +
> +exit_put:
> + put_callchain_entry(rctx);
> +
> + return entry;
> +}
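
IOW, the end result should probably look something like this (untested
sketch of the fixed version, only releasing the recursion context when
we actually got one):

static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	int rctx;
	struct perf_callchain_entry *entry;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)			/* no recursion context taken */
		return NULL;

	if (!entry)			/* buffers not allocated (yet) */
		goto exit_put;

	entry->nr = 0;

	if (!user_mode(regs)) {
		perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(entry, regs);
		if (current->mm)
			regs = task_pt_regs(current);
		else
			regs = NULL;
	}

	if (regs) {
		perf_callchain_store(entry, PERF_CONTEXT_USER);
		perf_callchain_user(entry, regs);
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}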

From: Peter Zijlstra on
On Thu, 2010-07-01 at 17:36 +0200, Frederic Weisbecker wrote:
> Now that software events no longer disable interrupts in the event
> path, callchains can nest in any context. So separating NMI and
> other contexts into two buffers has become racy.
>
> Fix this by providing one buffer per nesting level. Given the size
> of the callchain entries (2040 bytes * 4), we now need to allocate
> them dynamically.

OK, so I guess you want to allocate them dynamically because ~8k per cpu
is too much to always have around?
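
(Back-of-the-envelope: a struct perf_callchain_entry is one u64 nr plus
PERF_MAX_STACK_DEPTH u64 ips, so a bit over 2k, and one buffer per
nesting level makes that the ~8k per cpu.)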

> +static int get_callchain_buffers(void)
> +{
> + int i;
> + int err = 0;
> + struct perf_callchain_entry_cpus *buf;
> +
> + mutex_lock(&callchain_mutex);
> +
> + if (WARN_ON_ONCE(++nr_callchain_events < 1)) {
> + err = -EINVAL;
> + goto exit;
> + }
> +
> + if (nr_callchain_events > 1)
> + goto exit;
> +
> + for (i = 0; i < 4; i++) {
> + buf = kzalloc(sizeof(*buf), GFP_KERNEL);
> + /* free_event() will clean the rest */
> + if (!buf) {
> + err = -ENOMEM;
> + goto exit;
> + }
> + buf->entries = alloc_percpu(struct perf_callchain_entry);
> + if (!buf->entries) {
> + kfree(buf);
> + err = -ENOMEM;
> + goto exit;
> + }
> + rcu_assign_pointer(callchain_entries[i], buf);
> + }
> +
> +exit:
> + mutex_unlock(&callchain_mutex);
> +
> + return err;
> +}

> +static void put_callchain_buffers(void)
> +{
> + int i;
> + struct perf_callchain_entry_cpus *entry;
> +
> + mutex_lock(&callchain_mutex);
> +
> + if (WARN_ON_ONCE(--nr_callchain_events < 0))
> + goto exit;
> +
> + if (nr_callchain_events > 0)
> + goto exit;
> +
> + for (i = 0; i < 4; i++) {
> + entry = callchain_entries[i];
> + if (entry) {
> + callchain_entries[i] = NULL;
> + call_rcu(&entry->rcu_head, release_callchain_buffers);
> + }
> + }
> +
> +exit:
> + mutex_unlock(&callchain_mutex);
> +}

If you make nr_callchain_events an atomic_t, then you can do the
refcounting outside the mutex. See the existing user of
atomic_dec_and_mutex_lock().

I would also split it into get/put and alloc/free functions for clarity.
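
Something along these lines, perhaps (untested; alloc_callchain_buffers()
and free_callchain_buffers() being the split-out alloc/free halves):

static atomic_t nr_callchain_events = ATOMIC_INIT(0);

static int get_callchain_buffers(void)
{
	int err = 0;

	mutex_lock(&callchain_mutex);

	/* first user allocates the per-cpu buffers */
	if (atomic_inc_return(&nr_callchain_events) == 1)
		err = alloc_callchain_buffers();

	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

static void put_callchain_buffers(void)
{
	/* last user frees them; the mutex serializes against a new get */
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		free_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}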

I'm not at all sure why you're using RCU though.

> @@ -1895,6 +2072,8 @@ static void free_event(struct perf_event *event)
> atomic_dec(&nr_comm_events);
> if (event->attr.task)
> atomic_dec(&nr_task_events);
> + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
> + put_callchain_buffers();
> }
>
> if (event->buffer) {

If this was the last event, there's no callchain user left, so nobody can
still be in here:

> @@ -3480,14 +3610,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
> struct perf_output_handle handle;
> struct perf_event_header header;
>
> + /* protect the callchain buffers */
> + rcu_read_lock();
> +
> perf_prepare_sample(&header, data, event, regs);
>
> if (perf_output_begin(&handle, event, header.size, nmi, 1))
> - return;
> + goto exit;
>
> perf_output_sample(&handle, &header, data, event);
>
> perf_output_end(&handle);
> +
> +exit:
> + rcu_read_unlock();
> }

Rendering that RCU stuff superfluous.


From: Frederic Weisbecker on
On Fri, Jul 02, 2010 at 08:07:35PM +0200, Peter Zijlstra wrote:
> On Thu, 2010-07-01 at 17:36 +0200, Frederic Weisbecker wrote:
> > Now that software events no longer disable interrupts in the event
> > path, callchains can nest in any context. So separating NMI and
> > other contexts into two buffers has become racy.
> >
> > Fix this by providing one buffer per nesting level. Given the size
> > of the callchain entries (2040 bytes * 4), we now need to allocate
> > them dynamically.
>
> OK, so I guess you want to allocate them dynamically because ~8k per cpu
> is too much to always have around?



Right. I know it adds quite some complexity and I hesitated a lot before
doing so. But I think it's necessary.



> > +static int get_callchain_buffers(void)
> > +{
> > + int i;
> > + int err = 0;
> > + struct perf_callchain_entry_cpus *buf;
> > +
> > + mutex_lock(&callchain_mutex);
> > +
> > + if (WARN_ON_ONCE(++nr_callchain_events < 1)) {
> > + err = -EINVAL;
> > + goto exit;
> > + }
> > +
> > + if (nr_callchain_events > 1)
> > + goto exit;
> > +
> > + for (i = 0; i < 4; i++) {
> > + buf = kzalloc(sizeof(*buf), GFP_KERNEL);
> > + /* free_event() will clean the rest */
> > + if (!buf) {
> > + err = -ENOMEM;
> > + goto exit;
> > + }
> > + buf->entries = alloc_percpu(struct perf_callchain_entry);
> > + if (!buf->entries) {
> > + kfree(buf);
> > + err = -ENOMEM;
> > + goto exit;
> > + }
> > + rcu_assign_pointer(callchain_entries[i], buf);
> > + }
> > +
> > +exit:
> > + mutex_unlock(&callchain_mutex);
> > +
> > + return err;
> > +}
>
> > +static void put_callchain_buffers(void)
> > +{
> > + int i;
> > + struct perf_callchain_entry_cpus *entry;
> > +
> > + mutex_lock(&callchain_mutex);
> > +
> > + if (WARN_ON_ONCE(--nr_callchain_events < 0))
> > + goto exit;
> > +
> > + if (nr_callchain_events > 0)
> > + goto exit;
> > +
> > + for (i = 0; i < 4; i++) {
> > + entry = callchain_entries[i];
> > + if (entry) {
> > + callchain_entries[i] = NULL;
> > + call_rcu(&entry->rcu_head, release_callchain_buffers);
> > + }
> > + }
> > +
> > +exit:
> > + mutex_unlock(&callchain_mutex);
> > +}
>
> If you make nr_callchain_events an atomic_t, then you can do the
> refcounting outside the mutex. See the existing user of
> atomic_dec_and_mutex_lock().
>
> I would also split it into get/put and alloc/free functions for clarity.



Ok I will.




> I'm not at all sure why you're using RCU though.
>
> > @@ -1895,6 +2072,8 @@ static void free_event(struct perf_event *event)
> > atomic_dec(&nr_comm_events);
> > if (event->attr.task)
> > atomic_dec(&nr_task_events);
> > + if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
> > + put_callchain_buffers();
> > }
> >
> > if (event->buffer) {
>
> If this was the last event, there's no callchain user left, so nobody can
> still be in here:
>
> > @@ -3480,14 +3610,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
> > struct perf_output_handle handle;
> > struct perf_event_header header;
> >
> > + /* protect the callchain buffers */
> > + rcu_read_lock();
> > +
> > perf_prepare_sample(&header, data, event, regs);
> >
> > if (perf_output_begin(&handle, event, header.size, nmi, 1))
> > - return;
> > + goto exit;
> >
> > perf_output_sample(&handle, &header, data, event);
> >
> > perf_output_end(&handle);
> > +
> > +exit:
> > + rcu_read_unlock();
> > }
>
> Rendering that RCU stuff superfluous.


Maybe I'm missing something that would make the RCU unnecessary here.

But consider a perf event running on CPU 1 while you close its fd on
CPU 0. CPU 1 has started to use a callchain buffer when it receives the
IPI to retire the event from the cpu, but it hasn't finished its
callchain processing yet.

If CPU 0 releases the callchain buffers right after that, CPU 1 may
crash in the middle.

So you need to wait for the end of the grace period before freeing.
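
IOW the release side (release_callchain_buffers(), not shown in the hunks
I posted) runs as the RCU callback, something along these lines:

/* RCU callback: free the per-cpu buffers once the grace period elapsed */
static void release_callchain_buffers(struct rcu_head *head)
{
	struct perf_callchain_entry_cpus *entry;

	entry = container_of(head, struct perf_callchain_entry_cpus, rcu_head);
	free_percpu(entry->entries);
	kfree(entry);
}

so the buffers only get freed once every rcu_read_lock() section (ie. any
pending perf_event_output()) is done with them.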
