From: Daisuke Nishimura
On Mon, 8 Mar 2010 10:56:41 +0900, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> wrote:
> On Mon, 8 Mar 2010 10:44:47 +0900
> Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> wrote:
>
> > > +/*
> > > + * mem_cgroup_update_page_stat_locked() - update memcg file cache's accounting
> > > + * @page: the page involved in a file cache operation.
> > > + * @idx: the particular file cache statistic.
> > > + * @charge: true to increment, false to decrement the statistic specified
> > > + * by @idx.
> > > + *
> > > + * Update memory cgroup file cache's accounting from a locked context.
> > > + *
> > > + * NOTE: must be called with mapping->tree_lock held.
> > > + */
> > > +void mem_cgroup_update_page_stat_locked(struct page *page,
> > > + enum mem_cgroup_write_page_stat_item idx, bool charge)
> > > +{
> > > + struct address_space *mapping = page_mapping(page);
> > > + struct page_cgroup *pc;
> > > +
> > > + if (mem_cgroup_disabled())
> > > + return;
> > > + WARN_ON_ONCE(!irqs_disabled());
> > > + WARN_ON_ONCE(mapping && !spin_is_locked(&mapping->tree_lock));
> > > +
> > I think this is the wrong place to insert the assertion.
> > The problem with the page cgroup lock is that the lock holder can be interrupted
> > in the current implementation.
> > So,
> >
> > a) it must not be acquired under another lock which can be acquired in interrupt
> > context, such as mapping->tree_lock, to avoid:
> >
> >   context1                      context2
> >                                 lock_page_cgroup(pcA)
> >   spin_lock_irq(&tree_lock)
> >   lock_page_cgroup(pcA)         <interrupted>
> >   =>fail                        spin_lock_irqsave(&tree_lock)
> >                                 =>fail
> >
> > b) it must not be acquired in interrupt context to avoid:
> >
> > lock_page_cgroup(pcA)
> > <interrupted>
> > lock_page_cgroup(pcA)
> > =>fail
> >
> > I think something like this would be better:
> >
> > @@ -83,8 +83,14 @@ static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
> > return page_zonenum(pc->page);
> > }
> >
> > +#include <linux/irqflags.h>
> > +#include <linux/hardirq.h>
> > static inline void lock_page_cgroup(struct page_cgroup *pc)
> > {
> > +#ifdef CONFIG_DEBUG_VM
> > + WARN_ON_ONCE(irqs_disabled());
> > + WARN_ON_ONCE(in_interrupt());
> > +#endif
> > bit_spin_lock(PCG_LOCK, &pc->flags);
> > }
> >
> > > + pc = lookup_page_cgroup(page);
> > > + if (unlikely(!pc) || !PageCgroupUsed(pc))
> > > + return;
> > > + mem_cgroup_update_page_stat(pc, idx, charge);
> > > +}
> > > +EXPORT_SYMBOL_GPL(mem_cgroup_update_page_stat_locked);
> > > +
> > > +/*
> > > + * mem_cgroup_update_page_stat_unlocked() - update memcg file cache's accounting
> > > + * @page: the page involved in a file cache operation.
> > > + * @idx: the particular file cache statistic.
> > > + * @charge: true to increment, false to decrement the statistic specified
> > > + * by @idx.
> > > + *
> > > + * Update memory cgroup file cache's accounting from an unlocked context.
> > > + */
> > > +void mem_cgroup_update_page_stat_unlocked(struct page *page,
> > > + enum mem_cgroup_write_page_stat_item idx, bool charge)
> > > +{
> > > + struct page_cgroup *pc;
> > > +
> > > + if (mem_cgroup_disabled())
> > > + return;
> > > + pc = lookup_page_cgroup(page);
> > > + if (unlikely(!pc) || !PageCgroupUsed(pc))
> > > + return;
> > > + lock_page_cgroup(pc);
> > > + mem_cgroup_update_page_stat(pc, idx, charge);
> > > unlock_page_cgroup(pc);
> > > }
> > > +EXPORT_SYMBOL_GPL(mem_cgroup_update_page_stat_unlocked);
> > >
> > IIUC, test_clear_page_writeback() (at least) can be called in interrupt context.
> > This means lock_page_cgroup() is called in interrupt context, that is,
> > case b) above can happen.
> > hmm... I don't have any good idea for now except disabling IRQs around the
> > page cgroup lock to avoid all of this mess.
> >
>
> Hmm... simply IRQ-off for all updates?
I think so, in the current code.
But after these changes, we must use local_irq_save()/restore()
instead of local_irq_disable()/enable() in mem_cgroup_update_page_stat().
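
A minimal sketch of what I mean (illustrative only -- the real body of
mem_cgroup_update_page_stat() is more involved, this just shows the
locking pattern):

        void mem_cgroup_update_page_stat(struct page_cgroup *pc,
                        enum mem_cgroup_write_page_stat_item idx, bool charge)
        {
                unsigned long flags;

                /*
                 * save/restore instead of disable/enable: the update is
                 * then safe even when the caller already has IRQs
                 * disabled, e.g. under mapping->tree_lock.
                 */
                local_irq_save(flags);
                lock_page_cgroup(pc);
                /* ... update the per-memcg statistic selected by @idx ... */
                unlock_page_cgroup(pc);
                local_irq_restore(flags);
        }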

> But IIRC, clear_writeback is done under tree_lock... No?
>
The place where NR_WRITEBACK is updated is outside tree_lock.

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                             page_index(page),
                                             PAGECACHE_TAG_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        if (ret)
                dec_zone_page_state(page, NR_WRITEBACK);
        return ret;
}


Thanks,
Daisuke Nishimura.
From: KAMEZAWA Hiroyuki
On Mon, 8 Mar 2010 11:17:24 +0900
Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> wrote:

> > But IIRC, clear_writeback is done under tree_lock... No?
> >
> The place where NR_WRITEBACK is updated is outside tree_lock.
>
> int test_clear_page_writeback(struct page *page)
> {
>         struct address_space *mapping = page_mapping(page);
>         int ret;
>
>         if (mapping) {
>                 struct backing_dev_info *bdi = mapping->backing_dev_info;
>                 unsigned long flags;
>
>                 spin_lock_irqsave(&mapping->tree_lock, flags);
>                 ret = TestClearPageWriteback(page);
>                 if (ret) {
>                         radix_tree_tag_clear(&mapping->page_tree,
>                                              page_index(page),
>                                              PAGECACHE_TAG_WRITEBACK);
>                         if (bdi_cap_account_writeback(bdi)) {
>                                 __dec_bdi_stat(bdi, BDI_WRITEBACK);
>                                 __bdi_writeout_inc(bdi);
>                         }
>                 }
>                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
>         } else {
>                 ret = TestClearPageWriteback(page);
>         }
>         if (ret)
>                 dec_zone_page_state(page, NR_WRITEBACK);
>         return ret;
> }

We can move this up under tree_lock. Considering memcg, all our targets have a "mapping".
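
Roughly, an untested sketch of that move in test_clear_page_writeback()
(pseudo-diff; only the NR_WRITEBACK update is moved, memcg hooks left out):

                spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                             page_index(page),
                                             PAGECACHE_TAG_WRITEBACK);
+                       /* now under tree_lock, with IRQs disabled */
+                       dec_zone_page_state(page, NR_WRITEBACK);
                        if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
+               if (ret)
+                       dec_zone_page_state(page, NR_WRITEBACK);
        }
-       if (ret)
-               dec_zone_page_state(page, NR_WRITEBACK);
        return ret;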

If we newly account bounce-buffers (for NILFS, FUSE, etc.), which have no ->mapping,
we will need a much more complex charge/uncharge scheme.

But yes, adding a new locking scheme seems complicated. (Sorry, Andrea.)
My concern is performance. We may need some new re-implementation of the
lock/migrate/charge/uncharge paths.

Thanks,
-Kame



From: Daisuke Nishimura
On Mon, 8 Mar 2010 11:37:11 +0900, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> wrote:
> On Mon, 8 Mar 2010 11:17:24 +0900
> Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> wrote:
>
> > > But IIRC, clear_writeback is done under tree_lock... No?
> > >
> > The place where NR_WRITEBACK is updated is outside tree_lock.
> >
> > int test_clear_page_writeback(struct page *page)
> > {
> >         struct address_space *mapping = page_mapping(page);
> >         int ret;
> >
> >         if (mapping) {
> >                 struct backing_dev_info *bdi = mapping->backing_dev_info;
> >                 unsigned long flags;
> >
> >                 spin_lock_irqsave(&mapping->tree_lock, flags);
> >                 ret = TestClearPageWriteback(page);
> >                 if (ret) {
> >                         radix_tree_tag_clear(&mapping->page_tree,
> >                                              page_index(page),
> >                                              PAGECACHE_TAG_WRITEBACK);
> >                         if (bdi_cap_account_writeback(bdi)) {
> >                                 __dec_bdi_stat(bdi, BDI_WRITEBACK);
> >                                 __bdi_writeout_inc(bdi);
> >                         }
> >                 }
> >                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
> >         } else {
> >                 ret = TestClearPageWriteback(page);
> >         }
> >         if (ret)
> >                 dec_zone_page_state(page, NR_WRITEBACK);
> >         return ret;
> > }
>
> We can move this up under tree_lock. Considering memcg, all our targets have a "mapping".
>
> If we newly account bounce-buffers (for NILFS, FUSE, etc.), which have no ->mapping,
> we will need a much more complex charge/uncharge scheme.
>
> But yes, adding a new locking scheme seems complicated. (Sorry, Andrea.)
> My concern is performance. We may need some new re-implementation of the
> lock/migrate/charge/uncharge paths.
>
I agree. Performance is my concern too.

I made a patch (below) and measured the time (average of 10 runs) of a kernel
build on tmpfs (make -j8 on an 8-CPU machine with the 2.6.33 defconfig).

<before>
- root cgroup: 190.47 sec
- child cgroup: 192.81 sec

<after>
- root cgroup: 191.06 sec
- child cgroup: 193.06 sec

Hmm... about 0.3% slower for root, 0.1% slower for child.

===
From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>

In the current implementation, we don't have to disable IRQs at
lock_page_cgroup() because the lock is never acquired in interrupt context.
But we are going to do so in a later patch, so this patch encloses all
lock_page_cgroup()/unlock_page_cgroup() pairs in local_irq_disable()/local_irq_enable().

Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
---
mm/memcontrol.c | 17 +++++++++++++++++
1 files changed, 17 insertions(+), 0 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 02ea959..e5ae1a1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1359,6 +1359,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
if (unlikely(!pc))
return;

+ local_irq_disable();
lock_page_cgroup(pc);
mem = pc->mem_cgroup;
if (!mem)
@@ -1374,6 +1375,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)

done:
unlock_page_cgroup(pc);
+ local_irq_enable();
}

/*
@@ -1711,6 +1713,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
VM_BUG_ON(!PageLocked(page));

pc = lookup_page_cgroup(page);
+ local_irq_disable();
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
@@ -1726,6 +1729,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
rcu_read_unlock();
}
unlock_page_cgroup(pc);
+ local_irq_enable();
return mem;
}

@@ -1742,9 +1746,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
if (!mem)
return;

+ local_irq_disable();
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
+ local_irq_enable();
mem_cgroup_cancel_charge(mem);
return;
}
@@ -1775,6 +1781,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
mem_cgroup_charge_statistics(mem, pc, true);

unlock_page_cgroup(pc);
+ local_irq_enable();
/*
* "charge_statistics" updated event counter. Then, check it.
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -1844,12 +1851,14 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
{
int ret = -EINVAL;
+ local_irq_disable();
lock_page_cgroup(pc);
if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
__mem_cgroup_move_account(pc, from, to, uncharge);
ret = 0;
}
unlock_page_cgroup(pc);
+ local_irq_enable();
/*
* check events
*/
@@ -1981,12 +1990,15 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
pc = lookup_page_cgroup(page);
if (!pc)
return 0;
+ local_irq_disable();
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
unlock_page_cgroup(pc);
+ local_irq_enable();
return 0;
}
unlock_page_cgroup(pc);
+ local_irq_enable();
}

if (unlikely(!mm && !mem))
@@ -2182,6 +2194,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
if (unlikely(!pc || !PageCgroupUsed(pc)))
return NULL;

+ local_irq_disable();
lock_page_cgroup(pc);

mem = pc->mem_cgroup;
@@ -2222,6 +2235,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)

mz = page_cgroup_zoneinfo(pc);
unlock_page_cgroup(pc);
+ local_irq_enable();

memcg_check_events(mem, page);
/* at swapout, this memcg will be accessed to record to swap */
@@ -2232,6 +2246,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)

unlock_out:
unlock_page_cgroup(pc);
+ local_irq_enable();
return NULL;
}

@@ -2424,12 +2439,14 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
return 0;

pc = lookup_page_cgroup(page);
+ local_irq_disable();
lock_page_cgroup(pc);
if (PageCgroupUsed(pc)) {
mem = pc->mem_cgroup;
css_get(&mem->css);
}
unlock_page_cgroup(pc);
+ local_irq_enable();

if (mem) {
ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
--
1.6.4

From: KAMEZAWA Hiroyuki
On Mon, 8 Mar 2010 17:07:11 +0900
Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> wrote:

> On Mon, 8 Mar 2010 11:37:11 +0900, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> wrote:
> > On Mon, 8 Mar 2010 11:17:24 +0900
> > Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> wrote:
> >
> > > > But IIRC, clear_writeback is done under tree_lock... No?
> > > >
> > > The place where NR_WRITEBACK is updated is outside tree_lock.
> > >
> > > int test_clear_page_writeback(struct page *page)
> > > {
> > >         struct address_space *mapping = page_mapping(page);
> > >         int ret;
> > >
> > >         if (mapping) {
> > >                 struct backing_dev_info *bdi = mapping->backing_dev_info;
> > >                 unsigned long flags;
> > >
> > >                 spin_lock_irqsave(&mapping->tree_lock, flags);
> > >                 ret = TestClearPageWriteback(page);
> > >                 if (ret) {
> > >                         radix_tree_tag_clear(&mapping->page_tree,
> > >                                              page_index(page),
> > >                                              PAGECACHE_TAG_WRITEBACK);
> > >                         if (bdi_cap_account_writeback(bdi)) {
> > >                                 __dec_bdi_stat(bdi, BDI_WRITEBACK);
> > >                                 __bdi_writeout_inc(bdi);
> > >                         }
> > >                 }
> > >                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
> > >         } else {
> > >                 ret = TestClearPageWriteback(page);
> > >         }
> > >         if (ret)
> > >                 dec_zone_page_state(page, NR_WRITEBACK);
> > >         return ret;
> > > }
> >
> > We can move this up under tree_lock. Considering memcg, all our targets have a "mapping".
> >
> > If we newly account bounce-buffers (for NILFS, FUSE, etc.), which have no ->mapping,
> > we will need a much more complex charge/uncharge scheme.
> >
> > But yes, adding a new locking scheme seems complicated. (Sorry, Andrea.)
> > My concern is performance. We may need some new re-implementation of the
> > lock/migrate/charge/uncharge paths.
> >
> I agree. Performance is my concern too.
>
> I made a patch (below) and measured the time (average of 10 runs) of a kernel
> build on tmpfs (make -j8 on an 8-CPU machine with the 2.6.33 defconfig).
>
> <before>
> - root cgroup: 190.47 sec
> - child cgroup: 192.81 sec
>
> <after>
> - root cgroup: 191.06 sec
> - child cgroup: 193.06 sec
>
> Hmm... about 0.3% slower for root, 0.1% slower for child.
>

Hmm... acceptable? (It sounds like it's within the error range.)

BTW, why local_irq_disable()?
Wouldn't local_irq_save()/restore() be better?
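
To spell out why I ask: with plain disable/enable, a caller that already
has IRQs disabled gets them turned back on too early. A hypothetical
example (not a path from the patch):

        static void bad_example(struct address_space *mapping,
                                struct page_cgroup *pc)
        {
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags); /* IRQs off */
                local_irq_disable();    /* no-op: already disabled */
                lock_page_cgroup(pc);
                /* ... update statistics ... */
                unlock_page_cgroup(pc);
                local_irq_enable();     /* oops: IRQs on while tree_lock is held */
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        }

local_irq_save()/restore() would keep the caller's IRQ state intact.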

Thanks,
-Kame

> ===
> From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
>
> In the current implementation, we don't have to disable IRQs at
> lock_page_cgroup() because the lock is never acquired in interrupt context.
> But we are going to do so in a later patch, so this patch encloses all
> lock_page_cgroup()/unlock_page_cgroup() pairs in local_irq_disable()/local_irq_enable().
>
> Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
> ---
> mm/memcontrol.c | 17 +++++++++++++++++
> 1 files changed, 17 insertions(+), 0 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 02ea959..e5ae1a1 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1359,6 +1359,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
> if (unlikely(!pc))
> return;
>
> + local_irq_disable();
> lock_page_cgroup(pc);
> mem = pc->mem_cgroup;
> if (!mem)
> @@ -1374,6 +1375,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
>
> done:
> unlock_page_cgroup(pc);
> + local_irq_enable();
> }
>
> /*
> @@ -1711,6 +1713,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
> VM_BUG_ON(!PageLocked(page));
>
> pc = lookup_page_cgroup(page);
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc)) {
> mem = pc->mem_cgroup;
> @@ -1726,6 +1729,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
> rcu_read_unlock();
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
> return mem;
> }
>
> @@ -1742,9 +1746,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
> if (!mem)
> return;
>
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (unlikely(PageCgroupUsed(pc))) {
> unlock_page_cgroup(pc);
> + local_irq_enable();
> mem_cgroup_cancel_charge(mem);
> return;
> }
> @@ -1775,6 +1781,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
> mem_cgroup_charge_statistics(mem, pc, true);
>
> unlock_page_cgroup(pc);
> + local_irq_enable();
> /*
> * "charge_statistics" updated event counter. Then, check it.
> * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
> @@ -1844,12 +1851,14 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
> struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
> {
> int ret = -EINVAL;
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
> __mem_cgroup_move_account(pc, from, to, uncharge);
> ret = 0;
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
> /*
> * check events
> */
> @@ -1981,12 +1990,15 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
> pc = lookup_page_cgroup(page);
> if (!pc)
> return 0;
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc)) {
> unlock_page_cgroup(pc);
> + local_irq_enable();
> return 0;
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
> }
>
> if (unlikely(!mm && !mem))
> @@ -2182,6 +2194,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
> if (unlikely(!pc || !PageCgroupUsed(pc)))
> return NULL;
>
> + local_irq_disable();
> lock_page_cgroup(pc);
>
> mem = pc->mem_cgroup;
> @@ -2222,6 +2235,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
>
> mz = page_cgroup_zoneinfo(pc);
> unlock_page_cgroup(pc);
> + local_irq_enable();
>
> memcg_check_events(mem, page);
> /* at swapout, this memcg will be accessed to record to swap */
> @@ -2232,6 +2246,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
>
> unlock_out:
> unlock_page_cgroup(pc);
> + local_irq_enable();
> return NULL;
> }
>
> @@ -2424,12 +2439,14 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
> return 0;
>
> pc = lookup_page_cgroup(page);
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc)) {
> mem = pc->mem_cgroup;
> css_get(&mem->css);
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
>
> if (mem) {
> ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
> --
> 1.6.4
>
>

From: Andrea Righi
On Mon, Mar 08, 2010 at 05:07:11PM +0900, Daisuke Nishimura wrote:
> On Mon, 8 Mar 2010 11:37:11 +0900, KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> wrote:
> > On Mon, 8 Mar 2010 11:17:24 +0900
> > Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> wrote:
> >
> > > > But IIRC, clear_writeback is done under tree_lock... No?
> > > >
> > > The place where NR_WRITEBACK is updated is outside tree_lock.
> > >
> > > int test_clear_page_writeback(struct page *page)
> > > {
> > >         struct address_space *mapping = page_mapping(page);
> > >         int ret;
> > >
> > >         if (mapping) {
> > >                 struct backing_dev_info *bdi = mapping->backing_dev_info;
> > >                 unsigned long flags;
> > >
> > >                 spin_lock_irqsave(&mapping->tree_lock, flags);
> > >                 ret = TestClearPageWriteback(page);
> > >                 if (ret) {
> > >                         radix_tree_tag_clear(&mapping->page_tree,
> > >                                              page_index(page),
> > >                                              PAGECACHE_TAG_WRITEBACK);
> > >                         if (bdi_cap_account_writeback(bdi)) {
> > >                                 __dec_bdi_stat(bdi, BDI_WRITEBACK);
> > >                                 __bdi_writeout_inc(bdi);
> > >                         }
> > >                 }
> > >                 spin_unlock_irqrestore(&mapping->tree_lock, flags);
> > >         } else {
> > >                 ret = TestClearPageWriteback(page);
> > >         }
> > >         if (ret)
> > >                 dec_zone_page_state(page, NR_WRITEBACK);
> > >         return ret;
> > > }
> >
> > We can move this up under tree_lock. Considering memcg, all our targets have a "mapping".
> >
> > If we newly account bounce-buffers (for NILFS, FUSE, etc.), which have no ->mapping,
> > we will need a much more complex charge/uncharge scheme.
> >
> > But yes, adding a new locking scheme seems complicated. (Sorry, Andrea.)
> > My concern is performance. We may need some new re-implementation of the
> > lock/migrate/charge/uncharge paths.
> >
> I agree. Performance is my concern too.
>
> I made a patch (below) and measured the time (average of 10 runs) of a kernel
> build on tmpfs (make -j8 on an 8-CPU machine with the 2.6.33 defconfig).
>
> <before>
> - root cgroup: 190.47 sec
> - child cgroup: 192.81 sec
>
> <after>
> - root cgroup: 191.06 sec
> - child cgroup: 193.06 sec
>
> Hmm... about 0.3% slower for root, 0.1% slower for child.

Thanks, Daisuke-san. This doesn't seem too bad. I'll repeat the test on
my machine and compare performance with my patch plus your and Kame-san's
fixes. That said, I agree that adding multiple locked/unlocked versions of
mem_cgroup_update_page_stat() is just too bug-prone...

Thanks,
-Andrea

>
> ===
> From: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
>
> In the current implementation, we don't have to disable IRQs at
> lock_page_cgroup() because the lock is never acquired in interrupt context.
> But we are going to do so in a later patch, so this patch encloses all
> lock_page_cgroup()/unlock_page_cgroup() pairs in local_irq_disable()/local_irq_enable().
>
> Signed-off-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
> ---
> mm/memcontrol.c | 17 +++++++++++++++++
> 1 files changed, 17 insertions(+), 0 deletions(-)
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 02ea959..e5ae1a1 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1359,6 +1359,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
> if (unlikely(!pc))
> return;
>
> + local_irq_disable();
> lock_page_cgroup(pc);
> mem = pc->mem_cgroup;
> if (!mem)
> @@ -1374,6 +1375,7 @@ void mem_cgroup_update_file_mapped(struct page *page, int val)
>
> done:
> unlock_page_cgroup(pc);
> + local_irq_enable();
> }
>
> /*
> @@ -1711,6 +1713,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
> VM_BUG_ON(!PageLocked(page));
>
> pc = lookup_page_cgroup(page);
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc)) {
> mem = pc->mem_cgroup;
> @@ -1726,6 +1729,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
> rcu_read_unlock();
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
> return mem;
> }
>
> @@ -1742,9 +1746,11 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
> if (!mem)
> return;
>
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (unlikely(PageCgroupUsed(pc))) {
> unlock_page_cgroup(pc);
> + local_irq_enable();
> mem_cgroup_cancel_charge(mem);
> return;
> }
> @@ -1775,6 +1781,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
> mem_cgroup_charge_statistics(mem, pc, true);
>
> unlock_page_cgroup(pc);
> + local_irq_enable();
> /*
> * "charge_statistics" updated event counter. Then, check it.
> * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
> @@ -1844,12 +1851,14 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
> struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge)
> {
> int ret = -EINVAL;
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
> __mem_cgroup_move_account(pc, from, to, uncharge);
> ret = 0;
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
> /*
> * check events
> */
> @@ -1981,12 +1990,15 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
> pc = lookup_page_cgroup(page);
> if (!pc)
> return 0;
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc)) {
> unlock_page_cgroup(pc);
> + local_irq_enable();
> return 0;
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
> }
>
> if (unlikely(!mm && !mem))
> @@ -2182,6 +2194,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
> if (unlikely(!pc || !PageCgroupUsed(pc)))
> return NULL;
>
> + local_irq_disable();
> lock_page_cgroup(pc);
>
> mem = pc->mem_cgroup;
> @@ -2222,6 +2235,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
>
> mz = page_cgroup_zoneinfo(pc);
> unlock_page_cgroup(pc);
> + local_irq_enable();
>
> memcg_check_events(mem, page);
> /* at swapout, this memcg will be accessed to record to swap */
> @@ -2232,6 +2246,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
>
> unlock_out:
> unlock_page_cgroup(pc);
> + local_irq_enable();
> return NULL;
> }
>
> @@ -2424,12 +2439,14 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
> return 0;
>
> pc = lookup_page_cgroup(page);
> + local_irq_disable();
> lock_page_cgroup(pc);
> if (PageCgroupUsed(pc)) {
> mem = pc->mem_cgroup;
> css_get(&mem->css);
> }
> unlock_page_cgroup(pc);
> + local_irq_enable();
>
> if (mem) {
> ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
> --
> 1.6.4
>