From: Eric Dumazet on
I discovered that we can overflow stack if CONFIG_SLUB_DEBUG=y and use
slabs with many objects, since list_slab_objects() and process_slab()
use DECLARE_BITMAP(map, page->objects);

With 65535 bits, we use 8192 bytes of stack ...

A possible fix is to lower MAX_OBJS_PER_PAGE so that these bitmaps don't
use more than a third of THREAD_SIZE. I suspect plain memory allocation
in these functions is not an option.

Using a non-dynamic (fixed-size) stack allocation also makes the problem
more obvious if somebody runs checkstack.pl.

Signed-off-by: Eric Dumazet <eric.dumazet(a)gmail.com>
---
diff --git a/mm/slub.c b/mm/slub.c
index b364844..adf04c1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -167,7 +167,13 @@

#define OO_SHIFT 16
#define OO_MASK ((1 << OO_SHIFT) - 1)
+
+#ifdef CONFIG_SLUB_DEBUG
+/* We use an on-stack bitmap while debugging; make sure this won't be too big */
+#define MAX_OBJS_PER_PAGE min_t(int, 65535, 8*(THREAD_SIZE/3))
+#else
#define MAX_OBJS_PER_PAGE 65535 /* since page.objects is u16 */
+#endif

/* Internal SLUB flags */
#define __OBJECT_POISON 0x80000000 /* Poison object */
@@ -2426,7 +2432,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
#ifdef CONFIG_SLUB_DEBUG
void *addr = page_address(page);
void *p;
- DECLARE_BITMAP(map, page->objects);
+ DECLARE_BITMAP(map, MAX_OBJS_PER_PAGE);

bitmap_zero(map, page->objects);
slab_err(s, page, "%s", text);
@@ -3651,7 +3657,7 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
struct page *page, enum track_item alloc)
{
void *addr = page_address(page);
- DECLARE_BITMAP(map, page->objects);
+ DECLARE_BITMAP(map, MAX_OBJS_PER_PAGE);
void *p;

bitmap_zero(map, page->objects);


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo(a)vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/