From: Wu Fengguang on
When /dev/kmem read()/write() encounters a hwpoison page, stop it
and return the amount of work done so far, or return -EIO if
nothing has been copied.

CC: Kelly Bowa <kmb(a)tuxedu.org>
CC: Greg KH <greg(a)kroah.com>
CC: Andi Kleen <andi(a)firstfloor.org>
CC: Benjamin Herrenschmidt <benh(a)kernel.crashing.org>
CC: Christoph Lameter <cl(a)linux-foundation.org>
CC: Ingo Molnar <mingo(a)elte.hu>
CC: Tejun Heo <tj(a)kernel.org>
CC: Nick Piggin <npiggin(a)suse.de>
CC: KAMEZAWA Hiroyuki <kamezawa.hiroyu(a)jp.fujitsu.com>
Signed-off-by: Wu Fengguang <fengguang.wu(a)intel.com>
---
drivers/char/mem.c | 26 ++++++++++++++++++++------
mm/vmalloc.c | 8 ++++++++
2 files changed, 28 insertions(+), 6 deletions(-)

--- linux-mm.orig/drivers/char/mem.c 2010-01-11 10:32:39.000000000 +0800
+++ linux-mm/drivers/char/mem.c 2010-01-11 10:32:42.000000000 +0800
@@ -426,6 +426,9 @@ static ssize_t read_kmem(struct file *fi
*/
kbuf = xlate_dev_kmem_ptr((char *)p);

+ if (unlikely(virt_addr_valid(kbuf) &&
+ PageHWPoison(virt_to_page(kbuf))))
+ return -EIO;
if (copy_to_user(buf, kbuf, sz))
return -EFAULT;
buf += sz;
@@ -447,8 +450,10 @@ static ssize_t read_kmem(struct file *fi
break;
}
sz = vread_page(kbuf, (char *)p, sz);
- if (!sz)
+ if (sz <= 0) {
+ err = sz;
break;
+ }
if (copy_to_user(buf, kbuf, sz)) {
err = -EFAULT;
break;
@@ -471,6 +476,7 @@ do_write_kmem(unsigned long p, const cha
{
ssize_t written, sz;
unsigned long copied;
+ int err = 0;

written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
@@ -497,13 +503,19 @@ do_write_kmem(unsigned long p, const cha
*/
ptr = xlate_dev_kmem_ptr((char *)p);

+ if (unlikely(virt_addr_valid(ptr) &&
+ PageHWPoison(virt_to_page(ptr)))) {
+ err = -EIO;
+ break;
+ }
+
copied = copy_from_user(ptr, buf, sz);
if (copied) {
written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ err = -EFAULT;
+ break;
}
+
buf += sz;
p += sz;
count -= sz;
@@ -511,7 +523,7 @@ do_write_kmem(unsigned long p, const cha
}

*ppos += written;
- return written;
+ return written ? written : err;
}


@@ -555,7 +567,9 @@ static ssize_t write_kmem(struct file *
err = -EFAULT;
break;
}
- vwrite_page(kbuf, (char *)p, sz);
+ err = vwrite_page(kbuf, (char *)p, sz);
+ if (err < 0)
+ break;
count -= sz;
buf += sz;
virtr += sz;
--- linux-mm.orig/mm/vmalloc.c 2010-01-11 10:32:39.000000000 +0800
+++ linux-mm/mm/vmalloc.c 2010-01-11 10:33:21.000000000 +0800
@@ -1654,6 +1654,7 @@ EXPORT_SYMBOL(vmalloc_32_user);
*
* Returns # of bytes copied on success.
* Returns 0 if @addr is not vmalloc'ed, or is mapped to non-RAM.
+ * Returns -EIO if the mapped page is corrupted.
*
* This function checks that addr is a valid vmalloc'ed area, and
* copy data from that area to a given buffer. If the given memory range
@@ -1684,6 +1685,10 @@ int vread_page(char *buf, char *addr, un
memset(buf, 0, count);
return 0;
}
+ if (PageHWPoison(p)) {
+ memset(buf, 0, count);
+ return -EIO;
+ }

/*
* To do safe access to this _mapped_ area, we need
@@ -1707,6 +1712,7 @@ int vread_page(char *buf, char *addr, un
*
* Returns # of bytes copied on success.
* Returns 0 if @addr is not vmalloc'ed, or is mapped to non-RAM.
+ * Returns -EIO if the mapped page is corrupted.
*
* This function checks that addr is a valid vmalloc'ed area, and
* copy data from a buffer to the given addr. If specified range of
@@ -1736,6 +1742,8 @@ int vwrite_page(char *buf, char *addr, u
return 0;
if (!page_is_ram(page_to_pfn(p)))
return 0;
+ if (PageHWPoison(p))
+ return -EIO;

map = kmap_atomic(p, KM_USER0);
memcpy(map + offset, buf, count);


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo(a)vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/