-rw-r--r--  Documentation/ABI/testing/sysfs-memory-page-offline |  44
-rw-r--r--  drivers/base/memory.c                               |  61
-rw-r--r--  include/linux/mm.h                                  |   3
-rw-r--r--  mm/hwpoison-inject.c                                |   2
-rw-r--r--  mm/memory-failure.c                                 | 194
5 files changed, 297 insertions(+), 7 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-memory-page-offline b/Documentation/ABI/testing/sysfs-memory-page-offline
new file mode 100644
index 000000000000..e14703f12fdf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-memory-page-offline
@@ -0,0 +1,44 @@
+What: /sys/devices/system/memory/soft_offline_page
+Date: Sep 2009
+KernelVersion: 2.6.33
+Contact: andi@firstfloor.org
+Description:
+ Soft-offline the memory page containing the physical address
+ written into this file. Input is a hex number specifying the
+ physical address of the page. The kernel will then attempt
+ to soft-offline it, by moving the contents elsewhere or
+ dropping it if possible. The page will then be placed
+ on the bad page list and never be reused.
+
+ The offlining is done in kernel-specific granularity.
+ Normally it's the base page size of the kernel, but
+ this might change.
+
+ The page must still be accessible, not poisoned. The
+ kernel will never kill anything for this, but rather
+ fail the offline. The return value is the size of the
+ written number, or an error if the offlining failed.
+ Reading the file is not allowed.
+
+What: /sys/devices/system/memory/hard_offline_page
+Date: Sep 2009
+KernelVersion: 2.6.33
+Contact: andi@firstfloor.org
+Description:
+ Hard-offline the memory page containing the physical
+ address written into this file. Input is a hex number
+ specifying the physical address of the page. The
+ kernel will then attempt to hard-offline the page, by
+ trying to drop the page, killing any owner, or
+ triggering IO errors if needed. Note this may kill
+ any processes owning the page. The kernel will avoid
+ accessing this page, assuming it has been poisoned
+ by the hardware.
+
+ The offlining is done in kernel-specific granularity.
+ Normally it's the base page size of the kernel, but
+ this might change.
+
+ The return value is the size of the written number,
+ or an error if the offlining failed.
+ Reading the file is not allowed.
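
A minimal user-space sketch of the interface documented above (the physical address 0x12340000 is a made-up placeholder, not a value from this patch; the write requires CAP_SYS_ADMIN, so run as root):

/* Ask the kernel to soft-offline the page containing a given
 * physical address by writing it, as hex, to the sysfs file. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/devices/system/memory/soft_offline_page";
	const char *addr = "0x12340000";	/* hypothetical physical address */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* On success the write returns the number of bytes written;
	 * a failed offline shows up as -1 with errno set. */
	if (write(fd, addr, strlen(addr)) < 0)
		perror("write");
	close(fd);
	return 0;
}

Writing to hard_offline_page works the same way, but may kill processes owning the page, so it is only appropriate once the hardware has actually flagged the page as bad.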
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 989429cfed88..c4c8f2e1dd15 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -341,6 +341,64 @@ static inline int memory_probe_init(void)
}
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t
+store_soft_offline_page(struct class *class, const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (strict_strtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+ ret = soft_offline_page(pfn_to_page(pfn), 0);
+ return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t
+store_hard_offline_page(struct class *class, const char *buf, size_t count)
+{
+ int ret;
+ u64 pfn;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (strict_strtoull(buf, 0, &pfn) < 0)
+ return -EINVAL;
+ pfn >>= PAGE_SHIFT;
+ ret = __memory_failure(pfn, 0, 0);
+ return ret ? ret : count;
+}
+
+static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+
+static __init int memory_fail_init(void)
+{
+ int err;
+
+ err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+ &class_attr_soft_offline_page.attr);
+ if (!err)
+ err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+ &class_attr_hard_offline_page.attr);
+ return err;
+}
+#else
+static inline int memory_fail_init(void)
+{
+ return 0;
+}
+#endif
+
/*
* Note that phys_device is optional. It is here to allow for
* differentiation between which *physical* devices each
@@ -473,6 +531,9 @@ int __init memory_dev_init(void)
err = memory_probe_init();
if (!ret)
ret = err;
+ err = memory_fail_init();
+ if (!ret)
+ ret = err;
err = block_size_init();
if (!ret)
ret = err;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8cdb941fc7b5..849b4a61bd8f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1339,8 +1339,9 @@ extern int __memory_failure(unsigned long pfn, int trapno, int flags);
extern int unpoison_memory(unsigned long pfn);
extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
-extern void shake_page(struct page *p);
+extern void shake_page(struct page *p, int access);
extern atomic_long_t mce_bad_pages;
+extern int soft_offline_page(struct page *page, int flags);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index c597f46ac18a..a77fe3f9e211 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -29,7 +29,7 @@ static int hwpoison_inject(void *data, u64 val)
return 0;
if (!PageLRU(p))
- shake_page(p);
+ shake_page(p, 0);
/*
* This implies unable to support non-LRU pages.
*/
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b5c3b6bd511f..bcce28755832 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -41,6 +41,9 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/backing-dev.h>
+#include <linux/migrate.h>
+#include <linux/page-isolation.h>
+#include <linux/suspend.h>
#include "internal.h"
int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -201,7 +204,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
* When an unknown page type is encountered, drain as many buffers as possible
* in the hope of turning the page into an LRU or free page, which we can handle.
*/
-void shake_page(struct page *p)
+void shake_page(struct page *p, int access)
{
if (!PageSlab(p)) {
lru_add_drain_all();
@@ -211,11 +214,19 @@ void shake_page(struct page *p)
if (PageLRU(p) || is_free_buddy_page(p))
return;
}
+
/*
- * Could call shrink_slab here (which would also
- * shrink other caches). Unfortunately that might
- * also access the corrupted page, which could be fatal.
+ * Only call shrink_slab here (which would also
+ * shrink other caches) if access is not potentially fatal.
*/
+ if (access) {
+ int nr;
+ do {
+ nr = shrink_slab(1000, GFP_KERNEL, 1000);
+ if (page_count(p) == 0)
+ break;
+ } while (nr > 10);
+ }
}
EXPORT_SYMBOL_GPL(shake_page);
@@ -949,7 +960,7 @@ int __memory_failure(unsigned long pfn, int trapno, int flags)
* walked by the page reclaim code, however that's not a big loss.
*/
if (!PageLRU(p))
- shake_page(p);
+ shake_page(p, 0);
if (!PageLRU(p)) {
/*
* shake_page could have turned it free.
@@ -1099,3 +1110,176 @@ int unpoison_memory(unsigned long pfn)
return 0;
}
EXPORT_SYMBOL(unpoison_memory);
+
+static struct page *new_page(struct page *p, unsigned long private, int **x)
+{
+ return alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
+}
+
+/*
+ * Safely get reference count of an arbitrary page.
+ * Returns 0 for a free page, -EIO for a zero refcount page
+ * that is not free, and 1 for any other page type.
+ * For 1 the page is returned with increased page count, otherwise not.
+ */
+static int get_any_page(struct page *p, unsigned long pfn, int flags)
+{
+ int ret;
+
+ if (flags & MF_COUNT_INCREASED)
+ return 1;
+
+ /*
+ * The lock_system_sleep prevents a race with memory hotplug,
+ * because the isolation assumes there's only a single user.
+ * This is a big hammer; a better fix would be nicer.
+ */
+ lock_system_sleep();
+
+ /*
+ * Isolate the page, so that it doesn't get reallocated if it
+ * was free.
+ */
+ set_migratetype_isolate(p);
+ if (!get_page_unless_zero(compound_head(p))) {
+ if (is_free_buddy_page(p)) {
+ pr_debug("get_any_page: %#lx free buddy page\n", pfn);
+ /* Set hwpoison bit while page is still isolated */
+ SetPageHWPoison(p);
+ ret = 0;
+ } else {
+ pr_debug("get_any_page: %#lx: unknown zero refcount page type %lx\n",
+ pfn, p->flags);
+ ret = -EIO;
+ }
+ } else {
+ /* Not a free page */
+ ret = 1;
+ }
+ unset_migratetype_isolate(p);
+ unlock_system_sleep();
+ return ret;
+}
+
+/**
+ * soft_offline_page - Soft offline a page.
+ * @page: page to offline
+ * @flags: flags. Same as memory_failure().
+ *
+ * Returns 0 on success, otherwise negated errno.
+ *
+ * Soft offline a page, by migration or invalidation,
+ * without killing anything. This is for the case when
+ * a page is not corrupted yet (so it's still valid to access),
+ * but has had a number of corrected errors and is better taken
+ * out.
+ *
+ * The actual policy on when to do that is maintained by
+ * user space.
+ *
+ * This should never impact any application or cause data loss,
+ * however it might take some time.
+ *
+ * This is not a 100% solution for all memory, but tries to be
+ * ``good enough'' for the majority of memory.
+ */
+int soft_offline_page(struct page *page, int flags)
+{
+ int ret;
+ unsigned long pfn = page_to_pfn(page);
+
+ ret = get_any_page(page, pfn, flags);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+
+ /*
+ * Page cache page we can handle?
+ */
+ if (!PageLRU(page)) {
+ /*
+ * Try to free it.
+ */
+ put_page(page);
+ shake_page(page, 1);
+
+ /*
+ * Did it turn free?
+ */
+ ret = get_any_page(page, pfn, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ goto done;
+ }
+ if (!PageLRU(page)) {
+ pr_debug("soft_offline: %#lx: unknown non LRU page type %lx\n",
+ pfn, page->flags);
+ return -EIO;
+ }
+
+ lock_page(page);
+ wait_on_page_writeback(page);
+
+ /*
+ * Synchronized using the page lock with memory_failure()
+ */
+ if (PageHWPoison(page)) {
+ unlock_page(page);
+ put_page(page);
+ pr_debug("soft offline: %#lx page already poisoned\n", pfn);
+ return -EBUSY;
+ }
+
+ /*
+ * Try to invalidate first. This should work for
+ * non dirty unmapped page cache pages.
+ */
+ ret = invalidate_inode_page(page);
+ unlock_page(page);
+
+ /*
+ * Drop count because page migration doesn't like raised
+ * counts. The page could get re-allocated, but if it becomes
+ * LRU the isolation will just fail.
+ * RED-PEN would be better to keep it isolated here, but we
+ * would need to fix isolation locking first.
+ */
+ put_page(page);
+ if (ret == 1) {
+ ret = 0;
+ pr_debug("soft_offline: %#lx: invalidated\n", pfn);
+ goto done;
+ }
+
+ /*
+ * Simple invalidation didn't work.
+ * Try to migrate to a new page instead. migrate.c
+ * handles a large number of cases for us.
+ */
+ ret = isolate_lru_page(page);
+ if (!ret) {
+ LIST_HEAD(pagelist);
+
+ list_add(&page->lru, &pagelist);
+ ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+ if (ret) {
+ pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
+ pfn, ret, page->flags);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ } else {
+ pr_debug("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
+ pfn, ret, page_count(page), page->flags);
+ }
+ if (ret)
+ return ret;
+
+done:
+ atomic_long_add(1, &mce_bad_pages);
+ SetPageHWPoison(page);
+ /* keep elevated page count for bad page */
+ return ret;
+}
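
For completeness, a hypothetical in-kernel caller of soft_offline_page(); the function name and the idea of a corrected-error counter are invented for illustration, and mirror what store_soft_offline_page() above already does via sysfs:

#include <linux/kernel.h>
#include <linux/mm.h>

/* Sketch: retire a page that has accumulated corrected errors. */
static void example_retire_page(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return;
	/*
	 * flags == 0: soft_offline_page() takes its own reference via
	 * get_any_page(); a caller that already holds a reference would
	 * pass MF_COUNT_INCREASED instead.
	 */
	if (soft_offline_page(pfn_to_page(pfn), 0))
		pr_debug("example: could not soft-offline pfn %#lx\n", pfn);
}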