summaryrefslogtreecommitdiffstats
path: root/sys/kern/subr_hibernate.c
diff options
context:
space:
mode:
Diffstat (limited to 'sys/kern/subr_hibernate.c')
-rw-r--r--sys/kern/subr_hibernate.c43
1 files changed, 42 insertions, 1 deletions
diff --git a/sys/kern/subr_hibernate.c b/sys/kern/subr_hibernate.c
index c622e1a2cb8..7ac3bf26277 100644
--- a/sys/kern/subr_hibernate.c
+++ b/sys/kern/subr_hibernate.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: subr_hibernate.c,v 1.5 2011/07/08 18:34:46 ariane Exp $ */
+/* $OpenBSD: subr_hibernate.c,v 1.6 2011/07/08 21:00:53 ariane Exp $ */
/*
* Copyright (c) 2011 Ariane van der Steldt <ariane@stack.nl>
@@ -264,6 +264,47 @@ uvm_pmr_zero_everything(void)
}
/*
+ * Mark all memory as dirty.
+ *
+ * Used to inform the system that the clean memory isn't clean for some
+ * reason, for example because we just came back from hibernate.
+ */
+void
+uvm_pmr_dirty_everything(void)
+{
+ struct uvm_pmemrange *pmr;
+ struct vm_page *pg;
+ int i;
+
+ uvm_lock_fpageq();
+ TAILQ_FOREACH(pmr, &uvm.pmr_control.use, pmr_use) {
+ /* Dirty single pages. */
+ while ((pg = TAILQ_FIRST(&pmr->single[UVM_PMR_MEMTYPE_ZERO]))
+ != NULL) {
+ uvm_pmr_remove(pmr, pg);
+ uvm_pagezero(pg);
+ atomic_clearbits_int(&pg->pg_flags, PG_ZERO);
+ uvm_pmr_insert(pmr, pg, 0);
+ }
+
+ /* Dirty multi page ranges. */
+ while ((pg = RB_ROOT(&pmr->size[UVM_PMR_MEMTYPE_ZERO]))
+ != NULL) {
+ pg--; /* Size tree always has second page. */
+ uvm_pmr_remove(pmr, pg);
+ for (i = 0; i < pg->fpgsz; i++) {
+ uvm_pagezero(&pg[i]);
+ atomic_clearbits_int(&pg[i].pg_flags, PG_ZERO);
+ }
+ uvm_pmr_insert(pmr, pg, 0);
+ }
+ }
+
+ uvmexp.zeropages = 0;
+ uvm_unlock_fpageq();
+}
+
+/*
* Allocate the highest address that can hold sz.
*
* sz in bytes.