
@kostikbel
Last active September 21, 2017 11:06
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index f9d1d31394d..df4cb0a86bc 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2215,15 +2215,21 @@ pmap_qremove(vm_offset_t sva, int count)
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
+ struct pglist pgl;
vm_page_t m;
int count;
+ if (SLIST_EMPTY(free))
+ return;
+ TAILQ_INIT(&pgl);
for (count = 0; (m = SLIST_FIRST(free)) != NULL; count++) {
SLIST_REMOVE_HEAD(free, plinks.s.ss);
/* Preserve the page's PG_ZERO setting. */
- vm_page_free_toq(m);
+ if (vm_page_free_prep(m, false))
+ TAILQ_INSERT_TAIL(&pgl, m, listq);
}
atomic_subtract_int(&vm_cnt.v_wire_count, count);
+ vm_page_free_phys_pglist(&pgl);
}
/*
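The hunk above replaces the per-page vm_page_free_toq() call with a two-phase scheme: vm_page_free_prep() runs for each page without the free-queue lock, the surviving pages are collected on a local pglist, and vm_page_free_phys_pglist() then takes vm_page_queue_free_mtx once for the whole batch. Below is a minimal userland sketch of that idiom; struct page, prep(), and free_locked() are hypothetical stand-ins, not the kernel's vm_page API.

#include <pthread.h>
#include <stdio.h>
#include <sys/queue.h>

struct page {
	int id;
	TAILQ_ENTRY(page) listq;
};
TAILQ_HEAD(pglist, page);

static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Per-page preparation; needs no shared lock and may reject a page. */
static int
prep(struct page *m)
{
	return (m->id >= 0);
}

/* The part that must run under free_lock. */
static void
free_locked(struct page *m)
{
	printf("freeing page %d\n", m->id);
}

static void
free_batch(struct pglist *pgl)
{
	struct page *m;

	if (TAILQ_EMPTY(pgl))		/* empty batch: never touch the lock */
		return;
	pthread_mutex_lock(&free_lock);	/* one acquisition per batch */
	TAILQ_FOREACH(m, pgl, listq)
		free_locked(m);
	pthread_mutex_unlock(&free_lock);
}

int
main(void)
{
	struct pglist pgl;
	struct page pages[4];
	int i;

	TAILQ_INIT(&pgl);
	for (i = 0; i < 4; i++) {
		pages[i].id = i;
		if (prep(&pages[i]))	/* lockless phase, collect survivors */
			TAILQ_INSERT_TAIL(&pgl, &pages[i], listq);
	}
	free_batch(&pgl);		/* locked phase, whole batch at once */
	return (0);
}

The SLIST_EMPTY() early return added at the top of pmap_free_zero_pages() serves the common case where the function is called with nothing to free.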
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index fe015bdb877..33e7e6f8643 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -713,8 +713,8 @@ static void
vm_object_terminate_pages(vm_object_t object)
{
vm_page_t p, p_next;
- struct mtx *mtx, *mtx1;
- struct vm_pagequeue *pq, *pq1;
+ struct mtx *mtx;
+ struct vm_pagequeue *pq;
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -734,34 +734,19 @@ vm_object_terminate_pages(vm_object_t object)
* vm_page_free_prep() only needs the page
* lock for managed pages.
*/
- mtx1 = vm_page_lockptr(p);
- if (mtx1 != mtx) {
- if (mtx != NULL)
- mtx_unlock(mtx);
- if (pq != NULL) {
- vm_pagequeue_unlock(pq);
- pq = NULL;
- }
- mtx = mtx1;
- mtx_lock(mtx);
+ if (vm_page_lockptr(p) != mtx && pq != NULL) {
+ vm_pagequeue_unlock(pq);
+ pq = NULL;
}
+ vm_page_change_lock(p, &mtx);
}
p->object = NULL;
if (p->wire_count != 0)
goto unlist;
VM_CNT_INC(v_pfree);
p->flags &= ~PG_ZERO;
- if (p->queue != PQ_NONE) {
- KASSERT(p->queue < PQ_COUNT, ("vm_object_terminate: "
- "page %p is not queued", p));
- pq1 = vm_page_pagequeue(p);
- if (pq != pq1) {
- if (pq != NULL)
- vm_pagequeue_unlock(pq);
- pq = pq1;
- vm_pagequeue_lock(pq);
- }
- }
+ if (p->queue != PQ_NONE)
+ vm_page_change_qlock(p, &pq);
if (vm_page_free_prep(p, true))
continue;
unlist:
@@ -1953,6 +1938,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
{
vm_page_t p, next;
struct mtx *mtx;
+ struct vm_pagequeue *pq;
struct pglist pgl;
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -1966,6 +1952,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
again:
p = vm_page_find_least(object, start);
mtx = NULL;
+ pq = NULL;
/*
* Here, the variable "p" is either (1) the page with the least pindex
@@ -1982,8 +1969,14 @@ again:
* however, be invalidated if the option OBJPR_CLEANONLY is
* not specified.
*/
+ if (vm_page_lockptr(p) != mtx && pq != NULL) {
+ vm_pagequeue_unlock(pq);
+ pq = NULL;
+ }
vm_page_change_lock(p, &mtx);
if (vm_page_xbusied(p)) {
+ if (pq != NULL)
+ vm_pagequeue_unlock(pq);
VM_OBJECT_WUNLOCK(object);
vm_page_busy_sleep(p, "vmopax", true);
VM_OBJECT_WLOCK(object);
@@ -1999,6 +1992,8 @@ again:
continue;
}
if (vm_page_busied(p)) {
+ if (pq != NULL)
+ vm_pagequeue_unlock(pq);
VM_OBJECT_WUNLOCK(object);
vm_page_busy_sleep(p, "vmopar", false);
VM_OBJECT_WLOCK(object);
@@ -2014,10 +2009,16 @@ again:
}
if ((options & OBJPR_NOTMAPPED) == 0)
pmap_remove_all(p);
+ else if (p->queue != PQ_NONE)
+ vm_page_change_qlock(p, &pq);
p->flags &= ~PG_ZERO;
- if (vm_page_free_prep(p, false))
+ if (vm_page_free_prep(p, pq != NULL))
TAILQ_INSERT_TAIL(&pgl, p, listq);
}
+ if (pq != NULL) {
+ MPASS((options & OBJPR_NOTMAPPED) != 0);
+ vm_pagequeue_unlock(pq);
+ }
if (mtx != NULL)
mtx_unlock(mtx);
vm_page_free_phys_pglist(&pgl);
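The two vm_object.c functions above replace their open-coded lock switching with vm_page_change_lock() and the new vm_page_change_qlock(), and they now release the held pagequeue lock whenever the page lock is about to change hands, reacquiring a queue lock only when a queued page actually needs one; the boolean passed to vm_page_free_prep() then reflects whether the caller already holds the page's queue lock. Here is a minimal pthread sketch of the underlying lock-handoff idiom, with lockptr() and change_lock() as hypothetical stand-ins for vm_page_lockptr() and vm_page_change_lock():

#include <pthread.h>
#include <stdint.h>

#define	NLOCKS	8
static pthread_mutex_t locks[NLOCKS];

/* Pick the lock covering this item (stand-in for vm_page_lockptr()). */
static pthread_mutex_t *
lockptr(const void *item)
{
	return (&locks[((uintptr_t)item >> 6) % NLOCKS]);
}

/* Carry at most one held lock; switch only when the item needs another. */
static void
change_lock(const void *item, pthread_mutex_t **held)
{
	pthread_mutex_t *want = lockptr(item);

	if (*held != want) {
		if (*held != NULL)
			pthread_mutex_unlock(*held);
		*held = want;
		pthread_mutex_lock(want);
	}
}

int
main(void)
{
	int items[16];
	pthread_mutex_t *held = NULL;
	int i;

	for (i = 0; i < NLOCKS; i++)
		pthread_mutex_init(&locks[i], NULL);
	for (i = 0; i < 16; i++)
		change_lock(&items[i], &held);	/* adjacent items often hit */
	if (held != NULL)
		pthread_mutex_unlock(held);
	return (0);
}

The payoff is in loops like the ones above: consecutive pages frequently map to the same lock, so the unlock/lock pair is skipped for most iterations.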
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index d1cb81a80c3..c8ddb3c9ff2 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -956,6 +956,22 @@ vm_page_change_lock(vm_page_t m, struct mtx **mtx)
mtx_lock(mtx1);
}
+void
+vm_page_change_qlock(vm_page_t m, struct vm_pagequeue **pq)
+{
+ struct vm_pagequeue *pq1;
+
+ KASSERT(m->queue < PQ_COUNT, ("vm_page_change_qlock: "
+ "page %p is not queued", m));
+ pq1 = vm_page_pagequeue(m);
+ if (*pq != pq1) {
+ if (*pq != NULL)
+ vm_pagequeue_unlock(*pq);
+ *pq = pq1;
+ vm_pagequeue_lock(pq1);
+ }
+}
+
/*
* Keep page from being freed by the page daemon
* much of the same effect as wiring, except much lower
@@ -2856,6 +2872,8 @@ vm_page_free_phys_pglist(struct pglist *tq)
{
vm_page_t m;
+ if (TAILQ_EMPTY(tq))
+ return;
mtx_lock(&vm_page_queue_free_mtx);
TAILQ_FOREACH(m, tq, listq)
vm_page_free_phys(m);
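vm_page_change_qlock() mirrors vm_page_change_lock() for pagequeue locks: it asserts the page is queued, looks up the page's queue, and swaps the caller's held lock only on a mismatch, so a run of pages on the same queue costs a single acquisition. A self-contained sketch of the same pattern; struct item, NQUEUES, and change_qlock() are hypothetical userland names:

#include <assert.h>
#include <pthread.h>

#define	NQUEUES	2
static pthread_mutex_t qlocks[NQUEUES];

struct item {
	int queue;	/* -1 would mean "not queued", like PQ_NONE */
};

static void
change_qlock(struct item *it, pthread_mutex_t **pq)
{
	pthread_mutex_t *pq1;

	assert(it->queue >= 0 && it->queue < NQUEUES);
	pq1 = &qlocks[it->queue];
	if (*pq != pq1) {
		if (*pq != NULL)
			pthread_mutex_unlock(*pq);
		*pq = pq1;
		pthread_mutex_lock(pq1);
	}
}

int
main(void)
{
	struct item items[4] = {{0}, {0}, {1}, {1}};
	pthread_mutex_t *pq = NULL;
	int i;

	for (i = 0; i < NQUEUES; i++)
		pthread_mutex_init(&qlocks[i], NULL);
	for (i = 0; i < 4; i++)
		change_qlock(&items[i], &pq);	/* two switches total, not four */
	if (pq != NULL)
		pthread_mutex_unlock(pq);
	return (0);
}

vm_page_free_phys_pglist() also gains a TAILQ_EMPTY() early return, so callers that batched nothing never contend on vm_page_queue_free_mtx.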
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 44d8cdf7e70..986e75cec74 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -474,6 +474,7 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
void vm_page_change_lock(vm_page_t m, struct mtx **mtx);
+void vm_page_change_qlock(vm_page_t m, struct vm_pagequeue **pq);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
vm_page_t *ma, int count);