mirror of https://git.FreeBSD.org/src.git
With the demise of page coloring, the page queue macros no longer serve any
useful purpose. Eliminate them.

Reviewed by:	kib
commit 9cf5198832
parent 70b0d39bbc

Notes:
    svn2git, 2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=209647
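
For context, the eliminated macros were thin wrappers around the page's queue field, so every call site can simply read or write m->queue directly. Below is a minimal, self-contained sketch of the before/after pattern. It is not kernel code: struct toy_page and the queue index values here are illustrative stand-ins (only the macro bodies themselves are reproduced from the deleted vm_page.h block in the diff that follows).

#include <assert.h>
#include <stdio.h>

/* Illustrative queue indices; stand-ins, not the kernel's definitions. */
enum { PQ_NONE = -1, PQ_INACTIVE = 1, PQ_ACTIVE = 2, PQ_HOLD = 3 };

/* Toy stand-in for struct vm_page; only the queue field matters here. */
struct toy_page {
        int queue;
};

/* The removed wrappers, reproduced from the deleted vm_page.h block below. */
#define VM_PAGE_GETQUEUE(m)             ((m)->queue)
#define VM_PAGE_GETKNOWNQUEUE2(m)       VM_PAGE_GETQUEUE(m)
#define VM_PAGE_INQUEUE2(m, q)          (VM_PAGE_GETKNOWNQUEUE2(m) == (q))
#define VM_PAGE_SETQUEUE2(m, q)         (VM_PAGE_GETQUEUE(m) = (q))

int
main(void)
{
        struct toy_page pg = { PQ_NONE };

        /* Old style: go through the macros. */
        VM_PAGE_SETQUEUE2(&pg, PQ_ACTIVE);
        assert(VM_PAGE_INQUEUE2(&pg, PQ_ACTIVE));

        /* New style after this commit: touch the field directly. */
        pg.queue = PQ_INACTIVE;
        assert(pg.queue == PQ_INACTIVE);

        printf("queue index: %d\n", pg.queue);
        return (0);
}

Once page coloring was gone the macros added no abstraction beyond a plain field access, so removing them leaves the call sites reading the same as what the compiler saw anyway, as the diff shows.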
sys/vm/vm_contig.c
@@ -175,7 +175,7 @@ vm_contig_launder(int queue, vm_paddr_t low, vm_paddr_t high)
                         vm_page_unlock(m);
                         continue;
                 }
-                KASSERT(VM_PAGE_INQUEUE2(m, queue),
+                KASSERT(m->queue == queue,
                     ("vm_contig_launder: page %p's queue is not %d", m, queue));
                 error = vm_contig_launder_page(m, &next);
                 vm_page_lock_assert(m, MA_NOTOWNED);

sys/vm/vm_page.c
@@ -592,7 +592,7 @@ vm_page_unhold(vm_page_t mem)
         vm_page_lock_assert(mem, MA_OWNED);
         --mem->hold_count;
         KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
-        if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
+        if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
                 vm_page_free_toq(mem);
 }
 
@@ -1381,10 +1381,11 @@ vm_waitpfault(void)
 void
 vm_page_requeue(vm_page_t m)
 {
-        int queue = VM_PAGE_GETQUEUE(m);
         struct vpgqueues *vpq;
+        int queue;
 
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+        queue = m->queue;
         KASSERT(queue != PQ_NONE,
             ("vm_page_requeue: page %p is not queued", m));
         vpq = &vm_page_queues[queue];
@@ -1422,12 +1423,12 @@ vm_page_queue_remove(int queue, vm_page_t m)
 void
 vm_pageq_remove(vm_page_t m)
 {
-        int queue = VM_PAGE_GETQUEUE(m);
+        int queue;
 
         vm_page_lock_assert(m, MA_OWNED);
-        if (queue != PQ_NONE) {
+        if ((queue = m->queue) != PQ_NONE) {
                 vm_page_lock_queues();
-                VM_PAGE_SETQUEUE2(m, PQ_NONE);
+                m->queue = PQ_NONE;
                 vm_page_queue_remove(queue, m);
                 vm_page_unlock_queues();
         }
@@ -1446,7 +1447,7 @@ vm_page_enqueue(int queue, vm_page_t m)
         struct vpgqueues *vpq;
 
         vpq = &vm_page_queues[queue];
-        VM_PAGE_SETQUEUE2(m, queue);
+        m->queue = queue;
         TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
         ++*vpq->cnt;
 }
@@ -1467,7 +1468,7 @@ vm_page_activate(vm_page_t m)
         int queue;
 
         vm_page_lock_assert(m, MA_OWNED);
-        if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) != PQ_ACTIVE) {
+        if ((queue = m->queue) != PQ_ACTIVE) {
                 if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                         if (m->act_count < ACT_INIT)
                                 m->act_count = ACT_INIT;
@@ -1728,7 +1729,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
         /*
          * Ignore if already inactive.
          */
-        if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) == PQ_INACTIVE)
+        if ((queue = m->queue) == PQ_INACTIVE)
                 return;
         if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
                 vm_page_lock_queues();
@@ -1741,7 +1742,7 @@ _vm_page_deactivate(vm_page_t m, int athead)
                 else
                         TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
                             pageq);
-                VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
+                m->queue = PQ_INACTIVE;
                 cnt.v_inactive_count++;
                 vm_page_unlock_queues();
         }
@@ -1954,8 +1955,7 @@ vm_page_dontneed(vm_page_t m)
         /*
          * Occasionally leave the page alone.
          */
-        if ((dnw & 0x01F0) == 0 ||
-            VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) {
+        if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
                 if (m->act_count >= ACT_INIT)
                         --m->act_count;
                 return;

sys/vm/vm_page.h
@@ -153,18 +153,6 @@ struct vm_page {
 #define PQ_HOLD 3
 #define PQ_COUNT 4
 
-/* Returns the real queue a page is on. */
-#define VM_PAGE_GETQUEUE(m) ((m)->queue)
-
-/* Returns the well known queue a page is on. */
-#define VM_PAGE_GETKNOWNQUEUE2(m) VM_PAGE_GETQUEUE(m)
-
-/* Returns true if the page is in the named well known queue. */
-#define VM_PAGE_INQUEUE2(m, q) (VM_PAGE_GETKNOWNQUEUE2(m) == (q))
-
-/* Sets the queue a page is on. */
-#define VM_PAGE_SETQUEUE2(m, q) (VM_PAGE_GETQUEUE(m) = (q))
-
 struct vpgqueues {
         struct pglist pl;
         int *cnt;

sys/vm/vm_pageout.c
@@ -773,9 +773,8 @@ vm_pageout_scan(int pass)
 
                 cnt.v_pdpages++;
 
-                if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE) {
+                if (m->queue != PQ_INACTIVE)
                         goto rescan0;
-                }
 
                 next = TAILQ_NEXT(m, pageq);
 
@@ -1025,7 +1024,7 @@ vm_pageout_scan(int pass)
                          * above.  The page might have been freed and
                          * reused for another vnode.
                          */
-                        if (VM_PAGE_GETQUEUE(m) != PQ_INACTIVE ||
+                        if (m->queue != PQ_INACTIVE ||
                             m->object != object ||
                             TAILQ_NEXT(m, pageq) != &marker) {
                                 vm_page_unlock(m);
@@ -1115,7 +1114,7 @@ vm_pageout_scan(int pass)
 
         while ((m != NULL) && (pcount-- > 0) && (page_shortage > 0)) {
 
-                KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
+                KASSERT(m->queue == PQ_ACTIVE,
                     ("vm_pageout_scan: page %p isn't active", m));
 
                 next = TAILQ_NEXT(m, pageq);
@@ -1379,7 +1378,7 @@ vm_pageout_page_stats()
         while ((m != NULL) && (pcount-- > 0)) {
                 int actcount;
 
-                KASSERT(VM_PAGE_INQUEUE2(m, PQ_ACTIVE),
+                KASSERT(m->queue == PQ_ACTIVE,
                     ("vm_pageout_page_stats: page %p isn't active", m));
 
                 next = TAILQ_NEXT(m, pageq);