1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-01 08:27:59 +00:00

Move pmap_collect() out of the machine-dependent code, rename it

to reflect its new location, and add page queue and flag locking.

Notes: (1) alpha, i386, and ia64 had identical implementations
of pmap_collect() in terms of machine-independent interfaces;
(2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
This commit is contained in:
Alan Cox 2002-11-13 05:39:58 +00:00
parent c3e6b1182b
commit eea85e9bb6
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=106838
11 changed files with 37 additions and 154 deletions

View File

@ -323,7 +323,7 @@ static struct mtx allpmaps_lock;
*/
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
int pmap_pagedaemon_waken;
static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
@ -1723,36 +1723,6 @@ get_pv_entry(void)
return uma_zalloc(pvzone, M_NOWAIT);
}
/*
 * This routine is very drastic, but can save the system
 * in a pinch: once the pagedaemon has signalled pv-entry
 * pressure (pmap_pagedaemon_waken), strip every mapping
 * from each page that is not wired, held, or busy so its
 * pv entries can be reclaimed.  Rate-limits its console
 * warning to five occurrences.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	/* Nothing to do unless the pagedaemon asked for help. */
	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		/* Skip pages whose mappings must remain intact. */
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	/* Pressure handled; re-arm for the next wakeup. */
	pmap_pagedaemon_waken = 0;
}
/*
* If it is the first entry on the list, it is actually
* in the header and we must copy the following entry up

View File

@ -173,7 +173,7 @@ extern u_int32_t KERNend;
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
int pmap_pagedaemon_waken;
/*
* All those kernel PT submaps that BSD is so fond of
@ -1618,36 +1618,6 @@ get_pv_entry(void)
return uma_zalloc(pvzone, M_NOWAIT);
}
/*
 * This routine is very drastic, but can save the system
 * in a pinch: once the pagedaemon has signalled pv-entry
 * pressure (pmap_pagedaemon_waken), strip every mapping
 * from each page that is not wired, held, or busy so its
 * pv entries can be reclaimed.  Rate-limits its console
 * warning to five occurrences.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	/* Nothing to do unless the pagedaemon asked for help. */
	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		/* Skip pages whose mappings must remain intact. */
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	/* Pressure handled; re-arm for the next wakeup. */
	pmap_pagedaemon_waken = 0;
}
/*
* If it is the first entry on the list, it is actually
* in the header and we must copy the following entry up

View File

@ -173,7 +173,7 @@ extern u_int32_t KERNend;
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
int pmap_pagedaemon_waken;
/*
* All those kernel PT submaps that BSD is so fond of
@ -1618,36 +1618,6 @@ get_pv_entry(void)
return uma_zalloc(pvzone, M_NOWAIT);
}
/*
 * This routine is very drastic, but can save the system
 * in a pinch: once the pagedaemon has signalled pv-entry
 * pressure (pmap_pagedaemon_waken), strip every mapping
 * from each page that is not wired, held, or busy so its
 * pv entries can be reclaimed.  Rate-limits its console
 * warning to five occurrences.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	/* Nothing to do unless the pagedaemon asked for help. */
	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		/* Skip pages whose mappings must remain intact. */
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	/* Pressure handled; re-arm for the next wakeup. */
	pmap_pagedaemon_waken = 0;
}
/*
* If it is the first entry on the list, it is actually
* in the header and we must copy the following entry up

View File

@ -228,7 +228,7 @@ struct mtx pmap_ridmutex;
*/
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
int pmap_pagedaemon_waken;
static struct pv_entry *pvbootentries;
static int pvbootnext, pvbootmax;
@ -1419,35 +1419,6 @@ pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
return IA64_PHYS_TO_RR7(start);
}
/*
 * This routine is very drastic, but can save the system
 * in a pinch: once the pagedaemon has signalled pv-entry
 * pressure (pmap_pagedaemon_waken), strip every mapping
 * from each page that is not wired, held, or busy so its
 * pv entries can be reclaimed.  Rate-limits its console
 * warning to five occurrences.
 */
void
pmap_collect(void)
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	/* Nothing to do unless the pagedaemon asked for help. */
	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		/* Skip pages whose mappings must remain intact. */
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
			continue;
		pmap_remove_all(m);
	}
	/* Pressure handled; re-arm for the next wakeup. */
	pmap_pagedaemon_waken = 0;
}
/*
* Remove a single page from a process address space
*/

View File

@ -870,12 +870,6 @@ pmap_clear_modify(vm_page_t m)
pmap_clear_bit(m, PTE_CHG);
}
/*
 * MD hook for pagedaemon pv-entry reclamation; not implemented
 * on powerpc (the commit notes list it as a TODO on this arch).
 */
void
pmap_collect(void)
{
TODO;	/* NOTE(review): TODO appears to be a placeholder macro -- confirm its runtime behavior */
}
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
vm_size_t len, vm_offset_t src_addr)

View File

@ -870,12 +870,6 @@ pmap_clear_modify(vm_page_t m)
pmap_clear_bit(m, PTE_CHG);
}
/*
 * MD hook for pagedaemon pv-entry reclamation; not implemented
 * on powerpc (the commit notes list it as a TODO on this arch).
 */
void
pmap_collect(void)
{
TODO;	/* NOTE(review): TODO appears to be a placeholder macro -- confirm its runtime behavior */
}
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
vm_size_t len, vm_offset_t src_addr)

View File

@ -870,12 +870,6 @@ pmap_clear_modify(vm_page_t m)
pmap_clear_bit(m, PTE_CHG);
}
/*
 * MD hook for pagedaemon pv-entry reclamation; not implemented
 * on powerpc (the commit notes list it as a TODO on this arch).
 */
void
pmap_collect(void)
{
TODO;	/* NOTE(review): TODO appears to be a placeholder macro -- confirm its runtime behavior */
}
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
vm_size_t len, vm_offset_t src_addr)

View File

@ -104,8 +104,6 @@ extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern int pmap_pagedaemon_waken;
extern vm_offset_t msgbuf_phys;
static __inline int

View File

@ -1278,15 +1278,6 @@ pmap_growkernel(vm_offset_t addr)
panic("pmap_growkernel: can't grow kernel");
}
/*
 * This routine is very drastic, but can save the system
 * in a pinch.  On sparc64 pv-entry reclamation is not
 * required (per the commit notes), so this is deliberately
 * a no-op.
 */
void
pmap_collect(void)
{
}
int
pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
vm_offset_t va)

View File

@ -89,13 +89,14 @@ typedef struct pmap_statistics *pmap_statistics_t;
struct proc;
struct thread;
extern int pmap_pagedaemon_waken;
#ifdef __alpha__
void pmap_page_is_free(vm_page_t m);
#endif
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
void pmap_collect(void);
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,

View File

@ -109,6 +109,7 @@
/* the kernel process "vm_pageout"*/
static void vm_pageout(void);
static int vm_pageout_clean(vm_page_t);
static void vm_pageout_pmap_collect(void);
static void vm_pageout_scan(int pass);
static int vm_pageout_free_page_calc(vm_size_t count);
struct proc *pageproc;
@ -625,6 +626,35 @@ vm_pageout_page_free(vm_page_t m) {
vm_object_deallocate(object);
}
/*
 * Desperation measure for pv-entry shortage: when the pagedaemon
 * has raised pmap_pagedaemon_waken, walk the whole vm_page array
 * and remove all mappings from every page that is not wired,
 * held, or busy, freeing their pv entries.  The console warning
 * is emitted at most five times.
 */
static void
vm_pageout_pmap_collect(void)
{
	static int warningdone;
	vm_page_t pg;
	int idx;

	/* Only act when the pagedaemon has signalled pressure. */
	if (pmap_pagedaemon_waken == 0)
		return;
	if (warningdone < 5) {
		printf("collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}
	vm_page_lock_queues();
	for (idx = 0; idx < vm_page_array_size; idx++) {
		pg = &vm_page_array[idx];
		/* Leave wired, held, or busy pages mapped. */
		if (pg->wire_count != 0 || pg->hold_count != 0 ||
		    pg->busy != 0 ||
		    (pg->flags & (PG_BUSY | PG_UNMANAGED)) != 0)
			continue;
		pmap_remove_all(pg);
	}
	vm_page_unlock_queues();
	/* Reset the wakeup flag now that the sweep is done. */
	pmap_pagedaemon_waken = 0;
}
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
@ -650,7 +680,7 @@ vm_pageout_scan(int pass)
/*
* Do whatever cleanup that the pmap code can.
*/
pmap_collect();
vm_pageout_pmap_collect();
uma_reclaim();
addl_page_shortage_init = vm_pageout_deficit;