Collect all the (now equivalent) pmap_new_proc/pmap_dispose_proc/
pmap_swapin_proc/pmap_swapout_proc functions from the MD pmap code and
use a single equivalent MI version.  There are other cleanups needed
still.

While here, use the UMA zone hooks to keep a cache of preinitialized
proc structures handy, just like the thread system does.  This
eliminates one dependency on 'struct proc' being persistent even after
being freed.  There are some comments about things that can be factored
out into ctor/dtor functions if it is worth it.  For now they are
mostly just doing statistics to get a feel of how it is working.
commit a136efe9b6
parent f9751ec2cd
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=99559
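
For context on the second half of the change: uma_zcreate() takes two
pairs of hooks.  The ctor/dtor pair runs on every uma_zalloc()/uma_zfree(),
while the init/fini pair runs only when an item first enters or finally
leaves the zone's cache, so work done in init (here, vm_proc_new()'s U
area setup) survives across reuse of a cached proc.  The sketch below is
a minimal stand-alone illustration of that lifecycle, not the committed
code; the toy single-slot cache, the item type, and the simplified hook
signatures are invented for the example (real UMA hooks take extra size
and argument parameters).

/*
 * Illustration only: a toy "zone" showing when ctor/dtor vs. init/fini
 * fire.  Compile with: cc -o zonedemo zonedemo.c
 */
#include <stdio.h>
#include <stdlib.h>

struct item {
	int expensive_state;	/* stands in for the wired-in U area */
};

static int active, cached, allocated;
static struct item *cache_slot;	/* one-entry cache standing in for UMA's */

static void
item_init(struct item *it)	/* once, when backing memory is created */
{
	it->expensive_state = 42;
	cached++;
	allocated++;
}

static void
item_fini(struct item *it)	/* once, just before memory is truly freed */
{
	(void)it;
	cached--;
	allocated--;
}

static void
item_ctor(struct item *it)	/* every allocation */
{
	(void)it;
	cached--;
	active++;
}

static void
item_dtor(struct item *it)	/* every free */
{
	(void)it;
	active--;
	cached++;
}

static struct item *
zalloc(void)
{
	struct item *it;

	if ((it = cache_slot) != NULL)
		cache_slot = NULL;
	else {
		if ((it = malloc(sizeof(*it))) == NULL)
			abort();
		item_init(it);
	}
	item_ctor(it);
	return (it);
}

static void
zfree(struct item *it)
{
	item_dtor(it);
	if (cache_slot == NULL)
		cache_slot = it;	/* keep it preinitialized */
	else {
		item_fini(it);
		free(it);
	}
}

int
main(void)
{
	struct item *a, *b;

	a = zalloc();
	zfree(a);
	b = zalloc();	/* reused from cache: item_init() did not rerun */
	printf("state=%d active=%d cached=%d allocated=%d\n",
	    b->expensive_state, active, cached, allocated);
	zfree(b);
	return (0);
}

Because proc_zone below is created with UMA_ZONE_NOFREE, the fini path
should essentially never run, which is what the XXX note on
vm_proc_dispose() in the vm_glue.c hunk points out.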
@@ -900,160 +900,6 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
	return m;
}

/*
 * Create the UPAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pt_entry_t *ptek, oldpte;

	/*
	 * allocate object for the upage
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/* get a kernel virtual address for the UPAGES for this proc */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	ptek = vtopte(up);

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a kernel page for the uarea
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		oldpte = ptek[i];
		ptek[i] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m))
		    | PG_ASM | PG_KRE | PG_KWE | PG_V;
		if (oldpte)
			pmap_invalidate_page(kernel_pmap, up + i * PAGE_SIZE);

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(p)
	struct proc *p;
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pt_entry_t *ptek;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	ptek = vtopte(up);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_dispose_proc: upage already missing?");
		vm_page_busy(m);
		ptek[i] = 0;
		pmap_invalidate_page(kernel_pmap, up + i * PAGE_SIZE);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}

	/*
	 * If the process got swapped out some of its UPAGES might have gotten
	 * swapped.  Just get rid of the object to clean up the swap use
	 * proactively.  NOTE! might block waiting for paging I/O to complete.
	 */
	if (upobj->type == OBJT_SWAP) {
		p->p_upages_obj = NULL;
		vm_object_deallocate(upobj);
	}
}

/*
 * Allow the UPAGES for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(p)
	struct proc *p;
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_swapout_proc: upage already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove(up + i * PAGE_SIZE);
	}
}

/*
 * Bring the UPAGES for a specified process back in.
 */
void
pmap_swapin_proc(p)
	struct proc *p;
{
	int i, rv;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
}

/*
 * Create the kernel stack for a new thread.
 * This routine directly affects the fork perf for a process and thread.
@@ -819,176 +819,6 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
	return m;
}

/*
 * Create the Uarea stack for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
#ifdef I386_CPU
	int updateneeded = 0;
#endif
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pt_entry_t *ptek, oldpte;

	/*
	 * allocate object for the upage
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/* get a kernel virtual address for the U area for this thread */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	ptek = vtopte(up);

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a kernel page for the uarea
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		oldpte = ptek[i];
		ptek[i] = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
		if (oldpte) {
#ifdef I386_CPU
			updateneeded = 1;
#else
			invlpg(up + i * PAGE_SIZE);
#endif
		}

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
#ifdef I386_CPU
	if (updateneeded)
		invltlb();
#endif
}

/*
 * Dispose the U-Area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(p)
	struct proc *p;
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pt_entry_t *ptek;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	ptek = vtopte(up);
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_dispose_proc: upage already missing?");
		vm_page_busy(m);
		ptek[i] = 0;
#ifndef I386_CPU
		invlpg(up + i * PAGE_SIZE);
#endif
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
#ifdef I386_CPU
	invltlb();
#endif

	/*
	 * If the process got swapped out some of its UPAGES might have gotten
	 * swapped.  Just get rid of the object to clean up the swap use
	 * proactively.  NOTE! might block waiting for paging I/O to complete.
	 */
	if (upobj->type == OBJT_SWAP) {
		p->p_upages_obj = NULL;
		vm_object_deallocate(upobj);
	}
}

/*
 * Allow the U_AREA for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(p)
	struct proc *p;
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_swapout_proc: upage already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove(up + i * PAGE_SIZE);
	}
}

/*
 * Bring the U-Area for a specified process back in.
 */
void
pmap_swapin_proc(p)
	struct proc *p;
{
	int i, rv;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upage for proc: %d\n", p->p_pid);
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
}

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
@@ -706,154 +706,6 @@ pmap_track_modified(vm_offset_t va)
	return 0;
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		ma[i] = m;

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_dispose_proc: upage already missing?");
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	pmap_qremove(up, UAREA_PAGES);

	/*
	 * If the process got swapped out some of its UPAGES might have gotten
	 * swapped.  Just get rid of the object to clean up the swap use
	 * proactively.  NOTE! might block waiting for paging I/O to complete.
	 */
	if (upobj->type == OBJT_SWAP) {
		p->p_upages_obj = NULL;
		vm_object_deallocate(upobj);
	}
}

/*
 * Allow the U area for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_swapout_proc: upage already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
void
pmap_swapin_proc(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upage");
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Create the KSTACK for a new thread.
 * This routine directly affects the fork perf for a process/thread.
@@ -97,6 +97,16 @@ thread_ctor(void *mem, int size, void *arg)
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	td->td_state = TDS_NEW;
	td->td_flags |= TDF_UNBOUND;
#if 0
	/*
	 * Maybe move these here from process creation, but maybe not.
	 * Moving them here takes them away from their "natural" place
	 * in the fork process.
	 */
	/* XXX td_contested does not appear to be initialized for threads! */
	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
#endif
	cached_threads--;	/* XXXSMP */
	active_threads++;	/* XXXSMP */
}

@@ -58,6 +58,7 @@
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/uma.h>

@@ -76,6 +77,11 @@ static void pgdelete(struct pgrp *);

static void orphanpg(struct pgrp *pg);

static void proc_ctor(void *mem, int size, void *arg);
static void proc_dtor(void *mem, int size, void *arg);
static void proc_init(void *mem, int size);
static void proc_fini(void *mem, int size);

/*
 * Other process lists
 */

@@ -91,6 +97,12 @@ struct mtx pargs_ref_lock;
uma_zone_t proc_zone;
uma_zone_t ithread_zone;

static int active_procs;
static int cached_procs;
static int allocated_procs;

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))

CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);

/*
@@ -107,11 +119,91 @@ procinit()
	LIST_INIT(&zombproc);
	pidhashtbl = hashinit(maxproc / 4, M_PROC, &pidhash);
	pgrphashtbl = hashinit(maxproc / 4, M_PROC, &pgrphash);
-	proc_zone = uma_zcreate("PROC", sizeof (struct proc), NULL, NULL,
-	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
+	proc_zone = uma_zcreate("PROC", sizeof (struct proc),
+	    proc_ctor, proc_dtor, proc_init, proc_fini,
+	    UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uihashinit();
}

/*
 * Prepare a proc for use.
 */
static void
proc_ctor(void *mem, int size, void *arg)
{
	struct proc *p;

	KASSERT((size == sizeof(struct proc)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
	p = (struct proc *)mem;
#if 0
	/*
	 * Maybe move these from process creation, but maybe not.
	 * Moving them here takes them away from their "natural" place
	 * in the fork process.
	 */
	bzero(&p->p_startzero,
	    (unsigned) RANGEOF(struct proc, p_startzero, p_endzero));
	p->p_state = PRS_NEW;
	mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
	LIST_INIT(&p->p_children);
	callout_init(&p->p_itcallout, 0);
#endif
	cached_procs--;
	active_procs++;
}

/*
 * Reclaim a proc after use.
 */
static void
proc_dtor(void *mem, int size, void *arg)
{
	struct proc *p;

	KASSERT((size == sizeof(struct proc)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
	p = (struct proc *)mem;
	/* INVARIANTS checks go here */
#if 0	/* See comment in proc_ctor about separating things */
	mtx_destroy(&p->p_mtx);
#endif
	active_procs--;
	cached_procs++;
}

/*
 * Initialize type-stable parts of a proc (when newly created).
 */
static void
proc_init(void *mem, int size)
{
	struct proc *p;

	KASSERT((size == sizeof(struct proc)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
	p = (struct proc *)mem;
	vm_proc_new(p);
	cached_procs++;
	allocated_procs++;
}

/*
 * Tear down type-stable parts of a proc (just before being discarded)
 */
static void
proc_fini(void *mem, int size)
{
	struct proc *p;

	KASSERT((size == sizeof(struct proc)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct proc)));
	p = (struct proc *)mem;
	vm_proc_dispose(p);
	cached_procs--;
	allocated_procs--;
}

/*
 * KSE is linked onto the idle queue.
 */

@@ -1143,3 +1235,12 @@ SYSCTL_NODE(_kern_proc, KERN_PROC_PID, pid, CTLFLAG_RD,

SYSCTL_NODE(_kern_proc, KERN_PROC_ARGS, args, CTLFLAG_RW | CTLFLAG_ANYBODY,
	sysctl_kern_proc_args, "Process argument list");

SYSCTL_INT(_kern_proc, OID_AUTO, active, CTLFLAG_RD,
	&active_procs, 0, "Number of active procs in system.");

SYSCTL_INT(_kern_proc, OID_AUTO, cached, CTLFLAG_RD,
	&cached_procs, 0, "Number of procs in proc cache.");

SYSCTL_INT(_kern_proc, OID_AUTO, allocated, CTLFLAG_RD,
	&allocated_procs, 0, "Number of procs in zone.");

@@ -1170,61 +1170,6 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr)
	return (0);
}

/*
 * Create the uarea for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate the object for the upages.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/*
	 * Get a kernel virtual address for the uarea for this process.
	 */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)

@@ -1515,18 +1460,6 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
	pmap_remove(pm, sva, eva);
}

void
pmap_swapin_proc(struct proc *p)
{
	TODO;
}

void
pmap_swapout_proc(struct proc *p)
{
	TODO;
}

/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and

@@ -1580,12 +1513,6 @@ pmap_new_thread(struct thread *td)
	}
}

void
pmap_dispose_proc(struct proc *p)
{
	TODO;
}

void
pmap_dispose_thread(struct thread *td)
{
@@ -811,154 +811,6 @@ pmap_qremove(vm_offset_t sva, int count)
	tlb_range_demap(kernel_pmap, sva, sva + (count * PAGE_SIZE) - 1);
}

/*
 * Create the uarea for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		ma[i] = m;

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the uarea for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_dispose_proc: upage already missing?");
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	pmap_qremove(up, UAREA_PAGES);

	/*
	 * If the process got swapped out some of its UPAGES might have gotten
	 * swapped.  Just get rid of the object to clean up the swap use
	 * proactively.  NOTE! might block waiting for paging I/O to complete.
	 */
	if (upobj->type == OBJT_SWAP) {
		p->p_upages_obj = NULL;
		vm_object_deallocate(upobj);
	}
}

/*
 * Allow the uarea for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("pmap_swapout_proc: upage already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the uarea for a specified process back in.
 */
void
pmap_swapin_proc(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upage");
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and
@@ -128,10 +128,6 @@ void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_new_proc(struct proc *p);
void pmap_dispose_proc(struct proc *p);
void pmap_swapout_proc(struct proc *p);
void pmap_swapin_proc(struct proc *p);
void pmap_new_thread(struct thread *td);
void pmap_dispose_thread(struct thread *td);
void pmap_swapout_thread(struct thread *td);
@@ -95,5 +95,7 @@ void vsunlock(caddr_t, u_int);
void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
    char *);
int vm_fault_quick(caddr_t v, int prot);
void vm_proc_new(struct proc *p);
void vm_proc_dispose(struct proc *p);
#endif				/* _KERNEL */
#endif				/* !_VM_EXTERN_H_ */

sys/vm/vm_glue.c (159 lines changed)
@@ -87,8 +87,10 @@
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>

#include <sys/user.h>

@@ -112,6 +114,8 @@ SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifndef NO_SWAPPING
static void swapout(struct proc *);
static void vm_proc_swapin(struct proc *p);
static void vm_proc_swapout(struct proc *p);
#endif

/*
@@ -195,6 +199,144 @@ vsunlock(addr, len)
	    round_page((vm_offset_t)addr + len), FALSE);
}

/*
 * Create the U area for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
vm_proc_new(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	u_int i;

	/*
	 * Allocate object for the upage.
	 */
	upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
	p->p_upages_obj = upobj;

	/*
	 * Get a kernel virtual address for the U area for this process.
	 */
	up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
	if (up == 0)
		panic("vm_proc_new: upage allocation failed");
	p->p_uarea = (struct user *)up;

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		ma[i] = m;

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Enter the pages into the kernel address space.
	 */
	pmap_qenter(up, ma, UAREA_PAGES);
}

/*
 * Dispose the U area for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 * XXX proc_zone is marked UMA_ZONE_NOFREE, so this should never be called.
 */
void
vm_proc_dispose(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("vm_proc_dispose: upage already missing?");
		vm_page_busy(m);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}
	pmap_qremove(up, UAREA_PAGES);
	kmem_free(kernel_map, up, UAREA_PAGES * PAGE_SIZE);
	p->p_upages_obj = NULL;
	vm_object_deallocate(upobj);
}

#ifndef NO_SWAPPING
/*
 * Allow the U area for a process to be prejudicially paged out.
 */
void
vm_proc_swapout(struct proc *p)
{
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_lookup(upobj, i);
		if (m == NULL)
			panic("vm_proc_swapout: upage already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
	}
	pmap_qremove(up, UAREA_PAGES);
}

/*
 * Bring the U area for a specified process back in.
 */
void
vm_proc_swapin(struct proc *p)
{
	vm_page_t ma[UAREA_PAGES];
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	int rv;
	int i;

	upobj = p->p_upages_obj;
	up = (vm_offset_t)p->p_uarea;
	for (i = 0; i < UAREA_PAGES; i++) {
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("vm_proc_swapin: cannot get upage");
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		ma[i] = m;
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
	pmap_qenter(up, ma, UAREA_PAGES);
}
#endif

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
@@ -248,8 +390,6 @@ vm_forkproc(td, p2, td2, flags)
		shmfork(p1, p2);
	}

-	pmap_new_proc(p2);

	/* XXXKSE this is unsatisfactory but should be adequate */
	up = p2->p_uarea;

@@ -297,7 +437,6 @@ vm_waitproc(p)

	GIANT_REQUIRED;
	cpu_wait(p);
-	pmap_dispose_proc(p);		/* drop per-process resources */
	/* XXXKSE by here there should not be any threads left! */
	FOREACH_THREAD_IN_PROC(p, td) {
		panic("vm_waitproc: Survivor thread!");

@@ -339,17 +478,22 @@ void
faultin(p)
	struct proc *p;
{
-	struct thread *td;
-	GIANT_REQUIRED;
+
+	GIANT_REQUIRED;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
+#ifdef NO_SWAPPING
+	if ((p->p_sflag & PS_INMEM) == 0)
+		panic("faultin: proc swapped out with NO_SWAPPING!");
+#else
	if ((p->p_sflag & PS_INMEM) == 0) {
+		struct thread *td;

		++p->p_lock;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);

-		pmap_swapin_proc(p);
+		vm_proc_swapin(p);
		FOREACH_THREAD_IN_PROC (p, td)
			pmap_swapin_thread(td);

@@ -364,6 +508,7 @@ faultin(p)
		/* undo the effect of setting SLOCK above */
		--p->p_lock;
	}
+#endif
	mtx_unlock_spin(&sched_lock);
}

@@ -641,7 +786,7 @@ swapout(p)
		remrunqueue(td);	/* XXXKSE */
	mtx_unlock_spin(&sched_lock);

-	pmap_swapout_proc(p);
+	vm_proc_swapout(p);
	FOREACH_THREAD_IN_PROC(p, td)
		pmap_swapout_thread(td);
	mtx_lock_spin(&sched_lock);