During vm_page_cache()'s call to vm_radix_insert(), if vm_page_alloc() was
called to allocate a new page of radix trie nodes, there could be a call to
vm_radix_remove() on the same trie (of PG_CACHED pages) as the in-progress
vm_radix_insert().

With the removal of PG_CACHED pages, we can simplify vm_radix_insert() and
vm_radix_remove() by removing the flags on the root of the trie that were
used to detect this case and the code for restarting vm_radix_insert() when
it happened.

Reviewed by:	kib, markj
Tested by:	pho
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D8664
parent 71500aab3e
commit 563a19d546

Notes:
    svn2git  2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=309365
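For orientation before the diff: below is a minimal, self-contained userland sketch of the detect-and-restart handshake that this commit deletes. It is not the kernel code; the model_* names, the stub allocator, and the simulated recursion are hypothetical stand-ins that only mirror the RT_INSERT_INPROG / RT_TRIE_MODIFIED protocol visible in the removed lines further down.

#include <stdio.h>
#include <stdlib.h>

#define MODEL_INSERT_INPROG	0x01	/* an insert is allocating a node */
#define MODEL_TRIE_MODIFIED	0x02	/* a recursing remove changed the trie */

struct model_radix {
	void		*rt_root;	/* unused in this sketch */
	unsigned	 rt_flags;
};

/* Stand-in for vm_radix_remove() being entered out of the node allocator. */
static void
model_remove(struct model_radix *rtree)
{
	/* If an insert is mid-allocation, tell it the trie has changed. */
	if ((rtree->rt_flags & MODEL_INSERT_INPROG) != 0)
		rtree->rt_flags |= MODEL_TRIE_MODIFIED;
	/* ... the actual removal would happen here ... */
}

/* Stand-in for vm_radix_node_get(); triggers the recursion exactly once. */
static void *
model_node_get(struct model_radix *rtree)
{
	static int recursed;

	if (!recursed) {
		recursed = 1;
		model_remove(rtree);
	}
	return malloc(64);
}

static int
model_insert(struct model_radix *rtree)
{
	void *tmp;

restart:
	/* Flag the allocation so a recursing remove can signal us. */
	rtree->rt_flags |= MODEL_INSERT_INPROG;
	tmp = model_node_get(rtree);
	rtree->rt_flags &= ~MODEL_INSERT_INPROG;
	if (tmp == NULL) {
		rtree->rt_flags &= ~MODEL_TRIE_MODIFIED;
		return -1;		/* ENOMEM in the kernel code */
	}
	if ((rtree->rt_flags & MODEL_TRIE_MODIFIED) != 0) {
		/* The trie changed under us: discard the node and retry. */
		rtree->rt_flags &= ~MODEL_TRIE_MODIFIED;
		free(tmp);
		goto restart;
	}
	/* ... link tmp into the trie here ... */
	free(tmp);
	return 0;
}

int
main(void)
{
	struct model_radix r = { NULL, 0 };

	printf("model_insert returned %d\n", model_insert(&r));
	return 0;
}

Because PG_CACHED pages no longer exist, vm_radix_remove() can no longer be entered from inside vm_radix_insert()'s node allocation, so the flag field, the restart label, and the retry path become dead code; that is what the hunks below delete.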
@@ -36,12 +36,8 @@
  */
 struct vm_radix {
 	uintptr_t	rt_root;
-	uint8_t		rt_flags;
 };
 
-#define	RT_INSERT_INPROG	0x01
-#define	RT_TRIE_MODIFIED	0x02
-
 #ifdef _KERNEL
 
 static __inline boolean_t
@@ -205,7 +205,6 @@ vm_object_zinit(void *mem, int size, int flags)
 	object->type = OBJT_DEAD;
 	object->ref_count = 0;
 	object->rtree.rt_root = 0;
-	object->rtree.rt_flags = 0;
 	object->paging_in_progress = 0;
 	object->resident_page_count = 0;
 	object->shadow_count = 0;
@@ -341,8 +341,6 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
 
 	index = page->pindex;
 
-restart:
-
 	/*
 	 * The owner of record for root is not really important because it
 	 * will never be used.
@@ -360,32 +358,10 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
 			panic("%s: key %jx is already present",
 			    __func__, (uintmax_t)index);
 		clev = vm_radix_keydiff(m->pindex, index);
-
-		/*
-		 * During node allocation the trie that is being
-		 * walked can be modified because of recursing radix
-		 * trie operations.
-		 * If this is the case, the recursing functions signal
-		 * such situation and the insert operation must
-		 * start from scratch again.
-		 * The freed radix node will then be in the UMA
-		 * caches very likely to avoid the same situation
-		 * to happen.
-		 */
-		rtree->rt_flags |= RT_INSERT_INPROG;
 		tmp = vm_radix_node_get(vm_radix_trimkey(index,
 		    clev + 1), 2, clev);
-		rtree->rt_flags &= ~RT_INSERT_INPROG;
-		if (tmp == NULL) {
-			rtree->rt_flags &= ~RT_TRIE_MODIFIED;
+		if (tmp == NULL)
 			return (ENOMEM);
-		}
-		if ((rtree->rt_flags & RT_TRIE_MODIFIED) != 0) {
-			rtree->rt_flags &= ~RT_TRIE_MODIFIED;
-			tmp->rn_count = 0;
-			vm_radix_node_put(tmp);
-			goto restart;
-		}
 		*parentp = tmp;
 		vm_radix_addpage(tmp, index, clev, page);
 		vm_radix_addpage(tmp, m->pindex, clev, m);
@@ -409,21 +385,9 @@ vm_radix_insert(struct vm_radix *rtree, vm_page_t page)
 	 */
 	newind = rnode->rn_owner;
 	clev = vm_radix_keydiff(newind, index);
-
-	/* See the comments above. */
-	rtree->rt_flags |= RT_INSERT_INPROG;
 	tmp = vm_radix_node_get(vm_radix_trimkey(index, clev + 1), 2, clev);
-	rtree->rt_flags &= ~RT_INSERT_INPROG;
-	if (tmp == NULL) {
-		rtree->rt_flags &= ~RT_TRIE_MODIFIED;
+	if (tmp == NULL)
 		return (ENOMEM);
-	}
-	if ((rtree->rt_flags & RT_TRIE_MODIFIED) != 0) {
-		rtree->rt_flags &= ~RT_TRIE_MODIFIED;
-		tmp->rn_count = 0;
-		vm_radix_node_put(tmp);
-		goto restart;
-	}
 	*parentp = tmp;
 	vm_radix_addpage(tmp, index, clev, page);
 	slot = vm_radix_slot(newind, clev);
@@ -708,20 +672,6 @@ vm_radix_remove(struct vm_radix *rtree, vm_pindex_t index)
 	vm_page_t m;
 	int i, slot;
 
-	/*
-	 * Detect if a page is going to be removed from a trie which is
-	 * already undergoing another trie operation.
-	 * Right now this is only possible for vm_radix_remove() recursing
-	 * into vm_radix_insert().
-	 * If this is the case, the caller must be notified about this
-	 * situation. It will also takecare to update the RT_TRIE_MODIFIED
-	 * accordingly.
-	 * The RT_TRIE_MODIFIED bit is set here because the remove operation
-	 * will always succeed.
-	 */
-	if ((rtree->rt_flags & RT_INSERT_INPROG) != 0)
-		rtree->rt_flags |= RT_TRIE_MODIFIED;
-
 	rnode = vm_radix_getroot(rtree);
 	if (vm_radix_isleaf(rnode)) {
 		m = vm_radix_topage(rnode);
@@ -776,9 +726,6 @@ vm_radix_reclaim_allnodes(struct vm_radix *rtree)
 {
 	struct vm_radix_node *root;
 
-	KASSERT((rtree->rt_flags & RT_INSERT_INPROG) == 0,
-	    ("vm_radix_reclaim_allnodes: unexpected trie recursion"));
-
 	root = vm_radix_getroot(rtree);
 	if (root == NULL)
 		return;