1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-12-19 10:53:58 +00:00

Refactor contigmalloc() into two functions: a simple front-end that deals

with the malloc tag and calls a new back-end, kmem_alloc_contig(), that
allocates the pages and maps them.

The motivations for this change are two-fold: (1) A cache mode parameter
will be added to kmem_alloc_contig().  In other words, kmem_alloc_contig()
will be extended to support the allocation of memory with caller-specified
caching. (2) The UMA allocation function that is used by the two jumbo
frames zones can use kmem_alloc_contig() in place of contigmalloc() and
thereby avoid having free jumbo frames held by the zone counted as live
malloc()ed memory.
This commit is contained in:
Alan Cox 2009-06-17 17:19:48 +00:00
parent 30a2bd2f78
commit d78200e4e8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=194376
2 changed files with 22 additions and 8 deletions

View File

@@ -193,7 +193,7 @@ vm_page_release_contig(vm_page_t m, vm_pindex_t count)
  * specified through the given flags, then the pages are zeroed
  * before they are mapped.
  */
-static void *
+static vm_offset_t
 contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
 {
 	vm_object_t object = kernel_object;
@@ -202,7 +202,7 @@ contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
 	vm_map_lock(map);
 	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
 		vm_map_unlock(map);
-		return (NULL);
+		return (0);
 	}
 	vm_object_reference(object);
 	vm_map_insert(map, object, addr - VM_MIN_KERNEL_ADDRESS,
@@ -220,7 +220,7 @@ contigmapping(vm_map_t map, vm_size_t size, vm_page_t m, int flags)
 	VM_OBJECT_UNLOCK(object);
 	vm_map_wire(map, addr, addr + size,
 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
-	return ((void *)addr);
+	return (addr);
 }
 
 void *
@@ -234,6 +234,19 @@ contigmalloc(
 	unsigned long boundary)
 {
 	void *ret;
+
+	ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
+	    alignment, boundary);
+	if (ret != NULL)
+		malloc_type_allocated(type, round_page(size));
+	return (ret);
+}
+
+vm_offset_t
+kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
+    vm_paddr_t high, unsigned long alignment, unsigned long boundary)
+{
+	vm_offset_t ret;
 	vm_page_t pages;
 	unsigned long npgs;
 	int actl, actmax, inactl, inactmax, tries;
@@ -265,13 +278,11 @@ contigmalloc(
 			tries++;
 			goto retry;
 		}
-		ret = NULL;
+		ret = 0;
 	} else {
-		ret = contigmapping(kernel_map, size, pages, flags);
-		if (ret == NULL)
+		ret = contigmapping(map, size, pages, flags);
+		if (ret == 0)
 			vm_page_release_contig(pages, npgs);
-		else
-			malloc_type_allocated(type, npgs << PAGE_SHIFT);
 	}
 	return (ret);
 }

View File

@@ -44,6 +44,9 @@ struct vnode;
 
 int kernacc(void *, int, int);
 vm_offset_t kmem_alloc(vm_map_t, vm_size_t);
+vm_offset_t kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags,
+    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
+    unsigned long boundary);
vm_offset_t kmem_alloc_nofault(vm_map_t, vm_size_t);
 vm_offset_t kmem_alloc_wait(vm_map_t, vm_size_t);
 void kmem_free(vm_map_t, vm_offset_t, vm_size_t);