Eliminate the arena parameter to kmem_free().  Implicitly, this corrects an
error in the function hypercall_memfree(), where the wrong arena was being
passed to kmem_free().

Introduce a per-page flag, VPO_KMEM_EXEC, to mark physical pages that are
mapped in kmem with execute permissions.  Use this flag to determine which
arena the kmem virtual addresses are returned to.

Eliminate UMA_SLAB_KRWX.  The introduction of VPO_KMEM_EXEC makes it
redundant.

Update the nearby comment for UMA_SLAB_KERNEL.

Reviewed by:	kib, markj
Discussed with:	jeff
Approved by:	re (marius)
Differential Revision:	https://reviews.freebsd.org/D16845
commit 49bfa624ac (parent ee6281c3d3)

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=338318
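Before the per-file hunks, a minimal sketch of what the API change means for a caller (illustrative only; `buf` and `size` are placeholder names, and the described behavior follows the vm_kern.c hunk further down):

    /* Before: the caller had to name the arena the memory was allocated from. */
    kmem_free(kernel_arena, (vm_offset_t)buf, size);

    /* After: kmem_free() looks up the first backing page in kernel_object,
     * tests VPO_KMEM_EXEC, and returns the range to the matching per-domain
     * kernel or kernel_rwx arena itself. */
    kmem_free((vm_offset_t)buf, size);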
@@ -479,7 +479,7 @@ user_ldt_alloc(struct proc *p, int force)
 	pldt = mdp->md_ldt;
 	if (pldt != NULL && !force) {
 		pmap_pti_remove_kva(sva, sva + sz);
-		kmem_free(kernel_arena, sva, sz);
+		kmem_free(sva, sz);
 		free(new_ldt, M_SUBPROC);
 		return (pldt);
 	}
@@ -533,7 +533,7 @@ user_ldt_derefl(struct proc_ldt *pldt)
 		sva = (vm_offset_t)pldt->ldt_base;
 		sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
 		pmap_pti_remove_kva(sva, sva + sz);
-		kmem_free(kernel_arena, sva, sz);
+		kmem_free(sva, sz);
 		free(pldt, M_SUBPROC);
 	}
 }
@@ -331,8 +331,7 @@ cpu_thread_clean(struct thread *td)
 	if (pcb->pcb_tssp != NULL) {
 		pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
 		    (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
-		kmem_free(kernel_arena, (vm_offset_t)pcb->pcb_tssp,
-		    ctob(IOPAGES + 1));
+		kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
 		pcb->pcb_tssp = NULL;
 	}
 }
@@ -192,7 +192,7 @@ a10fb_allocfb(struct a10fb_softc *sc)
 static void
 a10fb_freefb(struct a10fb_softc *sc)
 {
-	kmem_free(kernel_arena, sc->vaddr, sc->fbsize);
+	kmem_free(sc->vaddr, sc->fbsize);
 }
 
 static int
@@ -792,7 +792,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
 		uma_zfree(bufzone->umazone, vaddr);
 	else
-		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
+		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
 
 	dmat->map_count--;
 	if (map->flags & DMAMAP_COHERENT)
@@ -858,7 +858,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	    !exclusion_bounce(dmat))
 		uma_zfree(bufzone->umazone, vaddr);
 	else
-		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
+		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
 
 	dmat->map_count--;
 	if (map->flags & DMAMAP_COHERENT)
@@ -2242,8 +2242,7 @@ pmap_pinit(pmap_t pmap)
 		 * UMA_ZONE_NOFREE flag, it's important to leave
 		 * no allocation in pmap if initialization failed.
 		 */
-		kmem_free(kernel_arena, (vm_offset_t)pmap->pm_pt1,
-		    NB_IN_PT1);
+		kmem_free((vm_offset_t)pmap->pm_pt1, NB_IN_PT1);
 		pmap->pm_pt1 = NULL;
 		return (0);
 	}
@@ -196,8 +196,7 @@ sdma_free(int chn)
 	channel = &sc->channel[chn];
 	channel->in_use = 0;
 
-	kmem_free(kernel_arena, (vm_offset_t)channel->bd,
-	    PAGE_SIZE);
+	kmem_free((vm_offset_t)channel->bd, PAGE_SIZE);
 
 	return (0);
 }
@@ -984,7 +984,7 @@ tegra_xhci_detach(device_t dev)
 	if (sc->irq_hdl_mbox != NULL)
 		bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox);
 	if (sc->fw_vaddr != 0)
-		kmem_free(kernel_arena, sc->fw_vaddr, sc->fw_size);
+		kmem_free(sc->fw_vaddr, sc->fw_size);
 	LOCK_DESTROY(sc);
 	return (0);
 }
@@ -532,8 +532,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
 		free(vaddr, M_DEVBUF);
 	else
-		kmem_free(kernel_arena, (vm_offset_t)vaddr,
-		    dmat->common.maxsize);
+		kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
 	free(map, M_DEVBUF);
 	dmat->map_count--;
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
@@ -502,8 +502,7 @@ start_cpu(u_int id, uint64_t target_cpu)
 		    ("Failed to start CPU %u (%lx)\n", id, target_cpu));
 
 		pcpu_destroy(pcpup);
-		kmem_free(kernel_arena, (vm_offset_t)dpcpu[cpuid - 1],
-		    DPCPU_SIZE);
+		kmem_free((vm_offset_t)dpcpu[cpuid - 1], DPCPU_SIZE);
 		dpcpu[cpuid - 1] = NULL;
 		mp_ncpus--;
 
@@ -156,7 +156,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 		  dma_addr_t dma_handle)
 {
 
-	kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size);
+	kmem_free((vm_offset_t)cpu_addr, size);
 }
 
 /* XXX This only works with no iommu. */
@@ -178,7 +178,7 @@ linux_free_kmem(vm_offset_t addr, unsigned int order)
 {
 	size_t size = ((size_t)PAGE_SIZE) << order;
 
-	kmem_free(kmem_arena, addr, size);
+	kmem_free(addr, size);
 }
 
 static int
@@ -171,8 +171,8 @@ agp_alloc_gatt(device_t dev)
 void
 agp_free_gatt(struct agp_gatt *gatt)
 {
-	kmem_free(kernel_arena, (vm_offset_t)gatt->ag_virtual,
-	    gatt->ag_entries * sizeof(u_int32_t));
+	kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
+	    sizeof(u_int32_t));
 	free(gatt, M_AGP);
 }
 
@@ -119,8 +119,8 @@ agp_amd_alloc_gatt(device_t dev)
 		if (bootverbose)
 			device_printf(dev,
 			    "failed to allocate page directory\n");
-		kmem_free(kernel_arena, (vm_offset_t)gatt->ag_virtual,
-		    entries * sizeof(u_int32_t));
+		kmem_free((vm_offset_t)gatt->ag_virtual, entries *
+		    sizeof(u_int32_t));
 		free(gatt, M_AGP);
 		return 0;
 	}
@@ -168,9 +168,9 @@ agp_amd_alloc_gatt(device_t dev)
 static void
 agp_amd_free_gatt(struct agp_amd_gatt *gatt)
 {
-	kmem_free(kernel_arena, (vm_offset_t)gatt->ag_vdir, AGP_PAGE_SIZE);
-	kmem_free(kernel_arena, (vm_offset_t)gatt->ag_virtual,
-	    gatt->ag_entries * sizeof(u_int32_t));
+	kmem_free((vm_offset_t)gatt->ag_vdir, AGP_PAGE_SIZE);
+	kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
+	    sizeof(u_int32_t));
 	free(gatt, M_AGP);
 }
 
@@ -147,8 +147,8 @@ agp_ati_alloc_gatt(device_t dev)
 	if (sc->ag_vdir == NULL) {
 		if (bootverbose)
 			device_printf(dev, "pagedir allocation failed\n");
-		kmem_free(kernel_arena, (vm_offset_t)sc->ag_virtual,
-		    entries * sizeof(u_int32_t));
+		kmem_free((vm_offset_t)sc->ag_virtual, entries *
+		    sizeof(u_int32_t));
 		return ENOMEM;
 	}
 	sc->ag_pdir = vtophys((vm_offset_t)sc->ag_vdir);
@@ -265,9 +265,9 @@ agp_ati_detach(device_t dev)
 	temp = pci_read_config(dev, apsize_reg, 4);
 	pci_write_config(dev, apsize_reg, temp & ~1, 4);
 
-	kmem_free(kernel_arena, (vm_offset_t)sc->ag_vdir, AGP_PAGE_SIZE);
-	kmem_free(kernel_arena, (vm_offset_t)sc->ag_virtual,
-	    sc->ag_entries * sizeof(u_int32_t));
+	kmem_free((vm_offset_t)sc->ag_vdir, AGP_PAGE_SIZE);
+	kmem_free((vm_offset_t)sc->ag_virtual, sc->ag_entries *
+	    sizeof(u_int32_t));
 
 	bus_release_resource(dev, SYS_RES_MEMORY, ATI_GART_MMADDR, sc->regs);
 	agp_free_res(dev);
@@ -1329,7 +1329,7 @@ agp_i810_deinstall_gatt(device_t dev)
 
 	sc = device_get_softc(dev);
 	bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0);
-	kmem_free(kernel_arena, (vm_offset_t)sc->gatt->ag_virtual, 64 * 1024);
+	kmem_free((vm_offset_t)sc->gatt->ag_virtual, 64 * 1024);
 }
 
 static void
@@ -195,7 +195,7 @@ ecc_ei_inject(int count)
 		pause_sbt("ecc_ei_inject", delay_ms * SBT_1MS, 0, 0);
 	}
 
-	kmem_free(kernel_arena, memory, PAGE_SIZE);
+	kmem_free(memory, PAGE_SIZE);
 }
 
 static int
@@ -99,7 +99,7 @@ drm_sg_cleanup(struct drm_sg_mem *entry)
 		return;
 
 	if (entry->vaddr != 0)
-		kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
+		kmem_free(entry->vaddr, IDX_TO_OFF(entry->pages));
 
 	free(entry->busaddr, DRM_MEM_SGLISTS);
 	free(entry, DRM_MEM_DRIVER);
@@ -47,7 +47,7 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
 		return;
 
 	if (entry->vaddr != 0)
-		kmem_free(kernel_arena, entry->vaddr, IDX_TO_OFF(entry->pages));
+		kmem_free(entry->vaddr, IDX_TO_OFF(entry->pages));
 
 	free(entry->busaddr, DRM_MEM_SGLISTS);
 	free(entry, DRM_MEM_DRIVER);
@@ -264,8 +264,7 @@ SYSINIT(hyperv_initialize, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, hyperv_init,
 static void
 hypercall_memfree(void)
 {
-	kmem_free(kernel_arena, (vm_offset_t)hypercall_context.hc_addr,
-	    PAGE_SIZE);
+	kmem_free((vm_offset_t)hypercall_context.hc_addr, PAGE_SIZE);
 	hypercall_context.hc_addr = NULL;
 }
 
@@ -212,7 +212,7 @@ static inline void
 lio_dma_free(size_t size, void *cpu_addr)
 {
 
-	kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size);
+	kmem_free((vm_offset_t)cpu_addr, size);
 }
 
 static inline uint64_t
@@ -475,7 +475,7 @@ void
 contigfree(void *addr, unsigned long size, struct malloc_type *type)
 {
 
-	kmem_free(kernel_arena, (vm_offset_t)addr, size);
+	kmem_free((vm_offset_t)addr, size);
 	malloc_type_freed(type, round_page(size));
 }
 
@@ -171,6 +171,6 @@ void
 busdma_bufalloc_free_uncacheable(void *item, vm_size_t size, uint8_t pflag)
 {
 
-	kmem_free(kernel_arena, (vm_offset_t)item, size);
+	kmem_free((vm_offset_t)item, size);
 }
 
@@ -129,7 +129,7 @@ jzlcd_allocfb(struct jzlcd_softc *sc)
 static void
 jzlcd_freefb(struct jzlcd_softc *sc)
 {
-	kmem_free(kernel_arena, sc->vaddr, sc->fbsize);
+	kmem_free(sc->vaddr, sc->fbsize);
 }
 
 static void
@@ -756,7 +756,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	    !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr))
 		uma_zfree(bufzone->umazone, vaddr);
 	else
-		kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize);
+		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
 }
 
@@ -570,7 +570,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	if (!map->contigalloc)
 		free(vaddr, M_DEVBUF);
 	else
-		kmem_free(kmem_arena, (vm_offset_t)vaddr, dmat->maxsize);
+		kmem_free((vm_offset_t)vaddr, dmat->maxsize);
 	bus_dmamap_destroy(dmat, map);
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
 }
@@ -616,12 +616,11 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
  * These flags are setable in the allocf and visible in the freef.
  */
 #define UMA_SLAB_BOOT	0x01		/* Slab alloced from boot pages */
-#define UMA_SLAB_KRWX	0x02		/* Slab alloced from kernel_rwx_arena */
-#define UMA_SLAB_KERNEL	0x04		/* Slab alloced from kernel_map */
+#define UMA_SLAB_KERNEL	0x04		/* Slab alloced from kmem */
 #define UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define UMA_SLAB_OFFP	0x10		/* Slab is managed separately */
 #define UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */
-/* 0x40 and 0x80 are available */
+/* 0x02, 0x40, and 0x80 are available */
 
 /*
  * Used to pre-fill a zone with some number of items
@@ -1300,14 +1300,11 @@ noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
 static void
 page_free(void *mem, vm_size_t size, uint8_t flags)
 {
-	struct vmem *vmem;
 
-	if (flags & UMA_SLAB_KERNEL)
-		vmem = kernel_arena;
-	else
+	if ((flags & UMA_SLAB_KERNEL) == 0)
 		panic("UMA: page_free used with invalid flags %x", flags);
 
-	kmem_free(vmem, (vm_offset_t)mem, size);
+	kmem_free((vm_offset_t)mem, size);
 }
 
 /*
@@ -3694,10 +3691,6 @@ uma_large_malloc_domain(vm_size_t size, int domain, int wait)
 		vsetslab(addr, slab);
 		slab->us_data = (void *)addr;
 		slab->us_flags = UMA_SLAB_KERNEL | UMA_SLAB_MALLOC;
-#if VM_NRESERVLEVEL > 0
-		if (__predict_false((wait & M_EXEC) != 0))
-			slab->us_flags |= UMA_SLAB_KRWX;
-#endif
 		slab->us_size = size;
 		slab->us_domain = vm_phys_domain(PHYS_TO_VM_PAGE(
 		    pmap_kextract(addr)));
@@ -3719,19 +3712,10 @@ uma_large_malloc(vm_size_t size, int wait)
 void
 uma_large_free(uma_slab_t slab)
 {
-	struct vmem *arena;
 
 	KASSERT((slab->us_flags & UMA_SLAB_KERNEL) != 0,
 	    ("uma_large_free: Memory not allocated with uma_large_malloc."));
-#if VM_NRESERVLEVEL > 0
-	if (__predict_true((slab->us_flags & UMA_SLAB_KRWX) == 0))
-		arena = kernel_arena;
-	else
-		arena = kernel_rwx_arena;
-#else
-	arena = kernel_arena;
-#endif
-	kmem_free(arena, (vm_offset_t)slab->us_data, slab->us_size);
+	kmem_free((vm_offset_t)slab->us_data, slab->us_size);
 	uma_total_dec(slab->us_size);
 	zone_free_item(slabzone, slab, NULL, SKIP_NONE);
 }
@@ -66,7 +66,7 @@ vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
     vm_memattr_t memattr);
 vm_offset_t kmem_malloc(vm_size_t size, int flags);
 vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
-void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
+void kmem_free(vm_offset_t addr, vm_size_t size);
 
 /* This provides memory for previously allocated address space. */
 int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
@@ -462,6 +462,10 @@ kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
 		m->valid = VM_PAGE_BITS_ALL;
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
+#if VM_NRESERVLEVEL > 0
+		if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
+			m->oflags |= VPO_KMEM_EXEC;
+#endif
 	}
 	VM_OBJECT_WUNLOCK(object);
 
@@ -497,9 +501,10 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
  * A physical page must exist within the specified object at each index
  * that is being unmapped.
  */
-static int
+static struct vmem *
 _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 {
+	struct vmem *arena;
 	vm_page_t m, next;
 	vm_offset_t end, offset;
 	int domain;
@@ -508,13 +513,21 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 	    ("kmem_unback: only supports kernel object."));
 
 	if (size == 0)
-		return (0);
+		return (NULL);
 	pmap_remove(kernel_pmap, addr, addr + size);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
 	end = offset + size;
 	VM_OBJECT_WLOCK(object);
 	m = vm_page_lookup(object, atop(offset));
 	domain = vm_phys_domain(m);
+#if VM_NRESERVLEVEL > 0
+	if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
+		arena = vm_dom[domain].vmd_kernel_arena;
+	else
+		arena = vm_dom[domain].vmd_kernel_rwx_arena;
+#else
+	arena = vm_dom[domain].vmd_kernel_arena;
+#endif
 	for (; offset < end; offset += PAGE_SIZE, m = next) {
 		next = vm_page_next(m);
 		vm_page_unwire(m, PQ_NONE);
@@ -522,14 +535,14 @@ _kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 	}
 	VM_OBJECT_WUNLOCK(object);
 
-	return (domain);
+	return (arena);
 }
 
 void
 kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
 {
 
-	_kmem_unback(object, addr, size);
+	(void)_kmem_unback(object, addr, size);
 }
 
 /*
@@ -539,30 +552,14 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
  * original allocation.
  */
 void
-kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
+kmem_free(vm_offset_t addr, vm_size_t size)
 {
 	struct vmem *arena;
-	int domain;
 
-#if VM_NRESERVLEVEL > 0
-	KASSERT(vmem == kernel_arena || vmem == kernel_rwx_arena,
-	    ("kmem_free: Only kernel_arena or kernel_rwx_arena are supported."));
-#else
-	KASSERT(vmem == kernel_arena,
-	    ("kmem_free: Only kernel_arena is supported."));
-#endif
-
 	size = round_page(size);
-	domain = _kmem_unback(kernel_object, addr, size);
-#if VM_NRESERVLEVEL > 0
-	if (__predict_true(vmem == kernel_arena))
-		arena = vm_dom[domain].vmd_kernel_arena;
-	else
-		arena = vm_dom[domain].vmd_kernel_rwx_arena;
-#else
-	arena = vm_dom[domain].vmd_kernel_arena;
-#endif
-	vmem_free(arena, addr, size);
+	arena = _kmem_unback(kernel_object, addr, size);
+	if (arena != NULL)
+		vmem_free(arena, addr, size);
 }
 
 /*
@@ -234,7 +234,7 @@ struct vm_page {
  * mappings, and such pages are also not on any PQ queue.
  *
  */
-#define	VPO_UNUSED01	0x01		/* --available-- */
+#define	VPO_KMEM_EXEC	0x01		/* kmem mapping allows execution */
 #define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
 #define	VPO_UNMANAGED	0x04		/* no PV management for page */
 #define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
@@ -479,7 +479,7 @@ dmar_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
 	} else {
 		KASSERT((map->flags & BUS_DMAMAP_DMAR_KMEM_ALLOC) != 0,
 		    ("dmar_bus_dmamem_free for non alloced map %p", map));
-		kmem_free(kernel_arena, (vm_offset_t)vaddr, tag->common.maxsize);
+		kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
 		map->flags &= ~BUS_DMAMAP_DMAR_KMEM_ALLOC;
 	}
 
@@ -374,7 +374,7 @@ dmar_fini_irt(struct dmar_unit *unit)
 		dmar_disable_ir(unit);
 		dmar_qi_invalidate_iec_glob(unit);
 		vmem_destroy(unit->irtids);
-		kmem_free(kernel_arena, (vm_offset_t)unit->irt,
-		    unit->irte_cnt * sizeof(dmar_irte_t));
+		kmem_free((vm_offset_t)unit->irt, unit->irte_cnt *
+		    sizeof(dmar_irte_t));
 	}
 }
@@ -444,7 +444,7 @@ dmar_fini_qi(struct dmar_unit *unit)
 	    ("dmar%d: waiters on disabled queue", unit->unit));
 	DMAR_UNLOCK(unit);
 
-	kmem_free(kernel_arena, unit->inv_queue, unit->inv_queue_size);
+	kmem_free(unit->inv_queue, unit->inv_queue_size);
 	unit->inv_queue = 0;
 	unit->inv_queue_size = 0;
 	unit->qi_enabled = 0;
@@ -499,8 +499,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 	if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
 		free_domain(vaddr, M_DEVBUF);
 	else
-		kmem_free(kernel_arena, (vm_offset_t)vaddr,
-		    dmat->common.maxsize);
+		kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
 	CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
 	    dmat->bounce_flags);
 }