kmem_malloc/free: Use void * instead of vm_offset_t for kernel pointers.
Reviewed by:	kib, markj
Sponsored by:	DARPA
Differential Revision:	https://reviews.freebsd.org/D36549
parent 7ae99f80b6
commit f49fd63a6a
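The change in one line: kmem_malloc(), kmem_malloc_domainset(), the kmem_alloc_attr()/kmem_alloc_contig() families, and kmem_free() now accept and return void * instead of vm_offset_t, so callers no longer cast allocator results to pointers, or pointers back to addresses when freeing. A minimal before/after sketch of a hypothetical caller (illustration only, not taken from this diff):

    /* Before: kmem_malloc() returned a vm_offset_t. */
    struct foo *p;

    p = (struct foo *)kmem_malloc(sizeof(*p), M_WAITOK | M_ZERO);
    /* ... use p ... */
    kmem_free((vm_offset_t)p, sizeof(*p));

    /* After: void * converts implicitly, so both casts go away. */
    p = kmem_malloc(sizeof(*p), M_WAITOK | M_ZERO);
    /* ... use p ... */
    kmem_free(p, sizeof(*p));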
@@ -423,17 +423,17 @@ start_all_aps(void)
 domain = acpi_pxm_get_cpu_locality(apic_id);
 #endif
 /* allocate and set up an idle stack data page */
-bootstacks[cpu] = (void *)kmem_malloc(kstack_pages * PAGE_SIZE,
+bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
     M_WAITOK | M_ZERO);
-doublefault_stack = (char *)kmem_malloc(DBLFAULT_STACK_SIZE,
+doublefault_stack = kmem_malloc(DBLFAULT_STACK_SIZE,
     M_WAITOK | M_ZERO);
-mce_stack = (char *)kmem_malloc(MCE_STACK_SIZE,
+mce_stack = kmem_malloc(MCE_STACK_SIZE,
     M_WAITOK | M_ZERO);
-nmi_stack = (char *)kmem_malloc_domainset(
+nmi_stack = kmem_malloc_domainset(
     DOMAINSET_PREF(domain), NMI_STACK_SIZE, M_WAITOK | M_ZERO);
-dbg_stack = (char *)kmem_malloc_domainset(
+dbg_stack = kmem_malloc_domainset(
     DOMAINSET_PREF(domain), DBG_STACK_SIZE, M_WAITOK | M_ZERO);
-dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
+dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
     DPCPU_SIZE, M_WAITOK | M_ZERO);

 bootpcpu = &__pcpu[cpu];

@@ -2390,7 +2390,7 @@ pmap_init_pv_table(void)
  */
 s = (vm_size_t)pv_npg * sizeof(struct md_page);
 s = round_page(s);
-pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
 for (i = 0; i < pv_npg; i++)
     TAILQ_INIT(&pv_table[i].pv_list);
 TAILQ_INIT(&pv_dummy.pv_list);

@@ -418,8 +418,7 @@ amd64_set_ioperm(td, uap)
  */
 pcb = td->td_pcb;
 if (pcb->pcb_tssp == NULL) {
-tssp = (struct amd64tss *)kmem_malloc(ctob(IOPAGES + 1),
-    M_WAITOK);
+tssp = kmem_malloc(ctob(IOPAGES + 1), M_WAITOK);
 pmap_pti_add_kva((vm_offset_t)tssp, (vm_offset_t)tssp +
     ctob(IOPAGES + 1), false);
 iomap = (char *)&tssp[1];

@@ -523,8 +522,8 @@ user_ldt_alloc(struct proc *p, int force)
 mtx_unlock(&dt_lock);
 new_ldt = malloc(sizeof(struct proc_ldt), M_SUBPROC, M_WAITOK);
 sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
-sva = kmem_malloc(sz, M_WAITOK | M_ZERO);
-new_ldt->ldt_base = (caddr_t)sva;
+new_ldt->ldt_base = kmem_malloc(sz, M_WAITOK | M_ZERO);
+sva = (uintptr_t)new_ldt->ldt_base;
 pmap_pti_add_kva(sva, sva + sz, false);
 new_ldt->ldt_refcnt = 1;
 sldt.ssd_base = sva;

@@ -539,7 +538,7 @@ user_ldt_alloc(struct proc *p, int force)
 pldt = mdp->md_ldt;
 if (pldt != NULL && !force) {
 pmap_pti_remove_kva(sva, sva + sz);
-kmem_free(sva, sz);
+kmem_free(new_ldt->ldt_base, sz);
 free(new_ldt, M_SUBPROC);
 return (pldt);
 }

@@ -592,7 +591,7 @@ user_ldt_derefl(struct proc_ldt *pldt)
 sva = (vm_offset_t)pldt->ldt_base;
 sz = max_ldt_segment * sizeof(struct user_segment_descriptor);
 pmap_pti_remove_kva(sva, sva + sz);
-kmem_free(sva, sz);
+kmem_free(pldt->ldt_base, sz);
 free(pldt, M_SUBPROC);
 }
 }

@@ -373,7 +373,7 @@ cpu_thread_clean(struct thread *td)
 if (pcb->pcb_tssp != NULL) {
 pmap_pti_remove_kva((vm_offset_t)pcb->pcb_tssp,
     (vm_offset_t)pcb->pcb_tssp + ctob(IOPAGES + 1));
-kmem_free((vm_offset_t)pcb->pcb_tssp, ctob(IOPAGES + 1));
+kmem_free(pcb->pcb_tssp, ctob(IOPAGES + 1));
 pcb->pcb_tssp = NULL;
 }
 }
@@ -776,10 +776,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
 howmany(dmat->maxsize, MIN(dmat->maxsegsz, PAGE_SIZE)) &&
 dmat->alignment <= PAGE_SIZE &&
 (dmat->boundary % PAGE_SIZE) == 0) {
-*vaddr = (void *)kmem_alloc_attr(dmat->maxsize, mflags, 0,
+*vaddr = kmem_alloc_attr(dmat->maxsize, mflags, 0,
     dmat->lowaddr, memattr);
 } else {
-*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0,
+*vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0,
     dmat->lowaddr, dmat->alignment, dmat->boundary, memattr);
 }
 if (*vaddr == NULL) {

@@ -822,7 +822,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 !exclusion_bounce(dmat))
 uma_zfree(bufzone->umazone, vaddr);
 else
-kmem_free((vm_offset_t)vaddr, dmat->maxsize);
+kmem_free(vaddr, dmat->maxsize);

 dmat->map_count--;
 if (map->flags & DMAMAP_COHERENT)

@@ -115,7 +115,7 @@ cpu_mp_start(void)

 /* Reserve memory for application processors */
 for(i = 0; i < (mp_ncpus - 1); i++)
-dpcpu[i] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+dpcpu[i] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);

 dcache_wbinv_poc_all();
@@ -1780,7 +1780,7 @@ pmap_init(void)
  */
 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 s = round_page(s);
-pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
 for (i = 0; i < pv_npg; i++)
     TAILQ_INIT(&pv_table[i].pv_list);

@@ -2213,7 +2213,7 @@ pmap_pinit(pmap_t pmap)
  */

 if (pmap->pm_pt1 == NULL) {
-pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(NB_IN_PT1,
+pmap->pm_pt1 = kmem_alloc_contig(NB_IN_PT1,
     M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, pt_memattr);
 if (pmap->pm_pt1 == NULL)
 return (0);

@@ -2229,7 +2229,7 @@ pmap_pinit(pmap_t pmap)
  * be used no matter which process is current. Its mapping
  * in PT2MAP can be used only for current process.
  */
-pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(NB_IN_PT2TAB,
+pmap->pm_pt2tab = kmem_alloc_attr(NB_IN_PT2TAB,
     M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr);
 if (pmap->pm_pt2tab == NULL) {
 /*

@@ -2237,7 +2237,7 @@ pmap_pinit(pmap_t pmap)
  * UMA_ZONE_NOFREE flag, it's important to leave
  * no allocation in pmap if initialization failed.
  */
-kmem_free((vm_offset_t)pmap->pm_pt1, NB_IN_PT1);
+kmem_free(pmap->pm_pt1, NB_IN_PT1);
 pmap->pm_pt1 = NULL;
 return (0);
 }
@@ -185,7 +185,7 @@ sdma_alloc(void)
 chn = i;

 /* Allocate area for buffer descriptors */
-channel->bd = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
+channel->bd = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
     PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

 return (chn);

@@ -202,7 +202,7 @@ sdma_free(int chn)
 channel = &sc->channel[chn];
 channel->in_use = 0;

-kmem_free((vm_offset_t)channel->bd, PAGE_SIZE);
+kmem_free(channel->bd, PAGE_SIZE);

 return (0);
 }

@@ -396,7 +396,7 @@ boot_firmware(struct sdma_softc *sc)

 sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) + \
     sizeof(struct sdma_context_data);
-sc->ccb = (void *)kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
+sc->ccb = kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
     VM_MEMATTR_UNCACHEABLE);
 sc->ccb_phys = vtophys(sc->ccb);

@@ -415,7 +415,7 @@ boot_firmware(struct sdma_softc *sc)
 /* Channel 0 is used for booting firmware */
 chn = 0;

-sc->bd0 = (void *)kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
+sc->bd0 = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
     0, VM_MEMATTR_UNCACHEABLE);
 bd0 = sc->bd0;
 sc->ccb[chn].base_bd_ptr = vtophys(bd0);
@@ -1231,7 +1231,8 @@ dc_init_client(device_t dev, device_t host1x, struct tegra_drm *drm)
 sc->tegra_crtc.cursor_vbase = kmem_alloc_contig(256 * 256 * 4,
     M_WAITOK | M_ZERO, 0, -1UL, PAGE_SIZE, 0,
     VM_MEMATTR_WRITE_COMBINING);
-sc->tegra_crtc.cursor_pbase = vtophys(sc->tegra_crtc.cursor_vbase);
+sc->tegra_crtc.cursor_pbase =
+    vtophys((uintptr_t)sc->tegra_crtc.cursor_vbase);
 return (0);
 }

@@ -64,7 +64,7 @@ struct tegra_crtc {
 device_t dev;
 int nvidia_head;
 vm_paddr_t cursor_pbase; /* Cursor buffer */
-vm_offset_t cursor_vbase;
+void *cursor_vbase;
 };

 struct tegra_drm_encoder {

@@ -1382,7 +1382,7 @@ tegra_pcib_attach_msi(device_t dev)

 sc = device_get_softc(dev);

-sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
+sc->msi_page = (uintptr_t)kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
     BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);

 /* MSI BAR */

@@ -289,7 +289,7 @@ struct tegra_xhci_softc {

 struct intr_config_hook irq_hook;
 bool xhci_inited;
-vm_offset_t fw_vaddr;
+void *fw_vaddr;
 vm_size_t fw_size;
 };

@@ -744,7 +744,7 @@ load_fw(struct tegra_xhci_softc *sc)
 const struct firmware *fw;
 const struct tegra_xusb_fw_hdr *fw_hdr;
 vm_paddr_t fw_paddr, fw_base;
-vm_offset_t fw_vaddr;
+void *fw_vaddr;
 vm_size_t fw_size;
 uint32_t code_tags, code_size;
 struct clocktime fw_clock;

@@ -775,9 +775,9 @@ load_fw(struct tegra_xhci_softc *sc)

 fw_vaddr = kmem_alloc_contig(fw_size, M_WAITOK, 0, -1UL, PAGE_SIZE, 0,
     VM_MEMATTR_UNCACHEABLE);
-fw_paddr = vtophys(fw_vaddr);
+fw_paddr = vtophys((uintptr_t)fw_vaddr);
 fw_hdr = (const struct tegra_xusb_fw_hdr *)fw_vaddr;
-memcpy((void *)fw_vaddr, fw->data, fw_size);
+memcpy(fw_vaddr, fw->data, fw_size);

 firmware_put(fw, FIRMWARE_UNLOAD);
 sc->fw_vaddr = fw_vaddr;

@@ -947,7 +947,7 @@ tegra_xhci_detach(device_t dev)
 xhci_uninit(xsc);
 if (sc->irq_hdl_mbox != NULL)
 bus_teardown_intr(dev, sc->irq_res_mbox, sc->irq_hdl_mbox);
-if (sc->fw_vaddr != 0)
+if (sc->fw_vaddr != NULL)
 kmem_free(sc->fw_vaddr, sc->fw_size);
 LOCK_DESTROY(sc);
 return (0);
@@ -567,11 +567,11 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 dmat->alloc_alignment <= PAGE_SIZE &&
 (dmat->common.boundary % PAGE_SIZE) == 0) {
 /* Page-based multi-segment allocations allowed */
-*vaddr = (void *)kmem_alloc_attr(dmat->alloc_size, mflags,
+*vaddr = kmem_alloc_attr(dmat->alloc_size, mflags,
     0ul, dmat->common.lowaddr, attr);
 dmat->bounce_flags |= BF_KMEM_ALLOC;
 } else {
-*vaddr = (void *)kmem_alloc_contig(dmat->alloc_size, mflags,
+*vaddr = kmem_alloc_contig(dmat->alloc_size, mflags,
     0ul, dmat->common.lowaddr, dmat->alloc_alignment != 0 ?
     dmat->alloc_alignment : 1ul, dmat->common.boundary, attr);
 dmat->bounce_flags |= BF_KMEM_ALLOC;

@@ -608,7 +608,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
 free(vaddr, M_DEVBUF);
 else
-kmem_free((vm_offset_t)vaddr, dmat->alloc_size);
+kmem_free(vaddr, dmat->alloc_size);
 free(map, M_DEVBUF);
 dmat->map_count--;
 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,

@@ -319,8 +319,7 @@ smp_after_idle_runnable(void *arg __unused)

 for (cpu = 1; cpu < mp_ncpus; cpu++) {
 if (bootstacks[cpu] != NULL)
-kmem_free((vm_offset_t)bootstacks[cpu],
-    MP_BOOTSTACK_SIZE);
+kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
 }
 }
 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,
@@ -498,7 +497,6 @@ static bool
 start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
 {
 struct pcpu *pcpup;
-vm_offset_t pcpu_mem;
 vm_size_t size;
 vm_paddr_t pa;
 int err, naps;

@@ -514,11 +512,9 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
 KASSERT(cpuid < MAXCPU, ("Too many CPUs"));

 size = round_page(sizeof(*pcpup) + DPCPU_SIZE);
-pcpu_mem = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
+pcpup = kmem_malloc_domainset(DOMAINSET_PREF(domain), size,
     M_WAITOK | M_ZERO);
-pmap_disable_promotion(pcpu_mem, size);
-
-pcpup = (struct pcpu *)pcpu_mem;
+pmap_disable_promotion((vm_offset_t)pcpup, size);
 pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
 pcpup->pc_mpidr_low = target_cpu & CPU_AFF_MASK;
 pcpup->pc_mpidr_high = (target_cpu & CPU_AFF_MASK) >> 32;

@@ -526,8 +522,8 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain)
 dpcpu[cpuid - 1] = (void *)(pcpup + 1);
 dpcpu_init(dpcpu[cpuid - 1], cpuid);

-bootstacks[cpuid] = (void *)kmem_malloc_domainset(
-    DOMAINSET_PREF(domain), MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);
+bootstacks[cpuid] = kmem_malloc_domainset(DOMAINSET_PREF(domain),
+    MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

 naps = atomic_load_int(&aps_started);
 bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;

@@ -548,8 +544,8 @@ start_cpu(u_int cpuid, uint64_t target_cpu, int domain)

 pcpu_destroy(pcpup);
 dpcpu[cpuid - 1] = NULL;
-kmem_free((vm_offset_t)bootstacks[cpuid], MP_BOOTSTACK_SIZE);
-kmem_free(pcpu_mem, size);
+kmem_free(bootstacks[cpuid], MP_BOOTSTACK_SIZE);
+kmem_free(pcpup, size);
 bootstacks[cpuid] = NULL;
 mp_ncpus--;
 return (false);
@@ -1247,7 +1247,7 @@ pmap_init_asids(struct asid_set *set, int bits)
  * bit_alloc().
  */
 set->asid_set_size = 1 << set->asid_bits;
-set->asid_set = (bitstr_t *)kmem_malloc(bitstr_size(set->asid_set_size),
+set->asid_set = kmem_malloc(bitstr_size(set->asid_set_size),
     M_WAITOK | M_ZERO);
 for (i = 0; i < ASID_FIRST_AVAILABLE; i++)
     bit_set(set->asid_set, i);

@@ -1326,7 +1326,7 @@ pmap_init(void)
  */
 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 s = round_page(s);
-pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
 for (i = 0; i < pv_npg; i++)
     TAILQ_INIT(&pv_table[i].pv_list);
 TAILQ_INIT(&pv_dummy.pv_list);
@@ -175,7 +175,7 @@ dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 {

 linux_dma_unmap(dev, dma_addr, size);
-kmem_free((vm_offset_t)cpu_addr, size);
+kmem_free(cpu_addr, size);
 }

 static inline dma_addr_t

@@ -170,7 +170,7 @@ vm_offset_t
 linux_alloc_kmem(gfp_t flags, unsigned int order)
 {
 size_t size = ((size_t)PAGE_SIZE) << order;
-vm_offset_t addr;
+void *addr;

 if ((flags & GFP_DMA32) == 0) {
 addr = kmem_malloc(size, flags & GFP_NATIVE_MASK);

@@ -178,7 +178,7 @@ linux_alloc_kmem(gfp_t flags, unsigned int order)
 addr = kmem_alloc_contig(size, flags & GFP_NATIVE_MASK, 0,
     BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 }
-return (addr);
+return ((vm_offset_t)addr);
 }

 void

@@ -186,7 +186,7 @@ linux_free_kmem(vm_offset_t addr, unsigned int order)
 {
 size_t size = ((size_t)PAGE_SIZE) << order;

-kmem_free(addr, size);
+kmem_free((void *)addr, size);
 }

 static int

@@ -1124,13 +1124,13 @@ linux_dma_alloc_coherent(struct device *dev, size_t size,
 align = PAGE_SIZE << get_order(size);
 /* Always zero the allocation. */
 flag |= M_ZERO;
-mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
+mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
     align, 0, VM_MEMATTR_DEFAULT);
 if (mem != NULL) {
 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
     priv->dmat_coherent);
 if (*dma_handle == 0) {
-kmem_free((vm_offset_t)mem, size);
+kmem_free(mem, size);
 mem = NULL;
 }
 } else {
@@ -153,9 +153,8 @@ agp_alloc_gatt(device_t dev)
 return 0;

 gatt->ag_entries = entries;
-gatt->ag_virtual = (void *)kmem_alloc_contig(entries *
-    sizeof(u_int32_t), M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0,
-    VM_MEMATTR_WRITE_COMBINING);
+gatt->ag_virtual = kmem_alloc_contig(entries * sizeof(uint32_t),
+    M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
 if (!gatt->ag_virtual) {
 if (bootverbose)
 device_printf(dev, "contiguous allocation failed\n");

@@ -170,8 +169,7 @@ agp_alloc_gatt(device_t dev)
 void
 agp_free_gatt(struct agp_gatt *gatt)
 {
-kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
-    sizeof(u_int32_t));
+kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
 free(gatt, M_AGP);
 }

@@ -101,7 +101,7 @@ agp_amd_alloc_gatt(device_t dev)
  * directory.
  */
 gatt->ag_entries = entries;
-gatt->ag_virtual = (void *)kmem_alloc_attr(entries * sizeof(u_int32_t),
+gatt->ag_virtual = kmem_alloc_attr(entries * sizeof(uint32_t),
     M_NOWAIT | M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
 if (!gatt->ag_virtual) {
 if (bootverbose)

@@ -113,14 +113,13 @@ agp_amd_alloc_gatt(device_t dev)
 /*
  * Allocate the page directory.
  */
-gatt->ag_vdir = (void *)kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT |
+gatt->ag_vdir = kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT |
     M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
 if (!gatt->ag_vdir) {
 if (bootverbose)
 device_printf(dev,
     "failed to allocate page directory\n");
-kmem_free((vm_offset_t)gatt->ag_virtual, entries *
-    sizeof(u_int32_t));
+kmem_free(gatt->ag_virtual, entries * sizeof(uint32_t));
 free(gatt, M_AGP);
 return 0;
 }

@@ -168,9 +167,8 @@ agp_amd_alloc_gatt(device_t dev)
 static void
 agp_amd_free_gatt(struct agp_amd_gatt *gatt)
 {
-kmem_free((vm_offset_t)gatt->ag_vdir, AGP_PAGE_SIZE);
-kmem_free((vm_offset_t)gatt->ag_virtual, gatt->ag_entries *
-    sizeof(u_int32_t));
+kmem_free(gatt->ag_vdir, AGP_PAGE_SIZE);
+kmem_free(gatt->ag_virtual, gatt->ag_entries * sizeof(uint32_t));
 free(gatt, M_AGP);
 }

@@ -132,7 +132,7 @@ agp_ati_alloc_gatt(device_t dev)

 /* Alloc the GATT -- pointers to pages of AGP memory */
 sc->ag_entries = entries;
-sc->ag_virtual = (void *)kmem_alloc_attr(entries * sizeof(u_int32_t),
+sc->ag_virtual = kmem_alloc_attr(entries * sizeof(uint32_t),
     M_NOWAIT | M_ZERO, 0, ~0, VM_MEMATTR_WRITE_COMBINING);
 if (sc->ag_virtual == NULL) {
 if (bootverbose)

@@ -141,13 +141,12 @@ agp_ati_alloc_gatt(device_t dev)
 }

 /* Alloc the page directory -- pointers to each page of the GATT */
-sc->ag_vdir = (void *)kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT | M_ZERO,
+sc->ag_vdir = kmem_alloc_attr(AGP_PAGE_SIZE, M_NOWAIT | M_ZERO,
     0, ~0, VM_MEMATTR_WRITE_COMBINING);
 if (sc->ag_vdir == NULL) {
 if (bootverbose)
 device_printf(dev, "pagedir allocation failed\n");
-kmem_free((vm_offset_t)sc->ag_virtual, entries *
-    sizeof(u_int32_t));
+kmem_free(sc->ag_virtual, entries * sizeof(uint32_t));
 return ENOMEM;
 }
 sc->ag_pdir = vtophys((vm_offset_t)sc->ag_vdir);

@@ -263,9 +262,8 @@ agp_ati_detach(device_t dev)
 temp = pci_read_config(dev, apsize_reg, 4);
 pci_write_config(dev, apsize_reg, temp & ~1, 4);

-kmem_free((vm_offset_t)sc->ag_vdir, AGP_PAGE_SIZE);
-kmem_free((vm_offset_t)sc->ag_virtual, sc->ag_entries *
-    sizeof(u_int32_t));
+kmem_free(sc->ag_vdir, AGP_PAGE_SIZE);
+kmem_free(sc->ag_virtual, sc->ag_entries * sizeof(uint32_t));

 bus_release_resource(dev, SYS_RES_MEMORY, ATI_GART_MMADDR, sc->regs);
 agp_free_res(dev);

@@ -1189,7 +1189,7 @@ agp_i810_install_gatt(device_t dev)
 sc->dcache_size = 0;

 /* According to the specs the gatt on the i810 must be 64k. */
-sc->gatt->ag_virtual = (void *)kmem_alloc_contig(64 * 1024, M_NOWAIT |
+sc->gatt->ag_virtual = kmem_alloc_contig(64 * 1024, M_NOWAIT |
     M_ZERO, 0, ~0, PAGE_SIZE, 0, VM_MEMATTR_WRITE_COMBINING);
 if (sc->gatt->ag_virtual == NULL) {
 if (bootverbose)

@@ -1329,7 +1329,7 @@ agp_i810_deinstall_gatt(device_t dev)

 sc = device_get_softc(dev);
 bus_write_4(sc->sc_res[0], AGP_I810_PGTBL_CTL, 0);
-kmem_free((vm_offset_t)sc->gatt->ag_virtual, 64 * 1024);
+kmem_free(sc->gatt->ag_virtual, 64 * 1024);
 }

 static void
@@ -177,7 +177,7 @@ ecc_ei_inject_one(void *arg, size_t size)
 static void
 ecc_ei_inject(int count)
 {
-vm_offset_t memory;
+void *memory;
 int injected;

 KASSERT((quadrant & ~QUADRANT_MASK) == 0,

@@ -191,7 +191,7 @@ ecc_ei_inject(int count)
     VM_MEMATTR_UNCACHEABLE);

 for (injected = 0; injected < count; injected++) {
-ecc_ei_inject_one((void*)memory, PAGE_SIZE);
+ecc_ei_inject_one(memory, PAGE_SIZE);
 if (delay_ms != 0 && injected != count - 1)
 pause_sbt("ecc_ei_inject", delay_ms * SBT_1MS, 0, 0);
 }
@@ -497,7 +497,7 @@ struct drm_agp_head {
  * Scatter-gather memory.
  */
 struct drm_sg_mem {
-vm_offset_t vaddr;
+void *vaddr;
 vm_paddr_t *busaddr;
 vm_pindex_t pages;
 };

@@ -392,8 +392,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 free(map, DRM_MEM_MAPS);
 return -EINVAL;
 }
-map->handle = (void *)(dev->sg->vaddr + offset);
-map->offset += dev->sg->vaddr;
+map->handle = (char *)dev->sg->vaddr + offset;
+map->offset += (uintptr_t)dev->sg->vaddr;
 break;
 case _DRM_CONSISTENT:
 /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,

@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");

 #define DEBUG_SCATTER 0

-static inline vm_offset_t drm_vmalloc_dma(vm_size_t size)
+static inline void *drm_vmalloc_dma(vm_size_t size)
 {
 return kmem_alloc_attr(size, M_NOWAIT | M_ZERO, 0,
     BUS_SPACE_MAXADDR_32BIT, VM_MEMATTR_WRITE_COMBINING);

@@ -46,7 +46,7 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
 if (entry == NULL)
 return;

-if (entry->vaddr != 0)
+if (entry->vaddr != NULL)
 kmem_free(entry->vaddr, IDX_TO_OFF(entry->pages));

 free(entry->busaddr, DRM_MEM_SGLISTS);

@@ -83,7 +83,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 }

 entry->vaddr = drm_vmalloc_dma(size);
-if (entry->vaddr == 0) {
+if (entry->vaddr == NULL) {
 free(entry->busaddr, DRM_MEM_DRIVER);
 free(entry, DRM_MEM_DRIVER);
 return -ENOMEM;

@@ -91,14 +91,14 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)

 for (pindex = 0; pindex < entry->pages; pindex++) {
 entry->busaddr[pindex] =
-    vtophys(entry->vaddr + IDX_TO_OFF(pindex));
+    vtophys((uintptr_t)entry->vaddr + IDX_TO_OFF(pindex));
 }

-request->handle = entry->vaddr;
+request->handle = (uintptr_t)entry->vaddr;

 dev->sg = entry;

-DRM_DEBUG("allocated %ju pages @ 0x%08zx, contents=%08lx\n",
+DRM_DEBUG("allocated %ju pages @ %p, contents=%08lx\n",
     entry->pages, entry->vaddr, *(unsigned long *)entry->vaddr);

 return 0;

@@ -125,10 +125,10 @@ int drm_sg_free(struct drm_device *dev, void *data,
 entry = dev->sg;
 dev->sg = NULL;

-if (!entry || entry->vaddr != request->handle)
+if (!entry || (uintptr_t)entry->vaddr != request->handle)
 return -EINVAL;

-DRM_DEBUG("free 0x%zx\n", entry->vaddr);
+DRM_DEBUG("free %p\n", entry->vaddr);

 drm_sg_cleanup(entry);
@@ -268,7 +268,7 @@ SYSINIT(hyperv_initialize, SI_SUB_HYPERVISOR, SI_ORDER_FIRST, hyperv_init,
 static void
 hypercall_memfree(void)
 {
-kmem_free((vm_offset_t)hypercall_context.hc_addr, PAGE_SIZE);
+kmem_free(hypercall_context.hc_addr, PAGE_SIZE);
 hypercall_context.hc_addr = NULL;
 }

@@ -286,8 +286,7 @@ hypercall_create(void *arg __unused)
  * the NX bit.
  * - Assume kmem_malloc() returns properly aligned memory.
  */
-hypercall_context.hc_addr = (void *)kmem_malloc(PAGE_SIZE, M_EXEC |
-    M_WAITOK);
+hypercall_context.hc_addr = kmem_malloc(PAGE_SIZE, M_EXEC | M_WAITOK);
 hypercall_context.hc_paddr = vtophys(hypercall_context.hc_addr);

 /* Get the 'reserved' bits, which requires preservation. */
@@ -519,7 +519,7 @@ iommu_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
     DOMAINSET_PREF(tag->common.domain), mflags);
 map->flags |= BUS_DMAMAP_IOMMU_MALLOC;
 } else {
-*vaddr = (void *)kmem_alloc_attr_domainset(
+*vaddr = kmem_alloc_attr_domainset(
     DOMAINSET_PREF(tag->common.domain), tag->common.maxsize,
     mflags, 0ul, BUS_SPACE_MAXADDR, attr);
 map->flags |= BUS_DMAMAP_IOMMU_KMEM_ALLOC;

@@ -547,7 +547,7 @@ iommu_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map1)
 } else {
 KASSERT((map->flags & BUS_DMAMAP_IOMMU_KMEM_ALLOC) != 0,
     ("iommu_bus_dmamem_free for non alloced map %p", map));
-kmem_free((vm_offset_t)vaddr, tag->common.maxsize);
+kmem_free(vaddr, tag->common.maxsize);
 map->flags &= ~BUS_DMAMAP_IOMMU_KMEM_ALLOC;
 }
@@ -148,7 +148,7 @@ kvm_clock_attach(device_t dev)
 (regs[0] & KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) != 0;

 /* Set up 'struct pvclock_vcpu_time_info' page(s): */
-sc->timeinfos = (struct pvclock_vcpu_time_info *)kmem_malloc(mp_ncpus *
+sc->timeinfos = kmem_malloc(mp_ncpus *
     sizeof(struct pvclock_vcpu_time_info), M_WAITOK | M_ZERO);
 kvm_clock_system_time_enable(sc);
@@ -198,7 +198,7 @@ lio_dma_alloc(size_t size, vm_paddr_t *dma_handle)
 void *mem;

 align = PAGE_SIZE << lio_get_order(size);
-mem = (void *)kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
+mem = kmem_alloc_contig(size, M_WAITOK, 0, ~0ul, align, 0,
     VM_MEMATTR_DEFAULT);
 if (mem != NULL)
 *dma_handle = vtophys(mem);

@@ -212,7 +212,7 @@ static inline void
 lio_dma_free(size_t size, void *cpu_addr)
 {

-kmem_free((vm_offset_t)cpu_addr, size);
+kmem_free(cpu_addr, size);
 }

 static inline uint64_t
@@ -410,6 +410,7 @@ mlx5_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
 struct mlx5_fw_update *fu;
 struct firmware fake_fw;
 struct mlx5_eeprom_get *eeprom_info;
+void *fw_data;
 int error;

 error = 0;

@@ -461,21 +462,21 @@ mlx5_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
 error = mlx5_dbsf_to_core(devaddr, &mdev);
 if (error != 0)
 break;
-bzero(&fake_fw, sizeof(fake_fw));
-fake_fw.name = "umlx_fw_up";
-fake_fw.datasize = fu->img_fw_data_len;
-fake_fw.version = 1;
-fake_fw.data = (void *)kmem_malloc(fu->img_fw_data_len,
-    M_WAITOK);
-if (fake_fw.data == NULL) {
-    error = ENOMEM;
-    break;
-}
-error = copyin(fu->img_fw_data, __DECONST(void *, fake_fw.data),
-    fu->img_fw_data_len);
-if (error == 0)
-    error = -mlx5_firmware_flash(mdev, &fake_fw);
-kmem_free((vm_offset_t)fake_fw.data, fu->img_fw_data_len);
+fw_data = kmem_malloc(fu->img_fw_data_len, M_WAITOK);
+error = copyin(fu->img_fw_data, fw_data, fu->img_fw_data_len);
+if (error == 0) {
+    bzero(&fake_fw, sizeof(fake_fw));
+    fake_fw.name = "umlx_fw_up";
+    fake_fw.datasize = fu->img_fw_data_len;
+    fake_fw.version = 1;
+    fake_fw.data = fw_data;
+    error = -mlx5_firmware_flash(mdev, &fake_fw);
+}
+kmem_free(fw_data, fu->img_fw_data_len);
 break;
 case MLX5_FW_RESET:
 if ((fflag & FWRITE) == 0) {
@@ -393,9 +393,9 @@ start_all_aps(void)
 apic_id = cpu_apic_ids[cpu];

 /* allocate and set up a boot stack data page */
-bootstacks[cpu] = (char *)kmem_malloc(kstack_pages * PAGE_SIZE,
+bootstacks[cpu] = kmem_malloc(kstack_pages * PAGE_SIZE,
     M_WAITOK | M_ZERO);
-dpcpu = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+dpcpu = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 /* setup a vector to our boot code */
 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
 *((volatile u_short *) WARMBOOT_SEG) = (boot_address >> 4);

@@ -1050,7 +1050,7 @@ __CONCAT(PMTYPE, init)(void)
  */
 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 s = round_page(s);
-pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
 for (i = 0; i < pv_npg; i++)
     TAILQ_INIT(&pv_table[i].pv_list);
@@ -494,7 +494,7 @@ void
 contigfree(void *addr, unsigned long size, struct malloc_type *type)
 {

-kmem_free((vm_offset_t)addr, size);
+kmem_free(addr, size);
 malloc_type_freed(type, round_page(size));
 }

@@ -588,17 +588,15 @@ static caddr_t __noinline
 malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
     int flags DEBUG_REDZONE_ARG_DEF)
 {
-vm_offset_t kva;
-caddr_t va;
+void *va;

 size = roundup(size, PAGE_SIZE);
-kva = kmem_malloc_domainset(policy, size, flags);
-if (kva != 0) {
+va = kmem_malloc_domainset(policy, size, flags);
+if (va != NULL) {
 /* The low bit is unused for slab pointers. */
-vsetzoneslab(kva, NULL, (void *)((size << 1) | 1));
+vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
 uma_total_inc(size);
 }
-va = (caddr_t)kva;
 malloc_type_allocated(mtp, va == NULL ? 0 : size);
 if (__predict_false(va == NULL)) {
 KASSERT((flags & M_WAITOK) == 0,

@@ -607,7 +605,7 @@ malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
 #ifdef DEBUG_REDZONE
 va = redzone_setup(va, osize);
 #endif
-kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
+kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
 }
 return (va);
 }

@@ -616,7 +614,7 @@ static void
 free_large(void *addr, size_t size)
 {

-kmem_free((vm_offset_t)addr, size);
+kmem_free(addr, size);
 uma_total_dec(size);
 }
@@ -158,7 +158,7 @@ busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size, int domain,
 /* Inform UMA that this allocator uses kernel_arena/object. */
 *pflag = UMA_SLAB_KERNEL;

-return ((void *)kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size,
+return (kmem_alloc_attr_domainset(DOMAINSET_FIXED(domain), size,
     wait, 0, BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
 #else
 panic("VM_MEMATTR_UNCACHEABLE unavailable");

@@ -169,5 +169,5 @@ void
 busdma_bufalloc_free_uncacheable(void *item, vm_size_t size, uint8_t pflag)
 {

-kmem_free((vm_offset_t)item, size);
+kmem_free(item, size);
 }
@@ -3686,7 +3686,7 @@ mmu_radix_init(void)
  */
 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 s = round_page(s);
-pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
 for (i = 0; i < pv_npg; i++)
     TAILQ_INIT(&pv_table[i].pv_list);
 TAILQ_INIT(&pv_dummy.pv_list);
@@ -483,7 +483,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
  * multi-seg allocations yet though.
  * XXX Certain AGP hardware does.
  */
-*vaddr = (void *)kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
+*vaddr = kmem_alloc_contig(dmat->maxsize, mflags, 0ul,
     dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul,
     dmat->boundary, attr);
 (*mapp)->contigalloc = 1;

@@ -511,7 +511,7 @@ bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 if (!map->contigalloc)
 free(vaddr, M_DEVBUF);
 else
-kmem_free((vm_offset_t)vaddr, dmat->maxsize);
+kmem_free(vaddr, dmat->maxsize);
 bus_dmamap_destroy(dmat, map);
 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags);
 }
@@ -176,7 +176,7 @@ cpu_mp_start(void)
 void *dpcpu;

 pc = &__pcpu[cpu.cr_cpuid];
-dpcpu = (void *)kmem_malloc_domainset(DOMAINSET_PREF(domain),
+dpcpu = kmem_malloc_domainset(DOMAINSET_PREF(domain),
     DPCPU_SIZE, M_WAITOK | M_ZERO);
 pcpu_init(pc, cpu.cr_cpuid, sizeof(*pc));
 dpcpu_init(dpcpu, cpu.cr_cpuid);
@@ -454,11 +454,11 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
 dmat->common.alignment <= PAGE_SIZE &&
 (dmat->common.boundary % PAGE_SIZE) == 0) {
 /* Page-based multi-segment allocations allowed */
-*vaddr = (void *)kmem_alloc_attr(dmat->common.maxsize, mflags,
+*vaddr = kmem_alloc_attr(dmat->common.maxsize, mflags,
     0ul, dmat->common.lowaddr, attr);
 dmat->bounce_flags |= BF_KMEM_ALLOC;
 } else {
-*vaddr = (void *)kmem_alloc_contig(dmat->common.maxsize, mflags,
+*vaddr = kmem_alloc_contig(dmat->common.maxsize, mflags,
     0ul, dmat->common.lowaddr, dmat->common.alignment != 0 ?
     dmat->common.alignment : 1ul, dmat->common.boundary, attr);
 dmat->bounce_flags |= BF_KMEM_ALLOC;

@@ -495,7 +495,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 if ((dmat->bounce_flags & BF_KMEM_ALLOC) == 0)
 free(vaddr, M_DEVBUF);
 else
-kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
+kmem_free(vaddr, dmat->common.maxsize);
 free(map, M_DEVBUF);
 dmat->map_count--;
 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
@@ -313,8 +313,7 @@ smp_after_idle_runnable(void *arg __unused)

 for (cpu = 1; cpu <= mp_maxid; cpu++) {
 if (bootstacks[cpu] != NULL)
-kmem_free((vm_offset_t)bootstacks[cpu],
-    MP_BOOTSTACK_SIZE);
+kmem_free(bootstacks[cpu], MP_BOOTSTACK_SIZE);
 }
 }
 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,

@@ -475,11 +474,10 @@ cpu_init_fdt(u_int id, phandle_t node, u_int addr_size, pcell_t *reg)
 pcpu_init(pcpup, cpuid, sizeof(struct pcpu));
 pcpup->pc_hart = hart;

-dpcpu[cpuid - 1] = (void *)kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
+dpcpu[cpuid - 1] = kmem_malloc(DPCPU_SIZE, M_WAITOK | M_ZERO);
 dpcpu_init(dpcpu[cpuid - 1], cpuid);

-bootstacks[cpuid] = (void *)kmem_malloc(MP_BOOTSTACK_SIZE,
-    M_WAITOK | M_ZERO);
+bootstacks[cpuid] = kmem_malloc(MP_BOOTSTACK_SIZE, M_WAITOK | M_ZERO);

 naps = atomic_load_int(&aps_started);
 bootstack = (char *)bootstacks[cpuid] + MP_BOOTSTACK_SIZE;
@@ -790,7 +790,7 @@ pmap_init(void)
  */
 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
 s = round_page(s);
-pv_table = (struct md_page *)kmem_malloc(s, M_WAITOK | M_ZERO);
+pv_table = kmem_malloc(s, M_WAITOK | M_ZERO);
 for (i = 0; i < pv_npg; i++)
     TAILQ_INIT(&pv_table[i].pv_list);
 TAILQ_INIT(&pv_dummy.pv_list);
@@ -1947,7 +1947,7 @@ page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 void *p; /* Returned page */

 *pflag = UMA_SLAB_KERNEL;
-p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
+p = kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);

 return (p);
 }

@@ -2104,7 +2104,7 @@ page_free(void *mem, vm_size_t size, uint8_t flags)
 KASSERT((flags & UMA_SLAB_KERNEL) != 0,
     ("UMA: page_free used with invalid flags %x", flags));

-kmem_free((vm_offset_t)mem, size);
+kmem_free(mem, size);
 }

 /*
@@ -57,20 +57,20 @@ vm_offset_t kmap_alloc_wait(vm_map_t, vm_size_t);
 void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);

 /* These operate on virtual addresses backed by memory. */
-vm_offset_t kmem_alloc_attr(vm_size_t size, int flags,
+void *kmem_alloc_attr(vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size,
+void *kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size,
     int flags, vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig(vm_size_t size, int flags,
+void *kmem_alloc_contig(vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr);
-vm_offset_t kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size,
+void *kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size,
     int flags, vm_paddr_t low, vm_paddr_t high, u_long alignment,
     vm_paddr_t boundary, vm_memattr_t memattr);
-vm_offset_t kmem_malloc(vm_size_t size, int flags);
-vm_offset_t kmem_malloc_domainset(struct domainset *ds, vm_size_t size,
+void *kmem_malloc(vm_size_t size, int flags);
+void *kmem_malloc_domainset(struct domainset *ds, vm_size_t size,
     int flags);
-void kmem_free(vm_offset_t addr, vm_size_t size);
+void kmem_free(void *addr, vm_size_t size);

 /* This provides memory for previously allocated address space. */
 int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
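The vm_extern.h hunk above is the heart of the change. Call sites that still need a numeric kernel virtual address, for example to feed vtophys() or pmap_kextract(), recover one by casting through uintptr_t, as the tegra and dmar hunks in this commit do. A small sketch of that pattern (variable names hypothetical, not taken from this diff):

    void *va;
    vm_paddr_t pa;

    va = kmem_alloc_contig(PAGE_SIZE, M_WAITOK | M_ZERO, 0,
        BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
    pa = vtophys((uintptr_t)va);    /* numeric KVA recovered by cast */
    /* ... hand pa to the device, use va from the CPU ... */
    kmem_free(va, PAGE_SIZE);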
@@ -150,8 +150,7 @@ vm_mem_init(void *dummy)
 void
 vm_ksubmap_init(struct kva_md_info *kmi)
 {
-vm_offset_t firstaddr;
-caddr_t v;
+caddr_t firstaddr, v;
 vm_size_t size = 0;
 long physmem_est;
 vm_offset_t minaddr;

@@ -170,9 +169,9 @@ vm_ksubmap_init(struct kva_md_info *kmi)
  * needed and allocates it. The second pass assigns virtual
  * addresses to the various data structures.
  */
-firstaddr = 0;
+firstaddr = NULL;
 again:
-v = (caddr_t)firstaddr;
+v = firstaddr;

 /*
  * Discount the physical memory larger than the size of kernel_map

@@ -186,7 +185,7 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 /*
  * End of first pass, size has been calculated so allocate memory
  */
-if (firstaddr == 0) {
+if (firstaddr == NULL) {
 size = (vm_size_t)v;
 #ifdef VM_FREELIST_DMA32
 /*

@@ -195,10 +194,10 @@ vm_ksubmap_init(struct kva_md_info *kmi)
  */
 firstaddr = kmem_alloc_attr(size, M_ZERO | M_NOWAIT,
     (vm_paddr_t)1 << 32, ~(vm_paddr_t)0, VM_MEMATTR_DEFAULT);
-if (firstaddr == 0)
+if (firstaddr == NULL)
 #endif
 firstaddr = kmem_malloc(size, M_ZERO | M_WAITOK);
-if (firstaddr == 0)
+if (firstaddr == NULL)
 panic("startup: no room for tables");
 goto again;
 }

@@ -206,15 +205,15 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 /*
  * End of second pass, addresses have been assigned
  */
-if ((vm_size_t)((char *)v - firstaddr) != size)
+if ((vm_size_t)(v - firstaddr) != size)
 panic("startup: table size inconsistency");

 /*
  * Allocate the clean map to hold all of I/O virtual memory.
  */
 size = (long)nbuf * BKVASIZE + (long)bio_transient_maxcnt * maxphys;
-kmi->clean_sva = firstaddr = kva_alloc(size);
-kmi->clean_eva = firstaddr + size;
+kmi->clean_sva = kva_alloc(size);
+kmi->clean_eva = kmi->clean_sva + size;

 /*
  * Allocate the buffer arena.

@@ -223,11 +222,10 @@ vm_ksubmap_init(struct kva_md_info *kmi)
  * avoids lock contention at the expense of some fragmentation.
  */
 size = (long)nbuf * BKVASIZE;
-kmi->buffer_sva = firstaddr;
+kmi->buffer_sva = kmi->clean_sva;
 kmi->buffer_eva = kmi->buffer_sva + size;
 vmem_init(buffer_arena, "buffer arena", kmi->buffer_sva, size,
     PAGE_SIZE, (mp_ncpus > 4) ? BKVASIZE * 8 : 0, M_WAITOK);
-firstaddr += size;

 /*
  * And optionally transient bio space.

@@ -235,11 +233,8 @@ vm_ksubmap_init(struct kva_md_info *kmi)
 if (bio_transient_maxcnt != 0) {
 size = (long)bio_transient_maxcnt * maxphys;
 vmem_init(transient_arena, "transient arena",
-    firstaddr, size, PAGE_SIZE, 0, M_WAITOK);
-firstaddr += size;
+    kmi->buffer_eva, size, PAGE_SIZE, 0, M_WAITOK);
 }
-if (firstaddr != kmi->clean_eva)
-panic("Clean map calculation incorrect");

 /*
  * Allocate the pageable submaps. We may cache an exec map entry per
@@ -229,7 +229,7 @@ kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
  * necessarily physically contiguous. If M_ZERO is specified through the
  * given flags, then the pages are zeroed before they are mapped.
  */
-static vm_offset_t
+static void *
 kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
     vm_paddr_t high, vm_memattr_t memattr)
 {

@@ -270,10 +270,10 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 }
 VM_OBJECT_WUNLOCK(object);
 kmem_alloc_san(addr, size, asize, flags);
-return (addr);
+return ((void *)addr);
 }

-vm_offset_t
+void *
 kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
     vm_memattr_t memattr)
 {

@@ -282,19 +282,19 @@ kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
     high, memattr));
 }

-vm_offset_t
+void *
 kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
 {
 struct vm_domainset_iter di;
-vm_offset_t addr;
+void *addr;
 int domain;

 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 do {
 addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
     memattr);
-if (addr != 0)
+if (addr != NULL)
 break;
 } while (vm_domainset_iter_policy(&di, &domain) == 0);

@@ -309,7 +309,7 @@ kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
  * through the given flags, then the pages are zeroed before they are
  * mapped.
  */
-static vm_offset_t
+static void *
 kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
     vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)

@@ -326,7 +326,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 asize = round_page(size);
 vmem = vm_dom[domain].vmd_kernel_arena;
 if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
-return (0);
+return (NULL);
 offset = addr - VM_MIN_KERNEL_ADDRESS;
 pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 npages = atop(asize);

@@ -336,7 +336,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 if (m == NULL) {
 VM_OBJECT_WUNLOCK(object);
 vmem_free(vmem, addr, asize);
-return (0);
+return (NULL);
 }
 KASSERT(vm_page_domain(m) == domain,
     ("kmem_alloc_contig_domain: Domain mismatch %d != %d",

@@ -353,10 +353,10 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
 }
 VM_OBJECT_WUNLOCK(object);
 kmem_alloc_san(addr, size, asize, flags);
-return (addr);
+return ((void *)addr);
 }

-vm_offset_t
+void *
 kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
 {

@@ -365,20 +365,20 @@ kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
     high, alignment, boundary, memattr));
 }

-vm_offset_t
+void *
 kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
     vm_memattr_t memattr)
 {
 struct vm_domainset_iter di;
-vm_offset_t addr;
+void *addr;
 int domain;

 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 do {
 addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
     alignment, boundary, memattr);
-if (addr != 0)
+if (addr != NULL)
 break;
 } while (vm_domainset_iter_policy(&di, &domain) == 0);

@@ -423,7 +423,7 @@ kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
 *
 * Allocate wired-down pages in the kernel's address space.
 */
-static vm_offset_t
+static void *
 kmem_malloc_domain(int domain, vm_size_t size, int flags)
 {
 vmem_t *arena;

@@ -445,27 +445,27 @@ kmem_malloc_domain(int domain, vm_size_t size, int flags)
 return (0);
 }
 kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
-return (addr);
+return ((void *)addr);
 }

-vm_offset_t
+void *
 kmem_malloc(vm_size_t size, int flags)
 {

 return (kmem_malloc_domainset(DOMAINSET_RR(), size, flags));
 }

-vm_offset_t
+void *
 kmem_malloc_domainset(struct domainset *ds, vm_size_t size, int flags)
 {
 struct vm_domainset_iter di;
-vm_offset_t addr;
+void *addr;
 int domain;

 vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
 do {
 addr = kmem_malloc_domain(domain, size, flags);
-if (addr != 0)
+if (addr != NULL)
 break;
 } while (vm_domainset_iter_policy(&di, &domain) == 0);

@@ -631,15 +631,15 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
  * original allocation.
  */
 void
-kmem_free(vm_offset_t addr, vm_size_t size)
+kmem_free(void *addr, vm_size_t size)
 {
 struct vmem *arena;

 size = round_page(size);
-kasan_mark((void *)addr, size, size, 0);
-arena = _kmem_unback(kernel_object, addr, size);
+kasan_mark(addr, size, size, 0);
+arena = _kmem_unback(kernel_object, (uintptr_t)addr, size);
 if (arena != NULL)
-vmem_free(arena, addr, size);
+vmem_free(arena, (uintptr_t)addr, size);
 }

 /*
|
@ -158,7 +158,7 @@ struct dmar_unit {
|
||||
|
||||
/* QI */
|
||||
int qi_enabled;
|
||||
vm_offset_t inv_queue;
|
||||
char *inv_queue;
|
||||
vm_size_t inv_queue_size;
|
||||
uint32_t inv_queue_avail;
|
||||
uint32_t inv_queue_tail;
|
||||
|
@ -342,9 +342,9 @@ dmar_init_irt(struct dmar_unit *unit)
|
||||
return (0);
|
||||
}
|
||||
unit->irte_cnt = clp2(num_io_irqs);
|
||||
unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(
|
||||
unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
|
||||
dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
|
||||
unit->irt = kmem_alloc_contig(unit->irte_cnt * sizeof(dmar_irte_t),
|
||||
M_ZERO | M_WAITOK, 0, dmar_high, PAGE_SIZE, 0,
|
||||
DMAR_IS_COHERENT(unit) ?
|
||||
VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
|
||||
if (unit->irt == NULL)
|
||||
return (ENOMEM);
|
||||
@ -378,7 +378,6 @@ dmar_fini_irt(struct dmar_unit *unit)
|
||||
dmar_disable_ir(unit);
|
||||
dmar_qi_invalidate_iec_glob(unit);
|
||||
vmem_destroy(unit->irtids);
|
||||
kmem_free((vm_offset_t)unit->irt, unit->irte_cnt *
|
||||
sizeof(dmar_irte_t));
|
||||
kmem_free(unit->irt, unit->irte_cnt * sizeof(dmar_irte_t));
|
||||
}
|
||||
}
|
||||
|
@ -510,7 +510,7 @@ dmar_init_qi(struct dmar_unit *unit)
|
||||
|
||||
DMAR_LOCK(unit);
|
||||
dmar_write8(unit, DMAR_IQT_REG, 0);
|
||||
iqa = pmap_kextract(unit->inv_queue);
|
||||
iqa = pmap_kextract((uintptr_t)unit->inv_queue);
|
||||
iqa |= qi_sz;
|
||||
dmar_write8(unit, DMAR_IQA_REG, iqa);
|
||||
dmar_enable_qi(unit);
|
||||
@ -552,7 +552,7 @@ dmar_fini_qi(struct dmar_unit *unit)
|
||||
DMAR_UNLOCK(unit);
|
||||
|
||||
kmem_free(unit->inv_queue, unit->inv_queue_size);
|
||||
unit->inv_queue = 0;
|
||||
unit->inv_queue = NULL;
|
||||
unit->inv_queue_size = 0;
|
||||
unit->qi_enabled = 0;
|
||||
}
|
||||
|
@@ -449,12 +449,12 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
 dmat->common.alignment <= PAGE_SIZE &&
 (dmat->common.boundary % PAGE_SIZE) == 0) {
 /* Page-based multi-segment allocations allowed */
-*vaddr = (void *)kmem_alloc_attr_domainset(
+*vaddr = kmem_alloc_attr_domainset(
     DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
     mflags, 0ul, dmat->common.lowaddr, attr);
 dmat->bounce_flags |= BUS_DMA_KMEM_ALLOC;
 } else {
-*vaddr = (void *)kmem_alloc_contig_domainset(
+*vaddr = kmem_alloc_contig_domainset(
     DOMAINSET_PREF(dmat->common.domain), dmat->common.maxsize,
     mflags, 0ul, dmat->common.lowaddr,
     dmat->common.alignment != 0 ? dmat->common.alignment : 1ul,

@@ -490,7 +490,7 @@ bounce_bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
 if ((dmat->bounce_flags & BUS_DMA_KMEM_ALLOC) == 0)
 free(vaddr, M_DEVBUF);
 else
-kmem_free((vm_offset_t)vaddr, dmat->common.maxsize);
+kmem_free(vaddr, dmat->common.maxsize);
 CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat,
     dmat->bounce_flags);
 }

@@ -1154,8 +1154,7 @@ smp_after_idle_runnable(void *arg __unused)
 smp_no_rendezvous_barrier, NULL);

 for (cpu = 1; cpu < mp_ncpus; cpu++) {
-kmem_free((vm_offset_t)bootstacks[cpu], kstack_pages *
-    PAGE_SIZE);
+kmem_free(bootstacks[cpu], kstack_pages * PAGE_SIZE);
 }
 }
 SYSINIT(smp_after_idle_runnable, SI_SUB_SMP, SI_ORDER_ANY,