mirror of https://git.FreeBSD.org/src.git
sys: use our roundup2/rounddown2() macros when param.h is available.
rounddown2 tends to produce longer lines than the original code, and when the code has a high indentation level it was not really advantageous to do the replacement. This tries to strike a balance between the readability of using the macros and the flexibility of keeping the open-coded expressions, so not everything is converted.
parent 5977d3e898
commit d9c9c81c08

Notes (svn2git):
    svn path=/head/; revision=298433
    2020-12-20 02:59:44 +00:00
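For orientation before the diff: a minimal sketch of the two <sys/param.h> helpers this commit substitutes for open-coded masking, plus the identities each mechanical rewrite relies on. The definitions are paraphrased rather than quoted, and both macros assume the alignment is a power of two.

/*
 * Paraphrased sketch of the <sys/param.h> helpers used throughout this
 * commit; y must be a power of two for the masking to be correct.
 */
#define rounddown2(x, y)  ((x) & ~((y) - 1))                 /* align x down to y */
#define roundup2(x, y)    (((x) + ((y) - 1)) & ~((y) - 1))   /* align x up to y */

/*
 * The conversions below are mechanical rewrites of these idioms:
 *
 *     addr & ~(align - 1)                ->  rounddown2(addr, align)
 *     (addr + align - 1) & ~(align - 1)  ->  roundup2(addr, align)
 *
 * e.g. with align = 4096:  rounddown2(0x1234, 4096) == 0x1000
 *                          roundup2(0x1234, 4096)   == 0x2000
 */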
@@ -383,7 +383,7 @@ amd64_mrstoreone(void *arg)
 /* mask/active register */
 if (mrd->mr_flags & MDF_ACTIVE) {
 msrv = MTRR_PHYSMASK_VALID |
-(~(mrd->mr_len - 1) & mtrr_physmask);
+rounddown2(mtrr_physmask, mrd->mr_len);
 } else {
 msrv = 0;
 }
@@ -496,7 +496,7 @@ pmap_kmem_choose(vm_offset_t addr)
 {
 vm_offset_t newaddr = addr;

-newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
+newaddr = roundup2(addr, NBPDR);
 return (newaddr);
 }
@@ -675,8 +675,8 @@ __start(void)
 curaddr = (void*)((unsigned int)curaddr & 0xfff00000);
 #ifdef KZIP
 if (*kernel == 0x1f && kernel[1] == 0x8b) {
-pt_addr = (((int)&_end + KERNSIZE + 0x100) &
-~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
+pt_addr = L1_TABLE_SIZE +
+rounddown2((int)&_end + KERNSIZE + 0x100, L1_TABLE_SIZE);

 #ifdef CPU_ARM9
 /* So that idcache_wbinv works; */
@@ -710,7 +710,7 @@ __start(void)
 (unsigned int)curaddr,
 (unsigned int)&func_end, 0);
 dst = (void *)(((vm_offset_t)dst & ~3));
-pt_addr = ((unsigned int)dst &~(L1_TABLE_SIZE - 1)) + L1_TABLE_SIZE;
+pt_addr = L1_TABLE_SIZE + rounddown2((unsigned int)dst, L1_TABLE_SIZE);
 setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
 (vm_paddr_t)curaddr + 0x10000000, 0);
 sp = pt_addr + L1_TABLE_SIZE + 8192;
@@ -4754,7 +4754,7 @@ pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
 vm_size_t resid;
 int i;

-resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
+resid = roundup2(size, PAGE_SIZE);

 if (l1pt == 0)
 panic("pmap_map_chunk: no L1 table provided");
@@ -515,9 +515,9 @@ initarm(struct arm_boot_params *abp)
 pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
 &kernel_pt_table[KERNEL_PT_KERN + i]);
 pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
-(((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
-VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1));
+rounddown2(((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE, PAGE_SIZE),
+VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+afterkern = round_page(rounddown2(lastaddr + L1_S_SIZE, L1_S_SIZE));
 for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
 pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
 &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
@@ -229,9 +229,9 @@ initarm(struct arm_boot_params *abp)
 pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
 &kernel_pt_table[KERNEL_PT_KERN + i]);
 pmap_map_chunk(l1pagetable, KERNBASE, PHYSADDR,
-(((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE) & ~(PAGE_SIZE - 1),
-VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-afterkern = round_page((lastaddr + L1_S_SIZE) & ~(L1_S_SIZE - 1));
+rounddown2(((uint32_t)lastaddr - KERNBASE) + PAGE_SIZE, PAGE_SIZE),
+VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+afterkern = round_page(rounddown2(lastaddr + L1_S_SIZE, L1_S_SIZE));
 for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
 pmap_link_l2pt(l1pagetable, afterkern + i * L1_S_SIZE,
 &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
@@ -823,7 +823,7 @@ decode_win_cpu_valid(void)
 continue;
 }

-if (b != (b & ~(s - 1))) {
+if (b != rounddown2(b, s)) {
 printf("CPU window#%d: address 0x%08x is not aligned "
 "to 0x%08x\n", i, b, s);
 rv = 0;
@@ -131,8 +131,8 @@ static const struct arm_devmap_entry iq81342_devmap[] = {
 * Cheat and map a whole section, this will bring
 * both PCI-X and PCI-E outbound I/O
 */
-IOP34X_PCIX_OIOBAR_VADDR &~ (0x100000 - 1),
-IOP34X_PCIX_OIOBAR &~ (0x100000 - 1),
+rounddown2(IOP34X_PCIX_OIOBAR_VADDR, 0x100000),
+rounddown2(IOP34X_PCIX_OIOBAR, 0x100000),
 0x100000,
 },
 {
@@ -227,8 +227,8 @@ initarm(struct arm_boot_params *abp)
 l1pagetable = kernel_l1pt.pv_va;

 /* Map the L2 pages tables in the L1 page table */
-pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
-&kernel_pt_table[KERNEL_PT_SYS]);
+pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000),
+&kernel_pt_table[KERNEL_PT_SYS]);
 pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000,
 VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@@ -236,11 +236,10 @@ initarm(struct arm_boot_params *abp)
 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

 pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
-(((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
-VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
-afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) & ~(L1_S_SIZE
-- 1));
+rounddown2(((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE, L1_S_SIZE),
+VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+freemem_after = rounddown2((int)lastaddr + PAGE_SIZE, PAGE_SIZE);
+afterkern = round_page(rounddown2((vm_offset_t)lastaddr + L1_S_SIZE, L1_S_SIZE));
 for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
 pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
 &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
@@ -121,8 +121,8 @@ i81342_pci_attach(device_t dev)
 memstart | PCI_MAPREG_MEM_PREFETCHABLE_MASK |
 PCI_MAPREG_MEM_TYPE_64BIT);
 bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUBAR1, 0);
-bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR1, ~(memsize - 1)
-&~(0xfff));
+bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IALR1,
+rounddown2(~(0xfff), memsize));
 bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IATVR1, memstart);
 bus_space_write_4(sc->sc_st, sc->sc_atu_sh, ATU_IAUTVR1, 0);
@@ -178,13 +178,13 @@ i81342_mem_bs_map(bus_space_tag_t tag, bus_addr_t bpa, bus_size_t size, int flag
 tmp = tmp->next;
 }
 addr = allocable;
-endaddr = ((addr + size) &~ (0x1000000 - 1)) + 0x1000000;
+endaddr = rounddown2(addr + size, 0x1000000) + 0x1000000;
 if (endaddr >= IOP34X_VADDR)
 panic("PCI virtual memory exhausted");
 allocable = endaddr;
 tmp = malloc(sizeof(*tmp), M_DEVBUF, M_WAITOK);
 tmp->next = NULL;
-paddr = bpa &~ (0x100000 - 1);
+paddr = rounddown2(bpa, 0x100000);
 tmp->paddr = paddr;
 tmp->vaddr = addr;
 tmp->size = 0;
@@ -291,7 +291,7 @@ initarm(struct arm_boot_params *abp)
 l1pagetable = kernel_l1pt.pv_va;

 /* Map the L2 pages tables in the L1 page table */
-pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
+pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000),
 &kernel_pt_table[KERNEL_PT_SYS]);
 pmap_link_l2pt(l1pagetable, IXP425_IO_VBASE,
 &kernel_pt_table[KERNEL_PT_IO]);
@@ -222,8 +222,8 @@ initarm(struct arm_boot_params *abp)
 l1pagetable = kernel_l1pt.pv_va;

 /* Map the L2 pages tables in the L1 page table */
-pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
-&kernel_pt_table[KERNEL_PT_SYS]);
+pmap_link_l2pt(l1pagetable, rounddown2(ARM_VECTORS_HIGH, 0x00100000),
+&kernel_pt_table[KERNEL_PT_SYS]);
 #if 0 /* XXXBJR: What is this? Don't know if there's an analogue. */
 pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
 &kernel_pt_table[KERNEL_PT_IOPXS]);
@@ -235,11 +235,10 @@ initarm(struct arm_boot_params *abp)
 pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000,
 0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
 pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
-(((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE) & ~(L1_S_SIZE - 1),
-VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
-freemem_after = ((int)lastaddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
-afterkern = round_page(((vm_offset_t)lastaddr + L1_S_SIZE) &
-~(L1_S_SIZE - 1));
+rounddown2(((uint32_t)(lastaddr) - KERNBASE - 0x200000) + L1_S_SIZE, L1_S_SIZE),
+VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+freemem_after = rounddown2((int)lastaddr + PAGE_SIZE, PAGE_SIZE);
+afterkern = round_page(rounddown2((vm_offset_t)lastaddr + L1_S_SIZE, L1_S_SIZE));
 for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
 pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
 &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
@@ -622,7 +622,7 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
 KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));

 l2 = pmap_l2(kernel_pmap, va);
-l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1));
+l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
 l2pt = (vm_offset_t)l2;
 l2_slot = pmap_l2_index(va);
 l3pt = l3_start;
@@ -744,7 +744,7 @@ file_search(const char *name, char **extlist)
 }

 #define INT_ALIGN(base, ptr) ptr = \
-(base) + (((ptr) - (base) + sizeof(int) - 1) & ~(sizeof(int) - 1))
+(base) + roundup2((ptr) - (base), sizeof(int))

 static char *
 mod_search_hints(struct moduledir *mdp, const char *modname,
@@ -86,7 +86,7 @@ beri_arch_loadaddr(u_int type, void *data, uint64_t addr)

 /* Align ELF objects at page boundaries; others at cache lines. */
 align = (type == LOAD_ELF) ? PAGE_SIZE : CACHE_LINE_SIZE;
-return ((addr + align - 1) & ~(align - 1));
+return (roundup2(addr, align));
 }

 static ssize_t
@@ -310,7 +310,7 @@ zfs_readdir(struct open_file *f, struct dirent *d)
 fzap_next:
 chunk = fp->f_seekp & (bsize - 1);
 if (chunk == ZAP_LEAF_NUMCHUNKS(&zl)) {
-fp->f_seekp = (fp->f_seekp & ~(bsize - 1)) + bsize;
+fp->f_seekp = rounddown2(fp->f_seekp, bsize) + bsize;
 chunk = 0;

 /*
@@ -403,7 +403,7 @@ adb_mouse_receive_packet(device_t dev, u_char status, u_char command,
 * high button events when they are touched.
 */

-if (buttons & ~((1 << sc->hw.buttons) - 1)
+if (rounddown2(buttons, 1 << sc->hw.buttons)
 && !(sc->flags & AMS_TOUCHPAD)) {
 buttons |= 1 << (sc->hw.buttons - 1);
 }
@@ -216,8 +216,8 @@ agp_nvidia_attach (device_t dev)
 if (sc->num_dirs == 0) {
 sc->num_dirs = 1;
 sc->num_active_entries /= (64 / size);
-sc->pg_offset = (apbase & (64 * 1024 * 1024 - 1) &
-~(AGP_GET_APERTURE(dev) - 1)) / PAGE_SIZE;
+sc->pg_offset = rounddown2(apbase & (64 * 1024 * 1024 - 1),
+AGP_GET_APERTURE(dev)) / PAGE_SIZE;
 }

 /* (G)ATT Base Address */
@@ -410,7 +410,7 @@ nvidia_init_iorr(u_int32_t addr, u_int32_t size)
 }

 base = (addr & ~0xfff) | 0x18;
-mask = (0xfULL << 32) | ((~(size - 1)) & 0xfffff000) | 0x800;
+mask = (0xfULL << 32) | rounddown2(0xfffff000, size) | 0x800;
 wrmsr(IORR_BASE0 + 2 * iorr_addr, base);
 wrmsr(IORR_MASK0 + 2 * iorr_addr, mask);
@@ -3047,7 +3047,7 @@ bce_get_rx_buffer_sizes(struct bce_softc *sc, int mtu)
 sc->rx_bd_mbuf_alloc_size = MHLEN;
 /* Make sure offset is 16 byte aligned for hardware. */
 sc->rx_bd_mbuf_align_pad =
-roundup2((MSIZE - MHLEN), 16) - (MSIZE - MHLEN);
+roundup2(MSIZE - MHLEN, 16) - (MSIZE - MHLEN);
 sc->rx_bd_mbuf_data_len = sc->rx_bd_mbuf_alloc_size -
 sc->rx_bd_mbuf_align_pad;
 } else {
@@ -607,8 +607,8 @@ int t4_mem_read(struct adapter *adap, int mtype, u32 addr, u32 len,
 * need to round down the start and round up the end. We'll start
 * copying out of the first line at (addr - start) a word at a time.
 */
-start = addr & ~(64-1);
-end = (addr + len + 64-1) & ~(64-1);
+start = rounddown2(addr, 64);
+end = roundup2(addr + len, 64);
 offset = (addr - start)/sizeof(__be32);

 for (pos = start; pos < end; pos += 64, offset = 0) {
@@ -1487,7 +1487,7 @@ en_init(struct en_softc *sc)
 loc = sc->txslot[slot].cur = sc->txslot[slot].start;
 loc = loc - MID_RAMOFF;
 /* mask, cvt to words */
-loc = (loc & ~((EN_TXSZ * 1024) - 1)) >> 2;
+loc = rounddown2(loc, EN_TXSZ * 1024) >> 2;
 /* top 11 bits */
 loc = loc >> MIDV_LOCTOPSHFT;
 en_write(sc, MIDX_PLACE(slot), MIDX_MKPLACE(en_k2sz(EN_TXSZ),
@@ -2992,7 +2992,7 @@ en_attach(struct en_softc *sc)
 sc->rxslot[lcv].stop = ptr;
 midvloc = midvloc - MID_RAMOFF;
 /* mask, cvt to words */
-midvloc = (midvloc & ~((EN_RXSZ*1024) - 1)) >> 2;
+midvloc = rounddown2(midvloc, EN_RXSZ * 1024) >> 2;
 /* we only want the top 11 bits */
 midvloc = midvloc >> MIDV_LOCTOPSHFT;
 midvloc = (midvloc & MIDV_LOCMASK) << MIDV_LOCSHIFT;
@@ -402,7 +402,7 @@ exca_mem_set_offset(struct exca_softc *sc, struct resource *res,
 "set_memory_offset: specified resource not active\n");
 return (ENOENT);
 }
-sc->mem[win].cardaddr = cardaddr & ~(EXCA_MEM_PAGESIZE - 1);
+sc->mem[win].cardaddr = rounddown2(cardaddr, EXCA_MEM_PAGESIZE);
 delta = cardaddr % EXCA_MEM_PAGESIZE;
 if (deltap)
 *deltap = delta;
@@ -116,8 +116,8 @@ static const struct utopia_methods fatm_utopia_methods = {
 };

 #define VC_OK(SC, VPI, VCI) \
-(((VPI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) - 1)) == 0 && \
-(VCI) != 0 && ((VCI) & ~((1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) - 1)) == 0)
+(rounddown2(VPI, 1 << IFP2IFATM((SC)->ifp)->mib.vpi_bits) == 0 && \
+(VCI) != 0 && rounddown2(VCI, 1 << IFP2IFATM((SC)->ifp)->mib.vci_bits) == 0)

 static int fatm_load_vc(struct fatm_softc *sc, struct card_vcc *vc);
@@ -788,16 +788,14 @@ hatm_init_cm(struct hatm_softc *sc)
 rsra = 0;
 mlbm = ((rsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8) + 0x7ff) & ~0x7ff;
 rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff;
-sc->rsrb = ((rabr + 2048) + (2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
-~(2 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
+sc->rsrb = roundup2(rabr + 2048, 2 * IFP2IFATM(sc->ifp)->mib.max_vccs);

 tsra = 0;
 sc->tsrb = tsra + IFP2IFATM(sc->ifp)->mib.max_vccs * 8;
 sc->tsrc = sc->tsrb + IFP2IFATM(sc->ifp)->mib.max_vccs * 4;
 sc->tsrd = sc->tsrc + IFP2IFATM(sc->ifp)->mib.max_vccs * 2;
 tabr = sc->tsrd + IFP2IFATM(sc->ifp)->mib.max_vccs * 1;
-mtpd = ((tabr + 1024) + (16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1)) &
-~(16 * IFP2IFATM(sc->ifp)->mib.max_vccs - 1);
+mtpd = roundup2(tabr + 1024, 16 * IFP2IFATM(sc->ifp)->mib.max_vccs);

 DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x",
 rsra, mlbm, rabr, sc->rsrb));
@@ -783,8 +783,8 @@ DEVICEID hpt_create_array_v2(_VBUS_ARG PCREATE_ARRAY_PARAMS_V2 pParam)
 for(i = 0; i < pArray->u.array.bArnMember; i++)
 if(pArray->u.array.pMember[i]->VDeviceCapacity < capacity)
 capacity = pArray->u.array.pMember[i]->VDeviceCapacity;
-pArray->VDeviceCapacity = (capacity & ~(pArray->u.array.bStripeWitch - 1))
-* (pArray->u.array.bArnMember - 1);
+pArray->VDeviceCapacity = rounddown2(capacity, pArray->u.array.bStripeWitch) *
+(pArray->u.array.bArnMember - 1);
 break;

 default:
@@ -1084,8 +1084,8 @@ mpr_alloc_queues(struct mpr_softc *sc)
 *
 * These two queues are allocated together for simplicity.
 */
-sc->fqdepth = roundup2((sc->num_replies + 1), 16);
-sc->pqdepth = roundup2((sc->num_replies + 1), 16);
+sc->fqdepth = roundup2(sc->num_replies + 1, 16);
+sc->pqdepth = roundup2(sc->num_replies + 1, 16);
 fqsize= sc->fqdepth * 4;
 pqsize = sc->pqdepth * 8;
 qsize = fqsize + pqsize;
@@ -1080,8 +1080,8 @@ mps_alloc_queues(struct mps_softc *sc)
 *
 * These two queues are allocated together for simplicity.
 */
-sc->fqdepth = roundup2((sc->num_replies + 1), 16);
-sc->pqdepth = roundup2((sc->num_replies + 1), 16);
+sc->fqdepth = roundup2(sc->num_replies + 1, 16);
+sc->pqdepth = roundup2(sc->num_replies + 1, 16);
 fqsize= sc->fqdepth * 4;
 pqsize = sc->pqdepth * 8;
 qsize = fqsize + pqsize;
@@ -1155,7 +1155,7 @@ cbb_cardbus_auto_open(struct cbb_softc *sc, int type)
 if (starts[i] == START_NONE)
 continue;
 starts[i] &= ~(align - 1);
-ends[i] = ((ends[i] + align - 1) & ~(align - 1)) - 1;
+ends[i] = roundup2(ends[i], align) - 1;
 }
 if (starts[0] != START_NONE && starts[1] != START_NONE) {
 if (starts[0] < starts[1]) {
@@ -5049,8 +5049,8 @@ STATIC void agtiapi_PrepCCBs( struct agtiapi_softc *pCard,
 sizeof(tiSgl_t),
 max_ccb );

-ccb_sz = (AGTIAPI_CCB_SIZE + cache_line_size() - 1) & ~(cache_line_size() -1);
-hdr_sz = (sizeof(*hdr) + cache_line_size() - 1) & ~(cache_line_size() - 1);
+ccb_sz = roundup2(AGTIAPI_CCB_SIZE, cache_line_size());
+hdr_sz = roundup2(sizeof(*hdr), cache_line_size());

 AGTIAPI_PRINTK("agtiapi_PrepCCBs: after cache line\n");
@@ -5174,9 +5174,8 @@ STATIC U32 agtiapi_InitCCBs(struct agtiapi_softc *pCard, int tgtCount, int tid)
 #endif

 max_ccb = tgtCount * AGTIAPI_CCB_PER_DEVICE;// / 4; // TBR
-ccb_sz = ( (AGTIAPI_CCB_SIZE + cache_line_size() - 1) &
-~(cache_line_size() -1) );
-hdr_sz = (sizeof(*hdr) + cache_line_size() - 1) & ~(cache_line_size() - 1);
+ccb_sz = roundup2(AGTIAPI_CCB_SIZE, cache_line_size());
+hdr_sz = roundup2(sizeof(*hdr), cache_line_size());
 size = ccb_sz * max_ccb + hdr_sz;

 for (i = 0; i < (1 << no_allocs); i++)
@@ -5854,7 +5853,7 @@ STATIC void agtiapi_ReleaseCCBs( struct agtiapi_softc *pCard )
 while ((hdr = pCard->ccbAllocList) != NULL)
 {
 pCard->ccbAllocList = hdr->next;
-hdr_sz = (sizeof(*hdr) + cache_line_size() - 1) & ~(cache_line_size() - 1);
+hdr_sz = roundup2(sizeof(*hdr), cache_line_size());
 pccb = (ccb_t*) ((char*)hdr + hdr_sz);
 if (pCard->buffer_dmat != NULL && pccb->CCB_dmamap != NULL)
 {
@@ -7903,7 +7903,7 @@ sym_scatter_sg_physical(hcb_p np, ccb_p cp, bus_dma_segment_t *psegs, int nsegs)
 pe = ps + psegs[t].ds_len;

 while (s >= 0) {
-pn = (pe - 1) & ~(SYM_CONF_DMA_BOUNDARY - 1);
+pn = rounddown2(pe - 1, SYM_CONF_DMA_BOUNDARY);
 if (pn <= ps)
 pn = ps;
 k = pe - pn;
@@ -434,7 +434,7 @@ ti_mem_read(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
 segsize = cnt;
 else
 segsize = TI_WINLEN - (segptr % TI_WINLEN);
-CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
+CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
 segsize / 4);
@@ -464,7 +464,7 @@ ti_mem_write(struct ti_softc *sc, uint32_t addr, uint32_t len, void *buf)
 segsize = cnt;
 else
 segsize = TI_WINLEN - (segptr % TI_WINLEN);
-CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
+CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (uint32_t *)ptr,
 segsize / 4);
@@ -491,7 +491,7 @@ ti_mem_zero(struct ti_softc *sc, uint32_t addr, uint32_t len)
 segsize = cnt;
 else
 segsize = TI_WINLEN - (segptr % TI_WINLEN);
-CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
+CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));
 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle,
 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4);
 segptr += segsize;
@@ -559,7 +559,7 @@ ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
 segsize = cnt;
 else
 segsize = TI_WINLEN - (segptr % TI_WINLEN);
-CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
+CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));

 ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1));
@@ -628,7 +628,7 @@ ti_copy_mem(struct ti_softc *sc, uint32_t tigon_addr, uint32_t len,
 /*
 * Set the segment pointer.
 */
-CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
+CSR_WRITE_4(sc, TI_WINBASE, rounddown2(segptr, TI_WINLEN));

 ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1));
@@ -467,7 +467,7 @@ usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,

 off = 0;
 pg = pc->page_start;
-pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
+pg->physaddr = rounddown2(segs->ds_addr, USB_PAGE_SIZE);
 rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
 pc->page_offset_buf = rem;
 pc->page_offset_end += rem;
@@ -502,7 +502,7 @@ usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs,
 break;
 }
 pg++;
-pg->physaddr = (segs->ds_addr + off) & ~(USB_PAGE_SIZE - 1);
+pg->physaddr = rounddown2(segs->ds_addr + off, USB_PAGE_SIZE);
 }

 done:
@@ -116,7 +116,7 @@ vt_efifb_init(struct vt_device *vd)
 info->fb_depth = fls(efifb->fb_mask_red | efifb->fb_mask_green |
 efifb->fb_mask_blue | efifb->fb_mask_reserved);
 /* Round to a multiple of the bits in a byte. */
-info->fb_bpp = (info->fb_depth + NBBY - 1) & ~(NBBY - 1);
+info->fb_bpp = roundup2(info->fb_depth, NBBY);

 /* Stride in bytes, not pixels */
 info->fb_stride = efifb->fb_stride * (info->fb_bpp / NBBY);
@@ -540,7 +540,7 @@ ext2_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp
 * in the cache as to where the entry was found.
 */
 if ((flags & ISLASTCN) && nameiop == LOOKUP)
-dp->i_diroff = i_offset &~ (DIRBLKSIZ - 1);
+dp->i_diroff = rounddown2(i_offset, DIRBLKSIZ);
 /*
 * If deleting, and at end of pathname, return
 * parameters which can be used to remove file.
@@ -311,7 +311,7 @@ g_bde_map_sector(struct g_bde_work *wp)
 /* Compensate for lock sectors */
 for (u = 0; u < G_BDE_MAXKEYS; u++) {
 /* Find the start of this lock sector */
-ko = kp->lsector[u] & ~((uint64_t)kp->sectorsize - 1);
+ko = rounddown2(kp->lsector[u], (uint64_t)kp->sectorsize);

 if (wp->kso >= ko)
 wp->kso += kp->sectorsize;
@@ -447,9 +447,9 @@ g_part_bsd64_resize(struct g_part_table *basetable,
 if (baseentry == NULL) {
 pp = LIST_FIRST(&basetable->gpt_gp->consumer)->provider;
 table = (struct g_part_bsd64_table *)basetable;
-table->d_abase = ((pp->mediasize -
-table->d_bbase * pp->sectorsize) & ~(table->d_align - 1)) /
-pp->sectorsize;
+table->d_abase =
+rounddown2(pp->mediasize - table->d_bbase * pp->sectorsize,
+table->d_align) / pp->sectorsize;
 basetable->gpt_last = table->d_abase - 1;
 return (0);
 }
@@ -477,8 +477,8 @@ g_part_bsd64_probe(struct g_part_table *table, struct g_consumer *cp)
 pp = cp->provider;
 if (pp->mediasize < 2 * PALIGN_SIZE)
 return (ENOSPC);
-v = (pp->sectorsize +
-offsetof(struct disklabel64, d_magic)) & ~(pp->sectorsize - 1);
+v = rounddown2(pp->sectorsize + offsetof(struct disklabel64, d_magic),
+pp->sectorsize);
 buf = g_read_data(cp, 0, v, &error);
 if (buf == NULL)
 return (error);
@@ -502,8 +502,7 @@ g_part_bsd64_read(struct g_part_table *basetable, struct g_consumer *cp)

 pp = cp->provider;
 table = (struct g_part_bsd64_table *)basetable;
-v32 = (pp->sectorsize +
-sizeof(struct disklabel64) - 1) & ~(pp->sectorsize - 1);
+v32 = roundup2(sizeof(struct disklabel64), pp->sectorsize);
 buf = g_read_data(cp, 0, v32, &error);
 if (buf == NULL)
 return (error);
@@ -620,8 +619,7 @@ g_part_bsd64_write(struct g_part_table *basetable, struct g_consumer *cp)

 pp = cp->provider;
 table = (struct g_part_bsd64_table *)basetable;
-sz = (pp->sectorsize +
-sizeof(struct disklabel64) - 1) & ~(pp->sectorsize - 1);
+sz = roundup2(sizeof(struct disklabel64), pp->sectorsize);
 dlp = g_malloc(sz, M_WAITOK | M_ZERO);

 memcpy(dlp->d_reserved0, table->d_reserved0,
@@ -377,7 +377,7 @@ i686_mrstoreone(void *arg)
 /* mask/active register */
 if (mrd->mr_flags & MDF_ACTIVE) {
 msrv = MTRR_PHYSMASK_VALID |
-(~(mrd->mr_len - 1) & mtrr_physmask);
+rounddown2(mtrr_physmask, mrd->mr_len);
 } else {
 msrv = 0;
 }
@@ -54,7 +54,7 @@ CTASSERT(sizeof(struct kerneldumpheader) == 512);
 #define SIZEOF_METADATA (64*1024)

 #define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
-#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
+#define DEV_ALIGN(x) roundup2((off_t)(x), DEV_BSIZE)

 uint32_t *vm_page_dump;
 int vm_page_dump_size;
@@ -63,7 +63,7 @@ __FBSDID("$FreeBSD$");

 #define MAX_LD 8192
 #define LD_PER_PAGE 512
-#define NEW_MAX_LD(num) ((num + LD_PER_PAGE) & ~(LD_PER_PAGE-1))
+#define NEW_MAX_LD(num) rounddown2(num + LD_PER_PAGE, LD_PER_PAGE)
 #define SIZE_FROM_LARGEST_LD(num) (NEW_MAX_LD(num) << 3)
 #define NULL_LDT_BASE ((caddr_t)NULL)
@@ -159,7 +159,7 @@ svr4_getcontext(td, uc, mask, oonstack)
 #if defined(DONE_MORE_SIGALTSTACK_WORK)
 bsd_to_svr4_sigaltstack(sf, s);
 #else
-s->ss_sp = (void *)(((u_long) tf->tf_esp) & ~(16384 - 1));
+s->ss_sp = (void *)rounddown2((u_long)tf->tf_esp, 16384);
 s->ss_size = 16384;
 s->ss_flags = 0;
 #endif
@@ -139,8 +139,8 @@ SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,

 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

-#define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
-#define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
+#define trunc_page_ps(va, ps) rounddown2(va, ps)
+#define round_page_ps(va, ps) roundup2(va, ps)
 #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))

 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
@@ -787,7 +787,7 @@ start_init(void *dummy)
 /*
 * Move out the arg pointers.
 */
-uap = (char **)((intptr_t)ucp & ~(sizeof(intptr_t)-1));
+uap = (char **)rounddown2((intptr_t)ucp, sizeof(intptr_t));
 (void)suword((caddr_t)--uap, (long)0); /* terminator */
 (void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
 (void)suword((caddr_t)--uap, (long)(intptr_t)arg0);
@@ -1713,7 +1713,7 @@ linker_lookup_file(const char *path, int pathlen, const char *name,
 }

 #define INT_ALIGN(base, ptr) ptr = \
-(base) + (((ptr) - (base) + sizeof(int) - 1) & ~(sizeof(int) - 1))
+(base) + roundup2((ptr) - (base), sizeof(int))

 /*
 * Lookup KLD which contains requested module in the "linker.hints" file. If
@@ -164,7 +164,7 @@ struct sem_undo {
 * SEMUSZ is properly aligned.
 */

-#define SEM_ALIGN(bytes) (((bytes) + (sizeof(long) - 1)) & ~(sizeof(long) - 1))
+#define SEM_ALIGN(bytes) roundup2(bytes, sizeof(long))

 /* actual size of an undo structure */
 #define SEMUSZ SEM_ALIGN(offsetof(struct sem_undo, un_ent[SEMUME]))
@@ -370,7 +370,7 @@ kern_shmat_locked(struct thread *td, int shmid, const void *shmaddr,
 prot |= VM_PROT_WRITE;
 if (shmaddr != NULL) {
 if ((shmflg & SHM_RND) != 0)
-attach_va = (vm_offset_t)shmaddr & ~(SHMLBA-1);
+attach_va = rounddown2((vm_offset_t)shmaddr, SHMLBA);
 else if (((vm_offset_t)shmaddr & (SHMLBA-1)) == 0)
 attach_va = (vm_offset_t)shmaddr;
 else
@@ -3865,7 +3865,7 @@ allocbuf(struct buf *bp, int size)
 if (bp->b_kvasize != 0 && bp->b_kvasize < size)
 panic("allocbuf: buffer too small");

-newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+newbsize = roundup2(size, DEV_BSIZE);
 if ((bp->b_flags & B_VMIO) == 0) {
 if ((bp->b_flags & B_MALLOC) == 0)
 newbsize = round_page(newbsize);
@@ -487,7 +487,7 @@ octopci_init_bar(device_t dev, unsigned b, unsigned s, unsigned f, unsigned barn
 if (PCI_BAR_IO(bar)) {
 size = ~(bar & PCIM_BAR_IO_BASE) + 1;

-sc->sc_io_next = (sc->sc_io_next + size - 1) & ~(size - 1);
+sc->sc_io_next = roundup2(sc->sc_io_next, size);
 if (sc->sc_io_next + size > CVMX_OCT_PCI_IO_SIZE) {
 device_printf(dev, "%02x.%02x:%02x: no ports for BAR%u.\n",
 b, s, f, barnum);
@@ -527,7 +527,7 @@ octopci_init_bar(device_t dev, unsigned b, unsigned s, unsigned f, unsigned barn

 size = ~(bar & (uint32_t)PCIM_BAR_MEM_BASE) + 1;

-sc->sc_mem1_next = (sc->sc_mem1_next + size - 1) & ~(size - 1);
+sc->sc_mem1_next = roundup2(sc->sc_mem1_next, size);
 if (sc->sc_mem1_next + size > CVMX_OCT_PCI_MEM1_SIZE) {
 device_printf(dev, "%02x.%02x:%02x: no memory for BAR%u.\n",
 b, s, f, barnum);
@@ -215,7 +215,7 @@ tlb_invalidate_range(pmap_t pmap, vm_offset_t start, vm_offset_t end)
 * and round the virtual address "end" to an even page frame number.
 */
 start &= ~((1 << TLBMASK_SHIFT) - 1);
-end = (end + (1 << TLBMASK_SHIFT) - 1) & ~((1 << TLBMASK_SHIFT) - 1);
+end = roundup2(end, 1 << TLBMASK_SHIFT);

 s = intr_disable();
 save_asid = mips_rd_entryhi() & TLBHI_ASID_MASK;
@@ -1576,7 +1576,7 @@ mips_unaligned_load_store(struct trapframe *frame, int mode, register_t addr, re
 return (0);
 }

-if (!useracc((void *)((vm_offset_t)addr & ~(size - 1)), size * 2, mode))
+if (!useracc((void *)rounddown2((vm_offset_t)addr, size), size * 2, mode))
 return (0);

 /*
@@ -397,7 +397,7 @@ xlr_sec_setup_packet(xlr_sec_io_pt op,
 /* physical address of the source buffer */
 addr = (uint64_t) vtophys((void *)(unsigned long)op->source_buf);
 /* cache-aligned base of the source buffer */
-seg_addr = (addr & ~(SMP_CACHE_BYTES - 1));
+seg_addr = rounddown2(addr, SMP_CACHE_BYTES);
 /* offset in bytes to the source buffer start from the segment base */
 byte_offset = addr - seg_addr;
 /* global offset: 0-7 bytes */
@@ -417,7 +417,7 @@ xlr_sec_setup_packet(xlr_sec_io_pt op,
 len = op->source_buf_size + byte_offset - global_offset;
 if (multi_frag_flag) {
 next_seg_addr = (uint64_t)vtophys((void *)(uintptr_t)desc->next_src_buf);
-next_seg_addr = (next_seg_addr & ~(SMP_CACHE_BYTES - 1));
+next_seg_addr = rounddown2(next_seg_addr, SMP_CACHE_BYTES);
 next_len = desc->next_src_len;
 }
 /* length of the whole thing in dwords */
@@ -602,10 +602,10 @@ xlr_sec_setup_packet(xlr_sec_io_pt op,
 */
 if (multi_frag_flag) {
 next_seg_addr = (uint64_t) vtophys((void *)(unsigned long)(desc->next_dest_buf));
-next_seg_addr = (next_seg_addr & ~(SMP_CACHE_BYTES - 1));
+next_seg_addr = rounddown2(next_seg_addr, SMP_CACHE_BYTES);
 }
 addr = (uint64_t) vtophys((void *)(unsigned long)op->dest_buf);
-seg_addr = (addr & ~(SMP_CACHE_BYTES - 1));
+seg_addr = rounddown2(addr, SMP_CACHE_BYTES);
 byte_offset = addr - seg_addr;
 global_offset = byte_offset & 0x7;
@@ -345,7 +345,7 @@ zbpci_config_space_va(int bus, int slot, int func, int reg, int bytes)
 #if _BYTE_ORDER == _BIG_ENDIAN
 pa = pa ^ (4 - bytes);
 #endif
-pa_page = pa & ~(PAGE_SIZE - 1);
+pa_page = rounddown2(pa, PAGE_SIZE);
 if (zbpci_config_space[cpu].paddr != pa_page) {
 pmap_kremove(va_page);
 pmap_kenter_attr(va_page, pa_page, PTE_C_UNCACHED);
@@ -121,7 +121,7 @@ CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);
 #include <sys/mount.h>
 #include <compat/freebsd32/freebsd32.h>
 #define BPF_ALIGNMENT32 sizeof(int32_t)
-#define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
+#define BPF_WORDALIGN32(x) roundup2(x, BPF_ALIGNMENT32)

 #ifndef BURN_BRIDGES
 /*
@@ -340,7 +340,7 @@ ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
 */
 #define MC_ALIGN(m, len) \
 do { \
-(m)->m_data += (MCLBYTES - (len)) &~ (sizeof(long) - 1); \
+(m)->m_data += rounddown2(MCLBYTES - (len), sizeof(long)); \
 } while (/* CONSTCOND */ 0)

 /*
@@ -1673,7 +1673,7 @@ moea_pinit(mmu_t mmu, pmap_t pmap)
 }
 i = ffs(~moea_vsid_bitmap[n]) - 1;
 mask = 1 << i;
-hash &= 0xfffff & ~(VSID_NBPW - 1);
+hash &= rounddown2(0xfffff, VSID_NBPW);
 hash |= i;
 }
 KASSERT(!(moea_vsid_bitmap[n] & mask),
@@ -1865,7 +1865,7 @@ moea_bootstrap_alloc(vm_size_t size, u_int align)
 size = round_page(size);
 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 if (align != 0)
-s = (phys_avail[i] + align - 1) & ~(align - 1);
+s = roundup2(phys_avail[i], align);
 else
 s = phys_avail[i];
 e = s + size;
@@ -1966,7 +1966,7 @@ moea64_get_unique_vsid(void) {
 }
 i = ffs(~moea64_vsid_bitmap[n]) - 1;
 mask = 1 << i;
-hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
+hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
 hash |= i;
 }
 if (hash == VSID_VRMA) /* also special, avoid this too */
@@ -2296,7 +2296,7 @@ moea64_bootstrap_alloc(vm_size_t size, u_int align)
 size = round_page(size);
 for (i = 0; phys_avail[i + 1] != 0; i += 2) {
 if (align != 0)
-s = (phys_avail[i] + align - 1) & ~(align - 1);
+s = roundup2(phys_avail[i], align);
 else
 s = phys_avail[i];
 e = s + size;
@@ -2687,7 +2687,7 @@ mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
 /* Raw physical memory dumps don't have a virtual address. */
 /* We always map a 256MB page at 256M. */
 gran = 256 * 1024 * 1024;
-ppa = pa & ~(gran - 1);
+ppa = rounddown2(pa, gran);
 ofs = pa - ppa;
 *va = (void *)gran;
 tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
@@ -2725,7 +2725,7 @@ mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
 tlb1_write_entry(&e, i);

 gran = 256 * 1024 * 1024;
-ppa = pa & ~(gran - 1);
+ppa = rounddown2(pa, gran);
 ofs = pa - ppa;
 if (sz > (gran - ofs)) {
 i--;
@@ -3332,7 +3332,7 @@ tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 int idx, nents;

 /* Round up to the next 1M */
-size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);
+size = roundup2(size, 1 << 20);

 mapped = 0;
 idx = 0;
@@ -449,7 +449,7 @@ cpu_flush_dcache(void *ptr, size_t len)
 addr = (uintptr_t)ptr;
 off = addr & (cacheline_size - 1);
 addr -= off;
-len = (len + off + cacheline_size - 1) & ~(cacheline_size - 1);
+len = roundup2(len + off, cacheline_size);

 while (len > 0) {
 __asm __volatile ("dcbf 0,%0" :: "r"(addr));
@@ -13594,7 +13594,7 @@ clear_inodedeps(mp)
 /*
 * Find the last inode in the block with dependencies.
 */
-firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
+firstino = rounddown2(inodedep->id_ino, INOPB(fs));
 for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
 if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
 break;
@@ -1145,7 +1145,7 @@ ufsdirhash_getprev(struct direct *dirp, doff_t offset)
 doff_t blkoff, prevoff;
 int entrypos, i;

-blkoff = offset & ~(DIRBLKSIZ - 1); /* offset of start of block */
+blkoff = rounddown2(offset, DIRBLKSIZ); /* offset of start of block */
 entrypos = offset & (DIRBLKSIZ - 1); /* entry relative to block */
 blkbuf = (char *)dirp - entrypos;
 prevoff = blkoff;
@@ -577,7 +577,7 @@ ufs_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp,
 * in the cache as to where the entry was found.
 */
 if ((flags & ISLASTCN) && nameiop == LOOKUP)
-dp->i_diroff = i_offset &~ (DIRBLKSIZ - 1);
+dp->i_diroff = rounddown2(i_offset, DIRBLKSIZ);

 /*
 * If deleting, and at end of pathname, return
@@ -1100,7 +1100,7 @@ ufs_direnter(dvp, tvp, dirp, cnp, newdirbp, isrename)
 if (dp->i_dirhash != NULL)
 ufsdirhash_checkblock(dp, dirbuf -
 (dp->i_offset & (DIRBLKSIZ - 1)),
-dp->i_offset & ~(DIRBLKSIZ - 1));
+rounddown2(dp->i_offset, DIRBLKSIZ));
 #endif

 if (DOINGSOFTDEP(dvp)) {
@@ -1231,7 +1231,7 @@ ufs_dirremove(dvp, ip, flags, isrmdir)
 if (dp->i_dirhash != NULL)
 ufsdirhash_checkblock(dp, (char *)ep -
 ((dp->i_offset - dp->i_count) & (DIRBLKSIZ - 1)),
-dp->i_offset & ~(DIRBLKSIZ - 1));
+rounddown2(dp->i_offset, DIRBLKSIZ));
 #endif
 out:
 error = 0;
@@ -187,7 +187,7 @@ phys_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 {
 vm_pindex_t base, end;

-base = pindex & (~(PHYSCLUSTER - 1));
+base = rounddown2(pindex, PHYSCLUSTER);
 end = base + (PHYSCLUSTER - 1);
 if (before != NULL)
 *before = pindex - base;
@@ -2157,8 +2157,8 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
 m_inc = atop(roundup2(pa, alignment) - pa);
 continue;
 }
-if (((pa ^ (pa + ptoa(npages) - 1)) & ~(boundary -
-1)) != 0) {
+if (rounddown2(pa ^ (pa + ptoa(npages) - 1),
+boundary) != 0) {
 m_inc = atop(roundup2(pa, boundary) - pa);
 continue;
 }
@@ -3495,7 +3495,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
 * bit is clear, we have to zero out a portion of the
 * first block.
 */
-if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
+if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
 pmap_zero_page_area(m, frag, base - frag);
@@ -3505,7 +3505,7 @@ vm_page_set_valid_range(vm_page_t m, int base, int size)
 * the last block.
 */
 endoff = base + size;
-if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
+if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
 pmap_zero_page_area(m, endoff,
 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
@@ -3602,7 +3602,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 * bit is clear, we have to zero out a portion of the
 * first block.
 */
-if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
+if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
 (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
 pmap_zero_page_area(m, frag, base - frag);
@@ -3612,7 +3612,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 * the last block.
 */
 endoff = base + size;
-if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
+if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
 (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
 pmap_zero_page_area(m, endoff,
 DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
@@ -1467,9 +1467,9 @@ vm_phys_alloc_seg_contig(struct vm_phys_seg *seg, u_long npages,
 */
 pa = VM_PAGE_TO_PHYS(m_ret);
 pa_end = pa + size;
-if (pa >= low && pa_end <= high && (pa &
-(alignment - 1)) == 0 && ((pa ^ (pa_end -
-1)) & ~(boundary - 1)) == 0)
+if (pa >= low && pa_end <= high &&
+(pa & (alignment - 1)) == 0 &&
+rounddown2(pa ^ (pa_end - 1), boundary) == 0)
 goto done;
 }
 }
@@ -327,8 +327,8 @@ dmar_gas_match_one(struct dmar_gas_match_args *a, struct dmar_map_entry *prev,
 * the boundary. Check if there is enough space after the
 * next boundary after the prev->end.
 */
-bs = (a->entry->start + a->offset + a->common->boundary) &
-~(a->common->boundary - 1);
+bs = rounddown2(a->entry->start + a->offset + a->common->boundary,
+a->common->boundary);
 start = roundup2(bs, a->common->alignment);
 /* DMAR_PAGE_SIZE to create gap after new entry. */
 if (start + a->offset + a->size + DMAR_PAGE_SIZE <=