Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-01 08:27:59 +00:00)
pmap_{un}map_io_transient: Use bool instead of boolean_t.

Reviewed by:            imp, kib
Differential Revision:  https://reviews.freebsd.org/D39920
commit 4961faaacc
parent 407f675718
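Note (editor's sketch, not part of the commit): pmap_map_io_transient() returns true only when it had to create a transient KVA mapping because a page lies outside the direct map (DMAP); in that case the caller must call pmap_unmap_io_transient() when finished. A minimal caller-side illustration of that idiom, modeled on the uiomove_fromphys() hunks below; the helper name and the memcpy body are invented for illustration:

/*
 * Sketch of the map/use/unmap idiom.  Only the pmap_*_io_transient()
 * calls are real KPI; everything else here is illustrative.
 */
static void
copy_from_page(vm_page_t m, vm_offset_t off, char *dst, size_t len)
{
        vm_offset_t vaddr;
        bool mapped;

        /* true: the calling thread may take page faults while mapped. */
        mapped = pmap_map_io_transient(&m, &vaddr, 1, true);
        memcpy(dst, (char *)vaddr + off, len);
        /* Unmap only if a transient (non-DMAP) mapping was created. */
        if (__predict_false(mapped))
                pmap_unmap_io_transient(&m, &vaddr, 1, true);
}

The uiomove_fromphys() callers pass true for can_fault; the cxgbei callers pass false, which makes pmap_map_io_transient() pin the thread (the "if (!can_fault) sched_pin();" visible in the arm64 and riscv hunks).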
sys/amd64/amd64/pmap.c

@@ -10365,19 +10365,19 @@ pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num)
  * \param vaddr     On return contains the kernel virtual memory address
  *                  of the pages passed in the page parameter.
  * \param count     Number of pages passed in.
- * \param can_fault TRUE if the thread using the mapped pages can take
- *                  page faults, FALSE otherwise.
+ * \param can_fault true if the thread using the mapped pages can take
+ *                  page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
  *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
         vm_paddr_t paddr;
-        boolean_t needs_mapping;
+        bool needs_mapping;
         pt_entry_t *pte;
         int cache_bits, error __unused, i;
@@ -10385,14 +10385,14 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
          * Allocate any KVA space that we need, this is done in a separate
          * loop to prevent calling vmem_alloc while pinned.
          */
-        needs_mapping = FALSE;
+        needs_mapping = false;
         for (i = 0; i < count; i++) {
                 paddr = VM_PAGE_TO_PHYS(page[i]);
                 if (__predict_false(paddr >= dmaplimit)) {
                         error = vmem_alloc(kernel_arena, PAGE_SIZE,
                             M_BESTFIT | M_WAITOK, &vaddr[i]);
                         KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-                        needs_mapping = TRUE;
+                        needs_mapping = true;
                 } else {
                         vaddr[i] = PHYS_TO_DMAP(paddr);
                 }
@@ -10400,7 +10400,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,

         /* Exit early if everything is covered by the DMAP */
         if (!needs_mapping)
-                return (FALSE);
+                return (false);

         /*
          * NB: The sequence of updating a page table followed by accesses
@@ -10426,7 +10426,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
                 } else {
                         pte = vtopte(vaddr[i]);
                         cache_bits = pmap_cache_bits(kernel_pmap,
-                            page[i]->md.pat_mode, 0);
+                            page[i]->md.pat_mode, false);
                         pte_store(pte, paddr | X86_PG_RW | X86_PG_V |
                             cache_bits);
                         pmap_invlpg(kernel_pmap, vaddr[i]);
@@ -10439,7 +10439,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,

 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
         vm_paddr_t paddr;
         int i;
sys/amd64/amd64/uio_machdep.c

@@ -67,7 +67,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
         size_t cnt;
         int error = 0;
         int save = 0;
-        boolean_t mapped;
+        bool mapped;

         KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
             ("uiomove_fromphys: mode"));
@@ -75,7 +75,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
             ("uiomove_fromphys proc"));
         save = td->td_pflags & TDP_DEADLKTREAT;
         td->td_pflags |= TDP_DEADLKTREAT;
-        mapped = FALSE;
+        mapped = false;
         while (n > 0 && uio->uio_resid) {
                 iov = uio->uio_iov;
                 cnt = iov->iov_len;
@@ -90,7 +90,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
                 cnt = min(cnt, PAGE_SIZE - page_offset);
                 if (uio->uio_segflg != UIO_NOCOPY) {
                         mapped = pmap_map_io_transient(
-                            &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+                            &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
                         cp = (char *)vaddr + page_offset;
                 }
                 switch (uio->uio_segflg) {
@@ -114,8 +114,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
                 }
                 if (__predict_false(mapped)) {
                         pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-                            &vaddr, 1, TRUE);
-                        mapped = FALSE;
+                            &vaddr, 1, true);
+                        mapped = false;
                 }
                 iov->iov_base = (char *)iov->iov_base + cnt;
                 iov->iov_len -= cnt;
@@ -127,7 +127,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
 out:
         if (__predict_false(mapped))
                 pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-                    TRUE);
+                    true);
         if (save == 0)
                 td->td_pflags &= ~TDP_DEADLKTREAT;
         return (error);
sys/amd64/include/pmap.h

@@ -481,8 +481,8 @@ void pmap_invalidate_cache_pages(vm_page_t *pages, int count);
 void pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
 void pmap_force_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
 void pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);
 void pmap_map_delete(pmap_t, vm_offset_t, vm_offset_t);
 void pmap_pti_add_kva(vm_offset_t sva, vm_offset_t eva, bool exec);
 void pmap_pti_remove_kva(vm_offset_t sva, vm_offset_t eva);
sys/arm64/arm64/pmap.c

@@ -7619,33 +7619,33 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
  * \param vaddr     On return contains the kernel virtual memory address
  *                  of the pages passed in the page parameter.
  * \param count     Number of pages passed in.
- * \param can_fault TRUE if the thread using the mapped pages can take
- *                  page faults, FALSE otherwise.
+ * \param can_fault true if the thread using the mapped pages can take
+ *                  page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
  *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
         vm_paddr_t paddr;
-        boolean_t needs_mapping;
+        bool needs_mapping;
         int error __diagused, i;

         /*
          * Allocate any KVA space that we need, this is done in a separate
          * loop to prevent calling vmem_alloc while pinned.
          */
-        needs_mapping = FALSE;
+        needs_mapping = false;
         for (i = 0; i < count; i++) {
                 paddr = VM_PAGE_TO_PHYS(page[i]);
                 if (__predict_false(!PHYS_IN_DMAP(paddr))) {
                         error = vmem_alloc(kernel_arena, PAGE_SIZE,
                             M_BESTFIT | M_WAITOK, &vaddr[i]);
                         KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-                        needs_mapping = TRUE;
+                        needs_mapping = true;
                 } else {
                         vaddr[i] = PHYS_TO_DMAP(paddr);
                 }
@@ -7653,7 +7653,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,

         /* Exit early if everything is covered by the DMAP */
         if (!needs_mapping)
-                return (FALSE);
+                return (false);

         if (!can_fault)
                 sched_pin();
@@ -7670,7 +7670,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,

 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
         vm_paddr_t paddr;
         int i;
sys/arm64/arm64/uio_machdep.c

@@ -65,7 +65,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
         size_t cnt;
         int error = 0;
         int save = 0;
-        boolean_t mapped;
+        bool mapped;

         KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
             ("uiomove_fromphys: mode"));
@@ -73,7 +73,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
             ("uiomove_fromphys proc"));
         save = td->td_pflags & TDP_DEADLKTREAT;
         td->td_pflags |= TDP_DEADLKTREAT;
-        mapped = FALSE;
+        mapped = false;
         while (n > 0 && uio->uio_resid) {
                 iov = uio->uio_iov;
                 cnt = iov->iov_len;
@@ -88,7 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
                 cnt = min(cnt, PAGE_SIZE - page_offset);
                 if (uio->uio_segflg != UIO_NOCOPY) {
                         mapped = pmap_map_io_transient(
-                            &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+                            &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
                         cp = (char *)vaddr + page_offset;
                 }
                 switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
                 }
                 if (__predict_false(mapped)) {
                         pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-                            &vaddr, 1, TRUE);
-                        mapped = FALSE;
+                            &vaddr, 1, true);
+                        mapped = false;
                 }
                 iov->iov_base = (char *)iov->iov_base + cnt;
                 iov->iov_len -= cnt;
@@ -126,7 +126,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
         if (__predict_false(mapped)) {
                 panic("ARM64TODO: uiomove_fromphys");
                 pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-                    TRUE);
+                    true);
         }
         if (save == 0)
                 td->td_pflags &= ~TDP_DEADLKTREAT;
sys/arm64/include/pmap.h

@@ -162,8 +162,8 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
 void pmap_unmapdev(void *, vm_size_t);
 void pmap_unmapbios(void *, vm_size_t);

-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);

 bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
     pd_entry_t **, pt_entry_t **);
sys/dev/cxgbe/cxgbei/icl_cxgbei.c

@@ -625,7 +625,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
         struct mbuf *m, *m_tail;
         vm_offset_t vaddr;
         size_t page_offset, todo, mtodo;
-        boolean_t mapped;
+        bool mapped;
         int i;

         MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
@@ -712,7 +712,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
                 todo = MIN(len, PAGE_SIZE - page_offset);

                 mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1,
-                    FALSE);
+                    false);

                 do {
                         mtodo = min(todo, M_SIZE(m) - m->m_len);
@@ -727,7 +727,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,

                 if (__predict_false(mapped))
                         pmap_unmap_io_transient(bp->bio_ma + 1, &vaddr, 1,
-                            FALSE);
+                            false);

                 page_offset = 0;
                 len -= todo;
@@ -813,7 +813,7 @@ icl_cxgbei_conn_pdu_get_bio(struct icl_conn *ic, struct icl_pdu *ip,
         struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
         vm_offset_t vaddr;
         size_t page_offset, todo;
-        boolean_t mapped;
+        bool mapped;
         int i;

         if (icp->icp_flags & ICPF_RX_DDP)
@@ -834,12 +834,12 @@ icl_cxgbei_conn_pdu_get_bio(struct icl_conn *ic, struct icl_pdu *ip,
                 todo = MIN(len, PAGE_SIZE - page_offset);

                 mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1,
-                    FALSE);
+                    false);
                 m_copydata(ip->ip_data_mbuf, pdu_off, todo, (char *)vaddr +
                     page_offset);
                 if (__predict_false(mapped))
                         pmap_unmap_io_transient(bp->bio_ma + 1, &vaddr, 1,
-                            FALSE);
+                            false);

                 page_offset = 0;
                 pdu_off += todo;
sys/riscv/include/pmap.h

@@ -154,8 +154,8 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t);
 void pmap_unmapdev(void *, vm_size_t);
 void pmap_unmapbios(void *, vm_size_t);

-boolean_t pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
-void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
+bool pmap_map_io_transient(vm_page_t *, vm_offset_t *, int, bool);
+void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, bool);

 bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
     pt_entry_t **);
sys/riscv/riscv/pmap.c

@@ -4736,33 +4736,33 @@ pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
  * \param vaddr     On return contains the kernel virtual memory address
  *                  of the pages passed in the page parameter.
  * \param count     Number of pages passed in.
- * \param can_fault TRUE if the thread using the mapped pages can take
- *                  page faults, FALSE otherwise.
+ * \param can_fault true if the thread using the mapped pages can take
+ *                  page faults, false otherwise.
  *
- * \returns TRUE if the caller must call pmap_unmap_io_transient when
- *          finished or FALSE otherwise.
+ * \returns true if the caller must call pmap_unmap_io_transient when
+ *          finished or false otherwise.
  *
  */
-boolean_t
+bool
 pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
         vm_paddr_t paddr;
-        boolean_t needs_mapping;
+        bool needs_mapping;
         int error __diagused, i;

         /*
          * Allocate any KVA space that we need, this is done in a separate
          * loop to prevent calling vmem_alloc while pinned.
          */
-        needs_mapping = FALSE;
+        needs_mapping = false;
         for (i = 0; i < count; i++) {
                 paddr = VM_PAGE_TO_PHYS(page[i]);
                 if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
                         error = vmem_alloc(kernel_arena, PAGE_SIZE,
                             M_BESTFIT | M_WAITOK, &vaddr[i]);
                         KASSERT(error == 0, ("vmem_alloc failed: %d", error));
-                        needs_mapping = TRUE;
+                        needs_mapping = true;
                 } else {
                         vaddr[i] = PHYS_TO_DMAP(paddr);
                 }
@@ -4770,7 +4770,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,

         /* Exit early if everything is covered by the DMAP */
         if (!needs_mapping)
-                return (FALSE);
+                return (false);

         if (!can_fault)
                 sched_pin();
@@ -4787,7 +4787,7 @@ pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,

 void
 pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
-    boolean_t can_fault)
+    bool can_fault)
 {
         vm_paddr_t paddr;
         int i;
sys/riscv/riscv/uio_machdep.c

@@ -65,7 +65,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
         size_t cnt;
         int error = 0;
         int save = 0;
-        boolean_t mapped;
+        bool mapped;

         KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
             ("uiomove_fromphys: mode"));
@@ -73,7 +73,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
             ("uiomove_fromphys proc"));
         save = td->td_pflags & TDP_DEADLKTREAT;
         td->td_pflags |= TDP_DEADLKTREAT;
-        mapped = FALSE;
+        mapped = false;
         while (n > 0 && uio->uio_resid) {
                 iov = uio->uio_iov;
                 cnt = iov->iov_len;
@@ -88,7 +88,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
                 cnt = min(cnt, PAGE_SIZE - page_offset);
                 if (uio->uio_segflg != UIO_NOCOPY) {
                         mapped = pmap_map_io_transient(
-                            &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+                            &ma[offset >> PAGE_SHIFT], &vaddr, 1, true);
                         cp = (char *)vaddr + page_offset;
                 }
                 switch (uio->uio_segflg) {
@@ -112,8 +112,8 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
                 }
                 if (__predict_false(mapped)) {
                         pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
-                            &vaddr, 1, TRUE);
-                        mapped = FALSE;
+                            &vaddr, 1, true);
+                        mapped = false;
                 }
                 iov->iov_base = (char *)iov->iov_base + cnt;
                 iov->iov_len -= cnt;
@@ -126,7 +126,7 @@ uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
         if (__predict_false(mapped)) {
                 panic("TODO 3");
                 pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
-                    TRUE);
+                    true);
         }
         if (save == 0)
                 td->td_pflags &= ~TDP_DEADLKTREAT;