Merge ^/head r352319 through r352435.
commit 419f843fff
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/projects/clang900-import/; revision=352436
4	UPDATING
@@ -89,8 +89,8 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 13.x IS SLOW:
 20190507:
 	The tap(4) driver has been folded into tun(4), and the module has been
-	renamed to tuntap.  You should update any kld_load="if_tap" or
-	kld_load="if_tun" entries in /etc/rc.conf, if_tap_load="YES" or
+	renamed to tuntap.  You should update any kld_list="if_tap" or
+	kld_list="if_tun" entries in /etc/rc.conf, if_tap_load="YES" or
 	if_tun_load="YES" entries in /boot/loader.conf to load the if_tuntap
 	module instead, and "device tap" or "device tun" entries in kernel
 	config files to select the tuntap device instead.
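For example, a system that previously loaded one of the old modules would change along these lines (a minimal sketch of the knobs named in the entry above):

    # /etc/rc.conf: before        ->  after
    kld_list="if_tap"                 kld_list="if_tuntap"

    # /boot/loader.conf: before   ->  after
    if_tap_load="YES"                 if_tuntap_load="YES"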
@@ -122,7 +122,7 @@ histedit(void)
 			el_set(el, EL_PROMPT, getprompt);
 			el_set(el, EL_ADDFN, "sh-complete",
 			    "Filename completion",
-			    _el_fn_sh_complete);
+			    _el_fn_complete);
 		} else {
 bad:
 			out2fmt_flush("sh: can't initialize editing\n");
@@ -526,7 +526,7 @@ _yp_dobind(char *dom, struct dom_binding **ypdb)
 	tv.tv_usec = 0;
 	ysd->dom_socket = RPC_ANYSOCK;
 	ysd->dom_client = clntudp_bufcreate(&ysd->dom_server_addr,
-	    YPPROG, YPVERS, tv, &ysd->dom_socket, 1280, 2304);
+	    YPPROG, YPVERS, tv, &ysd->dom_socket, 65507, 65507);
 	if (ysd->dom_client == NULL) {
 		clnt_pcreateerror("clntudp_create");
 		ysd->dom_vers = -1;
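The new send/receive sizes are the largest payload a single IPv4 UDP datagram can carry: 65535 bytes of total length minus the 20-byte IP header and the 8-byte UDP header leaves 65507, so replies larger than the old 1280/2304-byte buffers are presumably no longer truncated.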
@@ -31,7 +31,7 @@
 # BEFORE: sysctl
 # KEYWORD: firstboot

-# This allows us to distribute a image
+# This allows us to distribute an image
 # and have it work on essentially any size drive.
 #
 # TODO: Figure out where this should really be ordered.
@@ -499,6 +499,13 @@ _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
 	    if (auxp->a_type == AT_NULL)
 		    break;
 	}
+	/* Since the auxiliary vector has moved, redigest it. */
+	for (i = 0;  i < AT_COUNT;  i++)
+	    aux_info[i] = NULL;
+	for (auxp = aux;  auxp->a_type != AT_NULL;  auxp++) {
+	    if (auxp->a_type < AT_COUNT)
+		    aux_info[auxp->a_type] = auxp;
+	}
     } else {
 	_rtld_error("No binary");
 	rtld_die();
@@ -18,7 +18,6 @@ desc = <<EOD
 EOD
 scripts: {
     post-install = <<EOD
     cap_mkdb %CAP_MKDB_ENDIAN% ${PKG_ROOTDIR}/etc/login.conf
     pwd_mkdb -i -p -d ${PKG_ROOTDIR}/etc ${PKG_ROOTDIR}/etc/master.passwd
     services_mkdb %CAP_MKDB_ENDIAN% -q -o ${PKG_ROOTDIR}/var/db/services.db ${PKG_ROOTDIR}/etc/services
     chmod 1777 ${PKG_ROOTDIR}/tmp
23	release/packages/utilities.ucl (new file)
@@ -0,0 +1,23 @@
#
# $FreeBSD$
#

name = "FreeBSD-%PKGNAME%"
origin = "base"
version = "%VERSION%"
comment = "%COMMENT% %VCS_REVISION%"
categories = [ base ]
maintainer = "re@FreeBSD.org"
www = "https://www.FreeBSD.org"
prefix = "/"
vital = true
licenselogic = "single"
licenses = [ BSD2CLAUSE ]
desc = <<EOD
%DESC%
EOD
scripts: {
    post-install = <<EOD
    cap_mkdb %CAP_MKDB_ENDIAN% ${PKG_ROOTDIR}/etc/login.conf
EOD
}
@@ -259,12 +259,18 @@ readboot(int dosfs, struct bootblock *boot)
 		return FSFATAL;
 	}

-	boot->ClusterOffset = (boot->bpbRootDirEnts * 32 +
+	boot->FirstCluster = (boot->bpbRootDirEnts * 32 +
 	    boot->bpbBytesPerSec - 1) / boot->bpbBytesPerSec +
-	    boot->bpbResSectors + boot->bpbFATs * boot->FATsecs -
-	    CLUST_FIRST * boot->bpbSecPerClust;
-	boot->NumClusters = (boot->NumSectors - boot->ClusterOffset) /
-	    boot->bpbSecPerClust;
+	    boot->bpbResSectors + boot->bpbFATs * boot->FATsecs;
+
+	if (boot->FirstCluster + boot->bpbSecPerClust > boot->NumSectors) {
+		pfatal("Cluster offset too large (%u clusters)\n",
+		    boot->FirstCluster);
+		return FSFATAL;
+	}
+
+	boot->NumClusters = (boot->NumSectors - boot->FirstCluster) / boot->bpbSecPerClust +
+	    CLUST_FIRST;

 	if (boot->flags & FAT32)
 		boot->ClustMask = CLUST32_MASK;
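As a worked example of the new FirstCluster computation (numbers invented for illustration): with 512 root-directory entries, 512-byte sectors, 1 reserved sector, and 2 FATs of 250 sectors each, the root directory occupies (512 * 32 + 511) / 512 = 32 sectors, so FirstCluster = 1 + 2 * 250 + 32 = 533, the sector where cluster CLUST_FIRST begins. Unlike the old ClusterOffset, no CLUST_FIRST * bpbSecPerClust bias is folded in; the (cl - CLUST_FIRST) adjustment now happens at each use site, as the following hunks show.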
@@ -317,7 +317,8 @@ delete(int f, struct bootblock *boot, struct fatEntry *fat, cl_t startcl,
 			break;
 		e = delbuf + endoff;
 	}
-	off = startcl * boot->bpbSecPerClust + boot->ClusterOffset;
+	off = (startcl - CLUST_FIRST) * boot->bpbSecPerClust + boot->FirstCluster;
+
 	off *= boot->bpbBytesPerSec;
 	if (lseek(f, off, SEEK_SET) != off) {
 		perr("Unable to lseek to %" PRId64, off);
@@ -457,7 +458,7 @@ check_subdirectory(int f, struct bootblock *boot, struct dosDirEntry *dir)
 		off = boot->bpbResSectors + boot->bpbFATs *
 		    boot->FATsecs;
 	} else {
-		off = cl * boot->bpbSecPerClust + boot->ClusterOffset;
+		off = (cl - CLUST_FIRST) * boot->bpbSecPerClust + boot->FirstCluster;
 	}

 	/*
@@ -538,7 +539,7 @@ readDosDirSection(int f, struct bootblock *boot, struct fatEntry *fat,
 		    boot->FATsecs;
 	} else {
 		last = boot->bpbSecPerClust * boot->bpbBytesPerSec;
-		off = cl * boot->bpbSecPerClust + boot->ClusterOffset;
+		off = (cl - CLUST_FIRST) * boot->bpbSecPerClust + boot->FirstCluster;
 	}

 	off *= boot->bpbBytesPerSec;
@@ -1069,8 +1070,9 @@ reconnect(int dosfs, struct bootblock *boot, struct fatEntry *fat, cl_t head)
 		lfcl = (lostDir->head < boot->NumClusters) ? lostDir->head : 0;
 		return FSERROR;
 	}
-	lfoff = lfcl * boot->ClusterSize
-	    + boot->ClusterOffset * boot->bpbBytesPerSec;
+	lfoff = (lfcl - CLUST_FIRST) * boot->ClusterSize
+	    + boot->FirstCluster * boot->bpbBytesPerSec;
+
 	if (lseek(dosfs, lfoff, SEEK_SET) != lfoff
 	    || (size_t)read(dosfs, lfbuf, boot->ClusterSize) != boot->ClusterSize) {
 		perr("could not read LOST.DIR");
@@ -74,7 +74,7 @@ struct bootblock {
 	u_int32_t NumSectors;		/* how many sectors are there */
 	u_int32_t FATsecs;		/* how many sectors are in FAT */
 	u_int32_t NumFatEntries;	/* how many entries really are there */
-	u_int	ClusterOffset;		/* at what sector would sector 0 start */
+	u_int	FirstCluster;		/* at what sector is Cluster CLUST_FIRST */
 	u_int	ClusterSize;		/* Cluster size in bytes */

 	/* Now some statistics: */
@@ -3,7 +3,8 @@

 .include <src.opts.mk>

-MAN=	assert.3 \
+MAN=	arb.3 \
+	assert.3 \
 	ATOMIC_VAR_INIT.3 \
 	bitstring.3 \
 	CMSG_DATA.3 \
@@ -32,6 +33,42 @@ MAN=	assert.3 \
 	timeradd.3 \
 	tree.3

+MLINKS+= arb.3 ARB8_ENTRY.3 \
+	arb.3 ARB16_ENTRY.3 \
+	arb.3 ARB32_ENTRY.3 \
+	arb.3 ARB8_HEAD.3 \
+	arb.3 ARB16_HEAD.3 \
+	arb.3 ARB32_HEAD.3 \
+	arb.3 ARB_ALLOCSIZE.3 \
+	arb.3 ARB_INITIALIZER.3 \
+	arb.3 ARB_ROOT.3 \
+	arb.3 ARB_EMPTY.3 \
+	arb.3 ARB_FULL.3 \
+	arb.3 ARB_CURNODES.3 \
+	arb.3 ARB_MAXNODES.3 \
+	arb.3 ARB_NEXT.3 \
+	arb.3 ARB_PREV.3 \
+	arb.3 ARB_MIN.3 \
+	arb.3 ARB_MAX.3 \
+	arb.3 ARB_FIND.3 \
+	arb.3 ARB_NFIND.3 \
+	arb.3 ARB_LEFT.3 \
+	arb.3 ARB_LEFTIDX.3 \
+	arb.3 ARB_RIGHT.3 \
+	arb.3 ARB_RIGHTIDX.3 \
+	arb.3 ARB_PARENT.3 \
+	arb.3 ARB_PARENTIDX.3 \
+	arb.3 ARB_GETFREE.3 \
+	arb.3 ARB_FREEIDX.3 \
+	arb.3 ARB_FOREACH.3 \
+	arb.3 ARB_FOREACH_FROM.3 \
+	arb.3 ARB_FOREACH_SAFE.3 \
+	arb.3 ARB_FOREACH_REVERSE.3 \
+	arb.3 ARB_FOREACH_REVERSE_FROM.3 \
+	arb.3 ARB_FOREACH_REVERSE_SAFE.3 \
+	arb.3 ARB_INIT.3 \
+	arb.3 ARB_INSERT.3 \
+	arb.3 ARB_REMOVE.3
 MLINKS=	ATOMIC_VAR_INIT.3 atomic_compare_exchange_strong.3 \
 	ATOMIC_VAR_INIT.3 atomic_compare_exchange_strong_explicit.3 \
 	ATOMIC_VAR_INIT.3 atomic_compare_exchange_weak.3 \
483	share/man/man3/arb.3 (new file)
@@ -0,0 +1,483 @@
.\" $OpenBSD: tree.3,v 1.7 2002/06/12 01:09:20 provos Exp $
.\"
.\" Copyright 2002 Niels Provos <provos@citi.umich.edu>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\"    notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\"    notice, this list of conditions and the following disclaimer in the
.\"    documentation and/or other materials provided with the distribution.
.\" 3. All advertising materials mentioning features or use of this software
.\"    must display the following acknowledgement:
.\"      This product includes software developed by Niels Provos.
.\" 4. The name of the author may not be used to endorse or promote products
.\"    derived from this software without specific prior written permission.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
.\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd May 8, 2019
.Dt ARB 3
.Os
.Sh NAME
.Nm ARB_PROTOTYPE ,
.Nm ARB_PROTOTYPE_STATIC ,
.Nm ARB_PROTOTYPE_INSERT ,
.Nm ARB_PROTOTYPE_INSERT_COLOR ,
.Nm ARB_PROTOTYPE_REMOVE ,
.Nm ARB_PROTOTYPE_REMOVE_COLOR ,
.Nm ARB_PROTOTYPE_FIND ,
.Nm ARB_PROTOTYPE_NFIND ,
.Nm ARB_PROTOTYPE_NEXT ,
.Nm ARB_PROTOTYPE_PREV ,
.Nm ARB_PROTOTYPE_MINMAX ,
.Nm ARB_GENERATE ,
.Nm ARB_GENERATE_STATIC ,
.Nm ARB_GENERATE_INSERT ,
.Nm ARB_GENERATE_INSERT_COLOR ,
.Nm ARB_GENERATE_REMOVE ,
.Nm ARB_GENERATE_REMOVE_COLOR ,
.Nm ARB_GENERATE_FIND ,
.Nm ARB_GENERATE_NFIND ,
.Nm ARB_GENERATE_NEXT ,
.Nm ARB_GENERATE_PREV ,
.Nm ARB_GENERATE_MINMAX ,
.Nm ARB8_ENTRY ,
.Nm ARB16_ENTRY ,
.Nm ARB32_ENTRY ,
.Nm ARB8_HEAD ,
.Nm ARB16_HEAD ,
.Nm ARB32_HEAD ,
.Nm ARB_ALLOCSIZE ,
.Nm ARB_INITIALIZER ,
.Nm ARB_ROOT ,
.Nm ARB_EMPTY ,
.Nm ARB_FULL ,
.Nm ARB_CURNODES ,
.Nm ARB_MAXNODES ,
.Nm ARB_NEXT ,
.Nm ARB_PREV ,
.Nm ARB_MIN ,
.Nm ARB_MAX ,
.Nm ARB_FIND ,
.Nm ARB_NFIND ,
.Nm ARB_LEFT ,
.Nm ARB_LEFTIDX ,
.Nm ARB_RIGHT ,
.Nm ARB_RIGHTIDX ,
.Nm ARB_PARENT ,
.Nm ARB_PARENTIDX ,
.Nm ARB_GETFREE ,
.Nm ARB_FREEIDX ,
.Nm ARB_FOREACH ,
.Nm ARB_FOREACH_FROM ,
.Nm ARB_FOREACH_SAFE ,
.Nm ARB_FOREACH_REVERSE ,
.Nm ARB_FOREACH_REVERSE_FROM ,
.Nm ARB_FOREACH_REVERSE_SAFE ,
.Nm ARB_INIT ,
.Nm ARB_INSERT ,
.Nm ARB_REMOVE
.Nd "array-based red-black trees"
.Sh SYNOPSIS
.In sys/arb.h
.Fn ARB_PROTOTYPE NAME TYPE FIELD CMP
.Fn ARB_PROTOTYPE_STATIC NAME TYPE FIELD CMP
.Fn ARB_PROTOTYPE_INSERT NAME TYPE ATTR
.Fn ARB_PROTOTYPE_INSERT_COLOR NAME TYPE ATTR
.Fn ARB_PROTOTYPE_REMOVE NAME TYPE ATTR
.Fn ARB_PROTOTYPE_REMOVE_COLOR NAME TYPE ATTR
.Fn ARB_PROTOTYPE_FIND NAME TYPE ATTR
.Fn ARB_PROTOTYPE_NFIND NAME TYPE ATTR
.Fn ARB_PROTOTYPE_NEXT NAME TYPE ATTR
.Fn ARB_PROTOTYPE_PREV NAME TYPE ATTR
.Fn ARB_PROTOTYPE_MINMAX NAME TYPE ATTR
.Fn ARB_GENERATE NAME TYPE FIELD CMP
.Fn ARB_GENERATE_STATIC NAME TYPE FIELD CMP
.Fn ARB_GENERATE_INSERT NAME TYPE FIELD CMP ATTR
.Fn ARB_GENERATE_INSERT_COLOR NAME TYPE FIELD ATTR
.Fn ARB_GENERATE_REMOVE NAME TYPE FIELD ATTR
.Fn ARB_GENERATE_REMOVE_COLOR NAME TYPE FIELD ATTR
.Fn ARB_GENERATE_FIND NAME TYPE FIELD CMP ATTR
.Fn ARB_GENERATE_NFIND NAME TYPE FIELD CMP ATTR
.Fn ARB_GENERATE_NEXT NAME TYPE FIELD ATTR
.Fn ARB_GENERATE_PREV NAME TYPE FIELD ATTR
.Fn ARB_GENERATE_MINMAX NAME TYPE FIELD ATTR
.Fn ARB<8|16|32>_ENTRY
.Fn ARB<8|16|32>_HEAD HEADNAME TYPE
.Ft "size_t"
.Fn ARB_ALLOCSIZE "ARB_HEAD *head" "int<8|16|32>_t maxnodes" "struct TYPE *elm"
.Fn ARB_INITIALIZER "ARB_HEAD *head" "int<8|16|32>_t maxnodes"
.Ft "struct TYPE *"
.Fn ARB_ROOT "ARB_HEAD *head"
.Ft "bool"
.Fn ARB_EMPTY "ARB_HEAD *head"
.Ft "bool"
.Fn ARB_FULL "ARB_HEAD *head"
.Ft "int<8|16|32>_t"
.Fn ARB_CURNODES "ARB_HEAD *head"
.Ft "int<8|16|32>_t"
.Fn ARB_MAXNODES "ARB_HEAD *head"
.Ft "struct TYPE *"
.Fn ARB_NEXT NAME "ARB_HEAD *head" "struct TYPE *elm"
.Ft "struct TYPE *"
.Fn ARB_PREV NAME "ARB_HEAD *head" "struct TYPE *elm"
.Ft "struct TYPE *"
.Fn ARB_MIN NAME "ARB_HEAD *head"
.Ft "struct TYPE *"
.Fn ARB_MAX NAME "ARB_HEAD *head"
.Ft "struct TYPE *"
.Fn ARB_FIND NAME "ARB_HEAD *head" "struct TYPE *elm"
.Ft "struct TYPE *"
.Fn ARB_NFIND NAME "ARB_HEAD *head" "struct TYPE *elm"
.Ft "struct TYPE *"
.Fn ARB_LEFT "struct TYPE *elm" "ARB_ENTRY NAME"
.Ft "int<8|16|32>_t"
.Fn ARB_LEFTIDX "struct TYPE *elm" "ARB_ENTRY NAME"
.Ft "struct TYPE *"
.Fn ARB_RIGHT "struct TYPE *elm" "ARB_ENTRY NAME"
.Ft "int<8|16|32>_t"
.Fn ARB_RIGHTIDX "struct TYPE *elm" "ARB_ENTRY NAME"
.Ft "struct TYPE *"
.Fn ARB_PARENT "struct TYPE *elm" "ARB_ENTRY NAME"
.Ft "int<8|16|32>_t"
.Fn ARB_PARENTIDX "struct TYPE *elm" "ARB_ENTRY NAME"
.Ft "struct TYPE *"
.Fn ARB_GETFREE "ARB_HEAD *head" "FIELD"
.Ft "int<8|16|32>_t"
.Fn ARB_FREEIDX "ARB_HEAD *head"
.Fn ARB_FOREACH VARNAME NAME "ARB_HEAD *head"
.Fn ARB_FOREACH_FROM "VARNAME" "NAME" "POS_VARNAME"
.Fn ARB_FOREACH_SAFE "VARNAME" "NAME" "ARB_HEAD *head" "TEMP_VARNAME"
.Fn ARB_FOREACH_REVERSE VARNAME NAME "ARB_HEAD *head"
.Fn ARB_FOREACH_REVERSE_FROM "VARNAME" "NAME" "POS_VARNAME"
.Fn ARB_FOREACH_REVERSE_SAFE "VARNAME" "NAME" "ARB_HEAD *head" "TEMP_VARNAME"
.Ft void
.Fn ARB_INIT "struct TYPE *elm" "FIELD" "ARB_HEAD *head" "int<8|16|32>_t maxnodes"
.Ft "struct TYPE *"
.Fn ARB_INSERT NAME "ARB_HEAD *head" "struct TYPE *elm"
.Ft "struct TYPE *"
.Fn ARB_REMOVE NAME "ARB_HEAD *head" "struct TYPE *elm"
.Sh DESCRIPTION
These macros define data structures for an array-based red-black tree.
They use a single, contiguous chunk of memory, and are useful
e.g., when the tree needs to be transferred between userspace and kernel.
.Pp
In the macro definitions,
.Fa TYPE
is the name tag of a user defined structure that must contain a field of type
.Vt ARB_ENTRY ,
named
.Fa ENTRYNAME .
The argument
.Fa HEADNAME
is the name tag of a user defined structure that must be declared
using the
.Fn ARB_HEAD
macro.
The argument
.Fa NAME
has to be a unique name prefix for every tree that is defined.
.Pp
The function prototypes are declared with
.Fn ARB_PROTOTYPE ,
or
.Fn ARB_PROTOTYPE_STATIC .
The function bodies are generated with
.Fn ARB_GENERATE ,
or
.Fn ARB_GENERATE_STATIC .
See the examples below for further explanation of how these macros are used.
.Pp
A red-black tree is a binary search tree with the node color as an
extra attribute.
It fulfills a set of conditions:
.Bl -enum -offset indent
.It
Every search path from the root to a leaf consists of the same number of
black nodes.
.It
Each red node (except for the root) has a black parent.
.It
Each leaf node is black.
.El
.Pp
Every operation on a red-black tree is bounded as
.Fn O "lg n" .
The maximum height of a red-black tree is
.Fn 2lg "n + 1" .
.Pp
.Fn ARB_*
trees require entries to be allocated as an array, and use array
indices to link entries together.
The maximum number of
.Fn ARB_*
tree entries is therefore constrained by the minimum of array size and choice of
signed integer data type used to store array indices.
Use
.Fn ARB_ALLOCSIZE
to compute the size of the memory chunk to allocate.
.Pp
A red-black tree is headed by a structure defined by the
.Fn ARB_HEAD
macro.
A
structure is declared as follows:
.Bd -ragged -offset indent
.Fn ARB<8|16|32>_HEAD HEADNAME TYPE
.Va head ;
.Ed
.Pp
where
.Fa HEADNAME
is the name of the structure to be defined, and struct
.Fa TYPE
is the type of the elements to be inserted into the tree.
.Pp
The
.Fn ARB_HEAD
variant includes a suffix denoting the signed integer data type size
.Pq in bits
used to store array indices.
For example,
.Fn ARB8_HEAD
creates a red-black tree head structure with 8-bit signed array indices capable
of indexing up to 128 entries.
.Pp
The
.Fn ARB_ENTRY
macro declares a structure that allows elements to be connected in the tree.
Similarly to the
.Fn ARB<8|16|32>_HEAD
macro, the
.Fn ARB_ENTRY
variant includes a suffix denoting the signed integer data type size
.Pq in bits
used to store array indices.
Entries should use the same number of bits as the tree head structure they will
be linked into.
.Pp
In order to use the functions that manipulate the tree structure,
their prototypes need to be declared with the
.Fn ARB_PROTOTYPE
or
.Fn ARB_PROTOTYPE_STATIC
macro,
where
.Fa NAME
is a unique identifier for this particular tree.
The
.Fa TYPE
argument is the type of the structure that is being managed
by the tree.
The
.Fa FIELD
argument is the name of the element defined by
.Fn ARB_ENTRY .
Individual prototypes can be declared with
.Fn ARB_PROTOTYPE_INSERT ,
.Fn ARB_PROTOTYPE_INSERT_COLOR ,
.Fn ARB_PROTOTYPE_REMOVE ,
.Fn ARB_PROTOTYPE_REMOVE_COLOR ,
.Fn ARB_PROTOTYPE_FIND ,
.Fn ARB_PROTOTYPE_NFIND ,
.Fn ARB_PROTOTYPE_NEXT ,
.Fn ARB_PROTOTYPE_PREV ,
and
.Fn ARB_PROTOTYPE_MINMAX
in case not all functions are required.
The individual prototype macros expect
.Fa NAME ,
.Fa TYPE ,
and
.Fa ATTR
arguments.
The
.Fa ATTR
argument must be empty for global functions or
.Fa static
for static functions.
.Pp
The function bodies are generated with the
.Fn ARB_GENERATE
or
.Fn ARB_GENERATE_STATIC
macro.
These macros take the same arguments as the
.Fn ARB_PROTOTYPE
and
.Fn ARB_PROTOTYPE_STATIC
macros, but should be used only once.
As an alternative, individual function bodies are generated with the
.Fn ARB_GENERATE_INSERT ,
.Fn ARB_GENERATE_INSERT_COLOR ,
.Fn ARB_GENERATE_REMOVE ,
.Fn ARB_GENERATE_REMOVE_COLOR ,
.Fn ARB_GENERATE_FIND ,
.Fn ARB_GENERATE_NFIND ,
.Fn ARB_GENERATE_NEXT ,
.Fn ARB_GENERATE_PREV ,
and
.Fn ARB_GENERATE_MINMAX
macros.
.Pp
Finally,
the
.Fa CMP
argument is the name of a function used to compare tree nodes
with each other.
The function takes two arguments of type
.Vt "struct TYPE *" .
If the first argument is smaller than the second, the function returns a
value smaller than zero.
If they are equal, the function returns zero.
Otherwise, it should return a value greater than zero.
The compare
function defines the order of the tree elements.
.Pp
The
.Fn ARB_INIT
macro initializes the tree referenced by
.Fa head ,
with the array length of
.Fa maxnodes .
.Pp
The red-black tree can also be initialized statically by using the
.Fn ARB_INITIALIZER
macro:
.Bd -ragged -offset indent
.Fn ARB<8|16|32>_HEAD HEADNAME TYPE
.Va head
=
.Fn ARB_INITIALIZER &head maxnodes ;
.Ed
.Pp
The
.Fn ARB_INSERT
macro inserts the new element
.Fa elm
into the tree.
.Pp
The
.Fn ARB_REMOVE
macro removes the element
.Fa elm
from the tree pointed to by
.Fa head .
.Pp
The
.Fn ARB_FIND
and
.Fn ARB_NFIND
macros can be used to find a particular element in the tree.
.Bd -literal -offset indent
struct TYPE find, *res;
find.key = 30;
res = ARB_FIND(NAME, head, &find);
.Ed
.Pp
The
.Fn ARB_ROOT ,
.Fn ARB_MIN ,
.Fn ARB_MAX ,
.Fn ARB_NEXT ,
and
.Fn ARB_PREV
macros can be used to traverse the tree:
.Pp
.Dl "for (np = ARB_MIN(NAME, &head); np != NULL; np = ARB_NEXT(NAME, &head, np))"
.Pp
Or, for simplicity, one can use the
.Fn ARB_FOREACH
or
.Fn ARB_FOREACH_REVERSE
macro:
.Bd -ragged -offset indent
.Fn ARB_FOREACH np NAME head
.Ed
.Pp
The macros
.Fn ARB_FOREACH_SAFE
and
.Fn ARB_FOREACH_REVERSE_SAFE
traverse the tree referenced by head
in a forward or reverse direction respectively,
assigning each element in turn to np.
However, unlike their unsafe counterparts,
they permit both the removal of np
as well as freeing it from within the loop safely
without interfering with the traversal.
.Pp
Both
.Fn ARB_FOREACH_FROM
and
.Fn ARB_FOREACH_REVERSE_FROM
may be used to continue an interrupted traversal
in a forward or reverse direction respectively.
The head pointer is not required.
The pointer to the node from where to resume the traversal
should be passed as their last argument,
and will be overwritten to provide safe traversal.
.Pp
The
.Fn ARB_EMPTY
macro should be used to check whether a red-black tree is empty.
.Pp
Given that ARB trees have an intrinsic upper bound on the number of entries,
some ARB-specific additional macros are defined.
The
.Fn ARB_FULL
macro returns a boolean indicating whether the current number of tree entries
equals the tree's maximum.
The
.Fn ARB_CURNODES
and
.Fn ARB_MAXNODES
macros return the current and maximum number of entries respectively.
The
.Fn ARB_GETFREE
macro returns a pointer to the next free entry in the array of entries, ready to
be linked into the tree.
The
.Fn ARB_INSERT
macro returns
.Dv NULL
if the element was inserted in the tree successfully; otherwise it
returns a pointer to the element with the colliding key.
.Pp
Accordingly,
.Fn ARB_REMOVE
returns the pointer to the removed element; otherwise it returns
.Dv NULL
to indicate an error.
.Sh SEE ALSO
.Xr queue 3 ,
.Xr tree 3
.Sh HISTORY
The
.Nm ARB
macros first appeared in
.Fx 13.0 .
.Sh AUTHORS
The
.Nm ARB
macros were implemented by
.An Lawrence Stewart Aq Mt lstewart@FreeBSD.org ,
based on
.Xr tree 3
macros written by
.An Niels Provos .
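A minimal usage sketch based only on the synopsis above (the mytree/mynode names and the code are illustrative, not part of the commit; the head is assumed to come from a contiguous allocation of ARB_ALLOCSIZE() bytes set up with ARB_INIT()):

    #include <sys/types.h>
    #include <sys/arb.h>

    struct mynode {
            int32_t         key;
            ARB32_ENTRY()   linkage;        /* 32-bit signed index links */
    };

    ARB32_HEAD(mytree, mynode);

    static int
    mynode_cmp(struct mynode *a, struct mynode *b)
    {
            return (a->key < b->key ? -1 : a->key > b->key);
    }

    ARB_PROTOTYPE_STATIC(mytree, mynode, linkage, mynode_cmp);
    ARB_GENERATE_STATIC(mytree, mynode, linkage, mynode_cmp);

    static void
    mytree_demo(struct mytree *head)
    {
            struct mynode *np, find;

            if (!ARB_FULL(head)) {
                    np = ARB_GETFREE(head, linkage); /* next free array slot */
                    np->key = 30;
                    ARB_INSERT(mytree, head, np);    /* NULL on success */
            }
            find.key = 30;
            np = ARB_FIND(mytree, head, &find);
            ARB_FOREACH(np, mytree, head)
                    ;                                /* visits keys in order */
    }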
@@ -1329,6 +1329,7 @@ in
 mode.
 .El
 .Sh SEE ALSO
+.Xr arb 3 ,
 .Xr tree 3
 .Sh HISTORY
 The
@@ -674,6 +674,7 @@ return the pointer to the removed element otherwise they return
 .Dv NULL
 to indicate an error.
 .Sh SEE ALSO
+.Xr arb 3 ,
 .Xr queue 3
 .Sh AUTHORS
 The author of the tree macros is
@@ -65,7 +65,12 @@
 .Sh SYNOPSIS
 .In sys/types.h
 .In sys/sbuf.h
-.Ft typedef\ int ( sbuf_drain_func ) ( void\ *arg, const\ char\ *data, int\ len ) ;
+.Ft typedef int
+.Fo (sbuf_drain_func)
+.Fa "void *arg"
+.Fa "const char *data"
+.Fa "int len"
+.Fc
 .Pp
 .Ft struct sbuf *
 .Fo sbuf_new
@@ -51,7 +51,7 @@ The
 .Fn vm_page_wire
 and
 .Fn vm_page_wire_mapped
-function wire the page, prevent it from being reclaimed by the page
+functions wire the page, which prevents it from being reclaimed by the page
 daemon or when its containing object is destroyed.
 Both functions require that the page belong to an object.
 The
@@ -88,6 +88,7 @@ remko [label="Remko Lodder\nremko@FreeBSD.org\n2004/10/16"]
 rene [label="Rene Ladan\nrene@FreeBSD.org\n2008/11/03"]
 ryusuke [label="Ryusuke Suzuki\nryusuke@FreeBSD.org\n2009/12/21"]
 sevan [label="Sevan Janiyan\nsevan@FreeBSD.org\n2016/09/16"]
+sg [label="Stephen Gregoratto\nsg@FreeBSD.org\n2019/09/10"]
 simon [label="Simon L. Nielsen\nsimon@FreeBSD.org\n2003/07/20"]
 skreuzer [label="Steven Kreuzer\nskreuzer@FreeBSD.org\n2014/01/15"]
 taras [label="Taras Korenko\ntaras@FreeBSD.org\n2010/06/25"]
@@ -110,6 +111,7 @@ bcr -> allanjude
 bcr -> bhd
 bcr -> sevan
 bcr -> dexter
+bcr -> sg

 blackend -> ale

@@ -96,6 +96,7 @@ decke [label="Bernhard Froehlich\ndecke@FreeBSD.org\n2010/03/21"]
 delphij [label="Xin Li\ndelphij@FreeBSD.org\n2006/05/01"]
 demon [label="Dmitry Sivachenko\ndemon@FreeBSD.org\n2000/11/13"]
 dhn [label="Dennis Herrmann\ndhn@FreeBSD.org\n2009/03/03"]
+dmgk [label="Dmitri Goutnik\ndmgk@FreeBSD.org\n2019/09/15"]
 dryice [label="Dryice Dong Liu\ndryice@FreeBSD.org\n2006/12/25"]
 dteske [label="Devin Teske\ndteske@FreeBSD.org\n2018/03/01"]
 dumbbell [label="Jean-Sebastien Pedron\ndumbbell@FreeBSD.org\n2017/01/10"]
@@ -304,6 +305,7 @@ amdmi3 -> arrowd

 antoine -> dumbbell

+araujo -> dmgk
 araujo -> egypcio
 araujo -> jhixson
 araujo -> lippe
@@ -736,6 +738,7 @@ timur -> kbowling
 tmclaugh -> itetcu
 tmclaugh -> xride

+tz -> dmgk
 tz -> joneum
 tz -> fernape
 tz -> mfechner
@@ -11,12 +11,16 @@
 # For each option FOO in __DEFAULT_NO_OPTIONS, MK_FOO is set to "no",
 # unless WITH_FOO is defined, in which case it is set to "yes".
 #
+# For each entry FOO/BAR in __DEFAULT_DEPENDENT_OPTIONS,
+# MK_FOO is set to "no" if WITHOUT_FOO is defined,
+# "yes" if WITH_FOO is defined, otherwise the value of MK_BAR.
+#
 # If both WITH_FOO and WITHOUT_FOO are defined, WITHOUT_FOO wins and
 # MK_FOO is set to "no" regardless of which list it was in.
 #
-# Both __DEFAULT_YES_OPTIONS and __DEFAULT_NO_OPTIONS are undef'd
-# after all this processing, allowing this file to be included
-# multiple times with different lists.
+# All of __DEFAULT_YES_OPTIONS, __DEFAULT_NO_OPTIONS and
+# __DEFAULT_DEPENDENT_OPTIONS are undef'd after all this processing,
+# allowing this file to be included multiple times with different lists.
 #
 # Other parts of the build system will set BROKEN_OPTIONS to a list
 # of options that are broken on this platform.  This will not be unset
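A hypothetical illustration of the FOO/BAR form documented above (the option names are invented for the example):

    __DEFAULT_DEPENDENT_OPTIONS+= FOO_EXTRAS/FOO
    .include <bsd.mkopt.mk>
    # MK_FOO_EXTRAS now follows MK_FOO unless WITH_FOO_EXTRAS or
    # WITHOUT_FOO_EXTRAS is set explicitly.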
@@ -363,6 +363,7 @@ variable fd
 ;

 : line_buffer_resize  ( len -- len )
+	dup 0= if exit then
 	>r
 	line_buffer .len @ if
 		line_buffer .addr @
@@ -376,6 +377,7 @@ variable fd
 ;

 : append_to_line_buffer  ( addr len -- )
+	dup 0= if 2drop exit then
 	line_buffer strget
 	2swap strcat
 	line_buffer .len !
@@ -55,6 +55,9 @@ Malloc(size_t bytes, const char *file, int line)
 {
 	Guard *res;

+	if (bytes == 0)
+		return (NULL);
+
 #ifdef USEENDGUARD
 	bytes += MALLOCALIGN + 1;
 #else
@@ -3064,10 +3064,8 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 {
 	pd_entry_t pde, *pdep;
 	pt_entry_t pte, PG_RW, PG_V;
-	vm_paddr_t pa;
 	vm_page_t m;

-	pa = 0;
 	m = NULL;
 	PG_RW = pmap_rw_bit(pmap);
 	PG_V = pmap_valid_bit(pmap);
@@ -148,6 +148,34 @@ fix_fdt_interrupt_data(void)
 	OF_setprop(socnode, "interrupt-parent", &gicxref, sizeof(gicxref));
 }

+static void
+fix_fdt_iomuxc_data(void)
+{
+	phandle_t node;
+
+	/*
+	 * The linux dts defines two nodes with the same mmio address range,
+	 * iomuxc-gpr and the regular iomuxc.  The -gpr node is a simple_mfd and
+	 * a syscon, but it only has access to a small subset of the iomuxc
+	 * registers, so it can't serve as the accessor for the iomuxc driver's
+	 * register IO.  But right now, the simple_mfd driver attaches first,
+	 * preventing the real iomuxc driver from allocating its mmio register
+	 * range because it partially overlaps with the -gpr range.
+	 *
+	 * For now, by far the easiest thing to do to keep imx6 working is to
+	 * just disable the iomuxc-gpr node because we don't have a driver for
+	 * it anyway, we just need to prevent attachment of simple_mfd.
+	 *
+	 * If we ever write a -gpr driver, this code should probably switch to
+	 * modifying the reg property so that the range covers all the iomuxc
+	 * regs, then the -gpr driver can be a regular syscon driver that iomuxc
+	 * uses for register access.
+	 */
+	node = OF_finddevice("/soc/aips-bus@2000000/iomuxc-gpr@20e0000");
+	if (node != -1)
+		OF_setprop(node, "status", "disabled", sizeof("disabled"));
+}
+
 static int
 imx6_attach(platform_t plat)
 {
@@ -155,6 +183,9 @@ imx6_attach(platform_t plat)
 	/* Fix soc interrupt-parent property. */
 	fix_fdt_interrupt_data();

+	/* Fix iomuxc-gpr and iomuxc nodes both using the same mmio range. */
+	fix_fdt_iomuxc_data();
+
 	/* Inform the MPCore timer driver that its clock is variable. */
 	arm_tmr_change_frequency(ARM_TMR_FREQUENCY_VARIES);

@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/bus.h>
 #include <sys/conf.h>
 #include <sys/kernel.h>
+#include <sys/lock.h>
 #include <sys/module.h>
 #include <sys/malloc.h>
 #include <sys/mutex.h>
@@ -71,6 +71,9 @@ ti_sysc_probe(device_t dev)
 		return (ENXIO);

 	device_set_desc(dev, "TI SYSC Interconnect");
+	if (!bootverbose)
+		device_quiet(dev);
+
 	return (BUS_PROBE_DEFAULT);
 }

@@ -242,6 +242,7 @@ mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
 	if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
 		panic("mount: lost mount");
 	VOP_UNLOCK(vp, 0);
+	vfs_op_exit(mp);
 	vfs_unbusy(mp);
 	*vpp = mvp;
 	return (0);
@@ -41,6 +41,18 @@ __FBSDID("$FreeBSD$");
 #include <dev/iicbus/iicbus.h>
 #include "iicbus_if.h"

+/*
+ * Encode a system errno value into the IIC_Exxxxx space by setting the
+ * IIC_ERRNO marker bit, so that iic2errno() can turn it back into a plain
+ * system errno value later.  This lets controller- and bus-layer code get
+ * important system errno values (such as EINTR/ERESTART) back to the caller.
+ */
+int
+errno2iic(int errno)
+{
+	return ((errno == 0) ? 0 : errno | IIC_ERRNO);
+}
+
 /*
  * Translate IIC_Exxxxx status values to vaguely-equivalent errno values.
  */
@@ -59,7 +71,22 @@ iic2errno(int iic_status)
 	case IIC_ENOTSUPP:      return (EOPNOTSUPP);
 	case IIC_ENOADDR:       return (EADDRNOTAVAIL);
 	case IIC_ERESOURCE:     return (ENOMEM);
-	default:                return (EIO);
+	default:
+		/*
+		 * If the high bit is set, that means it's a system errno value
+		 * that was encoded into the IIC_Exxxxxx space by setting the
+		 * IIC_ERRNO marker bit.  If lots of high-order bits are set,
+		 * then it's one of the negative pseudo-errors such as ERESTART
+		 * and we return it as-is.  Otherwise it's a plain "small
+		 * positive integer" errno, so just remove the IIC_ERRNO marker
+		 * bit.  If it's some unknown number without the high bit set,
+		 * there isn't much we can do except call it an I/O error.
+		 */
+		if ((iic_status & IIC_ERRNO) == 0)
+			return (EIO);
+		if ((iic_status & 0xFFFF0000) != 0)
+			return (iic_status);
+		return (iic_status & ~IIC_ERRNO);
 	}
 }

@@ -97,7 +124,7 @@ iicbus_poll(struct iicbus_softc *sc, int how)
 		return (IIC_EBUSBSY);
 	}

-	return (error);
+	return (errno2iic(error));
 }

 /*
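A sketch of how a controller driver might use the new encoding (the mydrv names are invented; only errno2iic() and iic2errno() come from the commit):

    /* Bus layer: tag a real errno so it survives the IIC_Exxxxx channel. */
    static int
    mydrv_wait_intr(struct mydrv_softc *sc)
    {
            int error;

            error = msleep(sc, &sc->sc_mtx, PCATCH, "mydrv", hz);
            if (error != 0)
                    return (errno2iic(error));      /* e.g. EINTR, ERESTART */
            return (0);                             /* IIC "no error" */
    }

    /* At the syscall boundary, iic2errno() passes ERESTART through
     * unchanged and strips the IIC_ERRNO marker from plain errno values: */
    error = iic2errno(mydrv_wait_intr(sc));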
@@ -96,12 +96,14 @@
 #define IIC_ENOTSUPP	0x8	/* request not supported */
 #define IIC_ENOADDR	0x9	/* no address assigned to the interface */
 #define IIC_ERESOURCE	0xa	/* resources (memory, whatever) unavailable */
+#define IIC_ERRNO	__INT_MIN /* marker bit: errno is in low-order bits */

 /*
  * Note that all iicbus functions return IIC_Exxxxx status values,
  * except iic2errno() (obviously) and iicbus_started() (returns bool).
  */
 extern int iic2errno(int);
+extern int errno2iic(int);
 extern int iicbus_request_bus(device_t, device_t, int);
 extern int iicbus_release_bus(device_t, device_t);
 extern device_t iicbus_alloc_bus(device_t);
@@ -1029,13 +1029,8 @@ ns8250_bus_transmit(struct uart_softc *sc)

 	bas = &sc->sc_bas;
 	uart_lock(sc->sc_hwmtx);
-	if (sc->sc_txdatasz > 1) {
-		if ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0)
-			ns8250_drain(bas, UART_DRAIN_TRANSMITTER);
-	} else {
-		while ((uart_getreg(bas, REG_LSR) & LSR_THRE) == 0)
-			DELAY(4);
-	}
+	while ((uart_getreg(bas, REG_LSR) & LSR_THRE) == 0)
+		DELAY(4);
 	for (i = 0; i < sc->sc_txdatasz; i++) {
 		uart_setreg(bas, REG_DATA, sc->sc_txbuf[i]);
 		uart_barrier(bas);
@@ -390,7 +390,7 @@ fuse_internal_invalidate_entry(struct mount *mp, struct uio *uio)
 	if ((err = uiomove(&fnieo, sizeof(fnieo), uio)) != 0)
 		return (err);

-	if (fnieo.namelen > sizeof(name))
+	if (fnieo.namelen >= sizeof(name))
 		return (EINVAL);

 	if ((err = uiomove(name, fnieo.namelen, uio)) != 0)
@@ -174,6 +174,8 @@ fuse_vnode_setparent(struct vnode *vp, struct vnode *dvp)
 		MPASS(dvp->v_type == VDIR);
 		VTOFUD(vp)->parent_nid = VTOI(dvp);
 		VTOFUD(vp)->flag |= FN_PARENT_NID;
+	} else {
+		VTOFUD(vp)->flag &= ~FN_PARENT_NID;
 	}
 }

@@ -1525,11 +1525,10 @@ fuse_vnop_reclaim(struct vop_reclaim_args *ap)
 		fuse_filehandle_close(vp, fufh, td, NULL);
 	}

-	if ((!fuse_isdeadfs(vp)) && (fvdat->nlookup)) {
+	if (!fuse_isdeadfs(vp) && fvdat->nlookup > 0) {
 		fuse_internal_forget_send(vnode_mount(vp), td, NULL, VTOI(vp),
 		    fvdat->nlookup);
 	}
 	fuse_vnode_setparent(vp, NULL);
 	cache_purge(vp);
 	vfs_hash_remove(vp);
 	fuse_vnode_destroy(vp);
@@ -511,10 +511,10 @@ nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
 			 * zero np->n_attrstamp to indicate that
 			 * the attributes are stale.
 			 */
-			vap->va_size = np->n_size;
+			nsize = vap->va_size = np->n_size;
+			setnsize = 1;
 			np->n_attrstamp = 0;
 			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
-			vnode_pager_setsize(vp, np->n_size);
 		} else if (np->n_flag & NMODIFIED) {
 			/*
 			 * We've modified the file: Use the larger
@@ -526,7 +526,8 @@ nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
 				np->n_size = vap->va_size;
 				np->n_flag |= NSIZECHANGED;
 			}
-			vnode_pager_setsize(vp, np->n_size);
+			nsize = np->n_size;
+			setnsize = 1;
 		} else if (vap->va_size < np->n_size) {
 			/*
 			 * When shrinking the size, the call to
@@ -538,9 +539,9 @@ nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
 				np->n_flag |= NSIZECHANGED;
 				setnsize = 1;
 			} else {
-				np->n_size = vap->va_size;
+				nsize = np->n_size = vap->va_size;
 				np->n_flag |= NSIZECHANGED;
-				vnode_pager_setsize(vp, np->n_size);
+				setnsize = 1;
 			}
 		} else {
 			np->n_size = vap->va_size;
@@ -135,10 +135,21 @@ pfs_add_node(struct pfs_node *parent, struct pfs_node *pn)
 	pfs_fileno_alloc(pn);

 	pfs_lock(parent);
-	pn->pn_next = parent->pn_nodes;
 	if ((parent->pn_flags & PFS_PROCDEP) != 0)
 		pn->pn_flags |= PFS_PROCDEP;
-	parent->pn_nodes = pn;
+	if (parent->pn_nodes == NULL) {
+		KASSERT(parent->pn_last_node == NULL,
+		    ("%s(): pn_last_node not NULL", __func__));
+		parent->pn_nodes = pn;
+		parent->pn_last_node = pn;
+	} else {
+		KASSERT(parent->pn_last_node != NULL,
+		    ("%s(): pn_last_node is NULL", __func__));
+		KASSERT(parent->pn_last_node->pn_next == NULL,
+		    ("%s(): pn_last_node->pn_next not NULL", __func__));
+		parent->pn_last_node->pn_next = pn;
+		parent->pn_last_node = pn;
+	}
 	pfs_unlock(parent);
 }

@@ -148,7 +159,7 @@ pfs_add_node(struct pfs_node *parent, struct pfs_node *pn)
 static void
 pfs_detach_node(struct pfs_node *pn)
 {
-	struct pfs_node *parent = pn->pn_parent;
+	struct pfs_node *node, *parent = pn->pn_parent;
 	struct pfs_node **iter;

 	KASSERT(parent != NULL, ("%s(): node has no parent", __func__));
@@ -156,6 +167,16 @@ pfs_detach_node(struct pfs_node *pn)
 	    ("%s(): parent has different pn_info", __func__));

 	pfs_lock(parent);
+	if (pn == parent->pn_last_node) {
+		if (pn == parent->pn_nodes) {
+			parent->pn_last_node = NULL;
+		} else {
+			for (node = parent->pn_nodes;
+			    node->pn_next != pn; node = node->pn_next)
+				continue;
+			parent->pn_last_node = node;
+		}
+	}
 	iter = &parent->pn_nodes;
 	while (*iter != NULL) {
 		if (*iter == pn) {
@@ -237,6 +237,7 @@ struct pfs_node {

 	struct pfs_node	*pn_parent;	/* (o) */
 	struct pfs_node	*pn_nodes;	/* (o) */
+	struct pfs_node	*pn_last_node;	/* (o) */
 	struct pfs_node	*pn_next;	/* (p) */
 };

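The new pn_last_node field is the classic tail-pointer optimization; a generic sketch of the idea (not the pseudofs code itself):

    struct node  { struct node *next; };
    struct nlist { struct node *head, *tail; };

    static void
    nlist_append(struct nlist *l, struct node *n)
    {
            n->next = NULL;
            if (l->head == NULL)
                    l->head = n;            /* first element */
            else
                    l->tail->next = n;      /* O(1), no list walk */
            l->tail = n;
    }

Appending becomes O(1); the cost, as the pfs_detach_node() hunk shows, is fixing up the tail pointer when the last element is removed.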
@ -190,8 +190,6 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, enum vtype type,
|
||||
/* If the root directory of the 'tmp' file system is not yet
|
||||
* allocated, this must be the request to do it. */
|
||||
MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
|
||||
KASSERT(tmp->tm_root == NULL || mp->mnt_writeopcount > 0,
|
||||
("creating node not under vn_start_write"));
|
||||
|
||||
MPASS(IFF(type == VLNK, target != NULL));
|
||||
MPASS(IFF(type == VBLK || type == VCHR, rdev != VNOVAL));
|
||||
|
@@ -299,3 +299,35 @@ rangelock_trywlock(struct rangelock *lock, off_t start, off_t end,

 	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk, true));
 }
+
+#ifdef INVARIANT_SUPPORT
+void
+_rangelock_cookie_assert(void *cookie, int what, const char *file, int line)
+{
+	struct rl_q_entry *entry;
+	int flags;
+
+	MPASS(cookie != NULL);
+	entry = cookie;
+	flags = entry->rl_q_flags;
+	switch (what) {
+	case RCA_LOCKED:
+		if ((flags & RL_LOCK_GRANTED) == 0)
+			panic("rangelock not held @ %s:%d\n", file, line);
+		break;
+	case RCA_RLOCKED:
+		if ((flags & (RL_LOCK_GRANTED | RL_LOCK_READ)) !=
+		    (RL_LOCK_GRANTED | RL_LOCK_READ))
+			panic("rangelock not rlocked @ %s:%d\n", file, line);
+		break;
+	case RCA_WLOCKED:
+		if ((flags & (RL_LOCK_GRANTED | RL_LOCK_WRITE)) !=
+		    (RL_LOCK_GRANTED | RL_LOCK_WRITE))
+			panic("rangelock not wlocked @ %s:%d\n", file, line);
+		break;
+	default:
+		panic("Unknown rangelock assertion: %d @ %s:%d", what, file,
+		    line);
+	}
+}
+#endif	/* INVARIANT_SUPPORT */
@@ -131,15 +131,19 @@ SYSINIT(dpcpu, SI_SUB_KLD, SI_ORDER_FIRST, dpcpu_startup, NULL);

 /*
  * UMA_PCPU_ZONE zones, that are available for all kernel
- * consumers. Right now 64 bit zone is used for counter(9).
+ * consumers. Right now 64 bit zone is used for counter(9)
+ * and int zone is used for mount point counters.
  */

+uma_zone_t pcpu_zone_int;
 uma_zone_t pcpu_zone_64;

 static void
 pcpu_zones_startup(void)
 {

+	pcpu_zone_int = uma_zcreate("int pcpu", sizeof(int),
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 	pcpu_zone_64 = uma_zcreate("64 pcpu", sizeof(uint64_t),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_PCPU);
 }
@@ -601,17 +601,24 @@ vop_stdgetwritemount(ap)
 	 */
 	vp = ap->a_vp;
 	mp = vp->v_mount;
-	if (mp == NULL)
-		goto out;
-	MNT_ILOCK(mp);
-	if (mp != vp->v_mount) {
-		MNT_IUNLOCK(mp);
-		mp = NULL;
-		goto out;
+	if (mp == NULL) {
+		*(ap->a_mpp) = NULL;
+		return (0);
+	}
+	if (vfs_op_thread_enter(mp)) {
+		if (mp == vp->v_mount)
+			vfs_mp_count_add_pcpu(mp, ref, 1);
+		else
+			mp = NULL;
+		vfs_op_thread_exit(mp);
+	} else {
+		MNT_ILOCK(mp);
+		if (mp == vp->v_mount)
+			MNT_REF(mp);
+		else
+			mp = NULL;
+		MNT_IUNLOCK(mp);
 	}
-	MNT_REF(mp);
-	MNT_IUNLOCK(mp);
-out:
 	*(ap->a_mpp) = mp;
 	return (0);
 }
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");

 #include <sys/param.h>
 #include <sys/conf.h>
+#include <sys/smp.h>
 #include <sys/eventhandler.h>
 #include <sys/fcntl.h>
 #include <sys/jail.h>
@@ -123,6 +124,16 @@ mount_init(void *mem, int size, int flags)
 	mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
 	mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
 	lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
+	mp->mnt_thread_in_ops_pcpu = uma_zalloc_pcpu(pcpu_zone_int,
+	    M_WAITOK | M_ZERO);
+	mp->mnt_ref_pcpu = uma_zalloc_pcpu(pcpu_zone_int,
+	    M_WAITOK | M_ZERO);
+	mp->mnt_lockref_pcpu = uma_zalloc_pcpu(pcpu_zone_int,
+	    M_WAITOK | M_ZERO);
+	mp->mnt_writeopcount_pcpu = uma_zalloc_pcpu(pcpu_zone_int,
+	    M_WAITOK | M_ZERO);
+	mp->mnt_ref = 0;
+	mp->mnt_vfs_ops = 1;
 	return (0);
 }

@@ -132,6 +143,10 @@ mount_fini(void *mem, int size)
 	struct mount *mp;

 	mp = (struct mount *)mem;
+	uma_zfree_pcpu(pcpu_zone_int, mp->mnt_writeopcount_pcpu);
+	uma_zfree_pcpu(pcpu_zone_int, mp->mnt_lockref_pcpu);
+	uma_zfree_pcpu(pcpu_zone_int, mp->mnt_ref_pcpu);
+	uma_zfree_pcpu(pcpu_zone_int, mp->mnt_thread_in_ops_pcpu);
 	lockdestroy(&mp->mnt_explock);
 	mtx_destroy(&mp->mnt_listmtx);
 	mtx_destroy(&mp->mnt_mtx);
@@ -445,6 +460,12 @@ vfs_ref(struct mount *mp)
 {

 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
+	if (vfs_op_thread_enter(mp)) {
+		vfs_mp_count_add_pcpu(mp, ref, 1);
+		vfs_op_thread_exit(mp);
+		return;
+	}
+
 	MNT_ILOCK(mp);
 	MNT_REF(mp);
 	MNT_IUNLOCK(mp);
@@ -455,6 +476,12 @@ vfs_rel(struct mount *mp)
 {

 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
+	if (vfs_op_thread_enter(mp)) {
+		vfs_mp_count_sub_pcpu(mp, ref, 1);
+		vfs_op_thread_exit(mp);
+		return;
+	}
+
 	MNT_ILOCK(mp);
 	MNT_REL(mp);
 	MNT_IUNLOCK(mp);
@@ -478,7 +505,12 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp, const char *fspath,
 	mp->mnt_activevnodelistsize = 0;
 	TAILQ_INIT(&mp->mnt_tmpfreevnodelist);
 	mp->mnt_tmpfreevnodelistsize = 0;
-	mp->mnt_ref = 0;
+	if (mp->mnt_ref != 0 || mp->mnt_lockref != 0 ||
+	    mp->mnt_writeopcount != 0)
+		panic("%s: non-zero counters on new mp %p\n", __func__, mp);
+	if (mp->mnt_vfs_ops != 1)
+		panic("%s: vfs_ops should be 1 but %d found\n", __func__,
+		    mp->mnt_vfs_ops);
 	(void) vfs_busy(mp, MBF_NOWAIT);
 	atomic_add_acq_int(&vfsp->vfc_refcount, 1);
 	mp->mnt_op = vfsp->vfc_vfsops;
@@ -507,6 +539,11 @@ void
 vfs_mount_destroy(struct mount *mp)
 {

+	if (mp->mnt_vfs_ops == 0)
+		panic("%s: entered with zero vfs_ops\n", __func__);
+
+	vfs_assert_mount_counters(mp);
+
 	MNT_ILOCK(mp);
 	mp->mnt_kern_flag |= MNTK_REFEXPIRE;
 	if (mp->mnt_kern_flag & MNTK_MWAIT) {
@@ -540,6 +577,11 @@ vfs_mount_destroy(struct mount *mp)
 	if (mp->mnt_lockref != 0)
 		panic("vfs_mount_destroy: nonzero lock refcount");
 	MNT_IUNLOCK(mp);
+
+	if (mp->mnt_vfs_ops != 1)
+		panic("%s: vfs_ops should be 1 but %d found\n", __func__,
+		    mp->mnt_vfs_ops);
+
 	if (mp->mnt_vnodecovered != NULL)
 		vrele(mp->mnt_vnodecovered);
 #ifdef MAC
@@ -951,6 +993,7 @@ vfs_domount_first(
 	vrele(newdp);
 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
 		vfs_allocate_syncvnode(mp);
+	vfs_op_exit(mp);
 	vfs_unbusy(mp);
 	return (0);
 }
@@ -1019,6 +1062,8 @@ vfs_domount_update(
 	VI_UNLOCK(vp);
 	VOP_UNLOCK(vp, 0);

+	vfs_op_enter(mp);
+
 	MNT_ILOCK(mp);
 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
 		MNT_IUNLOCK(mp);
@@ -1100,6 +1145,7 @@ vfs_domount_update(
 	else
 		vfs_deallocate_syncvnode(mp);
 end:
+	vfs_op_exit(mp);
 	vfs_unbusy(mp);
 	VI_LOCK(vp);
 	vp->v_iflag &= ~VI_MOUNT;
@@ -1328,6 +1374,7 @@ dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
 		mp->mnt_kern_flag &= ~MNTK_MWAIT;
 		wakeup(mp);
 	}
+	vfs_op_exit_locked(mp);
 	MNT_IUNLOCK(mp);
 	if (coveredvp != NULL) {
 		VOP_UNLOCK(coveredvp, 0);
@@ -1336,6 +1383,170 @@ dounmount_cleanup(struct mount *mp, struct vnode *coveredvp, int mntkflags)
 	vn_finished_write(mp);
 }

+/*
+ * There are various reference counters associated with the mount point.
+ * Normally it is permitted to modify them without taking the mnt ilock,
+ * but this behavior can be temporarily disabled if stable value is needed
+ * or callers are expected to block (e.g. to not allow new users during
+ * forced unmount).
+ */
+void
+vfs_op_enter(struct mount *mp)
+{
+	int cpu;
+
+	MNT_ILOCK(mp);
+	mp->mnt_vfs_ops++;
+	if (mp->mnt_vfs_ops > 1) {
+		MNT_IUNLOCK(mp);
+		return;
+	}
+	/*
+	 * Paired with a fence in vfs_op_thread_enter(). See the comment
+	 * above it for details.
+	 */
+	atomic_thread_fence_seq_cst();
+	vfs_op_barrier_wait(mp);
+	/*
+	 * Paired with a fence in vfs_op_thread_exit().
+	 */
+	atomic_thread_fence_acq();
+	CPU_FOREACH(cpu) {
+		mp->mnt_ref +=
+		    zpcpu_replace_cpu(mp->mnt_ref_pcpu, 0, cpu);
+		mp->mnt_lockref +=
+		    zpcpu_replace_cpu(mp->mnt_lockref_pcpu, 0, cpu);
+		mp->mnt_writeopcount +=
+		    zpcpu_replace_cpu(mp->mnt_writeopcount_pcpu, 0, cpu);
+	}
+	MNT_IUNLOCK(mp);
+	vfs_assert_mount_counters(mp);
+}
+
+void
+vfs_op_exit_locked(struct mount *mp)
+{
+
+	mtx_assert(MNT_MTX(mp), MA_OWNED);
+
+	if (mp->mnt_vfs_ops <= 0)
+		panic("%s: invalid vfs_ops count %d for mp %p\n",
+		    __func__, mp->mnt_vfs_ops, mp);
+	mp->mnt_vfs_ops--;
+}
+
+void
+vfs_op_exit(struct mount *mp)
+{
+
+	MNT_ILOCK(mp);
+	vfs_op_exit_locked(mp);
+	MNT_IUNLOCK(mp);
+}
+
+/*
+ * It is assumed the caller already posted at least an acquire barrier.
+ */
+void
+vfs_op_barrier_wait(struct mount *mp)
+{
+	int *in_op;
+	int cpu;
+
+	CPU_FOREACH(cpu) {
+		in_op = zpcpu_get_cpu(mp->mnt_thread_in_ops_pcpu, cpu);
+		while (atomic_load_int(in_op))
+			cpu_spinwait();
+	}
+}
+
+#ifdef DIAGNOSTIC
+void
+vfs_assert_mount_counters(struct mount *mp)
+{
+	int cpu;
+
+	if (mp->mnt_vfs_ops == 0)
+		return;
+
+	CPU_FOREACH(cpu) {
+		if (*(int *)zpcpu_get_cpu(mp->mnt_ref_pcpu, cpu) != 0 ||
+		    *(int *)zpcpu_get_cpu(mp->mnt_lockref_pcpu, cpu) != 0 ||
+		    *(int *)zpcpu_get_cpu(mp->mnt_writeopcount_pcpu, cpu) != 0)
+			vfs_dump_mount_counters(mp);
+	}
+}
+
+void
+vfs_dump_mount_counters(struct mount *mp)
+{
+	int cpu, *count;
+	int ref, lockref, writeopcount;
+
+	printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops);
+
+	printf("        ref : ");
+	ref = mp->mnt_ref;
+	CPU_FOREACH(cpu) {
+		count = zpcpu_get_cpu(mp->mnt_ref_pcpu, cpu);
+		printf("%d ", *count);
+		ref += *count;
+	}
+	printf("\n");
+	printf("    lockref : ");
+	lockref = mp->mnt_lockref;
+	CPU_FOREACH(cpu) {
+		count = zpcpu_get_cpu(mp->mnt_lockref_pcpu, cpu);
+		printf("%d ", *count);
+		lockref += *count;
+	}
+	printf("\n");
+	printf("writeopcount: ");
+	writeopcount = mp->mnt_writeopcount;
+	CPU_FOREACH(cpu) {
+		count = zpcpu_get_cpu(mp->mnt_writeopcount_pcpu, cpu);
+		printf("%d ", *count);
+		writeopcount += *count;
+	}
+	printf("\n");
+
+	printf("counter       struct total\n");
+	printf("ref          %-5d  %-5d\n", mp->mnt_ref, ref);
+	printf("lockref      %-5d  %-5d\n", mp->mnt_lockref, lockref);
+	printf("writeopcount %-5d  %-5d\n", mp->mnt_writeopcount, writeopcount);
+
+	panic("invalid counts on struct mount");
+}
+#endif
+
+int
+vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which)
+{
+	int *base, *pcpu;
+	int cpu, sum;
+
+	switch (which) {
+	case MNT_COUNT_REF:
+		base = &mp->mnt_ref;
+		pcpu = mp->mnt_ref_pcpu;
+		break;
+	case MNT_COUNT_LOCKREF:
+		base = &mp->mnt_lockref;
+		pcpu = mp->mnt_lockref_pcpu;
+		break;
+	case MNT_COUNT_WRITEOPCOUNT:
+		base = &mp->mnt_writeopcount;
+		pcpu = mp->mnt_writeopcount_pcpu;
+		break;
+	}
+
+	sum = *base;
+	CPU_FOREACH(cpu) {
+		sum += *(int *)zpcpu_get_cpu(pcpu, cpu);
+	}
+	return (sum);
+}
+
 /*
  * Do the actual filesystem unmount.
  */
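An illustrative model of the split counters managed above (plain C, not kernel code): each CPU keeps a local delta that may be negative, and the true value is the base plus all deltas, which is what vfs_mount_fetch_counter() computes and what vfs_op_enter() folds back into the base:

    static int
    counter_fetch(int base, const int *pcpu_delta, int ncpus)
    {
            int cpu, sum;

            sum = base;
            for (cpu = 0; cpu < ncpus; cpu++)
                    sum += pcpu_delta[cpu];   /* deltas may be negative */
            return (sum);
    }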
@@ -1379,6 +1590,8 @@ dounmount(struct mount *mp, int flags, struct thread *td)
 		return (error);
 	}

+	vfs_op_enter(mp);
+
 	vn_start_write(NULL, &mp, V_WAIT | V_MNTREF);
 	MNT_ILOCK(mp);
 	if ((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 ||
@@ -1469,6 +1682,7 @@ dounmount(struct mount *mp, int flags, struct thread *td)
 		mp->mnt_kern_flag &= ~MNTK_MWAIT;
 		wakeup(mp);
 	}
+	vfs_op_exit_locked(mp);
 	MNT_IUNLOCK(mp);
 	if (coveredvp)
 		VOP_UNLOCK(coveredvp, 0);
@@ -273,6 +273,7 @@ vfs_mountroot_devfs(struct thread *td, struct mount **mpp)

 		*mpp = mp;
 		rootdevmp = mp;
+		vfs_op_exit(mp);
 	}

 	set_rootvnode();
@@ -641,7 +641,20 @@ vfs_busy(struct mount *mp, int flags)
 	MPASS((flags & ~MBF_MASK) == 0);
 	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);

+	if (vfs_op_thread_enter(mp)) {
+		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
+		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
+		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
+		vfs_mp_count_add_pcpu(mp, ref, 1);
+		vfs_mp_count_add_pcpu(mp, lockref, 1);
+		vfs_op_thread_exit(mp);
+		if (flags & MBF_MNTLSTLOCK)
+			mtx_unlock(&mountlist_mtx);
+		return (0);
+	}
+
 	MNT_ILOCK(mp);
+	vfs_assert_mount_counters(mp);
 	MNT_REF(mp);
 	/*
 	 * If mount point is currently being unmounted, sleep until the
@@ -684,13 +697,30 @@ vfs_busy(struct mount *mp, int flags)
 void
 vfs_unbusy(struct mount *mp)
 {
+	int c;

 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);

+	if (vfs_op_thread_enter(mp)) {
+		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
+		vfs_mp_count_sub_pcpu(mp, lockref, 1);
+		vfs_mp_count_sub_pcpu(mp, ref, 1);
+		vfs_op_thread_exit(mp);
+		return;
+	}
+
 	MNT_ILOCK(mp);
+	vfs_assert_mount_counters(mp);
 	MNT_REL(mp);
-	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
-	mp->mnt_lockref--;
-	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
+	c = --mp->mnt_lockref;
+	if (mp->mnt_vfs_ops == 0) {
+		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
+		MNT_IUNLOCK(mp);
+		return;
+	}
+	if (c < 0)
+		vfs_dump_mount_counters(mp);
+	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
 		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
 		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
 		mp->mnt_kern_flag &= ~MNTK_DRAINING;
@ -4017,21 +4047,25 @@ DB_SHOW_COMMAND(mount, db_show_mount)
|
||||
if (jailed(mp->mnt_cred))
|
||||
db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
|
||||
db_printf(" }\n");
|
||||
db_printf(" mnt_ref = %d\n", mp->mnt_ref);
|
||||
db_printf(" mnt_ref = %d (with %d in the struct)\n",
|
||||
vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref);
|
||||
db_printf(" mnt_gen = %d\n", mp->mnt_gen);
|
||||
db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
|
||||
db_printf(" mnt_activevnodelistsize = %d\n",
|
||||
mp->mnt_activevnodelistsize);
|
||||
db_printf(" mnt_writeopcount = %d\n", mp->mnt_writeopcount);
|
||||
db_printf(" mnt_writeopcount = %d (with %d in the struct)\n",
|
||||
vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount);
|
||||
db_printf(" mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
|
||||
db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max);
|
||||
db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed);
|
||||
db_printf(" mnt_lockref = %d\n", mp->mnt_lockref);
|
||||
db_printf(" mnt_lockref = %d (with %d in the struct)\n",
|
||||
vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref);
|
||||
db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
|
||||
db_printf(" mnt_secondary_accwrites = %d\n",
|
||||
mp->mnt_secondary_accwrites);
|
||||
db_printf(" mnt_gjprovider = %s\n",
|
||||
mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
|
||||
db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops);
|
||||
|
||||
db_printf("\n\nList of active vnodes\n");
|
||||
TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
|
||||
|
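
The two "%d (with %d in the struct)" lines above imply the split bookkeeping: the true count is the struct field plus every CPU's per-CPU slice. A conceptual sketch of the summation (assumed here, not the committed vfs_mount_fetch_counter() body):

static int
example_fetch_ref(struct mount *mp)
{
	int sum, cpu;

	sum = mp->mnt_ref;		/* slowpath updates land in the struct */
	CPU_FOREACH(cpu)		/* fastpath updates land in per-CPU slices */
		sum += *(int *)zpcpu_get_cpu(mp->mnt_ref_pcpu, cpu);
	return (sum);
}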
@ -1621,11 +1621,23 @@ vn_suspendable(struct mount *mp)
* suspension is over, and then proceed.
*/
static int
vn_start_write_locked(struct mount *mp, int flags)
vn_start_write_refed(struct mount *mp, int flags, bool mplocked)
{
int error, mflags;

mtx_assert(MNT_MTX(mp), MA_OWNED);
if (__predict_true(!mplocked) && (flags & V_XSLEEP) == 0 &&
vfs_op_thread_enter(mp)) {
MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
vfs_mp_count_add_pcpu(mp, writeopcount, 1);
vfs_op_thread_exit(mp);
return (0);
}

if (mplocked)
mtx_assert(MNT_MTX(mp), MA_OWNED);
else
MNT_ILOCK(mp);

error = 0;

/*

@ -1694,11 +1706,10 @@ vn_start_write(struct vnode *vp, struct mount **mpp, int flags)
* refcount for the provided mountpoint too, in order to
* emulate a vfs_ref().
*/
MNT_ILOCK(mp);
if (vp == NULL && (flags & V_MNTREF) == 0)
MNT_REF(mp);
vfs_ref(mp);

return (vn_start_write_locked(mp, flags));
return (vn_start_write_refed(mp, flags, false));
}

/*

@ -1780,15 +1791,30 @@ vn_start_secondary_write(struct vnode *vp, struct mount **mpp, int flags)
void
vn_finished_write(struct mount *mp)
{
int c;

if (mp == NULL || !vn_suspendable(mp))
return;

if (vfs_op_thread_enter(mp)) {
vfs_mp_count_sub_pcpu(mp, writeopcount, 1);
vfs_mp_count_sub_pcpu(mp, ref, 1);
vfs_op_thread_exit(mp);
return;
}

MNT_ILOCK(mp);
vfs_assert_mount_counters(mp);
MNT_REL(mp);
mp->mnt_writeopcount--;
if (mp->mnt_writeopcount < 0)
panic("vn_finished_write: neg cnt");
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 &&
mp->mnt_writeopcount <= 0)
c = --mp->mnt_writeopcount;
if (mp->mnt_vfs_ops == 0) {
MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
MNT_IUNLOCK(mp);
return;
}
if (c < 0)
vfs_dump_mount_counters(mp);
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0 && c == 0)
wakeup(&mp->mnt_writeopcount);
MNT_IUNLOCK(mp);
}

@ -1827,8 +1853,12 @@ vfs_write_suspend(struct mount *mp, int flags)

MPASS(vn_suspendable(mp));

vfs_op_enter(mp);

MNT_ILOCK(mp);
vfs_assert_mount_counters(mp);
if (mp->mnt_susp_owner == curthread) {
vfs_op_exit_locked(mp);
MNT_IUNLOCK(mp);
return (EALREADY);
}

@ -1845,6 +1875,7 @@ vfs_write_suspend(struct mount *mp, int flags)
*/
if ((flags & VS_SKIP_UNMOUNT) != 0 &&
(mp->mnt_kern_flag & MNTK_UNMOUNT) != 0) {
vfs_op_exit_locked(mp);
MNT_IUNLOCK(mp);
return (EBUSY);
}

@ -1856,8 +1887,10 @@ vfs_write_suspend(struct mount *mp, int flags)
MNT_MTX(mp), (PUSER - 1)|PDROP, "suspwt", 0);
else
MNT_IUNLOCK(mp);
if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0)
if ((error = VFS_SYNC(mp, MNT_SUSPEND)) != 0) {
vfs_write_resume(mp, 0);
vfs_op_exit(mp);
}
return (error);
}

@ -1886,9 +1919,10 @@ vfs_write_resume(struct mount *mp, int flags)
MNT_IUNLOCK(mp);
if ((flags & VR_NO_SUSPCLR) == 0)
VFS_SUSP_CLEAN(mp);
vfs_op_exit(mp);
} else if ((flags & VR_START_WRITE) != 0) {
MNT_REF(mp);
vn_start_write_locked(mp, 0);
vn_start_write_refed(mp, 0, true);
} else {
MNT_IUNLOCK(mp);
}
@ -51,7 +51,13 @@
#else
#define PCPU_MD_MIPS32_FIELDS \
PCPU_MD_COMMON_FIELDS \
char __pad[125]
pt_entry_t *pc_cmap1_ptep; /* PTE for copy window 1 KVA */ \
pt_entry_t *pc_cmap2_ptep; /* PTE for copy window 2 KVA */ \
vm_offset_t pc_cmap1_addr; /* KVA page for copy window 1 */ \
vm_offset_t pc_cmap2_addr; /* KVA page for copy window 2 */ \
vm_offset_t pc_qmap_addr; /* KVA page for temporary mappings */ \
pt_entry_t *pc_qmap_ptep; /* PTE for temporary mapping KVA */ \
char __pad[101]
#endif

#ifdef __mips_n64
@ -138,6 +138,8 @@ pd_entry_t *kernel_segmap;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */

static int need_local_mappings;

static int nkpt;
unsigned pmap_max_asid; /* max ASID supported by the system */

@ -187,104 +189,96 @@ static void pmap_invalidate_range_action(void *arg);
static void pmap_update_page_action(void *arg);

#ifndef __mips_n64

static vm_offset_t crashdumpva;

/*
* This structure is for high memory (memory above 512Meg in 32 bit) support.
* These functions are for high memory (memory above 512Meg in 32 bit) support.
* The highmem area does not have a KSEG0 mapping, and we need a mechanism to
* do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
*
* At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
* access a highmem physical address on a CPU, we map the physical address to
* the reserved virtual address for the CPU in the kernel pagetable. This is
* done with interrupts disabled (although a spinlock and sched_pin would be
* sufficient).
* the reserved virtual address for the CPU in the kernel pagetable.
*/
struct local_sysmaps {
vm_offset_t base;
uint32_t saved_intr;
uint16_t valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];

static void
pmap_init_reserved_pages(void)
{
struct pcpu *pc;
vm_offset_t pages;
int i;

if (need_local_mappings == 0)
return;

CPU_FOREACH(i) {
pc = pcpu_find(i);
/*
* Skip if the mapping has already been initialized,
* i.e. this is the BSP.
*/
if (pc->pc_cmap1_addr != 0)
continue;
pages = kva_alloc(PAGE_SIZE * 3);
if (pages == 0)
panic("%s: unable to allocate KVA", __func__);
pc->pc_cmap1_ptep = pmap_pte(kernel_pmap, pages);
pc->pc_cmap2_ptep = pmap_pte(kernel_pmap, pages + PAGE_SIZE);
pc->pc_qmap_ptep =
pmap_pte(kernel_pmap, pages + (PAGE_SIZE * 2));
pc->pc_cmap1_addr = pages;
pc->pc_cmap2_addr = pages + PAGE_SIZE;
pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
}
}
SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

static __inline void
pmap_alloc_lmem_map(void)
{
int i;

for (i = 0; i < MAXCPU; i++) {
sysmap_lmem[i].base = virtual_avail;
virtual_avail += PAGE_SIZE * 2;
sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
}
PCPU_SET(cmap1_addr, virtual_avail);
PCPU_SET(cmap2_addr, virtual_avail + PAGE_SIZE);
PCPU_SET(cmap1_ptep, pmap_pte(kernel_pmap, virtual_avail));
PCPU_SET(cmap2_ptep, pmap_pte(kernel_pmap, virtual_avail + PAGE_SIZE));
PCPU_SET(qmap_addr, virtual_avail + (2 * PAGE_SIZE));
PCPU_SET(qmap_ptep, pmap_pte(kernel_pmap, virtual_avail + (2 * PAGE_SIZE)));
crashdumpva = virtual_avail + (3 * PAGE_SIZE);
virtual_avail += PAGE_SIZE * 4;
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
struct local_sysmaps *sysm;
pt_entry_t *pte, npte;
vm_offset_t va;
uint32_t intr;
int cpu;

intr = intr_disable();
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
sysm->saved_intr = intr;
va = sysm->base;
npte = TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
pte = pmap_pte(kernel_pmap, va);
*pte = npte;
sysm->valid1 = 1;
return (va);
critical_enter();
*PCPU_GET(cmap1_ptep) =
TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
return (PCPU_GET(cmap1_addr));
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
struct local_sysmaps *sysm;
pt_entry_t *pte, npte;
vm_offset_t va1, va2;
uint32_t intr;
int cpu;

intr = intr_disable();
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
sysm->saved_intr = intr;
va1 = sysm->base;
va2 = sysm->base + PAGE_SIZE;
npte = TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
pte = pmap_pte(kernel_pmap, va1);
*pte = npte;
npte = TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
pte = pmap_pte(kernel_pmap, va2);
*pte = npte;
sysm->valid1 = 1;
sysm->valid2 = 1;
return (va1);
critical_enter();
*PCPU_GET(cmap1_ptep) =
TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
*PCPU_GET(cmap2_ptep) =
TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G;
return (PCPU_GET(cmap1_addr));
}

static __inline void
pmap_lmem_unmap(void)
{
struct local_sysmaps *sysm;
pt_entry_t *pte;
int cpu;

cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = PTE_G;
tlb_invalidate_address(kernel_pmap, sysm->base);
sysm->valid1 = 0;
if (sysm->valid2) {
pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
*pte = PTE_G;
tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
sysm->valid2 = 0;
}
intr_restore(sysm->saved_intr);
*PCPU_GET(cmap1_ptep) = PTE_G;
tlb_invalidate_address(kernel_pmap, PCPU_GET(cmap1_addr));
if (*PCPU_GET(cmap2_ptep) != PTE_G) {
*PCPU_GET(cmap2_ptep) = PTE_G;
tlb_invalidate_address(kernel_pmap, PCPU_GET(cmap2_addr));
}
critical_exit();
}
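
A sketch of a typical caller of the new copy-window helpers (a pmap_zero_page-style user; assumed code, not part of the diff). The map call enters a critical section and installs the PTE; the unmap call clears it and leaves the critical section:

static void
example_zero_high_page(vm_paddr_t phys)
{
	vm_offset_t va;

	va = pmap_lmem_map1(phys);	/* phys now visible at this CPU's cmap1 window */
	bzero((void *)va, PAGE_SIZE);
	pmap_lmem_unmap();		/* invalidate the window, critical_exit() */
}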

#else /* __mips_n64 */

static __inline void

@ -495,7 +489,6 @@ void
pmap_bootstrap(void)
{
int i;
int need_local_mappings = 0;

/* Sort. */
again:

@ -588,9 +581,9 @@ pmap_bootstrap(void)
printf("pcpu is available at virtual address %p.\n", pcpup);
#endif

pmap_create_kernel_pagetable();
if (need_local_mappings)
pmap_alloc_lmem_map();
pmap_create_kernel_pagetable();
pmap_max_asid = VMNUM_PIDS;
mips_wr_entryhi(0);
mips_wr_pagemask(0);

@ -2381,28 +2374,16 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
va = MIPS_PHYS_TO_DIRECT(pa);
} else {
#ifndef __mips_n64 /* XXX : to be converted to new style */
int cpu;
register_t intr;
struct local_sysmaps *sysm;
pt_entry_t *pte, npte;

/* If this is used other than for dumps, we may need to leave
* interrupts disabled on return. If crash dumps don't work when
* we get to this point, we might want to consider this (leaving things
* disabled as a starting point ;-)
*/
intr = intr_disable();
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
pte = pmap_pte(kernel_pmap, crashdumpva);

/* Since this is for the debugger, no locks or any other fun */
npte = TLBLO_PA_TO_PFN(pa) | PTE_C_CACHE | PTE_D | PTE_V |
PTE_G;
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = npte;
sysm->valid1 = 1;
pmap_update_page(kernel_pmap, sysm->base, npte);
va = sysm->base;
intr_restore(intr);
pmap_update_page(kernel_pmap, crashdumpva, npte);
va = crashdumpva;
#endif
}
return ((void *)va);

@ -2411,29 +2392,17 @@ pmap_kenter_temporary(vm_paddr_t pa, int i)
void
pmap_kenter_temporary_free(vm_paddr_t pa)
{
#ifndef __mips_n64 /* XXX : to be converted to new style */
int cpu;
register_t intr;
struct local_sysmaps *sysm;
#endif

#ifndef __mips_n64 /* XXX : to be converted to new style */
pt_entry_t *pte;
#endif
if (MIPS_DIRECT_MAPPABLE(pa)) {
/* nothing to do for this case */
return;
}
#ifndef __mips_n64 /* XXX : to be converted to new style */
cpu = PCPU_GET(cpuid);
sysm = &sysmap_lmem[cpu];
if (sysm->valid1) {
pt_entry_t *pte;

intr = intr_disable();
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = PTE_G;
pmap_invalidate_page(kernel_pmap, sysm->base);
intr_restore(intr);
sysm->valid1 = 0;
}
pte = pmap_pte(kernel_pmap, crashdumpva);
*pte = PTE_G;
pmap_invalidate_page(kernel_pmap, crashdumpva);
#endif
}

@ -2687,8 +2656,8 @@ pmap_quick_enter_page(vm_page_t m)
#if defined(__mips_n64)
return MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
#else
vm_offset_t qaddr;
vm_paddr_t pa;
struct local_sysmaps *sysm;
pt_entry_t *pte, npte;

pa = VM_PAGE_TO_PHYS(m);

@ -2700,17 +2669,16 @@ pmap_quick_enter_page(vm_page_t m)
return (MIPS_PHYS_TO_DIRECT(pa));
}
critical_enter();
sysm = &sysmap_lmem[PCPU_GET(cpuid)];
qaddr = PCPU_GET(qmap_addr);
pte = PCPU_GET(qmap_ptep);

KASSERT(sysm->valid1 == 0, ("pmap_quick_enter_page: PTE busy"));
KASSERT(*pte == PTE_G, ("pmap_quick_enter_page: PTE busy"));

pte = pmap_pte(kernel_pmap, sysm->base);
npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G;
PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
*pte = npte;
sysm->valid1 = 1;

return (sysm->base);
return (qaddr);
#endif
}

@ -2720,23 +2688,20 @@ pmap_quick_remove_page(vm_offset_t addr)
mips_dcache_wbinv_range(addr, PAGE_SIZE);

#if !defined(__mips_n64)
struct local_sysmaps *sysm;
pt_entry_t *pte;

if (addr >= MIPS_KSEG0_START && addr < MIPS_KSEG0_END)
return;

sysm = &sysmap_lmem[PCPU_GET(cpuid)];
pte = PCPU_GET(qmap_ptep);

KASSERT(sysm->valid1 != 0,
KASSERT(*pte != PTE_G,
("pmap_quick_remove_page: PTE not in use"));
KASSERT(sysm->base == addr,
KASSERT(PCPU_GET(qmap_addr) == addr,
("pmap_quick_remove_page: invalid address"));

pte = pmap_pte(kernel_pmap, addr);
*pte = PTE_G;
tlb_invalidate_address(kernel_pmap, addr);
sysm->valid1 = 0;
critical_exit();
#endif
}
@ -132,16 +132,16 @@ sctp_delayed_cksum(struct mbuf *m, uint32_t offset)
SCTP_STAT_INCR(sctps_sendswcrc);
offset += offsetof(struct sctphdr, checksum);

if (offset + sizeof(uint32_t) > (uint32_t)(m->m_len)) {
if (offset + sizeof(uint32_t) > (uint32_t)(m->m_pkthdr.len)) {
#ifdef INVARIANTS
panic("sctp_delayed_cksum(): m->m_len: %d, offset: %u.",
m->m_len, offset);
panic("sctp_delayed_cksum(): m->m_pkthdr.len: %d, offset: %u.",
m->m_pkthdr.len, offset);
#else
SCTP_PRINTF("sctp_delayed_cksum(): m->m_len: %d, offset: %u.\n",
m->m_len, offset);
SCTP_PRINTF("sctp_delayed_cksum(): m->m_pkthdr.len: %d, offset: %u.\n",
m->m_pkthdr.len, offset);
#endif
return;
}
*(uint32_t *)(m->m_data + offset) = checksum;
m_copyback(m, (int)offset, (int)sizeof(uint32_t), (caddr_t)&checksum);
}
#endif
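
The hunk above swaps a first-mbuf-only bound (m_len) for the whole-chain bound (m_pkthdr.len), and replaces the direct store with m_copyback(), which can write a field that crosses mbuf boundaries. A hedged sketch of the distinction (assumed helper, not part of the patch):

static void
store_field(struct mbuf *m, uint32_t offset, uint32_t value)
{

	if (offset + sizeof(value) <= (uint32_t)m->m_len) {
		/* Field is contiguous in the first mbuf: a direct store works. */
		memcpy(mtod(m, char *) + offset, &value, sizeof(value));
		return;
	}
	/* Otherwise the bytes may span mbufs; m_copyback() walks the chain. */
	m_copyback(m, (int)offset, (int)sizeof(value), (caddr_t)&value);
}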
@ -235,7 +235,7 @@ tcp_update_dsack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
saved_blks[n].start = mid_blk.start;
saved_blks[n++].end = mid_blk.end;
}
for (j = 0; (j < tp->rcv_numsacks) && (j < MAX_SACK_BLKS-1); j++) {
for (j = 0; (j < tp->rcv_numsacks) && (n < MAX_SACK_BLKS); j++) {
if (((SEQ_LT(tp->sackblks[j].end, mid_blk.start) ||
SEQ_GT(tp->sackblks[j].start, mid_blk.end)) &&
(SEQ_GT(tp->sackblks[j].start, tp->rcv_nxt))))
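
The old guard on the input index j could cut the scan short even when output slots remained; guarding on the output index n scans every input block while still bounding the writes into saved_blks. The general shape of the idiom, as a self-contained sketch (names are illustrative):

#define MAX_OUT 6

struct blk { int start, end; };

static int
copy_surviving(struct blk *out, const struct blk *in, int num_in,
    int (*survives)(const struct blk *))
{
	int j, n;

	/* Scan every input, but never write past the output array. */
	for (j = 0, n = 0; j < num_in && n < MAX_OUT; j++) {
		if (survives(&in[j]))
			out[n++] = in[j];
	}
	return (n);
}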
@ -71,6 +71,7 @@ options RACCT # Resource accounting framework
options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default
options RCTL # Resource limits
options SMP
options EARLY_AP_STARTUP
options INTRNG

# RISC-V SBI console
@ -37,10 +37,34 @@ __FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>

void
cpu_initclocks(void)
{
#ifdef EARLY_AP_STARTUP
struct thread *td;
int i;

td = curthread;
cpu_initclocks_bsp();
CPU_FOREACH(i) {
if (i == 0)
continue;
thread_lock(td);
sched_bind(td, i);
thread_unlock(td);
cpu_initclocks_ap();
}
thread_lock(td);
if (sched_is_bound(td))
sched_unbind(td);
thread_unlock(td);
#else
cpu_initclocks_bsp();
#endif
}
@ -257,8 +257,10 @@ init_secondary(uint64_t hart)
/* Enable software interrupts */
riscv_unmask_ipi();

#ifndef EARLY_AP_STARTUP
/* Start per-CPU event timers. */
cpu_initclocks_ap();
#endif

/* Enable external (PLIC) interrupts */
csr_set(sie, SIE_SEIE);
779
sys/sys/arb.h
Normal file
779
sys/sys/arb.h
Normal file
@ -0,0 +1,779 @@

/* $NetBSD: tree.h,v 1.8 2004/03/28 19:38:30 provos Exp $ */
/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
/* $FreeBSD$ */

/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef _SYS_ARB_H_
#define _SYS_ARB_H_

#include <sys/cdefs.h>

/* Array-based red-black trees. */

#define ARB_NULLIDX -1
#define ARB_NULLCOL -1

#define ARB_BLACK 0
#define ARB_RED 1

#define ARB_NEGINF -1
#define ARB_INF 1

#define ARB_HEAD(name, type, idxbits) \
struct name { \
int##idxbits##_t arb_curnodes; \
int##idxbits##_t arb_maxnodes; \
int##idxbits##_t arb_root_idx; \
int##idxbits##_t arb_free_idx; \
int##idxbits##_t arb_min_idx; \
int##idxbits##_t arb_max_idx; \
struct type arb_nodes[]; \
}
#define ARB8_HEAD(name, type) ARB_HEAD(name, type, 8)
#define ARB16_HEAD(name, type) ARB_HEAD(name, type, 16)
#define ARB32_HEAD(name, type) ARB_HEAD(name, type, 32)

#define ARB_ALLOCSIZE(head, maxn, x) \
(sizeof(*head) + (maxn) * sizeof(*x))

#define ARB_INITIALIZER(name, maxn) \
((struct name){ 0, maxn, ARB_NULLIDX, ARB_NULLIDX, \
ARB_NULLIDX, ARB_NULLIDX })

#define ARB_INIT(x, field, head, maxn) \
(head)->arb_curnodes = 0; \
(head)->arb_maxnodes = (maxn); \
(head)->arb_root_idx = (head)->arb_free_idx = \
(head)->arb_min_idx = (head)->arb_max_idx = ARB_NULLIDX; \
/* The ARB_RETURNFREE() puts all entries on the free list. */ \
ARB_ARRFOREACH_REVWCOND(x, field, head, \
ARB_RETURNFREE(head, x, field))

#define ARB_ENTRY(idxbits) \
struct { \
int##idxbits##_t arbe_parent_idx; \
int##idxbits##_t arbe_left_idx; \
int##idxbits##_t arbe_right_idx; \
int8_t arbe_color; \
}
#define ARB8_ENTRY() ARB_ENTRY(8)
#define ARB16_ENTRY() ARB_ENTRY(16)
#define ARB32_ENTRY() ARB_ENTRY(32)

#define ARB_ENTRYINIT(elm, field) do { \
(elm)->field.arbe_parent_idx = \
(elm)->field.arbe_left_idx = \
(elm)->field.arbe_right_idx = ARB_NULLIDX; \
(elm)->field.arbe_color = ARB_NULLCOL; \
} while (/*CONSTCOND*/ 0)

#define ARB_ELMTYPE(head) __typeof(&(head)->arb_nodes[0])
#define ARB_NODES(head) (head)->arb_nodes
#define ARB_MAXNODES(head) (head)->arb_maxnodes
#define ARB_CURNODES(head) (head)->arb_curnodes
#define ARB_EMPTY(head) ((head)->arb_curnodes == 0)
#define ARB_FULL(head) ((head)->arb_curnodes >= (head)->arb_maxnodes)
#define ARB_CNODE(head, idx) \
((((intptr_t)(idx) <= ARB_NULLIDX) || ((idx) >= ARB_MAXNODES(head))) ? \
NULL : ((const ARB_ELMTYPE(head))(ARB_NODES(head) + (idx))))
#define ARB_NODE(head, idx) \
(__DECONST(ARB_ELMTYPE(head), ARB_CNODE(head, idx)))
#define ARB_ROOT(head) ARB_NODE(head, ARB_ROOTIDX(head))
#define ARB_LEFT(head, elm, field) ARB_NODE(head, ARB_LEFTIDX(elm, field))
#define ARB_RIGHT(head, elm, field) ARB_NODE(head, ARB_RIGHTIDX(elm, field))
#define ARB_PARENT(head, elm, field) ARB_NODE(head, ARB_PARENTIDX(elm, field))
#define ARB_FREEIDX(head) (head)->arb_free_idx
#define ARB_ROOTIDX(head) (head)->arb_root_idx
#define ARB_MINIDX(head) (head)->arb_min_idx
#define ARB_MAXIDX(head) (head)->arb_max_idx
#define ARB_SELFIDX(head, elm) \
((elm) ? ((intptr_t)((((const uint8_t *)(elm)) - \
((const uint8_t *)ARB_NODES(head))) / sizeof(*(elm)))) : \
(intptr_t)ARB_NULLIDX)
#define ARB_LEFTIDX(elm, field) (elm)->field.arbe_left_idx
#define ARB_RIGHTIDX(elm, field) (elm)->field.arbe_right_idx
#define ARB_PARENTIDX(elm, field) (elm)->field.arbe_parent_idx
#define ARB_COLOR(elm, field) (elm)->field.arbe_color
#define ARB_PREVFREE(head, elm, field) \
ARB_NODE(head, ARB_PREVFREEIDX(elm, field))
#define ARB_PREVFREEIDX(elm, field) ARB_LEFTIDX(elm, field)
#define ARB_NEXTFREE(head, elm, field) \
ARB_NODE(head, ARB_NEXTFREEIDX(elm, field))
#define ARB_NEXTFREEIDX(elm, field) ARB_RIGHTIDX(elm, field)
#define ARB_ISFREE(elm, field) (ARB_COLOR(elm, field) == ARB_NULLCOL)

#define ARB_SET(head, elm, parent, field) do { \
ARB_PARENTIDX(elm, field) = \
parent ? ARB_SELFIDX(head, parent) : ARB_NULLIDX; \
ARB_LEFTIDX(elm, field) = ARB_RIGHTIDX(elm, field) = ARB_NULLIDX; \
ARB_COLOR(elm, field) = ARB_RED; \
} while (/*CONSTCOND*/ 0)

#define ARB_SET_BLACKRED(black, red, field) do { \
ARB_COLOR(black, field) = ARB_BLACK; \
ARB_COLOR(red, field) = ARB_RED; \
} while (/*CONSTCOND*/ 0)

#ifndef ARB_AUGMENT
#define ARB_AUGMENT(x) do {} while (0)
#endif

#define ARB_ROTATE_LEFT(head, elm, tmp, field) do { \
__typeof(ARB_RIGHTIDX(elm, field)) _tmpidx; \
(tmp) = ARB_RIGHT(head, elm, field); \
_tmpidx = ARB_RIGHTIDX(elm, field); \
ARB_RIGHTIDX(elm, field) = ARB_LEFTIDX(tmp, field); \
if (ARB_RIGHTIDX(elm, field) != ARB_NULLIDX) { \
ARB_PARENTIDX(ARB_LEFT(head, tmp, field), field) = \
ARB_SELFIDX(head, elm); \
} \
ARB_AUGMENT(elm); \
ARB_PARENTIDX(tmp, field) = ARB_PARENTIDX(elm, field); \
if (ARB_PARENTIDX(tmp, field) != ARB_NULLIDX) { \
if (ARB_SELFIDX(head, elm) == \
ARB_LEFTIDX(ARB_PARENT(head, elm, field), field)) \
ARB_LEFTIDX(ARB_PARENT(head, elm, field), \
field) = _tmpidx; \
else \
ARB_RIGHTIDX(ARB_PARENT(head, elm, field), \
field) = _tmpidx; \
} else \
ARB_ROOTIDX(head) = _tmpidx; \
ARB_LEFTIDX(tmp, field) = ARB_SELFIDX(head, elm); \
ARB_PARENTIDX(elm, field) = _tmpidx; \
ARB_AUGMENT(tmp); \
if (ARB_PARENTIDX(tmp, field) != ARB_NULLIDX) \
ARB_AUGMENT(ARB_PARENT(head, tmp, field)); \
} while (/*CONSTCOND*/ 0)

#define ARB_ROTATE_RIGHT(head, elm, tmp, field) do { \
__typeof(ARB_LEFTIDX(elm, field)) _tmpidx; \
(tmp) = ARB_LEFT(head, elm, field); \
_tmpidx = ARB_LEFTIDX(elm, field); \
ARB_LEFTIDX(elm, field) = ARB_RIGHTIDX(tmp, field); \
if (ARB_LEFTIDX(elm, field) != ARB_NULLIDX) { \
ARB_PARENTIDX(ARB_RIGHT(head, tmp, field), field) = \
ARB_SELFIDX(head, elm); \
} \
ARB_AUGMENT(elm); \
ARB_PARENTIDX(tmp, field) = ARB_PARENTIDX(elm, field); \
if (ARB_PARENTIDX(tmp, field) != ARB_NULLIDX) { \
if (ARB_SELFIDX(head, elm) == \
ARB_LEFTIDX(ARB_PARENT(head, elm, field), field)) \
ARB_LEFTIDX(ARB_PARENT(head, elm, field), \
field) = _tmpidx; \
else \
ARB_RIGHTIDX(ARB_PARENT(head, elm, field), \
field) = _tmpidx; \
} else \
ARB_ROOTIDX(head) = _tmpidx; \
ARB_RIGHTIDX(tmp, field) = ARB_SELFIDX(head, elm); \
ARB_PARENTIDX(elm, field) = _tmpidx; \
ARB_AUGMENT(tmp); \
if (ARB_PARENTIDX(tmp, field) != ARB_NULLIDX) \
ARB_AUGMENT(ARB_PARENT(head, tmp, field)); \
} while (/*CONSTCOND*/ 0)

#define ARB_RETURNFREE(head, elm, field) \
({ \
ARB_COLOR(elm, field) = ARB_NULLCOL; \
ARB_NEXTFREEIDX(elm, field) = ARB_FREEIDX(head); \
ARB_FREEIDX(head) = ARB_SELFIDX(head, elm); \
elm; \
})

#define ARB_GETFREEAT(head, field, fidx) \
({ \
__typeof(ARB_NODE(head, 0)) _elm, _prevelm; \
int _idx = fidx; \
if (ARB_FREEIDX(head) == ARB_NULLIDX && !ARB_FULL(head)) { \
/* Populate the free list. */ \
ARB_ARRFOREACH_REVERSE(_elm, field, head) { \
if (ARB_ISFREE(_elm, field)) \
ARB_RETURNFREE(head, _elm, field); \
} \
} \
_elm = _prevelm = ARB_NODE(head, ARB_FREEIDX(head)); \
for (; _idx > 0 && _elm != NULL; _idx--, _prevelm = _elm) \
_elm = ARB_NODE(head, ARB_NEXTFREEIDX(_elm, field)); \
if (_elm) { \
if (fidx == 0) \
ARB_FREEIDX(head) = \
ARB_NEXTFREEIDX(_elm, field); \
else \
ARB_NEXTFREEIDX(_prevelm, field) = \
ARB_NEXTFREEIDX(_elm, field); \
} \
_elm; \
})
#define ARB_GETFREE(head, field) ARB_GETFREEAT(head, field, 0)

/* Generates prototypes and inline functions */
#define ARB_PROTOTYPE(name, type, field, cmp) \
ARB_PROTOTYPE_INTERNAL(name, type, field, cmp,)
#define ARB_PROTOTYPE_STATIC(name, type, field, cmp) \
ARB_PROTOTYPE_INTERNAL(name, type, field, cmp, __unused static)
#define ARB_PROTOTYPE_INTERNAL(name, type, field, cmp, attr) \
ARB_PROTOTYPE_INSERT_COLOR(name, type, attr); \
ARB_PROTOTYPE_REMOVE_COLOR(name, type, attr); \
ARB_PROTOTYPE_INSERT(name, type, attr); \
ARB_PROTOTYPE_REMOVE(name, type, attr); \
ARB_PROTOTYPE_CFIND(name, type, attr); \
ARB_PROTOTYPE_FIND(name, type, attr); \
ARB_PROTOTYPE_NFIND(name, type, attr); \
ARB_PROTOTYPE_CNEXT(name, type, attr); \
ARB_PROTOTYPE_NEXT(name, type, attr); \
ARB_PROTOTYPE_CPREV(name, type, attr); \
ARB_PROTOTYPE_PREV(name, type, attr); \
ARB_PROTOTYPE_CMINMAX(name, type, attr); \
ARB_PROTOTYPE_MINMAX(name, type, attr); \
ARB_PROTOTYPE_REBALANCE(name, type, attr);
#define ARB_PROTOTYPE_INSERT_COLOR(name, type, attr) \
attr void name##_ARB_INSERT_COLOR(struct name *, struct type *)
#define ARB_PROTOTYPE_REMOVE_COLOR(name, type, attr) \
attr void name##_ARB_REMOVE_COLOR(struct name *, struct type *, struct type *)
#define ARB_PROTOTYPE_REMOVE(name, type, attr) \
attr struct type *name##_ARB_REMOVE(struct name *, struct type *)
#define ARB_PROTOTYPE_INSERT(name, type, attr) \
attr struct type *name##_ARB_INSERT(struct name *, struct type *)
#define ARB_PROTOTYPE_CFIND(name, type, attr) \
attr const struct type *name##_ARB_CFIND(const struct name *, \
const struct type *)
#define ARB_PROTOTYPE_FIND(name, type, attr) \
attr struct type *name##_ARB_FIND(const struct name *, \
const struct type *)
#define ARB_PROTOTYPE_NFIND(name, type, attr) \
attr struct type *name##_ARB_NFIND(struct name *, struct type *)
#define ARB_PROTOTYPE_CNFIND(name, type, attr) \
attr const struct type *name##_ARB_CNFIND(const struct name *, \
const struct type *)
#define ARB_PROTOTYPE_CNEXT(name, type, attr) \
attr const struct type *name##_ARB_CNEXT(const struct name *head,\
const struct type *)
#define ARB_PROTOTYPE_NEXT(name, type, attr) \
attr struct type *name##_ARB_NEXT(const struct name *, \
const struct type *)
#define ARB_PROTOTYPE_CPREV(name, type, attr) \
attr const struct type *name##_ARB_CPREV(const struct name *, \
const struct type *)
#define ARB_PROTOTYPE_PREV(name, type, attr) \
attr struct type *name##_ARB_PREV(const struct name *, \
const struct type *)
#define ARB_PROTOTYPE_CMINMAX(name, type, attr) \
attr const struct type *name##_ARB_CMINMAX(const struct name *, int)
#define ARB_PROTOTYPE_MINMAX(name, type, attr) \
attr struct type *name##_ARB_MINMAX(const struct name *, int)
#define ARB_PROTOTYPE_REBALANCE(name, type, attr) \
attr struct type *name##_ARB_REBALANCE(struct name *, struct type *)

#define ARB_GENERATE(name, type, field, cmp) \
ARB_GENERATE_INTERNAL(name, type, field, cmp,)
#define ARB_GENERATE_STATIC(name, type, field, cmp) \
ARB_GENERATE_INTERNAL(name, type, field, cmp, __unused static)
#define ARB_GENERATE_INTERNAL(name, type, field, cmp, attr) \
ARB_GENERATE_INSERT_COLOR(name, type, field, attr) \
ARB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
ARB_GENERATE_INSERT(name, type, field, cmp, attr) \
ARB_GENERATE_REMOVE(name, type, field, attr) \
ARB_GENERATE_CFIND(name, type, field, cmp, attr) \
ARB_GENERATE_FIND(name, type, field, cmp, attr) \
ARB_GENERATE_CNEXT(name, type, field, attr) \
ARB_GENERATE_NEXT(name, type, field, attr) \
ARB_GENERATE_CPREV(name, type, field, attr) \
ARB_GENERATE_PREV(name, type, field, attr) \
ARB_GENERATE_CMINMAX(name, type, field, attr) \
ARB_GENERATE_MINMAX(name, type, field, attr) \
ARB_GENERATE_REBALANCE(name, type, field, cmp, attr)

#define ARB_GENERATE_INSERT_COLOR(name, type, field, attr) \
attr void \
name##_ARB_INSERT_COLOR(struct name *head, struct type *elm) \
{ \
struct type *parent, *gparent, *tmp; \
while ((parent = ARB_PARENT(head, elm, field)) != NULL && \
ARB_COLOR(parent, field) == ARB_RED) { \
gparent = ARB_PARENT(head, parent, field); \
if (parent == ARB_LEFT(head, gparent, field)) { \
tmp = ARB_RIGHT(head, gparent, field); \
if (tmp && ARB_COLOR(tmp, field) == ARB_RED) { \
ARB_COLOR(tmp, field) = ARB_BLACK; \
ARB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (ARB_RIGHT(head, parent, field) == elm) { \
ARB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
ARB_SET_BLACKRED(parent, gparent, field); \
ARB_ROTATE_RIGHT(head, gparent, tmp, field); \
} else { \
tmp = ARB_LEFT(head, gparent, field); \
if (tmp && ARB_COLOR(tmp, field) == ARB_RED) { \
ARB_COLOR(tmp, field) = ARB_BLACK; \
ARB_SET_BLACKRED(parent, gparent, field); \
elm = gparent; \
continue; \
} \
if (ARB_LEFT(head, parent, field) == elm) { \
ARB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = parent; \
parent = elm; \
elm = tmp; \
} \
ARB_SET_BLACKRED(parent, gparent, field); \
ARB_ROTATE_LEFT(head, gparent, tmp, field); \
} \
} \
ARB_COLOR(ARB_ROOT(head), field) = ARB_BLACK; \
}

#define ARB_GENERATE_REMOVE_COLOR(name, type, field, attr) \
attr void \
name##_ARB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
{ \
struct type *tmp; \
while ((elm == NULL || ARB_COLOR(elm, field) == ARB_BLACK) && \
elm != ARB_ROOT(head)) { \
if (ARB_LEFT(head, parent, field) == elm) { \
tmp = ARB_RIGHT(head, parent, field); \
if (ARB_COLOR(tmp, field) == ARB_RED) { \
ARB_SET_BLACKRED(tmp, parent, field); \
ARB_ROTATE_LEFT(head, parent, tmp, field); \
tmp = ARB_RIGHT(head, parent, field); \
} \
if ((ARB_LEFT(head, tmp, field) == NULL || \
ARB_COLOR(ARB_LEFT(head, tmp, field), field) == ARB_BLACK) && \
(ARB_RIGHT(head, tmp, field) == NULL || \
ARB_COLOR(ARB_RIGHT(head, tmp, field), field) == ARB_BLACK)) { \
ARB_COLOR(tmp, field) = ARB_RED; \
elm = parent; \
parent = ARB_PARENT(head, elm, field); \
} else { \
if (ARB_RIGHT(head, tmp, field) == NULL || \
ARB_COLOR(ARB_RIGHT(head, tmp, field), field) == ARB_BLACK) { \
struct type *oleft; \
if ((oleft = ARB_LEFT(head, tmp, field)) \
!= NULL) \
ARB_COLOR(oleft, field) = ARB_BLACK; \
ARB_COLOR(tmp, field) = ARB_RED; \
ARB_ROTATE_RIGHT(head, tmp, oleft, field); \
tmp = ARB_RIGHT(head, parent, field); \
} \
ARB_COLOR(tmp, field) = ARB_COLOR(parent, field); \
ARB_COLOR(parent, field) = ARB_BLACK; \
if (ARB_RIGHT(head, tmp, field)) \
ARB_COLOR(ARB_RIGHT(head, tmp, field), field) = ARB_BLACK; \
ARB_ROTATE_LEFT(head, parent, tmp, field); \
elm = ARB_ROOT(head); \
break; \
} \
} else { \
tmp = ARB_LEFT(head, parent, field); \
if (ARB_COLOR(tmp, field) == ARB_RED) { \
ARB_SET_BLACKRED(tmp, parent, field); \
ARB_ROTATE_RIGHT(head, parent, tmp, field); \
tmp = ARB_LEFT(head, parent, field); \
} \
if ((ARB_LEFT(head, tmp, field) == NULL || \
ARB_COLOR(ARB_LEFT(head, tmp, field), field) == ARB_BLACK) && \
(ARB_RIGHT(head, tmp, field) == NULL || \
ARB_COLOR(ARB_RIGHT(head, tmp, field), field) == ARB_BLACK)) { \
ARB_COLOR(tmp, field) = ARB_RED; \
elm = parent; \
parent = ARB_PARENT(head, elm, field); \
} else { \
if (ARB_LEFT(head, tmp, field) == NULL || \
ARB_COLOR(ARB_LEFT(head, tmp, field), field) == ARB_BLACK) { \
struct type *oright; \
if ((oright = ARB_RIGHT(head, tmp, field)) \
!= NULL) \
ARB_COLOR(oright, field) = ARB_BLACK; \
ARB_COLOR(tmp, field) = ARB_RED; \
ARB_ROTATE_LEFT(head, tmp, oright, field); \
tmp = ARB_LEFT(head, parent, field); \
} \
ARB_COLOR(tmp, field) = ARB_COLOR(parent, field); \
ARB_COLOR(parent, field) = ARB_BLACK; \
if (ARB_LEFT(head, tmp, field)) \
ARB_COLOR(ARB_LEFT(head, tmp, field), field) = ARB_BLACK; \
ARB_ROTATE_RIGHT(head, parent, tmp, field); \
elm = ARB_ROOT(head); \
break; \
} \
} \
} \
if (elm) \
ARB_COLOR(elm, field) = ARB_BLACK; \
}

#define ARB_GENERATE_REMOVE(name, type, field, attr) \
attr struct type * \
name##_ARB_REMOVE(struct name *head, struct type *elm) \
{ \
struct type *child, *parent, *old = elm; \
int color; \
if (ARB_LEFT(head, elm, field) == NULL) \
child = ARB_RIGHT(head, elm, field); \
else if (ARB_RIGHT(head, elm, field) == NULL) \
child = ARB_LEFT(head, elm, field); \
else { \
struct type *left; \
elm = ARB_RIGHT(head, elm, field); \
while ((left = ARB_LEFT(head, elm, field)) != NULL) \
elm = left; \
child = ARB_RIGHT(head, elm, field); \
parent = ARB_PARENT(head, elm, field); \
color = ARB_COLOR(elm, field); \
if (child) \
ARB_PARENTIDX(child, field) = \
ARB_SELFIDX(head, parent); \
if (parent) { \
if (ARB_LEFT(head, parent, field) == elm) \
ARB_LEFTIDX(parent, field) = \
ARB_SELFIDX(head, child); \
else \
ARB_RIGHTIDX(parent, field) = \
ARB_SELFIDX(head, child); \
ARB_AUGMENT(parent); \
} else \
ARB_ROOTIDX(head) = ARB_SELFIDX(head, child); \
if (ARB_PARENT(head, elm, field) == old) \
parent = elm; \
(elm)->field = (old)->field; \
if (ARB_PARENT(head, old, field)) { \
if (ARB_LEFT(head, ARB_PARENT(head, old, field), \
field) == old) \
ARB_LEFTIDX(ARB_PARENT(head, old, field), \
field) = ARB_SELFIDX(head, elm); \
else \
ARB_RIGHTIDX(ARB_PARENT(head, old, field),\
field) = ARB_SELFIDX(head, elm); \
ARB_AUGMENT(ARB_PARENT(head, old, field)); \
} else \
ARB_ROOTIDX(head) = ARB_SELFIDX(head, elm); \
ARB_PARENTIDX(ARB_LEFT(head, old, field), field) = \
ARB_SELFIDX(head, elm); \
if (ARB_RIGHT(head, old, field)) \
ARB_PARENTIDX(ARB_RIGHT(head, old, field), \
field) = ARB_SELFIDX(head, elm); \
if (parent) { \
left = parent; \
do { \
ARB_AUGMENT(left); \
} while ((left = ARB_PARENT(head, left, field)) \
!= NULL); \
} \
goto color; \
} \
parent = ARB_PARENT(head, elm, field); \
color = ARB_COLOR(elm, field); \
if (child) \
ARB_PARENTIDX(child, field) = ARB_SELFIDX(head, parent);\
if (parent) { \
if (ARB_LEFT(head, parent, field) == elm) \
ARB_LEFTIDX(parent, field) = \
ARB_SELFIDX(head, child); \
else \
ARB_RIGHTIDX(parent, field) = \
ARB_SELFIDX(head, child); \
ARB_AUGMENT(parent); \
} else \
ARB_ROOTIDX(head) = ARB_SELFIDX(head, child); \
color: \
if (color == ARB_BLACK) \
name##_ARB_REMOVE_COLOR(head, parent, child); \
ARB_CURNODES(head) -= 1; \
if (ARB_MINIDX(head) == ARB_SELFIDX(head, old)) \
ARB_MINIDX(head) = ARB_PARENTIDX(old, field); \
if (ARB_MAXIDX(head) == ARB_SELFIDX(head, old)) \
ARB_MAXIDX(head) = ARB_PARENTIDX(old, field); \
ARB_RETURNFREE(head, old, field); \
return (old); \
} \

#define ARB_GENERATE_INSERT(name, type, field, cmp, attr) \
/* Inserts a node into the RB tree */ \
attr struct type * \
name##_ARB_INSERT(struct name *head, struct type *elm) \
{ \
struct type *tmp; \
struct type *parent = NULL; \
int comp = 0; \
tmp = ARB_ROOT(head); \
while (tmp) { \
parent = tmp; \
comp = (cmp)(elm, parent); \
if (comp < 0) \
tmp = ARB_LEFT(head, tmp, field); \
else if (comp > 0) \
tmp = ARB_RIGHT(head, tmp, field); \
else \
return (tmp); \
} \
ARB_SET(head, elm, parent, field); \
if (parent != NULL) { \
if (comp < 0) \
ARB_LEFTIDX(parent, field) = \
ARB_SELFIDX(head, elm); \
else \
ARB_RIGHTIDX(parent, field) = \
ARB_SELFIDX(head, elm); \
ARB_AUGMENT(parent); \
} else \
ARB_ROOTIDX(head) = ARB_SELFIDX(head, elm); \
name##_ARB_INSERT_COLOR(head, elm); \
ARB_CURNODES(head) += 1; \
if (ARB_MINIDX(head) == ARB_NULLIDX || \
(ARB_PARENTIDX(elm, field) == ARB_MINIDX(head) && \
ARB_LEFTIDX(parent, field) == ARB_SELFIDX(head, elm))) \
ARB_MINIDX(head) = ARB_SELFIDX(head, elm); \
if (ARB_MAXIDX(head) == ARB_NULLIDX || \
(ARB_PARENTIDX(elm, field) == ARB_MAXIDX(head) && \
ARB_RIGHTIDX(parent, field) == ARB_SELFIDX(head, elm))) \
ARB_MAXIDX(head) = ARB_SELFIDX(head, elm); \
return (NULL); \
}

#define ARB_GENERATE_CFIND(name, type, field, cmp, attr) \
/* Finds the node with the same key as elm */ \
attr const struct type * \
name##_ARB_CFIND(const struct name *head, const struct type *elm) \
{ \
const struct type *tmp = ARB_ROOT(head); \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) \
tmp = ARB_LEFT(head, tmp, field); \
else if (comp > 0) \
tmp = ARB_RIGHT(head, tmp, field); \
else \
return (tmp); \
} \
return (NULL); \
}

#define ARB_GENERATE_FIND(name, type, field, cmp, attr) \
attr struct type * \
name##_ARB_FIND(const struct name *head, const struct type *elm) \
{ return (__DECONST(struct type *, name##_ARB_CFIND(head, elm))); }

#define ARB_GENERATE_CNFIND(name, type, field, cmp, attr) \
/* Finds the first node greater than or equal to the search key */ \
attr const struct type * \
name##_ARB_CNFIND(const struct name *head, const struct type *elm) \
{ \
const struct type *tmp = ARB_ROOT(head); \
const struct type *res = NULL; \
int comp; \
while (tmp) { \
comp = cmp(elm, tmp); \
if (comp < 0) { \
res = tmp; \
tmp = ARB_LEFT(head, tmp, field); \
} \
else if (comp > 0) \
tmp = ARB_RIGHT(head, tmp, field); \
else \
return (tmp); \
} \
return (res); \
}

#define ARB_GENERATE_NFIND(name, type, field, cmp, attr) \
attr struct type * \
name##_ARB_NFIND(const struct name *head, const struct type *elm) \
{ return (__DECONST(struct type *, name##_ARB_CNFIND(head, elm))); }

#define ARB_GENERATE_CNEXT(name, type, field, attr) \
/* ARGSUSED */ \
attr const struct type * \
name##_ARB_CNEXT(const struct name *head, const struct type *elm) \
{ \
if (ARB_RIGHT(head, elm, field)) { \
elm = ARB_RIGHT(head, elm, field); \
while (ARB_LEFT(head, elm, field)) \
elm = ARB_LEFT(head, elm, field); \
} else { \
if (ARB_PARENT(head, elm, field) && \
(elm == ARB_LEFT(head, ARB_PARENT(head, elm, field),\
field))) \
elm = ARB_PARENT(head, elm, field); \
else { \
while (ARB_PARENT(head, elm, field) && \
(elm == ARB_RIGHT(head, ARB_PARENT(head, \
elm, field), field))) \
elm = ARB_PARENT(head, elm, field); \
elm = ARB_PARENT(head, elm, field); \
} \
} \
return (elm); \
}

#define ARB_GENERATE_NEXT(name, type, field, attr) \
attr struct type * \
name##_ARB_NEXT(const struct name *head, const struct type *elm) \
{ return (__DECONST(struct type *, name##_ARB_CNEXT(head, elm))); }

#define ARB_GENERATE_CPREV(name, type, field, attr) \
/* ARGSUSED */ \
attr const struct type * \
name##_ARB_CPREV(const struct name *head, const struct type *elm) \
{ \
if (ARB_LEFT(head, elm, field)) { \
elm = ARB_LEFT(head, elm, field); \
while (ARB_RIGHT(head, elm, field)) \
elm = ARB_RIGHT(head, elm, field); \
} else { \
if (ARB_PARENT(head, elm, field) && \
(elm == ARB_RIGHT(head, ARB_PARENT(head, elm, \
field), field))) \
elm = ARB_PARENT(head, elm, field); \
else { \
while (ARB_PARENT(head, elm, field) && \
(elm == ARB_LEFT(head, ARB_PARENT(head, elm,\
field), field))) \
elm = ARB_PARENT(head, elm, field); \
elm = ARB_PARENT(head, elm, field); \
} \
} \
return (elm); \
}

#define ARB_GENERATE_PREV(name, type, field, attr) \
attr struct type * \
name##_ARB_PREV(const struct name *head, const struct type *elm) \
{ return (__DECONST(struct type *, name##_ARB_CPREV(head, elm))); }

#define ARB_GENERATE_CMINMAX(name, type, field, attr) \
attr const struct type * \
name##_ARB_CMINMAX(const struct name *head, int val) \
{ \
const struct type *tmp = ARB_EMPTY(head) ? NULL : ARB_ROOT(head);\
const struct type *parent = NULL; \
while (tmp) { \
parent = tmp; \
if (val < 0) \
tmp = ARB_LEFT(head, tmp, field); \
else \
tmp = ARB_RIGHT(head, tmp, field); \
} \
return (__DECONST(struct type *, parent)); \
}

#define ARB_GENERATE_MINMAX(name, type, field, attr) \
attr struct type * \
name##_ARB_MINMAX(const struct name *head, int val) \
{ return (__DECONST(struct type *, name##_ARB_CMINMAX(head, val))); }

#define ARB_GENERATE_REBALANCE(name, type, field, cmp, attr) \
attr struct type * \
name##_ARB_REBALANCE(struct name *head, struct type *elm) \
{ \
struct type *cmpelm; \
if (((cmpelm = ARB_PREV(name, head, elm)) != NULL && \
(cmp)(cmpelm, elm) >= 0) || \
((cmpelm = ARB_NEXT(name, head, elm)) != NULL && \
(cmp)(elm, cmpelm) >= 0)) { \
/* XXXLAS: Remove/insert is heavy handed. */ \
ARB_REMOVE(name, head, elm); \
/* Remove puts elm on the free list. */ \
elm = ARB_GETFREE(head, field); \
return (ARB_INSERT(name, head, elm)); \
} \
return (NULL); \
} \

#define ARB_INSERT(name, x, y) name##_ARB_INSERT(x, y)
#define ARB_REMOVE(name, x, y) name##_ARB_REMOVE(x, y)
#define ARB_CFIND(name, x, y) name##_ARB_CFIND(x, y)
#define ARB_FIND(name, x, y) name##_ARB_FIND(x, y)
#define ARB_CNFIND(name, x, y) name##_ARB_CNFIND(x, y)
#define ARB_NFIND(name, x, y) name##_ARB_NFIND(x, y)
#define ARB_CNEXT(name, x, y) name##_ARB_CNEXT(x, y)
#define ARB_NEXT(name, x, y) name##_ARB_NEXT(x, y)
#define ARB_CPREV(name, x, y) name##_ARB_CPREV(x, y)
#define ARB_PREV(name, x, y) name##_ARB_PREV(x, y)
#define ARB_CMIN(name, x) (ARB_MINIDX(x) == ARB_NULLIDX ? \
name##_ARB_CMINMAX(x, ARB_NEGINF) : ARB_CNODE(x, ARB_MINIDX(x)))
#define ARB_MIN(name, x) (ARB_MINIDX(x) == ARB_NULLIDX ? \
name##_ARB_MINMAX(x, ARB_NEGINF) : ARB_NODE(x, ARB_MINIDX(x)))
#define ARB_CMAX(name, x) (ARB_MAXIDX(x) == ARB_NULLIDX ? \
name##_ARB_CMINMAX(x, ARB_INF) : ARB_CNODE(x, ARB_MAXIDX(x)))
#define ARB_MAX(name, x) (ARB_MAXIDX(x) == ARB_NULLIDX ? \
name##_ARB_MINMAX(x, ARB_INF) : ARB_NODE(x, ARB_MAXIDX(x)))
#define ARB_REBALANCE(name, x, y) name##_ARB_REBALANCE(x, y)

#define ARB_FOREACH(x, name, head) \
for ((x) = ARB_MIN(name, head); \
(x) != NULL; \
(x) = name##_ARB_NEXT(head, x))

#define ARB_FOREACH_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_ARB_NEXT(x), (x) != NULL); \
(x) = (y))

#define ARB_FOREACH_SAFE(x, name, head, y) \
for ((x) = ARB_MIN(name, head); \
((x) != NULL) && ((y) = name##_ARB_NEXT(x), (x) != NULL); \
(x) = (y))

#define ARB_FOREACH_REVERSE(x, name, head) \
for ((x) = ARB_MAX(name, head); \
(x) != NULL; \
(x) = name##_ARB_PREV(x))

#define ARB_FOREACH_REVERSE_FROM(x, name, y) \
for ((x) = (y); \
((x) != NULL) && ((y) = name##_ARB_PREV(x), (x) != NULL); \
(x) = (y))

#define ARB_FOREACH_REVERSE_SAFE(x, name, head, y) \
for ((x) = ARB_MAX(name, head); \
((x) != NULL) && ((y) = name##_ARB_PREV(x), (x) != NULL); \
(x) = (y))

#define ARB_ARRFOREACH(x, field, head) \
for ((x) = ARB_NODES(head); \
ARB_SELFIDX(head, x) < ARB_MAXNODES(head); \
(x)++)

#define ARB_ARRFOREACH_REVWCOND(x, field, head, extracond) \
for ((x) = ARB_NODES(head) + (ARB_MAXNODES(head) - 1); \
(x) >= ARB_NODES(head) && (extracond); \
(x)--)

#define ARB_ARRFOREACH_REVERSE(x, field, head) \
ARB_ARRFOREACH_REVWCOND(x, field, head, 1)

#endif /* _SYS_ARB_H_ */
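
A minimal usage sketch of the new header (userland, assuming FreeBSD's <sys/cdefs.h> environment and GNU C statement expressions; struct knode, ktree and knode_cmp are illustrative names, not part of the commit):

#include <stdint.h>
#include <stdlib.h>
#include <sys/arb.h>

struct knode {
	int key;
	ARB8_ENTRY() entry;		/* 8-bit indices: at most 127 nodes */
};

ARB8_HEAD(ktree, knode);

static int
knode_cmp(const struct knode *a, const struct knode *b)
{
	return (a->key - b->key);
}

ARB_GENERATE_STATIC(ktree, knode, entry, knode_cmp)

int
main(void)
{
	struct ktree *head;
	struct knode *n;
	int i, count;

	/* The node array lives inline after the head: one allocation. */
	head = malloc(ARB_ALLOCSIZE(head, 8, n));
	ARB_INIT(n, entry, head, 8);	/* places all 8 nodes on the free list */

	for (i = 0; i < 8; i++) {
		n = ARB_GETFREE(head, entry);	/* pop a node off the free list */
		n->key = i * i;
		ARB_INSERT(ktree, head, n);	/* returns NULL on success */
	}

	count = 0;
	ARB_FOREACH(n, ktree, head)		/* in-order traversal */
		count++;

	n = ARB_MIN(ktree, head);
	ARB_REMOVE(ktree, head, n);		/* node goes back to the free list */

	free(head);
	return (count == 8 ? 0 : 1);
}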
@ -226,6 +226,11 @@ struct mount {
struct lock mnt_explock; /* vfs_export walkers lock */
TAILQ_ENTRY(mount) mnt_upper_link; /* (m) we in the all uppers */
TAILQ_HEAD(, mount) mnt_uppers; /* (m) upper mounts over us*/
int mnt_vfs_ops; /* (i) pending vfs ops */
int *mnt_thread_in_ops_pcpu;
int *mnt_ref_pcpu;
int *mnt_lockref_pcpu;
int *mnt_writeopcount_pcpu;
};

/*
@ -265,15 +270,17 @@ void __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *);
#define MNT_ITRYLOCK(mp) mtx_trylock(&(mp)->mnt_mtx)
#define MNT_IUNLOCK(mp) mtx_unlock(&(mp)->mnt_mtx)
#define MNT_MTX(mp) (&(mp)->mnt_mtx)

#define MNT_REF(mp) do { \
mtx_assert(MNT_MTX(mp), MA_OWNED); \
(mp)->mnt_ref++; \
mp->mnt_ref++; \
} while (0)
#define MNT_REL(mp) do { \
mtx_assert(MNT_MTX(mp), MA_OWNED); \
KASSERT((mp)->mnt_ref > 0, ("negative mnt_ref")); \
(mp)->mnt_ref--; \
if ((mp)->mnt_ref == 0) \
if ((mp)->mnt_vfs_ops && (mp)->mnt_ref < 0) \
vfs_dump_mount_counters(mp); \
if ((mp)->mnt_ref == 0 && (mp)->mnt_vfs_ops) \
wakeup((mp)); \
} while (0)

@ -941,6 +948,74 @@ vfs_sysctl_t vfs_stdsysctl;
void syncer_suspend(void);
void syncer_resume(void);

void vfs_op_barrier_wait(struct mount *);
void vfs_op_enter(struct mount *);
void vfs_op_exit_locked(struct mount *);
void vfs_op_exit(struct mount *);

#ifdef DIAGNOSTIC
void vfs_assert_mount_counters(struct mount *);
void vfs_dump_mount_counters(struct mount *);
#else
#define vfs_assert_mount_counters(mp) do { } while (0)
#define vfs_dump_mount_counters(mp) do { } while (0)
#endif

enum mount_counter { MNT_COUNT_REF, MNT_COUNT_LOCKREF, MNT_COUNT_WRITEOPCOUNT };
int vfs_mount_fetch_counter(struct mount *, enum mount_counter);

/*
* We mark ourselves as entering the section and post a sequentially consistent
* fence, meaning the store is completed before we get into the section and
* mnt_vfs_ops is only read afterwards.
*
* Any thread transitioning the ops counter 0->1 does things in the opposite
* order - first bumps the count, posts a sequentially consistent fence and
* observes all CPUs not executing within the section.
*
* This provides an invariant that by the time the last CPU is observed not
* executing, everyone else entering will see the counter > 0 and exit.
*
* Note there is no barrier between vfs_ops and the rest of the code in the
* section. It is not necessary as the writer has to wait for everyone to drain
* before making any changes or only make changes safe while the section is
* executed.
*/
#define vfs_op_thread_entered(mp) ({ \
MPASS(curthread->td_critnest > 0); \
*(int *)zpcpu_get(mp->mnt_thread_in_ops_pcpu) == 1; \
})

#define vfs_op_thread_enter(mp) ({ \
bool _retval = true; \
critical_enter(); \
MPASS(!vfs_op_thread_entered(mp)); \
*(int *)zpcpu_get(mp->mnt_thread_in_ops_pcpu) = 1; \
atomic_thread_fence_seq_cst(); \
if (__predict_false(mp->mnt_vfs_ops > 0)) { \
vfs_op_thread_exit(mp); \
_retval = false; \
} \
_retval; \
})

#define vfs_op_thread_exit(mp) do { \
MPASS(vfs_op_thread_entered(mp)); \
atomic_thread_fence_rel(); \
*(int *)zpcpu_get(mp->mnt_thread_in_ops_pcpu) = 0; \
critical_exit(); \
} while (0)

#define vfs_mp_count_add_pcpu(mp, count, val) do { \
MPASS(vfs_op_thread_entered(mp)); \
(*(int *)zpcpu_get(mp->mnt_##count##_pcpu)) += val; \
} while (0)

#define vfs_mp_count_sub_pcpu(mp, count, val) do { \
MPASS(vfs_op_thread_entered(mp)); \
(*(int *)zpcpu_get(mp->mnt_##count##_pcpu)) -= val; \
} while (0)
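
A minimal sketch (assumed names, not part of the commit) of the fastpath/slowpath shape these macros enable; vfs_unbusy() and vn_finished_write() in this very commit follow it:

static void
example_drop_ref(struct mount *mp)
{

	if (vfs_op_thread_enter(mp)) {
		/* No unmount or suspend in flight: lock-free per-CPU update. */
		vfs_mp_count_sub_pcpu(mp, ref, 1);
		vfs_op_thread_exit(mp);
		return;
	}
	/* Slowpath: a writer called vfs_op_enter(); take the interlock. */
	MNT_ILOCK(mp);
	MNT_REL(mp);
	MNT_IUNLOCK(mp);
}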
#else /* !_KERNEL */

#include <sys/cdefs.h>
@ -242,6 +242,18 @@ zpcpu_get_cpu(void *base, int cpu)
return ((char *)(base) + UMA_PCPU_ALLOC_SIZE * cpu);
}

/*
* This operation is NOT atomic and does not post any barriers.
* If you use this the assumption is that the target CPU will not
* be modifying this variable.
* If you need atomicity use xchg.
*/
#define zpcpu_replace_cpu(base, val, cpu) ({ \
__typeof(val) _old = *(__typeof(val) *)zpcpu_get_cpu(base, cpu);\
*(__typeof(val) *)zpcpu_get_cpu(base, cpu) = val; \
_old; \
})
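
A sketch (assumed, not from the commit) of the intended consumer pattern: per-CPU slices, e.g. allocated from the pcpu_zone_int UMA zone, are summed or reset by a thread that has already fenced off the fastpath, so the non-atomic access is safe:

static int
example_drain_counter(int *pcpu_base)
{
	int cpu, sum;

	sum = 0;
	CPU_FOREACH(cpu)
		sum += zpcpu_replace_cpu(pcpu_base, 0, cpu);
	return (sum);
}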

/*
* Machine dependent callouts. cpu_pcpu_init() is responsible for
* initializing machine dependent fields of struct pcpu, and
@ -82,6 +82,29 @@ void *rangelock_wlock(struct rangelock *lock, off_t start, off_t end,
void *rangelock_trywlock(struct rangelock *lock, off_t start, off_t end,
struct mtx *ilk);
void rlqentry_free(struct rl_q_entry *rlqe);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _rangelock_cookie_assert(void *cookie, int what, const char *file,
int line);
#endif

#ifdef INVARIANTS
#define rangelock_cookie_assert_(cookie, what, file, line) \
_rangelock_cookie_assert((cookie), (what), (file), (line))
#else
#define rangelock_cookie_assert_(cookie, what, file, line) (void)0
#endif

#define rangelock_cookie_assert(cookie, what) \
rangelock_cookie_assert_((cookie), (what), __FILE__, __LINE__)

/*
* Assertion flags.
*/
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define RCA_LOCKED 0x0001
#define RCA_RLOCKED 0x0002
#define RCA_WLOCKED 0x0004
#endif

#endif /* _KERNEL */
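
How a caller would use the new assertion (a hedged sketch; the surrounding I/O is assumed, not part of the patch):

static void
example_locked_write(struct rangelock *rl, struct mtx *ilk, off_t start,
    off_t end)
{
	void *cookie;

	cookie = rangelock_wlock(rl, start, end, ilk);
	/* Compiles to nothing unless INVARIANTS is configured. */
	rangelock_cookie_assert(cookie, RCA_WLOCKED);
	/* ... perform the write to [start, end) ... */
	rangelock_unlock(rl, cookie, ilk);
}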
@ -2954,17 +2954,26 @@ journal_suspend(ump)
|
||||
{
|
||||
struct jblocks *jblocks;
|
||||
struct mount *mp;
|
||||
bool set;
|
||||
|
||||
mp = UFSTOVFS(ump);
|
||||
if ((mp->mnt_kern_flag & MNTK_SUSPEND) != 0)
|
||||
return;
|
||||
|
||||
jblocks = ump->softdep_jblocks;
|
||||
vfs_op_enter(mp);
|
||||
set = false;
|
||||
MNT_ILOCK(mp);
|
||||
if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
|
||||
stat_journal_min++;
|
||||
mp->mnt_kern_flag |= MNTK_SUSPEND;
|
||||
mp->mnt_susp_owner = ump->softdep_flushtd;
|
||||
set = true;
|
||||
}
|
||||
jblocks->jb_suspended = 1;
|
||||
MNT_IUNLOCK(mp);
|
||||
if (!set)
|
||||
vfs_op_exit(mp);
|
||||
}
|
||||

static int
@ -13394,10 +13403,11 @@ softdep_request_cleanup(fs, vp, cred, resource)
	 * (fs_minfree).
	 */
	if (resource == FLUSH_INODES_WAIT) {
		needed = vp->v_mount->mnt_writeopcount + 2;
		needed = vfs_mount_fetch_counter(vp->v_mount,
		    MNT_COUNT_WRITEOPCOUNT) + 2;
	} else if (resource == FLUSH_BLOCKS_WAIT) {
		needed = (vp->v_mount->mnt_writeopcount + 2) *
		    fs->fs_contigsumsize;
		needed = (vfs_mount_fetch_counter(vp->v_mount,
		    MNT_COUNT_WRITEOPCOUNT) + 2) * fs->fs_contigsumsize;
		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE))
			needed += fragstoblks(fs,
			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
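With mnt_writeopcount now maintained per-CPU, a direct read of the
field is no longer meaningful; readers must sum the per-CPU
contributions instead.  A distilled sketch (hypothetical wrapper, not
part of this diff), using only the call the hunk above introduces:

static int
example_pending_writes(struct mount *mp)
{
	/* Sums the per-CPU writeopcount contributions. */
	return (vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT));
}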
@ -650,6 +650,7 @@ int uma_zone_exhausted_nolock(uma_zone_t zone);
/*
 * Common UMA_ZONE_PCPU zones.
 */
extern uma_zone_t pcpu_zone_int;
extern uma_zone_t pcpu_zone_64;

/*
@ -1250,7 +1250,6 @@ vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
}

enum unlink_merge_type {
	UNLINK_MERGE_PREV,
	UNLINK_MERGE_NONE,
	UNLINK_MERGE_NEXT
};
@ -1266,17 +1265,9 @@ vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));

	vm_map_splay_findnext(root, &rlist);
	switch (op) {
	case UNLINK_MERGE_PREV:
		vm_map_splay_findprev(root, &llist);
		llist->end = root->end;
		y = root->right;
		root = llist;
		llist = root->right;
		root->right = y;
		break;
	case UNLINK_MERGE_NEXT:
		vm_map_splay_findnext(root, &rlist);
		rlist->start = root->start;
		rlist->offset = root->offset;
		y = root->left;
@ -1286,7 +1277,6 @@ vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
		break;
	case UNLINK_MERGE_NONE:
		vm_map_splay_findprev(root, &llist);
		vm_map_splay_findnext(root, &rlist);
		if (llist != &map->header) {
			root = llist;
			llist = root->right;
@ -2596,17 +2596,24 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
					goto unlock;
				}

				/*
				 * Unmap the page and check for new
				 * wirings that may have been acquired
				 * through a pmap lookup.
				 */
				if (object->ref_count != 0 &&
				    !vm_page_try_remove_all(m)) {
					vm_page_free(m_new);
					error = EBUSY;
					goto unlock;
				}

				/*
				 * Replace "m" with the new page. For
				 * vm_page_replace(), "m" must be busy
				 * and dequeued. Finally, change "m"
				 * as if vm_page_free() was called.
				 */
				if (object->ref_count != 0 &&
				    !vm_page_try_remove_all(m)) {
					error = EBUSY;
					goto unlock;
				}
				m_new->aflags = m->aflags &
				    ~PGA_QUEUE_STATE_MASK;
				KASSERT(m_new->oflags == VPO_UNMANAGED,
@ -3308,13 +3315,18 @@ vm_page_dequeue_deferred_free(vm_page_t m)

	KASSERT(m->ref_count == 0, ("page %p has references", m));

	if ((m->aflags & PGA_DEQUEUE) != 0)
		return;
	atomic_thread_fence_acq();
	if ((queue = m->queue) == PQ_NONE)
		return;
	vm_page_aflag_set(m, PGA_DEQUEUE);
	vm_page_pqbatch_submit(m, queue);
	for (;;) {
		if ((m->aflags & PGA_DEQUEUE) != 0)
			return;
		atomic_thread_fence_acq();
		if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
			return;
		if (vm_page_pqstate_cmpset(m, queue, queue, PGA_DEQUEUE,
		    PGA_DEQUEUE)) {
			vm_page_pqbatch_submit(m, queue);
			break;
		}
	}
}
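The rewritten loop is an instance of a common lock-free idiom: re-read
the state, attempt a single atomic transition, and retry on failure,
rather than assuming an unsynchronized flag-set-then-submit sequence
cannot race.  A generic sketch of the idiom (hypothetical helper, not
part of this diff; atomic_cmpset_int() is the standard FreeBSD
compare-and-set from <machine/atomic.h>):

static void
example_set_flag_once(volatile u_int *state, u_int flag)
{
	u_int old;

	do {
		old = *state;
		if ((old & flag) != 0)
			return;	/* Another thread won the transition. */
	} while (atomic_cmpset_int(state, old, old | flag) == 0);
}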

/*
@ -783,8 +783,6 @@ vm_page_pqstate_cmpset(vm_page_t m, uint32_t oldq, uint32_t newq,
{
	uint32_t *addr, nval, oval, qsmask;

	vm_page_assert_locked(m);

	fflags <<= VM_PAGE_AFLAG_SHIFT;
	nflags <<= VM_PAGE_AFLAG_SHIFT;
	newq <<= VM_PAGE_QUEUE_SHIFT;
@ -904,13 +902,17 @@ vm_page_in_laundry(vm_page_t m)
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	/*
	 * Synchronize with vm_page_free_prep(): ensure that all updates to the
	 * page structure are visible before it is freed.
	 */
	atomic_thread_fence_rel();
	return (atomic_fetchadd_int(&m->ref_count, -val));
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}

/*
@ -204,7 +204,7 @@ TEST_F(Create, eexist)
	EXPECT_LOOKUP(FUSE_ROOT_ID, RELPATH)
	.WillOnce(Invoke(ReturnErrno(ENOENT)));
	expect_create(RELPATH, mode, ReturnErrno(EEXIST));
	EXPECT_NE(0, open(FULLPATH, O_CREAT | O_EXCL, mode));
	EXPECT_EQ(-1, open(FULLPATH, O_CREAT | O_EXCL, mode));
	EXPECT_EQ(EEXIST, errno);
}

@ -342,7 +342,7 @@ TEST_F(Create, eperm)
	.WillOnce(Invoke(ReturnErrno(ENOENT)));
	expect_create(RELPATH, mode, ReturnErrno(EPERM));

	EXPECT_NE(0, open(FULLPATH, O_CREAT | O_EXCL, mode));
	EXPECT_EQ(-1, open(FULLPATH, O_CREAT | O_EXCL, mode));
	EXPECT_EQ(EPERM, errno);
}

@ -749,7 +749,7 @@ TEST_F(Open, eacces)
	expect_getattr(FUSE_ROOT_ID, S_IFDIR | 0755, UINT64_MAX, 1);
	expect_lookup(RELPATH, ino, S_IFREG | 0644, UINT64_MAX);

	EXPECT_NE(0, open(FULLPATH, O_RDWR));
	EXPECT_EQ(-1, open(FULLPATH, O_RDWR));
	EXPECT_EQ(EACCES, errno);
}

@ -108,11 +108,11 @@ int m_backing_fd, m_control_fd, m_test_fd;
off_t m_filesize;
bool m_direct_io;

Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_direct_io(false) {};
Io(): m_backing_fd(-1), m_control_fd(-1), m_test_fd(-1), m_filesize(0),
	m_direct_io(false) {};

void SetUp()
{
	m_filesize = 0;
	m_backing_fd = open("backing_file", O_RDWR | O_CREAT | O_TRUNC, 0644);
	if (m_backing_fd < 0)
		FAIL() << strerror(errno);

@ -55,8 +55,11 @@ const static mode_t c_umask = 022;

public:

virtual void SetUp() {
Mknod() {
	m_oldmask = umask(c_umask);
}

virtual void SetUp() {
	if (geteuid() != 0) {
		GTEST_SKIP() << "Only root may use most mknod(2) variations";
	}

@ -103,7 +103,7 @@ TEST_F(Opendir, eperm)
	expect_lookup(RELPATH, ino);
	expect_opendir(ino, O_RDONLY, ReturnErrno(EPERM));

	EXPECT_NE(0, open(FULLPATH, O_DIRECTORY));
	EXPECT_EQ(-1, open(FULLPATH, O_DIRECTORY));
	EXPECT_EQ(EPERM, errno);
}

@ -2,7 +2,7 @@

TESTSDIR=	${TESTSBASE}/sys/sys

ATF_TESTS_C=	bitstring_test qmath_test rb_test splay_test
ATF_TESTS_C=	arb_test bitstring_test qmath_test rb_test splay_test

WARNS?=	5

115
tests/sys/sys/arb_test.c
Normal file
@ -0,0 +1,115 @@
/*	$OpenBSD: rb-test.c,v 1.4 2008/04/13 00:22:17 djm Exp $	*/
/*
 * Copyright 2019 Edward Tomasz Napierala <trasz@FreeBSD.org>
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include <sys/types.h>

#include <sys/arb.h>
#include <stdio.h>
#include <stdlib.h>

#include <atf-c.h>

struct node {
	ARB32_ENTRY() next;
	int key;
};

ARB32_HEAD(tree, node) *root;

static int
compare(const struct node *a, const struct node *b)
{
	if (a->key < b->key) return (-1);
	else if (a->key > b->key) return (1);
	return (0);
}

ARB_PROTOTYPE(tree, node, next, compare);

ARB_GENERATE(tree, node, next, compare);

#define ITER 150
#define MIN 5
#define MAX 5000

ATF_TC_WITHOUT_HEAD(arb_test);
ATF_TC_BODY(arb_test, tc)
{
	struct node *tmp, *ins;
	int i, max, min;

	max = min = 42; /* pacify gcc */

	root = (struct tree *)calloc(1, ARB_ALLOCSIZE(root, ITER, tmp));

	ARB_INIT(tmp, next, root, ITER);

	for (i = 0; i < ITER; i++) {
		tmp = ARB_GETFREE(root, next);
		ATF_REQUIRE_MSG(tmp != NULL, "ARB_GETFREE failed");
		do {
			tmp->key = arc4random_uniform(MAX-MIN);
			tmp->key += MIN;
		} while (ARB_FIND(tree, root, tmp) != NULL);
		if (i == 0)
			max = min = tmp->key;
		else {
			if (tmp->key > max)
				max = tmp->key;
			if (tmp->key < min)
				min = tmp->key;
		}
		ATF_REQUIRE_EQ(NULL, ARB_INSERT(tree, root, tmp));
	}

	ins = ARB_MIN(tree, root);
	ATF_REQUIRE_MSG(ins != NULL, "ARB_MIN error");
	ATF_CHECK_EQ(min, ins->key);
	tmp = ins;
	ins = ARB_MAX(tree, root);
	ATF_REQUIRE_MSG(ins != NULL, "ARB_MAX error");
	ATF_CHECK_EQ(max, ins->key);

	ATF_CHECK_EQ(tmp, ARB_REMOVE(tree, root, tmp));

	for (i = 0; i < ITER - 1; i++) {
		tmp = ARB_ROOT(root);
		ATF_REQUIRE_MSG(tmp != NULL, "ARB_ROOT error");
		ATF_CHECK_EQ(tmp, ARB_REMOVE(tree, root, tmp));
	}
}

ATF_TP_ADD_TCS(tp)
{

	ATF_TP_ADD_TC(tp, arb_test);

	return (atf_no_error());
}
@ -446,6 +446,7 @@
11/28	Nik Clayton <nik@FreeBSD.org> born in Peterborough, United Kingdom, 1973
11/28	Stanislav Sedov <stas@FreeBSD.org> born in Chelyabinsk, USSR, 1985
11/29	Doug Moore <dougm@FreeBSD.org> born in Arlington, Texas, United States, 1960
11/30	Dmitri Goutnik <dmgk@FreeBSD.org> born in Minsk, USSR, 1969
12/01	Hajimu Umemoto <ume@FreeBSD.org> born in Nara, Japan, 1961
12/01	Alexey Dokuchaev <danfe@FreeBSD.org> born in Magadan, USSR, 1980
12/02	Ermal Luçi <eri@FreeBSD.org> born in Tirane, Albania, 1980