mirror of https://git.FreeBSD.org/src.git
* Add a "how" argument to uma_zone constructors and initialization functions,
  so that they know whether the allocation is supposed to be able to sleep or not.
* Allow uma_zone constructors and initialization functions to return either
  success or error.  Almost all of the ones in the tree currently return
  success unconditionally, but mbuf is a notable exception: the packet zone
  constructor wants to be able to fail if it cannot suballocate an mbuf
  cluster, and the mbuf allocators want to be able to fail in general in a
  MAC kernel if the MAC mbuf initializer fails.  This fixes the panics people
  are seeing when they run out of memory for mbuf clusters.
* Allow debug.nosleepwithlocks on WITNESS to be disabled, without changing
  the default.

Both bmilekic and jeff have reviewed the changes made to make failable zone
allocations work.
This commit is contained in:
parent
154b8df2ed
commit
b23f72e98a
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=132987
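To make the new contract concrete, here is a minimal consumer-side sketch. The zone name "foo", struct foo, and M_FOO are hypothetical and not part of this commit; only the constructor/destructor signatures, the flags argument, and the NULL-on-failure behaviour come from the change below.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <vm/uma.h>

MALLOC_DEFINE(M_FOO, "foodata", "example per-item buffers");

struct foo {
    void *f_buf;
};

static uma_zone_t foo_zone;

/* Constructors may now fail: return 0 on success or an errno value. */
static int
foo_ctor(void *mem, int size, void *arg, int flags)
{
    struct foo *f = mem;

    /* "flags" carries the caller's M_WAITOK/M_NOWAIT intent. */
    f->f_buf = malloc(PAGE_SIZE, M_FOO, flags);
    if (f->f_buf == NULL)
        return (ENOMEM);
    return (0);
}

static void
foo_dtor(void *mem, int size, void *arg)
{
    struct foo *f = mem;

    free(f->f_buf, M_FOO);
}

static void
foo_setup(void *dummy __unused)
{
    foo_zone = uma_zcreate("foo", sizeof(struct foo),
        foo_ctor, foo_dtor, NULL, NULL, UMA_ALIGN_PTR, 0);
}
SYSINIT(foo_zone_setup, SI_SUB_DRIVERS, SI_ORDER_ANY, foo_setup, NULL);

static struct foo *
foo_get(void)
{
    /* A failed constructor now shows up as NULL instead of a panic. */
    return (uma_zalloc(foo_zone, M_NOWAIT));
}

Passing the flags down matters: a constructor that sleeps on behalf of an M_NOWAIT caller would reintroduce exactly the lock-held-across-sleep problem the debug.nosleepwithlocks knob is meant to catch.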
@@ -437,25 +437,21 @@ en_dump_packet(struct en_softc *sc, struct mbuf *m)
 *
 * LOCK: any, not needed
 */
static void
en_map_ctor(void *mem, int size, void *arg)
static int
en_map_ctor(void *mem, int size, void *arg, int flags)
{
    struct en_softc *sc = arg;
    struct en_map *map = mem;
    int err;

    if (map->sc == NULL)
        map->sc = sc;

    if (!(map->flags & ENMAP_ALLOC)) {
        err = bus_dmamap_create(sc->txtag, 0, &map->map);
        if (err != 0)
            if_printf(&sc->ifatm.ifnet,
                "cannot create DMA map %d\n", err);
        else
            map->flags |= ENMAP_ALLOC;
    err = bus_dmamap_create(sc->txtag, 0, &map->map);
    if (err != 0) {
        if_printf(&sc->ifatm.ifnet, "cannot create DMA map %d\n", err);
        return (err);
    }
    map->flags &= ~ENMAP_LOADED;
    map->flags = ENMAP_ALLOC;
    map->sc = sc;
    return (0);
}

/*
@@ -490,8 +486,7 @@ en_map_fini(void *mem, int size)
{
    struct en_map *map = mem;

    if (map->flags & ENMAP_ALLOC)
        bus_dmamap_destroy(map->sc->txtag, map->map);
    bus_dmamap_destroy(map->sc->txtag, map->map);
}

/*********************************************************************/
@@ -1041,11 +1036,9 @@ en_start(struct ifnet *ifp)
 * locks.
 */
map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
if (map == NULL || !(map->flags & ENMAP_ALLOC)) {
if (map == NULL) {
    /* drop that packet */
    EN_COUNT(sc->stats.txnomap);
    if (map != NULL)
        uma_zfree(sc->map_zone, map);
    EN_UNLOCK(sc);
    m_freem(m);
    continue;
@@ -2330,13 +2323,11 @@ en_service(struct en_softc *sc)
if (m != NULL) {
    /* M_NOWAIT - called from interrupt context */
    map = uma_zalloc_arg(sc->map_zone, sc, M_NOWAIT);
    if (map == NULL || !(map->flags & ENMAP_ALLOC)) {
    if (map == NULL) {
        rx.post_skip += mlen;
        m_freem(m);
        DBG(sc, SERV, ("rx%td: out of maps",
            slot - sc->rxslot));
        if (map->map != NULL)
            uma_zfree(sc->map_zone, map);
        goto skip;
    }
    rx.m = m;
@@ -111,13 +111,13 @@ uma_zone_t zone_pack;
/*
 * Local prototypes.
 */
static void mb_ctor_mbuf(void *, int, void *);
static void mb_ctor_clust(void *, int, void *);
static void mb_ctor_pack(void *, int, void *);
static int mb_ctor_mbuf(void *, int, void *, int);
static int mb_ctor_clust(void *, int, void *, int);
static int mb_ctor_pack(void *, int, void *, int);
static void mb_dtor_mbuf(void *, int, void *);
static void mb_dtor_clust(void *, int, void *); /* XXX */
static void mb_dtor_pack(void *, int, void *); /* XXX */
static void mb_init_pack(void *, int);
static int mb_init_pack(void *, int, int);
static void mb_fini_pack(void *, int);

static void mb_reclaim(void *);
@@ -180,19 +180,20 @@ mbuf_init(void *dummy)
 * contains call-specific information required to support the
 * mbuf allocation API.
 */
static void
mb_ctor_mbuf(void *mem, int size, void *arg)
static int
mb_ctor_mbuf(void *mem, int size, void *arg, int how)
{
    struct mbuf *m;
    struct mb_args *args;
#ifdef MAC
    int error;
#endif
    int flags;
    int how;
    short type;

    m = (struct mbuf *)mem;
    args = (struct mb_args *)arg;
    flags = args->flags;
    how = args->how;
    type = args->type;

    m->m_type = type;
@@ -206,17 +207,14 @@ mb_ctor_mbuf(void *mem, int size, void *arg)
        SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
        /* If the label init fails, fail the alloc */
        if (mac_init_mbuf(m, how) != 0) {
            m_free(m);
            /* XXX*/ panic("mb_ctor_mbuf(): can't deal with failure!");
            /* return 0; */
        }
        error = mac_init_mbuf(m, how);
        if (error)
            return (error);
#endif
    } else
        m->m_data = m->m_dat;
    mbstat.m_mbufs += 1;	/* XXX */
    /* return 1;
    */
    return (0);
}

/*
@@ -252,8 +250,8 @@ mb_dtor_pack(void *mem, int size, void *arg)
 * Here the 'arg' pointer points to the Mbuf which we
 * are configuring cluster storage for.
 */
static void
mb_ctor_clust(void *mem, int size, void *arg)
static int
mb_ctor_clust(void *mem, int size, void *arg, int how)
{
    struct mbuf *m;

@@ -269,8 +267,7 @@ mb_ctor_clust(void *mem, int size, void *arg)
        m->m_ext.ext_buf);
    *(m->m_ext.ref_cnt) = 1;
    mbstat.m_mclusts += 1;	/* XXX */
    /* return 1;
    */
    return (0);
}

/* XXX */
@@ -284,17 +281,18 @@ mb_dtor_clust(void *mem, int size, void *arg)
 * The Packet secondary zone's init routine, executed on the
 * object's transition from keg slab to zone cache.
 */
static void
mb_init_pack(void *mem, int size)
static int
mb_init_pack(void *mem, int size, int how)
{
    struct mbuf *m;

    m = (struct mbuf *)mem;
    m->m_ext.ext_buf = NULL;
    uma_zalloc_arg(zone_clust, m, M_NOWAIT);
    if (m->m_ext.ext_buf == NULL)	/* XXX */
        panic("mb_init_pack(): Can't deal with failure yet.");
    uma_zalloc_arg(zone_clust, m, how);
    if (m->m_ext.ext_buf == NULL)
        return (ENOMEM);
    mbstat.m_mclusts -= 1;	/* XXX */
    return (0);
}

/*
@@ -315,19 +313,21 @@ mb_fini_pack(void *mem, int size)
/*
 * The "packet" keg constructor.
 */
static void
mb_ctor_pack(void *mem, int size, void *arg)
static int
mb_ctor_pack(void *mem, int size, void *arg, int how)
{
    struct mbuf *m;
    struct mb_args *args;
    int flags, how;
#ifdef MAC
    int error;
#endif
    int flags;
    short type;

    m = (struct mbuf *)mem;
    args = (struct mb_args *)arg;
    flags = args->flags;
    type = args->type;
    how = args->how;

    m->m_type = type;
    m->m_next = NULL;
@@ -346,17 +346,14 @@ mb_ctor_pack(void *mem, int size, void *arg)
        SLIST_INIT(&m->m_pkthdr.tags);
#ifdef MAC
        /* If the label init fails, fail the alloc */
        if (mac_init_mbuf(m, how) != 0) {
            m_free(m);
            /* XXX*/ panic("mb_ctor_pack(): can't deal with failure!");
            /* return 0; */
        }
        error = mac_init_mbuf(m, how);
        if (error)
            return (error);
#endif
    }
    mbstat.m_mbufs += 1;	/* XXX */
    mbstat.m_mclusts += 1;	/* XXX */
    /* return 1;
    */
    return (0);
}

/*
@@ -74,9 +74,9 @@ static void doenterpgrp(struct proc *, struct pgrp *);
static void orphanpg(struct pgrp *pg);
static void pgadjustjobc(struct pgrp *pgrp, int entering);
static void pgdelete(struct pgrp *);
static void proc_ctor(void *mem, int size, void *arg);
static int proc_ctor(void *mem, int size, void *arg, int flags);
static void proc_dtor(void *mem, int size, void *arg);
static void proc_init(void *mem, int size);
static int proc_init(void *mem, int size, int flags);
static void proc_fini(void *mem, int size);

/*
@@ -128,12 +128,13 @@ procinit()
/*
 * Prepare a proc for use.
 */
static void
proc_ctor(void *mem, int size, void *arg)
static int
proc_ctor(void *mem, int size, void *arg, int flags)
{
    struct proc *p;

    p = (struct proc *)mem;
    return (0);
}

/*
@@ -178,8 +179,8 @@ proc_dtor(void *mem, int size, void *arg)
/*
 * Initialize type-stable parts of a proc (when newly created).
 */
static void
proc_init(void *mem, int size)
static int
proc_init(void *mem, int size, int flags)
{
    struct proc *p;
    struct thread *td;
@@ -195,6 +196,7 @@ proc_init(void *mem, int size)
    proc_linkup(p, kg, ke, td);
    bzero(&p->p_mtx, sizeof(struct mtx));
    mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
    return (0);
}

/*
@@ -143,8 +143,8 @@ MTX_SYSINIT(tid_lock, &tid_lock, "TID lock", MTX_DEF);
/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
    struct thread *td;

@@ -165,6 +165,7 @@ thread_ctor(void *mem, int size, void *arg)
     * next thread.
     */
    td->td_critnest = 1;
    return (0);
}

/*
@@ -202,8 +203,8 @@ thread_dtor(void *mem, int size, void *arg)
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
static int
thread_init(void *mem, int size, int flags)
{
    struct thread *td;
    struct tid_bitmap_part *bmp, *new;
@@ -251,6 +252,7 @@ thread_init(void *mem, int size)
    td->td_sleepqueue = sleepq_alloc();
    td->td_turnstile = turnstile_alloc();
    td->td_sched = (struct td_sched *)&td[1];
    return (0);
}

/*
@@ -287,25 +289,27 @@ thread_fini(void *mem, int size)
/*
 * Initialize type-stable parts of a kse (when newly created).
 */
static void
kse_init(void *mem, int size)
static int
kse_init(void *mem, int size, int flags)
{
    struct kse *ke;

    ke = (struct kse *)mem;
    ke->ke_sched = (struct ke_sched *)&ke[1];
    return (0);
}

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static void
ksegrp_init(void *mem, int size)
static int
ksegrp_init(void *mem, int size, int flags)
{
    struct ksegrp *kg;

    kg = (struct ksegrp *)mem;
    kg->kg_sched = (struct kg_sched *)&kg[1];
    return (0);
}

/*
@@ -182,9 +182,9 @@ static void pipe_clone_write_buffer(struct pipe *wpipe);
static int pipespace(struct pipe *cpipe, int size);
static int pipespace_new(struct pipe *cpipe, int size);

static void pipe_zone_ctor(void *mem, int size, void *arg);
static int pipe_zone_ctor(void *mem, int size, void *arg, int flags);
static void pipe_zone_dtor(void *mem, int size, void *arg);
static void pipe_zone_init(void *mem, int size);
static int pipe_zone_init(void *mem, int size, int flags);
static void pipe_zone_fini(void *mem, int size);

static uma_zone_t pipe_zone;
@@ -201,8 +201,8 @@ pipeinit(void *dummy __unused)
    KASSERT(pipe_zone != NULL, ("pipe_zone not initialized"));
}

static void
pipe_zone_ctor(void *mem, int size, void *arg)
static int
pipe_zone_ctor(void *mem, int size, void *arg, int flags)
{
    struct pipepair *pp;
    struct pipe *rpipe, *wpipe;
@@ -247,6 +247,7 @@ pipe_zone_ctor(void *mem, int size, void *arg)
    pp->pp_label = NULL;

    atomic_add_int(&amountpipes, 2);
    return (0);
}

static void
@@ -261,8 +262,8 @@ pipe_zone_dtor(void *mem, int size, void *arg)
    atomic_subtract_int(&amountpipes, 2);
}

static void
pipe_zone_init(void *mem, int size)
static int
pipe_zone_init(void *mem, int size, int flags)
{
    struct pipepair *pp;

@@ -271,6 +272,7 @@ pipe_zone_init(void *mem, int size)
    pp = (struct pipepair *)mem;

    mtx_init(&pp->pp_mtx, "pipe mutex", NULL, MTX_DEF | MTX_RECURSE);
    return (0);
}

static void
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");

uma_zone_t zone_label;

static void mac_labelzone_ctor(void *mem, int size, void *arg);
static int mac_labelzone_ctor(void *mem, int size, void *arg, int flags);
static void mac_labelzone_dtor(void *mem, int size, void *arg);

void
@@ -57,8 +57,8 @@ mac_labelzone_init(void)
        UMA_ALIGN_PTR, 0);
}

static void
mac_labelzone_ctor(void *mem, int size, void *arg)
static int
mac_labelzone_ctor(void *mem, int size, void *arg, int flags)
{
    struct label *label;

@@ -66,6 +66,7 @@ mac_labelzone_ctor(void *mem, int size, void *arg)
    label = mem;
    bzero(label, sizeof(*label));
    label->l_flags = MAC_FLAG_INITIALIZED;
    return (0);
}

static void
@@ -72,7 +72,6 @@
 */
struct mb_args {
    int flags;	/* Flags for mbuf being allocated */
    int how;	/* How to allocate: M_WAITOK or M_DONTWAIT */
    short type;	/* Type of mbuf being allocated */
};
#endif /* _KERNEL */
@@ -343,7 +342,6 @@ m_get(int how, short type)
    struct mb_args args;

    args.flags = 0;
    args.how = how;
    args.type = type;
    return (uma_zalloc_arg(zone_mbuf, &args, how));
}
@@ -357,7 +355,6 @@ m_getclr(int how, short type)
    struct mb_args args;

    args.flags = 0;
    args.how = how;
    args.type = type;
    m = uma_zalloc_arg(zone_mbuf, &args, how);
    if (m != NULL)
@@ -372,7 +369,6 @@ m_gethdr(int how, short type)
    struct mb_args args;

    args.flags = M_PKTHDR;
    args.how = how;
    args.type = type;
    return (uma_zalloc_arg(zone_mbuf, &args, how));
}
@@ -384,7 +380,6 @@ m_getcl(int how, short type, int flags)
    struct mb_args args;

    args.flags = flags;
    args.how = how;
    args.type = type;
    return (uma_zalloc_arg(zone_pack, &args, how));
}
sys/vm/uma.h
@@ -54,15 +54,17 @@ typedef struct uma_zone * uma_zone_t;
 * item  A pointer to the memory which has been allocated.
 * arg   The arg field passed to uma_zalloc_arg
 * size  The size of the allocated item
 * flags See zalloc flags
 *
 * Returns:
 *	Nothing
 *	0      on success
 *	errno  on failure
 *
 * Discussion:
 *	The constructor is called just before the memory is returned
 *	to the user. It may block if necessary.
 */
typedef void (*uma_ctor)(void *mem, int size, void *arg);
typedef int (*uma_ctor)(void *mem, int size, void *arg, int flags);

/*
 * Item destructor
@@ -88,15 +90,17 @@ typedef void (*uma_dtor)(void *mem, int size, void *arg);
 * Arguments:
 *	item  A pointer to the memory which has been allocated.
 *	size  The size of the item being initialized.
 *	flags See zalloc flags
 *
 * Returns:
 *	Nothing
 *	0      on success
 *	errno  on failure
 *
 * Discussion:
 *	The initializer is called when the memory is cached in the uma zone.
 *	this should be the same state that the destructor leaves the object in.
 */
typedef void (*uma_init)(void *mem, int size);
typedef int (*uma_init)(void *mem, int size, int flags);

/*
 * Item discard function
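For the init/fini pair the same contract applies; a hypothetical initializer that pre-allocates a per-item buffer might look like the sketch below. Only the typedef shapes above come from this change; struct bar and M_BARBUF are illustrative.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <vm/uma.h>

MALLOC_DEFINE(M_BARBUF, "barbuf", "example per-item buffers");

struct bar {
    char *b_storage;
};

/* Runs when an item moves from the keg slab into the zone; may fail. */
static int
bar_zinit(void *mem, int size, int flags)
{
    struct bar *b = mem;

    b->b_storage = malloc(PAGE_SIZE, M_BARBUF, flags);
    return (b->b_storage == NULL ? ENOMEM : 0);
}

/* Must leave the item in the state bar_zinit() expects to find it in. */
static void
bar_zfini(void *mem, int size)
{
    struct bar *b = mem;

    free(b->b_storage, M_BARBUF);
    b->b_storage = NULL;
}

slab_zalloc() and uma_zalloc_bucket() call the initializer with the caller's wait flag and unwind with the finalizer if it fails, as the uma_core.c hunks below show.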
@@ -186,6 +186,8 @@ struct uma_bucket_zone bucket_zones[] = {

uint8_t bucket_size[BUCKET_ZONES];

enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };

/* Prototypes.. */

static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
@@ -196,11 +198,11 @@ static uma_slab_t slab_zalloc(uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
static void keg_ctor(void *, int, void *);
static int keg_ctor(void *, int, void *, int);
static void keg_dtor(void *, int, void *);
static void zone_ctor(void *, int, void *);
static int zone_ctor(void *, int, void *, int);
static void zone_dtor(void *, int, void *);
static void zero_init(void *, int);
static int zero_init(void *, int, int);
static void zone_small_init(uma_zone_t zone);
static void zone_large_init(uma_zone_t zone);
static void zone_foreach(void (*zfunc)(uma_zone_t));
@@ -211,7 +213,7 @@ static void hash_free(struct uma_hash *hash);
static void uma_timeout(void *);
static void uma_startup3(void);
static void *uma_zalloc_internal(uma_zone_t, void *, int);
static void uma_zfree_internal(uma_zone_t, void *, void *, int);
static void uma_zfree_internal(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(int, int);
@@ -221,7 +223,7 @@ static int uma_zalloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
static void zone_drain(uma_zone_t);
static void uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
static uma_zone_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
    uma_fini fini, int align, u_int16_t flags);

void uma_print_zone(uma_zone_t);
@@ -230,7 +232,7 @@ static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);

#ifdef WITNESS
static int nosleepwithlocks = 1;
SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RD, &nosleepwithlocks,
SYSCTL_INT(_debug, OID_AUTO, nosleepwithlocks, CTLFLAG_RW, &nosleepwithlocks,
    0, "Convert M_WAITOK to M_NOWAIT to avoid lock-held-across-sleep paths");
#else
static int nosleepwithlocks = 0;
@@ -312,7 +314,7 @@ bucket_free(uma_bucket_t bucket)

    idx = howmany(bucket->ub_entries, 1 << BUCKET_SHIFT);
    ubz = &bucket_zones[bucket_size[idx]];
    uma_zfree_internal(ubz->ubz_zone, bucket, NULL, 0);
    uma_zfree_internal(ubz->ubz_zone, bucket, NULL, SKIP_NONE);
}

static void
@@ -532,7 +534,7 @@ hash_free(struct uma_hash *hash)
        return;
    if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
        uma_zfree_internal(hashzone,
            hash->uh_slab_hash, NULL, 0);
            hash->uh_slab_hash, NULL, SKIP_NONE);
    else
        free(hash->uh_slab_hash, M_UMAHASH);
}
@@ -581,7 +583,7 @@ bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
         */
        if (mzone)
            slab = vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
        uma_zfree_internal(zone, item, slab, 1);
        uma_zfree_internal(zone, item, slab, SKIP_DTOR);
    }
}

@@ -740,7 +742,8 @@ zone_drain(uma_zone_t zone)
                obj);
        }
        if (keg->uk_flags & UMA_ZONE_OFFPAGE)
            uma_zfree_internal(keg->uk_slabzone, slab, NULL, 0);
            uma_zfree_internal(keg->uk_slabzone, slab, NULL,
                SKIP_NONE);
#ifdef UMA_DEBUG
        printf("%s: Returning %d bytes.\n",
            zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
@@ -801,6 +804,8 @@ slab_zalloc(uma_zone_t zone, int wait)
    mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
        &flags, wait);
    if (mem == NULL) {
        if (keg->uk_flags & UMA_ZONE_OFFPAGE)
            uma_zfree_internal(keg->uk_slabzone, slab, NULL, 0);
        ZONE_LOCK(zone);
        return (NULL);
    }
@@ -828,10 +833,32 @@ slab_zalloc(uma_zone_t zone, int wait)
            slabref->us_freelist[i].us_refcnt = 0;
    }

    if (keg->uk_init)
    if (keg->uk_init != NULL) {
        for (i = 0; i < keg->uk_ipers; i++)
            keg->uk_init(slab->us_data + (keg->uk_rsize * i),
                keg->uk_size);
            if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
                keg->uk_size, wait) != 0)
                break;
        if (i != keg->uk_ipers) {
            if (keg->uk_fini != NULL) {
                for (i--; i > -1; i--)
                    keg->uk_fini(slab->us_data +
                        (keg->uk_rsize * i),
                        keg->uk_size);
            }
            if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
                (keg->uk_flags & UMA_ZONE_REFCNT))
                for (i = 0; i < keg->uk_ppera; i++)
                    vsetobj((vm_offset_t)mem +
                        (i * PAGE_SIZE), NULL);
            if (keg->uk_flags & UMA_ZONE_OFFPAGE)
                uma_zfree_internal(keg->uk_slabzone, slab,
                    NULL, SKIP_NONE);
            keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera,
                flags);
            ZONE_LOCK(zone);
            return (NULL);
        }
    }
    ZONE_LOCK(zone);

    if (keg->uk_flags & UMA_ZONE_HASH)
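The trickiest piece of the new code is the rollback in slab_zalloc() shown just above: when an item initializer fails partway through a slab, the items that were already initialized have to be finalized again, in reverse order, before the slab is released. A simplified, self-contained model of that pattern (ordinary C, not kernel code) looks like this:

#include <stddef.h>

typedef int  (*item_init_t)(void *item, int size, int flags);
typedef void (*item_fini_t)(void *item, int size);

/*
 * Initialize nitems items laid out rsize bytes apart; if any initializer
 * fails, run the finalizer over the already-initialized items in reverse
 * order, mirroring what slab_zalloc() now does before freeing the slab.
 */
int
init_all_or_none(char *base, size_t nitems, size_t rsize, int size,
    int flags, item_init_t init, item_fini_t fini)
{
    size_t i;

    for (i = 0; i < nitems; i++)
        if (init(base + rsize * i, size, flags) != 0)
            break;
    if (i == nitems)
        return (0);             /* every item initialized */
    if (fini != NULL)
        while (i-- > 0)         /* undo the ones that succeeded */
            fini(base + rsize * i, size);
    return (-1);
}

The real slab_zalloc() additionally resets the per-page object pointers for UMA_ZONE_MALLOC/UMA_ZONE_REFCNT kegs and frees the off-page slab header before returning NULL.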
@@ -996,10 +1023,11 @@ page_free(void *mem, int size, u_int8_t flags)
 *
 * Arguments/Returns follow uma_init specifications
 */
static void
zero_init(void *mem, int size)
static int
zero_init(void *mem, int size, int flags)
{
    bzero(mem, size);
    return (0);
}

/*
@@ -1122,8 +1150,8 @@ zone_large_init(uma_zone_t zone)
 * Arguments/Returns follow uma_ctor specifications
 *	udata  Actually uma_kctor_args
 */
static void
keg_ctor(void *mem, int size, void *udata)
static int
keg_ctor(void *mem, int size, void *udata, int flags)
{
    struct uma_kctor_args *arg = udata;
    uma_keg_t keg = mem;
@@ -1262,6 +1290,7 @@ keg_ctor(void *mem, int size, void *udata)
    mtx_lock(&uma_mtx);
    LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
    mtx_unlock(&uma_mtx);
    return (0);
}

/*
@@ -1271,8 +1300,8 @@ keg_ctor(void *mem, int size, void *udata)
 *	udata  Actually uma_zctor_args
 */

static void
zone_ctor(void *mem, int size, void *udata)
static int
zone_ctor(void *mem, int size, void *udata, int flags)
{
    struct uma_zctor_args *arg = udata;
    uma_zone_t zone = mem;
@@ -1307,10 +1336,12 @@ zone_ctor(void *mem, int size, void *udata)
        ZONE_UNLOCK(zone);
        mtx_unlock(&uma_mtx);
    } else if (arg->keg == NULL) {
        uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
            arg->align, arg->flags);
        if (uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
            arg->align, arg->flags) == NULL)
            return (ENOMEM);
    } else {
        struct uma_kctor_args karg;
        int error;

        /* We should only be here from uma_startup() */
        karg.size = arg->size;
@@ -1319,7 +1350,10 @@ zone_ctor(void *mem, int size, void *udata)
        karg.align = arg->align;
        karg.flags = arg->flags;
        karg.zone = zone;
        keg_ctor(arg->keg, sizeof(struct uma_keg), &karg);
        error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
            flags);
        if (error)
            return (error);
    }
    keg = zone->uz_keg;
    zone->uz_lock = &keg->uk_lock;
@@ -1331,7 +1365,7 @@ zone_ctor(void *mem, int size, void *udata)
    if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
        KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
            ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
        return;
        return (0);
    }

    if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
@@ -1340,6 +1374,7 @@ zone_ctor(void *mem, int size, void *udata)
        zone->uz_count = keg->uk_ipers;
    else
        zone->uz_count = BUCKET_MAX;
    return (0);
}

/*
@@ -1406,7 +1441,7 @@ zone_dtor(void *arg, int size, void *udata)
        LIST_REMOVE(keg, uk_link);
        LIST_REMOVE(zone, uz_link);
        mtx_unlock(&uma_mtx);
        uma_zfree_internal(kegs, keg, NULL, 0);
        uma_zfree_internal(kegs, keg, NULL, SKIP_NONE);
    }
    zone->uz_keg = NULL;
}
@@ -1543,7 +1578,7 @@ uma_startup(void *bootmem)
    args.align = 32 - 1;
    args.flags = UMA_ZFLAG_INTERNAL;
    /* The initial zone has no Per cpu queues so it's smaller */
    zone_ctor(kegs, sizeof(struct uma_zone), &args);
    zone_ctor(kegs, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
    printf("Filling boot free list.\n");
@@ -1570,7 +1605,7 @@ uma_startup(void *bootmem)
    args.align = 32 - 1;
    args.flags = UMA_ZFLAG_INTERNAL;
    /* The initial zone has no Per cpu queues so it's smaller */
    zone_ctor(zones, sizeof(struct uma_zone), &args);
    zone_ctor(zones, sizeof(struct uma_zone), &args, M_WAITOK);

#ifdef UMA_DEBUG
    printf("Initializing pcpu cache locks.\n");
@@ -1653,7 +1688,7 @@ uma_startup3(void)
#endif
}

static void
static uma_zone_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    int align, u_int16_t flags)
{
@@ -1665,7 +1700,7 @@ uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
    args.align = align;
    args.flags = flags;
    args.zone = zone;
    zone = uma_zalloc_internal(kegs, &args, M_WAITOK);
    return (uma_zalloc_internal(kegs, &args, M_WAITOK));
}

/* See uma.h */
@@ -1714,7 +1749,7 @@ uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
void
uma_zdestroy(uma_zone_t zone)
{
    uma_zfree_internal(zones, zone, NULL, 0);
    uma_zfree_internal(zones, zone, NULL, SKIP_NONE);
}

/* See uma.h */
@@ -1735,13 +1770,22 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
    if (!(flags & M_NOWAIT)) {
        KASSERT(curthread->td_intr_nesting_level == 0,
            ("malloc(M_WAITOK) in interrupt context"));
        badness = nosleepwithlocks;
        if (nosleepwithlocks) {
#ifdef WITNESS
        badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
            NULL,
            "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
            zone->uz_name);
            badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK,
                NULL,
                "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
                zone->uz_name);
#else
        badness = 1;
#endif
        } else {
            badness = 0;
#ifdef WITNESS
            WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
                "malloc(M_WAITOK) of \"%s\"", zone->uz_name);
#endif
        }
        if (badness) {
            flags &= ~M_WAITOK;
            flags |= M_NOWAIT;
@@ -1772,8 +1816,14 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
            ZONE_UNLOCK(zone);
#endif
            CPU_UNLOCK(cpu);
            if (zone->uz_ctor)
                zone->uz_ctor(item,zone->uz_keg->uk_size,udata);
            if (zone->uz_ctor != NULL) {
                if (zone->uz_ctor(item, zone->uz_keg->uk_size,
                    udata, flags) != 0) {
                    uma_zfree_internal(zone, item, udata,
                        SKIP_DTOR);
                    return (NULL);
                }
            }
            if (flags & M_ZERO)
                bzero(item, zone->uz_keg->uk_size);
            return (item);
@@ -1959,7 +2009,7 @@ uma_zalloc_bucket(uma_zone_t zone, int flags)
    uma_bucket_t bucket;
    uma_slab_t slab;
    int16_t saved;
    int max;
    int max, origflags = flags;

    /*
     * Try this zone's free list first so we don't allocate extra buckets.
@@ -2021,8 +2071,21 @@ uma_zalloc_bucket(uma_zone_t zone, int flags)

        ZONE_UNLOCK(zone);
        for (i = saved; i < bucket->ub_cnt; i++)
            zone->uz_init(bucket->ub_bucket[i],
                zone->uz_keg->uk_size);
            if (zone->uz_init(bucket->ub_bucket[i],
                zone->uz_keg->uk_size, origflags) != 0)
                break;
        /*
         * If we couldn't initialize the whole bucket, put the
         * rest back onto the freelist.
         */
        if (i != bucket->ub_cnt) {
            int j;

            for (j = i; j < bucket->ub_cnt; j++)
                uma_zfree_internal(zone, bucket->ub_bucket[j],
                    NULL, SKIP_FINI);
            bucket->ub_cnt = i;
        }
        ZONE_LOCK(zone);
    }

@@ -2083,10 +2146,18 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
     * a keg slab directly to the user, and the user is expecting it
     * to be both zone-init'd as well as zone-ctor'd.
     */
    if (zone->uz_init != NULL)
        zone->uz_init(item, keg->uk_size);
    if (zone->uz_ctor != NULL)
        zone->uz_ctor(item, keg->uk_size, udata);
    if (zone->uz_init != NULL) {
        if (zone->uz_init(item, keg->uk_size, flags) != 0) {
            uma_zfree_internal(zone, item, udata, SKIP_FINI);
            return (NULL);
        }
    }
    if (zone->uz_ctor != NULL) {
        if (zone->uz_ctor(item, keg->uk_size, udata, flags) != 0) {
            uma_zfree_internal(zone, item, udata, SKIP_DTOR);
            return (NULL);
        }
    }
    if (flags & M_ZERO)
        bzero(item, keg->uk_size);

@@ -2102,10 +2173,10 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
    uma_bucket_t bucket;
    int bflags;
    int cpu;
    int skip;
    enum zfreeskip skip;

    /* This is the fast path free */
    skip = 0;
    skip = SKIP_NONE;
    keg = zone->uz_keg;

#ifdef UMA_DEBUG_ALLOC_1
@@ -2121,7 +2192,7 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)

    if (zone->uz_dtor) {
        zone->uz_dtor(item, keg->uk_size, udata);
        skip = 1;
        skip = SKIP_DTOR;
    }

zfree_restart:
@@ -2255,10 +2326,11 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip the dtor, it was done in uma_zfree_arg
 *	skip   Skip dtors and finis
 */
static void
uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
uma_zfree_internal(uma_zone_t zone, void *item, void *udata,
    enum zfreeskip skip)
{
    uma_slab_t slab;
    uma_keg_t keg;
@@ -2267,9 +2339,9 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)

    keg = zone->uz_keg;

    if (!skip && zone->uz_dtor)
    if (skip < SKIP_DTOR && zone->uz_dtor)
        zone->uz_dtor(item, keg->uk_size, udata);
    if (zone->uz_fini)
    if (skip < SKIP_FINI && zone->uz_fini)
        zone->uz_fini(item, keg->uk_size);

    ZONE_LOCK(zone);
@@ -2386,6 +2458,7 @@ uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
}

/* See uma.h */
/* XXX uk_freef is not actually used with the zone locked */
void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
@@ -2395,6 +2468,7 @@ uma_zone_set_freef(uma_zone_t zone, uma_free freef)
}

/* See uma.h */
/* XXX uk_allocf is not actually used with the zone locked */
void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
@@ -51,13 +51,14 @@ __FBSDID("$FreeBSD$");
static const u_int32_t uma_junk = 0xdeadc0de;

/*
 * Checks an item to make sure it hasn't been overwritten since freed.
 * Checks an item to make sure it hasn't been overwritten since it was freed,
 * prior to subsequent reallocation.
 *
 * Complies with standard ctor arg/return
 *
 */
void
trash_ctor(void *mem, int size, void *arg)
int
trash_ctor(void *mem, int size, void *arg, int flags)
{
    int cnt;
    u_int32_t *p;
@@ -68,6 +69,7 @@ trash_ctor(void *mem, int size, void *arg)
        if (*p != uma_junk)
            panic("Memory modified after free %p(%d) val=%x @ %p\n",
                mem, size, *p, p);
    return (0);
}

/*
@@ -94,10 +96,11 @@ trash_dtor(void *mem, int size, void *arg)
 * Complies with standard init arg/return
 *
 */
void
trash_init(void *mem, int size)
int
trash_init(void *mem, int size, int flags)
{
    trash_dtor(mem, size, NULL);
    return (0);
}

/*
@@ -109,17 +112,11 @@ trash_init(void *mem, int size)
void
trash_fini(void *mem, int size)
{
    trash_ctor(mem, size, NULL);
    (void)trash_ctor(mem, size, NULL, 0);
}

/*
 * Checks an item to make sure it hasn't been overwritten since freed.
 *
 * Complies with standard ctor arg/return
 *
 */
void
mtrash_ctor(void *mem, int size, void *arg)
int
mtrash_ctor(void *mem, int size, void *arg, int flags)
{
    struct malloc_type **ksp;
    u_int32_t *p = mem;
@@ -137,6 +134,7 @@ mtrash_ctor(void *mem, int size, void *arg)
        panic("Most recently used by %s\n", (*ksp == NULL)?
            "none" : (*ksp)->ks_shortdesc);
    }
    return (0);
}

/*
@@ -164,8 +162,8 @@ mtrash_dtor(void *mem, int size, void *arg)
 * Complies with standard init arg/return
 *
 */
void
mtrash_init(void *mem, int size)
int
mtrash_init(void *mem, int size, int flags)
{
    struct malloc_type **ksp;

@@ -174,10 +172,12 @@ mtrash_init(void *mem, int size)
    ksp = (struct malloc_type **)mem;
    ksp += (size / sizeof(struct malloc_type *)) - 1;
    *ksp = NULL;
    return (0);
}

/*
 * Checks an item to make sure it hasn't been overwritten since it was freed.
 * Checks an item to make sure it hasn't been overwritten since it was freed,
 * prior to freeing it back to available memory.
 *
 * Complies with standard fini arg/return
 *
@@ -185,7 +185,7 @@ mtrash_init(void *mem, int size)
void
mtrash_fini(void *mem, int size)
{
    mtrash_ctor(mem, size, NULL);
    (void)mtrash_ctor(mem, size, NULL, 0);
}

static uma_slab_t
@@ -37,15 +37,15 @@
#ifndef VM_UMA_DBG_H
#define VM_UMA_DBG_H

void trash_ctor(void *mem, int size, void *arg);
int trash_ctor(void *mem, int size, void *arg, int flags);
void trash_dtor(void *mem, int size, void *arg);
void trash_init(void *mem, int size);
int trash_init(void *mem, int size, int flags);
void trash_fini(void *mem, int size);

/* For use only by malloc */
void mtrash_ctor(void *mem, int size, void *arg);
int mtrash_ctor(void *mem, int size, void *arg, int flags);
void mtrash_dtor(void *mem, int size, void *arg);
void mtrash_init(void *mem, int size);
int mtrash_init(void *mem, int size, int flags);
void mtrash_fini(void *mem, int size);

void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
@@ -137,9 +137,9 @@ static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
static struct vm_object kmapentobj;
static void vmspace_zinit(void *mem, int size);
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static void vm_map_zinit(void *mem, int size);
static int vm_map_zinit(void *mem, int ize, int flags);
static void vm_map_zfini(void *mem, int size);
static void _vm_map_init(vm_map_t map, vm_offset_t min, vm_offset_t max);

@@ -179,15 +179,16 @@ vmspace_zfini(void *mem, int size)
    vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}

static void
vmspace_zinit(void *mem, int size)
static int
vmspace_zinit(void *mem, int size, int flags)
{
    struct vmspace *vm;

    vm = (struct vmspace *)mem;

    vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map));
    (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
    pmap_pinit(vmspace_pmap(vm));
    return (0);
}

static void
@@ -200,8 +201,8 @@ vm_map_zfini(void *mem, int size)
    sx_destroy(&map->lock);
}

static void
vm_map_zinit(void *mem, int size)
static int
vm_map_zinit(void *mem, int size, int flags)
{
    vm_map_t map;

@@ -211,6 +212,7 @@ vm_map_zinit(void *mem, int size)
    map->infork = 0;
    mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
    sx_init(&map->lock, "user map");
    return (0);
}

#ifdef INVARIANTS
@@ -149,7 +149,7 @@ static int next_index;
static uma_zone_t obj_zone;
#define VM_OBJECTS_INIT 256

static void vm_object_zinit(void *mem, int size);
static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);
@@ -175,8 +175,8 @@ vm_object_zdtor(void *mem, int size, void *arg)
}
#endif

static void
vm_object_zinit(void *mem, int size)
static int
vm_object_zinit(void *mem, int size, int flags)
{
    vm_object_t object;

@@ -188,6 +188,7 @@ vm_object_zinit(void *mem, int size)
    object->paging_in_progress = 0;
    object->resident_page_count = 0;
    object->shadow_count = 0;
    return (0);
}

void