
First step towards an MP-safe zone allocator:

- have zalloc() and zfree() always lock the vm_zone.
- remove zalloci() and zfreei(), which are now redundant (see the sketch below).

Reviewed by:	bmilekic, jasone
Dag-Erling Smørgrav 2001-01-21 22:23:11 +00:00
parent 0dc3067b45
commit a3ea6d41b9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=71350
9 changed files with 19 additions and 57 deletions
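
For orientation, the following is a minimal user-space sketch of the behaviour this commit leaves in place, not the committed kernel code: zalloc() and zfree() now take the zone lock unconditionally instead of deferring to separate interrupt-safe zalloci()/zfreei() variants. struct zone_model, its field names, and the pthread mutex are invented stand-ins for the kernel's vm_zone and its simple lock.

#include <pthread.h>
#include <stdlib.h>

/* Invented stand-in for the kernel's vm_zone; the real layout differs. */
struct zone_model {
	pthread_mutex_t	lock;		/* models the zone's simple lock */
	void		*freelist;	/* singly linked list of free items */
	size_t		size;		/* item size, at least sizeof(void *) */
};

/* Allocate one item, always under the zone lock (no unlocked fast path). */
void *
zalloc_model(struct zone_model *z)
{
	void *item;

	pthread_mutex_lock(&z->lock);
	item = z->freelist;
	if (item != NULL)
		z->freelist = *(void **)item;	/* pop the free list */
	pthread_mutex_unlock(&z->lock);
	if (item == NULL)
		item = malloc(z->size);		/* crude stand-in for _zget() refilling the zone */
	return (item);
}

/* Return one item to the zone, again always under the lock. */
void
zfree_model(struct zone_model *z, void *item)
{
	pthread_mutex_lock(&z->lock);
	*(void **)item = z->freelist;		/* push onto the free list */
	z->freelist = item;
	pthread_mutex_unlock(&z->lock);
}

With the locking folded into zalloc()/zfree(), the consumer hunks below become mechanical renames of zalloci()/zfreei() call sites, while vm_zone.c and vm_zone.h drop the old entry points.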

View File

@@ -1638,7 +1638,7 @@ static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
pv_entry_count--;
-zfreei(pvzone, pv);
+zfree(pvzone, pv);
}
/*
@@ -1657,7 +1657,7 @@ get_pv_entry(void)
pmap_pagedaemon_waken = 1;
wakeup (&vm_pages_needed);
}
-return zalloci(pvzone);
+return zalloc(pvzone);
}
/*

View File

@@ -1450,7 +1450,7 @@ free_pv_entry(pv)
pv_entry_t pv;
{
pv_entry_count--;
-zfreei(pvzone, pv);
+zfree(pvzone, pv);
}
/*
@@ -1469,7 +1469,7 @@ get_pv_entry(void)
pmap_pagedaemon_waken = 1;
wakeup (&vm_pages_needed);
}
-return zalloci(pvzone);
+return zalloc(pvzone);
}
/*

View File

@@ -1450,7 +1450,7 @@ free_pv_entry(pv)
pv_entry_t pv;
{
pv_entry_count--;
-zfreei(pvzone, pv);
+zfree(pvzone, pv);
}
/*
@@ -1469,7 +1469,7 @@ get_pv_entry(void)
pmap_pagedaemon_waken = 1;
wakeup (&vm_pages_needed);
}
-return zalloci(pvzone);
+return zalloc(pvzone);
}
/*

View File

@@ -740,7 +740,7 @@ static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
pv_entry_count--;
-zfreei(pvzone, pv);
+zfree(pvzone, pv);
}
/*
@@ -753,7 +753,7 @@ static pv_entry_t
get_pv_entry(void)
{
if (!pvinit)
-return zalloci(pvbootzone);
+return zalloc(pvbootzone);
pv_entry_count++;
if (pv_entry_high_water &&
@@ -762,7 +762,7 @@ get_pv_entry(void)
pmap_pagedaemon_waken = 1;
wakeup (&vm_pages_needed);
}
-return (pv_entry_t) IA64_PHYS_TO_RR7(vtophys(zalloci(pvzone)));
+return (pv_entry_t) IA64_PHYS_TO_RR7(vtophys(zalloc(pvzone)));
}
/*

View File

@@ -113,7 +113,7 @@ soalloc(waitok)
{
struct socket *so;
-so = zalloci(socket_zone);
+so = zalloc(socket_zone);
if (so) {
/* XXX race condition for reentrant kernel */
bzero(so, sizeof *so);
@@ -211,7 +211,7 @@ sodealloc(so)
}
#endif
crfree(so->so_cred);
-zfreei(so->so_zone, so);
+zfree(so->so_zone, so);
}
int

View File

@@ -147,7 +147,7 @@ in_pcballoc(so, pcbinfo, p)
{
register struct inpcb *inp;
-inp = zalloci(pcbinfo->ipi_zone);
+inp = zalloc(pcbinfo->ipi_zone);
if (inp == NULL)
return (ENOBUFS);
bzero((caddr_t)inp, sizeof(*inp));
@@ -582,7 +582,7 @@ in_pcbdetach(inp)
}
ip_freemoptions(inp->inp_moptions);
inp->inp_vflag = 0;
-zfreei(ipi->ipi_zone, inp);
+zfree(ipi->ipi_zone, inp);
}
/*

View File

@@ -624,7 +624,7 @@ in6_pcbdetach(inp)
ip_freemoptions(inp->inp_moptions);
inp->inp_vflag = 0;
-zfreei(ipi->ipi_zone, inp);
+zfree(ipi->ipi_zone, inp);
}
/*

View File

@@ -39,6 +39,9 @@ static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");
#define ZONE_ROUNDING 32
#define ZENTRY_FREE 0x12342378
+static void *_zget(vm_zone_t z);
/*
* void *zalloc(vm_zone_t zone) --
* Returns an item from a specified zone.
@@ -298,40 +301,10 @@ zunlock(vm_zone_t z, int s)
* void zfree(vm_zone_t zone, void *item) --
* Frees an item back to a specified zone.
*
-* void *zalloci(vm_zone_t zone) --
-* Returns an item from a specified zone, interrupt safe.
-*
-* void zfreei(vm_zone_t zone, void *item) --
-* Frees an item back to a specified zone, interrupt safe.
-*
*/
void *
zalloc(vm_zone_t z)
{
-#if defined(SMP)
-return zalloci(z);
-#else
-return _zalloc(z);
-#endif
-}
-void
-zfree(vm_zone_t z, void *item)
-{
-#ifdef SMP
-zfreei(z, item);
-#else
-_zfree(z, item);
-#endif
-}
-/*
-* Zone allocator/deallocator. These are interrupt / (or potentially SMP)
-* safe. The raw zalloc/zfree routines are not interrupt safe, but are fast.
-*/
-void *
-zalloci(vm_zone_t z)
-{
int s;
void *item;
@@ -343,7 +316,7 @@ zalloci(vm_zone_t z)
}
void
-zfreei(vm_zone_t z, void *item)
+zfree(vm_zone_t z, void *item)
{
int s;
@@ -356,7 +329,7 @@ zfreei(vm_zone_t z, void *item)
/*
* Internal zone routine. Not to be called from external (non vm_zone) code.
*/
-void *
+static void *
_zget(vm_zone_t z)
{
int i;
@@ -405,24 +378,16 @@ _zget(vm_zone_t z)
if (lockstatus(&kernel_map->lock, NULL)) {
int s;
s = splvm();
-#ifdef SMP
simple_unlock(&z->zlock);
-#endif
item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
-#ifdef SMP
simple_lock(&z->zlock);
-#endif
if (item != NULL)
zone_kmem_pages += z->zalloc;
splx(s);
} else {
-#ifdef SMP
simple_unlock(&z->zlock);
-#endif
item = (void *) kmem_alloc(kernel_map, nbytes);
-#ifdef SMP
simple_lock(&z->zlock);
-#endif
if (item != NULL)
zone_kern_pages += z->zalloc;
}

View File

@@ -51,10 +51,7 @@ int zinitna __P((vm_zone_t z, struct vm_object *obj, char *name,
int size, int nentries, int flags, int zalloc));
void * zalloc __P((vm_zone_t z));
void zfree __P((vm_zone_t z, void *item));
-void * zalloci __P((vm_zone_t z));
-void zfreei __P((vm_zone_t z, void *item));
void zbootinit __P((vm_zone_t z, char *name, int size, void *item,
int nitems));
-void * _zget __P((vm_zone_t z));
#endif /* _SYS_ZONE_H */
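
To illustrate how the surviving interface described in the vm_zone.c comment ("Returns an item from a specified zone" / "Frees an item back to a specified zone") reads at a call site, here is a hypothetical consumer written in the style of the hunks above. foo, foo_zone, foo_alloc() and foo_free() are invented names, and the zone is assumed to have been created elsewhere (for example with zinit(), which this commit does not touch).

#include <sys/param.h>
#include <vm/vm_zone.h>

struct foo {
	int	f_refs;
};

static vm_zone_t foo_zone;	/* assumed to be set up at subsystem init */

static struct foo *
foo_alloc(void)
{
	struct foo *fp;

	fp = zalloc(foo_zone);	/* the zone lock is now taken internally */
	if (fp != NULL)
		fp->f_refs = 1;
	return (fp);
}

static void
foo_free(struct foo *fp)
{
	zfree(foo_zone, fp);	/* likewise serialized by the zone lock */
}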