mirror of https://git.FreeBSD.org/src.git (synced 2024-12-01 08:27:59 +00:00)

commit 1b367556b5
parent 69b4045657

Convert all simplelocks to mutexes and remove the simplelock implementations.

Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=71576
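The same pattern is applied in every file touched: each Lite2-style simplelock and its s_lock()/s_unlock() calls become a struct mtx with mtx_init()/mtx_enter()/mtx_exit(), MTX_SPIN for locks taken from interrupt context and MTX_DEF otherwise. A compressed before/after sketch of that pattern — the lock name "foo" is made up for illustration, but the calls are the ones that appear in the hunks below:

	/* Before: Lite2-style spinning simplelock (an error to hold while asleep). */
	static struct simplelock foo_lock;

	static void
	foo_before(void)
	{
		s_lock_init(&foo_lock);
		s_lock(&foo_lock);		/* spin until acquired */
		/* ... critical section ... */
		s_unlock(&foo_lock);
	}

	/* After: SMPng mutex; the type flag passed to mtx_enter()/mtx_exit()
	   matches the one given to mtx_init(). */
	static struct mtx foo_mtx;

	static void
	foo_after(void)
	{
		mtx_init(&foo_mtx, "foo", MTX_DEF);	/* MTX_SPIN for interrupt-side locks */
		mtx_enter(&foo_mtx, MTX_DEF);
		/* ... critical section ... */
		mtx_exit(&foo_mtx, MTX_DEF);
	}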
@@ -277,92 +277,19 @@ globaldata_find(int cpuid)
 	return cpuid_to_globaldata[cpuid];
 }
 
-/* Implementation of simplelocks */
-
-/*
- * Atomically swap the value of *p with val. Return the old value of *p.
- */
-static __inline int
-atomic_xchg(volatile u_int *p, u_int val)
-{
-	u_int32_t oldval, temp;
-	__asm__ __volatile__ (
-		"1:\tldl_l %0,%3\n\t"		/* load current value */
-		"mov %4,%1\n\t"			/* value to store */
-		"stl_c %1,%2\n\t"		/* attempt to store */
-		"beq %1,2f\n\t"			/* if the store failed, spin */
-		"br 3f\n"			/* it worked, exit */
-		"2:\tbr 1b\n"			/* *p not updated, loop */
-		"3:\n"				/* it worked */
		: "=&r"(oldval), "=r"(temp), "=m" (*p)
-		: "m"(*p), "r"(val)
-		: "memory");
-	return oldval;
-}
-
-void
-s_lock_init(struct simplelock *lkp)
-{
-	lkp->lock_data = 0;
-}
-
-void
-s_lock(struct simplelock *lkp)
-{
-	for (;;) {
-		if (s_lock_try(lkp))
-			return;
-
-		/*
-		 * Spin until clear.
-		 */
-		while (lkp->lock_data)
-			;
-	}
-}
-
-int
-s_lock_try(struct simplelock *lkp)
-{
-	u_int32_t oldval, temp;
-
-	__asm__ __volatile__ (
-		"1:\tldl_l %0,%3\n\t"		/* load current value */
-		"blbs %0,2f\n"			/* if set, give up now */
-		"mov 1,%1\n\t"			/* value to store */
-		"stl_c %1,%2\n\t"		/* attempt to store */
-		"beq %1,3f\n\t"			/* if the store failed, spin */
-		"2:"				/* exit */
-		".section .text2,\"ax\"\n"	/* improve branch prediction */
-		"3:\tbr 1b\n"			/* *p not updated, loop */
-		".previous\n"
-		: "=&r"(oldval), "=r"(temp), "=m" (lkp->lock_data)
-		: "m"(lkp->lock_data)
-		: "memory");
-
-	if (!oldval) {
-		/*
-		 * It was clear, return success.
-		 */
-		alpha_mb();
-		return 1;
-	}
-	return 0;
-}
-
 /* Other stuff */
 
 /* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx smp_rv_mtx;
 
 /* only 1 CPU can panic at a time :) */
-struct simplelock panic_lock;
+struct mtx panic_mtx;
 
 static void
 init_locks(void)
 {
-	s_lock_init(&smp_rv_lock);
-	s_lock_init(&panic_lock);
+	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+	mtx_init(&panic_mtx, "panic", MTX_DEF);
 }
 
 void
@@ -912,14 +839,9 @@ smp_rendezvous(void (* setup_func)(void *),
 	       void (* teardown_func)(void *),
 	       void *arg)
 {
-	int s;
-
-	/* disable interrupts on this CPU, save interrupt status */
-	s = save_intr();
-	disable_intr();
 
 	/* obtain rendezvous lock */
-	s_lock(&smp_rv_lock);	/* XXX sleep here? NOWAIT flag? */
+	mtx_enter(&smp_rv_mtx, MTX_SPIN);
 
 	/* set static function pointers */
 	smp_rv_setup_func = setup_func;
@@ -936,10 +858,7 @@ smp_rendezvous(void (* setup_func)(void *),
 	smp_rendezvous_action();
 
 	/* release lock */
-	s_unlock(&smp_rv_lock);
-
-	/* restore interrupt flag */
-	restore_intr(s);
+	mtx_exit(&smp_rv_mtx, MTX_SPIN);
 }
 
 /*
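The atomic_xchg() and s_lock_try() removed above are Alpha load-locked/store-conditional loops: ldl_l loads and tags the cache line, stl_c succeeds only if nothing intervened, and alpha_mb() supplies the barrier that makes a successful acquire ordered. A rough C11 rendering of what the removed assembly implemented — a userland sketch for illustration, not code from this commit:

	#include <stdatomic.h>

	/* The removed atomic_xchg(): the ldl_l/stl_c retry loop is a swap. */
	static unsigned
	atomic_xchg_c11(atomic_uint *p, unsigned val)
	{
		return (atomic_exchange(p, val));
	}

	/* The removed s_lock_try(): give up at once if the low bit is set
	   (the "blbs" test), otherwise attempt the store; acquire ordering
	   stands in for the alpha_mb() on success. */
	static int
	s_lock_try_c11(atomic_uint *lock_data)
	{
		unsigned expected = 0;

		return (atomic_compare_exchange_strong_explicit(lock_data,
		    &expected, 1, memory_order_acquire, memory_order_relaxed));
	}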
@@ -29,44 +29,7 @@
 #ifndef _MACHINE_LOCK_H_
 #define	_MACHINE_LOCK_H_
 
-
-/*
- * Simple spin lock.
- * It is an error to hold one of these locks while a process is sleeping.
- */
-struct simplelock {
-	volatile u_int	lock_data;
-};
-
-/* functions in mp_machdep.c */
-void	s_lock_init __P((struct simplelock *));
-void	s_lock __P((struct simplelock *));
-int	s_lock_try __P((struct simplelock *));
-void	ss_lock __P((struct simplelock *));
-void	ss_unlock __P((struct simplelock *));
-void	s_lock_np __P((struct simplelock *));
-void	s_unlock_np __P((struct simplelock *));
-
-/* inline simplelock functions */
-static __inline void
-s_unlock(struct simplelock *lkp)
-{
-	alpha_mb();
-	lkp->lock_data = 0;
-}
-
-extern struct simplelock panic_lock;
-
-#if !defined(SIMPLELOCK_DEBUG) && MAXCPU > 1
-/*
- * This set of defines turns on the real functions in i386/isa/apic_ipl.s.
- */
-#define	simple_lock_init(alp)	s_lock_init(alp)
-#define	simple_lock(alp)	s_lock(alp)
-#define	simple_lock_try(alp)	s_lock_try(alp)
-#define	simple_unlock(alp)	s_unlock(alp)
-
-#endif /* !SIMPLELOCK_DEBUG && MAXCPU > 1 */
+extern struct mtx panic_mtx;
 
 #define COM_LOCK()
 #define COM_UNLOCK()
@@ -1896,6 +1896,13 @@ init386(first)
 	LIST_INIT(&proc0.p_contested);
 
 	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
+#ifdef SMP
+	/*
+	 * Interrupts can happen very early, so initialize imen_mtx here, rather
+	 * than in init_locks().
+	 */
+	mtx_init(&imen_mtx, "imen", MTX_SPIN);
+#endif
 
 	/*
 	 * Giant is used early for at least debugger traps and unexpected traps.
@@ -238,7 +238,7 @@ typedef struct BASETABLE_ENTRY {
 #define MP_ANNOUNCE_POST	0x19
 
 /* used to hold the AP's until we are ready to release them */
-struct simplelock	ap_boot_lock;
+struct mtx		ap_boot_mtx;
 
 /** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
 int	current_postcode;
@@ -318,6 +318,9 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
  * Local data and functions.
  */
 
+/* Set to 1 once we're ready to let the APs out of the pen. */
+static volatile int aps_ready = 0;
+
 static int	mp_capable;
 static u_int	boot_address;
 static u_int	base_memory;
@@ -345,36 +348,40 @@ static void release_aps(void *dummy);
  */
 
 /* critical region around IO APIC, apic_imen */
-struct simplelock	imen_lock;
+struct mtx		imen_mtx;
 
 /* lock region used by kernel profiling */
-struct simplelock	mcount_lock;
+struct mtx		mcount_mtx;
 
 #ifdef USE_COMLOCK
 /* locks com (tty) data/hardware accesses: a FASTINTR() */
-struct simplelock	com_lock;
+struct mtx		com_mtx;
 #endif /* USE_COMLOCK */
 
 /* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx smp_rv_mtx;
 
 /* only 1 CPU can panic at a time :) */
-struct simplelock panic_lock;
+struct mtx panic_mtx;
 
 static void
 init_locks(void)
 {
-	s_lock_init(&mcount_lock);
+	/*
+	 * XXX The mcount mutex probably needs to be statically initialized,
+	 * since it will be used even in the function calls that get us to this
+	 * point.
+	 */
+	mtx_init(&mcount_mtx, "mcount", MTX_DEF);
 
-	s_lock_init(&imen_lock);
-	s_lock_init(&smp_rv_lock);
-	s_lock_init(&panic_lock);
+	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+	mtx_init(&panic_mtx, "panic", MTX_DEF);
 
 #ifdef USE_COMLOCK
-	s_lock_init(&com_lock);
+	mtx_init(&com_mtx, "com", MTX_SPIN);
 #endif /* USE_COMLOCK */
 
-	s_lock_init(&ap_boot_lock);
+	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
 }
 
 /*
@@ -655,9 +662,6 @@ mp_enable(u_int boot_addr)
 	/* initialize all SMP locks */
 	init_locks();
 
-	/* obtain the ap_boot_lock */
-	s_lock(&ap_boot_lock);
-
 	/* start each Application Processor */
 	start_all_aps(boot_addr);
 }
@@ -2247,8 +2251,12 @@ ap_init(void)
 {
 	u_int	apic_id;
 
+	/* spin until all the AP's are ready */
+	while (!aps_ready)
+		/* spin */ ;
+
 	/* lock against other AP's that are waking up */
-	s_lock(&ap_boot_lock);
+	mtx_enter(&ap_boot_mtx, MTX_SPIN);
 
 	/* BSP may have changed PTD while we're waiting for the lock */
 	cpu_invltlb();
@@ -2297,7 +2305,7 @@ ap_init(void)
 	}
 
 	/* let other AP's wake up now */
-	s_unlock(&ap_boot_lock);
+	mtx_exit(&ap_boot_mtx, MTX_SPIN);
 
 	/* wait until all the AP's are up */
 	while (smp_started == 0)
@@ -2851,10 +2859,9 @@ smp_rendezvous(void (* setup_func)(void *),
 		     void (* teardown_func)(void *),
 		     void *arg)
 {
-	u_int	efl;
 
 	/* obtain rendezvous lock */
-	s_lock(&smp_rv_lock);	/* XXX sleep here? NOWAIT flag? */
+	mtx_enter(&smp_rv_mtx, MTX_SPIN);
 
 	/* set static function pointers */
 	smp_rv_setup_func = setup_func;
@@ -2864,27 +2871,22 @@ smp_rendezvous(void (* setup_func)(void *),
 	smp_rv_waiters[0] = 0;
 	smp_rv_waiters[1] = 0;
 
-	/* disable interrupts on this CPU, save interrupt status */
-	efl = read_eflags();
-	write_eflags(efl & ~PSL_I);
-
-	/* signal other processors, which will enter the IPI with interrupts off */
+	/*
+	 * signal other processors, which will enter the IPI with interrupts off
+	 */
 	all_but_self_ipi(XRENDEZVOUS_OFFSET);
 
 	/* call executor function */
 	smp_rendezvous_action();
 
-	/* restore interrupt flag */
-	write_eflags(efl);
-
 	/* release lock */
-	s_unlock(&smp_rv_lock);
+	mtx_exit(&smp_rv_mtx, MTX_SPIN);
 }
 
 void
 release_aps(void *dummy __unused)
 {
-	s_unlock(&ap_boot_lock);
+	atomic_store_rel_int(&aps_ready, 1);
 }
 
 SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
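The ap_init()/release_aps() hunks above also replace a lock-based handoff with a published flag: instead of the BSP holding ap_boot_lock until release_aps(), each AP now spins on aps_ready before taking ap_boot_mtx, and the BSP sets the flag with a release store (atomic_store_rel_int). A sketch of that handshake in C11 atomics — illustrative only, the kernel uses its own atomic primitives and a volatile int:

	#include <stdatomic.h>

	static atomic_int aps_ready;	/* the diff uses a volatile int */

	/* BSP side, as in release_aps(): release ordering makes everything
	   the BSP set up beforehand visible to APs that observe the flag. */
	static void
	release_aps_sketch(void)
	{
		atomic_store_explicit(&aps_ready, 1, memory_order_release);
	}

	/* AP side, as in ap_init(): spin until released. */
	static void
	ap_wait_sketch(void)
	{
		while (atomic_load_explicit(&aps_ready, memory_order_acquire) == 0)
			;	/* spin */
	}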
@@ -66,8 +66,8 @@
 #ifdef SMP
 #define	MCOUNT_ENTER(s)	{ s = read_eflags(); \
 			  __asm __volatile("cli" : : : "memory"); \
-			  s_lock_np(&mcount_lock); }
-#define	MCOUNT_EXIT(s)	{ s_unlock_np(&mcount_lock); write_eflags(s); }
+			  mtx_enter(&mcount_mtx, MTX_DEF); }
+#define	MCOUNT_EXIT(s)	{ mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
 #else
 #define	MCOUNT_ENTER(s)	{ s = read_eflags(); disable_intr(); }
 #define	MCOUNT_EXIT(s)	(write_eflags(s))
@@ -28,20 +28,6 @@
 #ifndef _MACHINE_LOCK_H_
 #define	_MACHINE_LOCK_H_
 
-/*
- * Simple spin lock.
- * It is an error to hold one of these locks while a process is sleeping.
- */
-struct simplelock {
-	volatile u_int	lock_data;
-};
-
-/* functions in mp_machdep.c */
-void	s_lock_init __P((struct simplelock *));
-void	s_lock __P((struct simplelock *));
-int	s_lock_try __P((struct simplelock *));
-void	s_unlock_np __P((struct simplelock *));
-
 #define COM_LOCK()
 #define COM_UNLOCK()
 #define COM_DISABLE_INTR() COM_LOCK()
@@ -180,7 +180,6 @@ i386/i386/perfmon.c optional perfmon
 i386/i386/perfmon.c		optional	perfmon	profiling-routine
 i386/i386/pmap.c		standard
 i386/i386/procfs_machdep.c	standard
-i386/i386/simplelock.s		optional	smp
 i386/i386/support.s		standard
 i386/i386/swtch.s		standard
 i386/i386/sys_machdep.c		standard
@@ -171,7 +171,6 @@ i386/i386/perfmon.c optional perfmon
 i386/i386/perfmon.c		optional	perfmon	profiling-routine
 i386/i386/pmap.c		standard
 i386/i386/procfs_machdep.c	standard
-i386/i386/simplelock.s		optional	smp
 i386/i386/support.s		standard
 i386/i386/swtch.s		standard
 i386/i386/sys_machdep.c		standard
@@ -47,8 +47,7 @@
 #include <sys/vnode.h>
 #include <sys/malloc.h>
 #include <sys/stat.h>
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <isofs/cd9660/iso.h>
 #include <isofs/cd9660/cd9660_node.h>
@@ -60,9 +59,7 @@
 static struct iso_node **isohashtbl;
 static u_long isohash;
 #define	INOHASH(device, inum)	((minor(device) + ((inum)>>12)) & isohash)
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock cd9660_ihash_slock;
-#endif
+static struct mtx cd9660_ihash_mtx;
 
 static void cd9660_ihashrem __P((struct iso_node *));
 static unsigned cd9660_chars2ui __P((unsigned char *begin, int len));
@@ -76,7 +73,7 @@ cd9660_init(vfsp)
 {
 
 	isohashtbl = hashinit(desiredvnodes, M_ISOFSMNT, &isohash);
-	simple_lock_init(&cd9660_ihash_slock);
+	mtx_init(&cd9660_ihash_mtx, "cd9660_ihash", MTX_DEF);
 	return (0);
 }
 
@@ -105,18 +102,18 @@ cd9660_ihashget(dev, inum)
 	struct vnode *vp;
 
 loop:
-	simple_lock(&cd9660_ihash_slock);
+	mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
 	for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) {
 		if (inum == ip->i_number && dev == ip->i_dev) {
 			vp = ITOV(ip);
 			mtx_enter(&vp->v_interlock, MTX_DEF);
-			simple_unlock(&cd9660_ihash_slock);
+			mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (vp);
 		}
 	}
-	simple_unlock(&cd9660_ihash_slock);
+	mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
 	return (NULL);
 }
 
@@ -130,14 +127,14 @@ cd9660_ihashins(ip)
 	struct proc *p = curproc;	/* XXX */
 	struct iso_node **ipp, *iq;
 
-	simple_lock(&cd9660_ihash_slock);
+	mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
 	ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)];
 	if ((iq = *ipp) != NULL)
 		iq->i_prev = &ip->i_next;
 	ip->i_next = iq;
 	ip->i_prev = ipp;
 	*ipp = ip;
-	simple_unlock(&cd9660_ihash_slock);
+	mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
 
 	lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
 }
@@ -151,7 +148,7 @@ cd9660_ihashrem(ip)
 {
 	register struct iso_node *iq;
 
-	simple_lock(&cd9660_ihash_slock);
+	mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
 	if ((iq = ip->i_next) != NULL)
 		iq->i_prev = ip->i_prev;
 	*ip->i_prev = iq;
@@ -159,7 +156,7 @@
 	ip->i_next = NULL;
 	ip->i_prev = NULL;
 #endif
-	simple_unlock(&cd9660_ihash_slock);
+	mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
 }
 
 /*
@@ -188,7 +185,7 @@ cd9660_inactive(ap)
 	 * so that it can be reused immediately.
 	 */
 	if (ip->inode.iso_mode == 0)
-		vrecycle(vp, (struct simplelock *)0, p);
+		vrecycle(vp, NULL, p);
 	return error;
 }
 
@@ -335,9 +335,7 @@ struct hpfsmount {
 #define	H_INVAL		0x0010	/* Invalid node */
 struct hpfsnode {
 	struct lock	h_lock;		/* Must be first, for std vops */
-#ifndef NULL_SIMPLELOCKS
-	struct simplelock h_interlock;
-#endif
+	struct mtx	h_interlock;
 
 	LIST_ENTRY(hpfsnode)	h_hash;
 
@@ -42,6 +42,7 @@
 #include <sys/mount.h>
 #include <sys/malloc.h>
 #include <sys/proc.h>
+#include <sys/mutex.h>
 
 #include <fs/hpfs/hpfs.h>
 
@@ -53,9 +54,7 @@ MALLOC_DEFINE(M_HPFSHASH, "HPFS hash", "HPFS node hash tables");
 static LIST_HEAD(hphashhead, hpfsnode)	*hpfs_hphashtbl;
 static u_long	hpfs_hphash;		/* size of hash table - 1 */
 #define	HPNOHASH(dev, lsn)	(&hpfs_hphashtbl[(minor(dev) + (lsn)) & hpfs_hphash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock hpfs_hphash_slock;
-#endif
+static struct mtx hpfs_hphash_mtx;
 struct lock hpfs_hphash_lock;
 
 /*
@@ -68,7 +67,7 @@ hpfs_hphashinit()
 	lockinit (&hpfs_hphash_lock, PINOD, "hpfs_hphashlock", 0, 0);
 	hpfs_hphashtbl = HASHINIT(desiredvnodes, M_HPFSHASH, M_WAITOK,
 	    &hpfs_hphash);
-	simple_lock_init(&hpfs_hphash_slock);
+	mtx_init(&hpfs_hphash_mtx, "hpfs hphash", MTX_DEF);
 }
 
 /*
@@ -79,6 +78,7 @@ hpfs_hphashdestroy(void)
 {
 
 	lockdestroy(&hpfs_hphash_lock);
+	mtx_destroy(&hpfs_hphash_mtx);
 }
 
 /*
@@ -92,11 +92,11 @@ hpfs_hphashlookup(dev, ino)
 {
 	struct hpfsnode *hp;
 
-	simple_lock(&hpfs_hphash_slock);
+	mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
 	LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash)
 		if (ino == hp->h_no && dev == hp->h_dev)
 			break;
-	simple_unlock(&hpfs_hphash_slock);
+	mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
 
 	return (hp);
 }
@@ -110,14 +110,14 @@ hpfs_hphashget(dev, ino)
 	struct hpfsnode *hp;
 
 loop:
-	simple_lock(&hpfs_hphash_slock);
+	mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
 	LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) {
 		if (ino == hp->h_no && dev == hp->h_dev) {
 			LOCKMGR(&hp->h_intlock, LK_EXCLUSIVE | LK_INTERLOCK, &hpfs_hphash_slock, NULL);
 			return (hp);
 		}
 	}
-	simple_unlock(&hpfs_hphash_slock);
+	mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
 	return (hp);
 }
 #endif
@@ -132,7 +132,7 @@ hpfs_hphashvget(dev, ino, p)
 	struct vnode *vp;
 
 loop:
-	simple_lock(&hpfs_hphash_slock);
+	mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
 	LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) {
 		if (ino == hp->h_no && dev == hp->h_dev) {
 			vp = HPTOV(hp);
@@ -143,7 +143,7 @@ hpfs_hphashvget(dev, ino, p)
 			return (vp);
 		}
 	}
-	simple_unlock(&hpfs_hphash_slock);
+	mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
 	return (NULLVP);
 }
 
@@ -156,11 +156,11 @@ hpfs_hphashins(hp)
 {
 	struct hphashhead *hpp;
 
-	simple_lock(&hpfs_hphash_slock);
+	mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
 	hpp = HPNOHASH(hp->h_dev, hp->h_no);
 	hp->h_flag |= H_HASHED;
 	LIST_INSERT_HEAD(hpp, hp, h_hash);
-	simple_unlock(&hpfs_hphash_slock);
+	mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
 }
 
 /*
@@ -170,7 +170,7 @@ void
 hpfs_hphashrem(hp)
 	struct hpfsnode *hp;
 {
-	simple_lock(&hpfs_hphash_slock);
+	mtx_enter(&hpfs_hphash_mtx, MTX_DEF);
 	if (hp->h_flag & H_HASHED) {
 		hp->h_flag &= ~H_HASHED;
 		LIST_REMOVE(hp, h_hash);
@@ -179,5 +179,5 @@ hpfs_hphashrem(hp)
 		hp->h_hash.le_prev = NULL;
 #endif
 	}
-	simple_unlock(&hpfs_hphash_slock);
+	mtx_exit(&hpfs_hphash_mtx, MTX_DEF);
 }
@@ -686,7 +686,8 @@ hpfs_vget(
 	if (ino == (ino_t)hpmp->hpm_su.su_rootfno)
 		vp->v_flag |= VROOT;
 
-	simple_lock_init(&hp->h_interlock);
+	mtx_init(&hp->h_interlock, "hpfsnode interlock", MTX_DEF);
+
 	lockinit(&hp->h_lock, PINOD, "hpnode", 0, 0);
 
 	hp->h_flag = H_INVAL;
@@ -665,7 +665,7 @@ hpfs_inactive(ap)
 	if (hp->h_flag & H_INVAL) {
 		VOP__UNLOCK(vp,0,ap->a_p);
 #if defined(__FreeBSD__)
-		vrecycle(vp, (struct simplelock *)0, ap->a_p);
+		vrecycle(vp, NULL, ap->a_p);
 #else /* defined(__NetBSD__) */
 		vgone(vp);
 #endif
@@ -700,6 +700,7 @@ hpfs_reclaim(ap)
 	}
 
 	lockdestroy(&hp->h_lock);
+	mtx_destroy(&hp->h_interlock);
 
 	vp->v_data = NULL;
 
@@ -56,12 +56,11 @@
 #include <sys/bio.h>
 #include <sys/buf.h>
 #include <sys/vnode.h>
+#include <sys/mutex.h>
 
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
 
-#include <machine/mutex.h>
-
 #include <msdosfs/bpb.h>
 #include <msdosfs/msdosfsmount.h>
 #include <msdosfs/direntry.h>
@@ -74,9 +73,7 @@ static struct denode **dehashtbl;
 static u_long dehash;			/* size of hash table - 1 */
 #define	DEHASH(dev, dcl, doff)	(dehashtbl[(minor(dev) + (dcl) + (doff) / \
 				sizeof(struct direntry)) & dehash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock dehash_slock;
-#endif
+static struct mtx dehash_mtx;
 
 union _qcvt {
 	quad_t qcvt;
@@ -107,7 +104,7 @@ msdosfs_init(vfsp)
 	struct vfsconf *vfsp;
 {
 	dehashtbl = hashinit(desiredvnodes/2, M_MSDOSFSMNT, &dehash);
-	simple_lock_init(&dehash_slock);
+	mtx_init(&dehash_mtx, "msdosfs dehash", MTX_DEF);
 	return (0);
 }
 
@@ -118,6 +115,7 @@ msdosfs_uninit(vfsp)
 
 	if (dehashtbl)
 		free(dehashtbl, M_MSDOSFSMNT);
+	mtx_destroy(&dehash_mtx);
 	return (0);
 }
 
@@ -132,7 +130,7 @@ msdosfs_hashget(dev, dirclust, diroff)
 	struct vnode *vp;
 
 loop:
-	simple_lock(&dehash_slock);
+	mtx_enter(&dehash_mtx, MTX_DEF);
 	for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) {
 		if (dirclust == dep->de_dirclust
 		    && diroff == dep->de_diroffset
@@ -140,13 +138,13 @@ msdosfs_hashget(dev, dirclust, diroff)
 		    && dep->de_refcnt != 0) {
 			vp = DETOV(dep);
 			mtx_enter(&vp->v_interlock, MTX_DEF);
-			simple_unlock(&dehash_slock);
+			mtx_exit(&dehash_mtx, MTX_DEF);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (dep);
 		}
 	}
-	simple_unlock(&dehash_slock);
+	mtx_exit(&dehash_mtx, MTX_DEF);
 	return (NULL);
 }
 
@@ -156,7 +154,7 @@ msdosfs_hashins(dep)
 {
 	struct denode **depp, *deq;
 
-	simple_lock(&dehash_slock);
+	mtx_enter(&dehash_mtx, MTX_DEF);
 	depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset);
 	deq = *depp;
 	if (deq)
@@ -164,7 +162,7 @@ msdosfs_hashins(dep)
 	dep->de_next = deq;
 	dep->de_prev = depp;
 	*depp = dep;
-	simple_unlock(&dehash_slock);
+	mtx_exit(&dehash_mtx, MTX_DEF);
 }
 
 static void
@@ -173,7 +171,7 @@ msdosfs_hashrem(dep)
 {
 	struct denode *deq;
 
-	simple_lock(&dehash_slock);
+	mtx_enter(&dehash_mtx, MTX_DEF);
 	deq = dep->de_next;
 	if (deq)
 		deq->de_prev = dep->de_prev;
@@ -182,7 +180,7 @@ msdosfs_hashrem(dep)
 		dep->de_next = NULL;
 	dep->de_prev = NULL;
 #endif
-	simple_unlock(&dehash_slock);
+	mtx_exit(&dehash_mtx, MTX_DEF);
 }
 
 /*
@@ -721,6 +719,6 @@ msdosfs_inactive(ap)
 		dep->de_Name[0]);
 #endif
 	if (dep->de_Name[0] == SLOT_DELETED)
-		vrecycle(vp, (struct simplelock *)0, p);
+		vrecycle(vp, NULL, p);
 	return (error);
 }
@@ -61,8 +61,7 @@
 #include <sys/fcntl.h>
 #include <sys/malloc.h>
 #include <sys/stat.h> 			/* defines ALLPERMS */
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <msdosfs/bpb.h>
 #include <msdosfs/bootsect.h>
@@ -863,7 +862,7 @@ msdosfs_sync(mp, waitfor, cred, p)
 	/*
 	 * Write back each (modified) denode.
 	 */
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
 loop:
 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
 		/*
@@ -883,10 +882,10 @@ msdosfs_sync(mp, waitfor, cred, p)
 			mtx_exit(&vp->v_interlock, MTX_DEF);
 			continue;
 		}
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
 		if (error) {
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			if (error == ENOENT)
 				goto loop;
 			continue;
@@ -896,9 +895,9 @@ msdosfs_sync(mp, waitfor, cred, p)
 			allerror = error;
 		VOP_UNLOCK(vp, 0, p);
 		vrele(vp);
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 
 	/*
 	 * Flush filesystem control info.
@@ -43,6 +43,7 @@
 #include <sys/vnode.h>
 #include <sys/malloc.h>
 #include <sys/mount.h>
+#include <sys/mutex.h>
 
 #include <ntfs/ntfs.h>
 #include <ntfs/ntfs_inode.h>
@@ -56,9 +57,7 @@ MALLOC_DEFINE(M_NTFSNTHASH, "NTFS nthash", "NTFS ntnode hash tables");
 static LIST_HEAD(nthashhead, ntnode) *ntfs_nthashtbl;
 static u_long ntfs_nthash;		/* size of hash table - 1 */
 #define	NTNOHASH(device, inum)	(&ntfs_nthashtbl[(minor(device) + (inum)) & ntfs_nthash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock ntfs_nthash_slock;
-#endif
+static struct mtx ntfs_nthash_mtx;
 struct lock ntfs_hashlock;
 
 /*
@@ -70,7 +69,7 @@ ntfs_nthashinit()
 	lockinit(&ntfs_hashlock, PINOD, "ntfs_nthashlock", 0, 0);
 	ntfs_nthashtbl = HASHINIT(desiredvnodes, M_NTFSNTHASH, M_WAITOK,
 	    &ntfs_nthash);
-	simple_lock_init(&ntfs_nthash_slock);
+	mtx_init(&ntfs_nthash_mtx, "ntfs nthash", MTX_DEF);
 }
 
 /*
@@ -80,6 +79,7 @@ void
 ntfs_nthashdestroy(void)
 {
 	lockdestroy(&ntfs_hashlock);
+	mtx_destroy(&ntfs_nthash_mtx);
 }
 
 /*
@@ -93,11 +93,11 @@ ntfs_nthashlookup(dev, inum)
 {
 	struct ntnode *ip;
 
-	simple_lock(&ntfs_nthash_slock);
+	mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
 	for (ip = NTNOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
-	simple_unlock(&ntfs_nthash_slock);
+	mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
 
 	return (ip);
 }
@@ -111,11 +111,11 @@ ntfs_nthashins(ip)
 {
 	struct nthashhead *ipp;
 
-	simple_lock(&ntfs_nthash_slock);
+	mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
 	ipp = NTNOHASH(ip->i_dev, ip->i_number);
 	LIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
-	simple_unlock(&ntfs_nthash_slock);
+	mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
 }
 
 /*
@@ -125,7 +125,7 @@ void
 ntfs_nthashrem(ip)
 	struct ntnode *ip;
 {
-	simple_lock(&ntfs_nthash_slock);
+	mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
 	if (ip->i_flag & IN_HASHED) {
 		ip->i_flag &= ~IN_HASHED;
 		LIST_REMOVE(ip, i_hash);
@@ -134,5 +134,5 @@ ntfs_nthashrem(ip)
 		ip->i_hash.le_prev = NULL;
 #endif
 	}
-	simple_unlock(&ntfs_nthash_slock);
+	mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
 }
@@ -40,8 +40,7 @@
 #include <sys/lock.h>
 #include <sys/vnode.h>
 #include <sys/malloc.h>
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <ufs/ufs/quota.h>
 #include <ufs/ufs/inode.h>
@@ -54,9 +53,7 @@ static MALLOC_DEFINE(M_UFSIHASH, "UFS ihash", "UFS Inode hash tables");
 static LIST_HEAD(ihashhead, inode) *ihashtbl;
 static u_long	ihash;		/* size of hash table - 1 */
 #define	INOHASH(device, inum)	(&ihashtbl[(minor(device) + (inum)) & ihash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock ufs_ihash_slock;
-#endif
+static struct mtx ufs_ihash_mtx;
 
 /*
  * Initialize inode hash table.
@@ -66,7 +63,7 @@ ufs_ihashinit()
 {
 
 	ihashtbl = hashinit(desiredvnodes, M_UFSIHASH, &ihash);
-	simple_lock_init(&ufs_ihash_slock);
+	mtx_init(&ufs_ihash_mtx, "ufs ihash", MTX_DEF);
 }
 
 /*
@@ -80,11 +77,11 @@ ufs_ihashlookup(dev, inum)
 {
 	struct inode *ip;
 
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 
 	if (ip)
 		return (ITOV(ip));
@@ -105,18 +102,18 @@ ufs_ihashget(dev, inum)
 	struct vnode *vp;
 
 loop:
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
 		if (inum == ip->i_number && dev == ip->i_dev) {
 			vp = ITOV(ip);
 			mtx_enter(&vp->v_interlock, MTX_DEF);
-			simple_unlock(&ufs_ihash_slock);
+			mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (vp);
 		}
 	}
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 	return (NULL);
 }
 
@@ -133,11 +130,11 @@ ufs_ihashins(ip)
 	/* lock the inode, then put it on the appropriate hash list */
 	lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
 
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	ipp = INOHASH(ip->i_dev, ip->i_number);
 	LIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 }
 
 /*
@@ -147,7 +144,7 @@ void
 ufs_ihashrem(ip)
 	struct inode *ip;
 {
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	if (ip->i_flag & IN_HASHED) {
 		ip->i_flag &= ~IN_HASHED;
 		LIST_REMOVE(ip, i_hash);
@@ -156,5 +153,5 @@ ufs_ihashrem(ip)
 		ip->i_hash.le_prev = NULL;
 #endif
 	}
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 }
@@ -56,8 +56,7 @@
 #include <sys/disklabel.h>
 #include <sys/malloc.h>
 #include <sys/stat.h>
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <ufs/ufs/extattr.h>
 #include <ufs/ufs/quota.h>
@@ -575,23 +574,23 @@ ext2_reload(mountp, cred, p)
 	brelse(bp);
 
 loop:
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
 	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
 		if (vp->v_mount != mountp) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			goto loop;
 		}
 		nvp = vp->v_mntvnodes.le_next;
 		/*
 		 * Step 4: invalidate all inactive vnodes.
 		 */
-		if (vrecycle(vp, &mntvnode_slock, p))
+		if (vrecycle(vp, &mntvnode_mtx, p))
 			goto loop;
 		/*
 		 * Step 5: invalidate all cached file data.
 		 */
 		mtx_enter(&vp->v_interlock, MTX_DEF);
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
 			goto loop;
 		}
@@ -613,9 +612,9 @@ ext2_reload(mountp, cred, p)
 		    &ip->i_din);
 		brelse(bp);
 		vput(vp);
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 	return (0);
 }
 
@@ -940,7 +939,7 @@ ext2_sync(mp, waitfor, cred, p)
 	/*
 	 * Write back each (modified) inode.
 	 */
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
 loop:
 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
 		/*
@@ -959,10 +958,10 @@ ext2_sync(mp, waitfor, cred, p)
 			mtx_exit(&vp->v_interlock, MTX_DEF);
 			continue;
 		}
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
 		if (error) {
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			if (error == ENOENT)
 				goto loop;
 			continue;
@@ -971,9 +970,9 @@ ext2_sync(mp, waitfor, cred, p)
 			allerror = error;
 		VOP_UNLOCK(vp, 0, p);
 		vrele(vp);
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 	/*
 	 * Force stale file system control information to be flushed.
 	 */
@ -1,317 +0,0 @@
/*-
 * Copyright (c) 1997, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * credit to Bruce Evans <bde@zeta.org.au> for help with asm optimization.
 */

#include <machine/asmacros.h>		/* miscellaneous macros */
#include <i386/isa/intr_machdep.h>
#include <machine/psl.h>

#include <machine/smptests.h>		/** FAST_HI */

#include "assym.s"

/*
 * The following impliments the primitives described in i386/i386/param.h
 * necessary for the Lite2 lock manager system.
 * The major difference is that the "volatility" of the lock datum has been
 * pushed down from the various functions to lock_data itself.
 */

/*
 * The simple-lock routines are the primitives out of which the lock
 * package is built. The machine-dependent code must implement an
 * atomic test_and_set operation that indivisibly sets the simple lock
 * to non-zero and returns its old value. It also assumes that the
 * setting of the lock to zero below is indivisible. Simple locks may
 * only be used for exclusive locks.
 *
 * struct simplelock {
 * 	volatile int	lock_data;
 * };
 */

/*
 * void
 * s_lock_init(struct simplelock *lkp)
 * {
 * 	lkp->lock_data = 0;
 * }
 */
ENTRY(s_lock_init)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$0, (%eax)
	ret


/*
 * void
 * s_lock(struct simplelock *lkp)
 * {
 * 	while (test_and_set(&lkp->lock_data))
 * 		continue;
 * }
 *
 * Note:
 *	If the acquire fails we do a loop of reads waiting for the lock to
 *	become free instead of continually beating on the lock with xchgl.
 *	The theory here is that the CPU will stay within its cache until
 *	a write by the other CPU updates it, instead of continually updating
 *	the local cache (and thus causing external bus writes) with repeated
 *	writes to the lock.
 */
#ifndef SL_DEBUG

ENTRY(s_lock)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$1, %ecx
setlock:
	xchgl	%ecx, (%eax)
	testl	%ecx, %ecx
	jz	gotit			/* it was clear, return */
wait:
	cmpl	$0, (%eax)		/* wait to empty */
	jne	wait			/* still set... */
	jmp	setlock			/* empty again, try once more */
gotit:
	ret

#else /* SL_DEBUG */

ENTRY(s_lock)
	movl	4(%esp), %edx		/* get the address of the lock */
setlock:
	movl	PCPU(CPU_LOCKID), %ecx	/* add cpu id portion */
	incl	%ecx			/* add lock portion */
	movl	$0, %eax
	lock
	cmpxchgl %ecx, (%edx)
	jz	gotit			/* it was clear, return */
	pushl	%eax			/* save what we xchanged */
	decl	%eax			/* remove lock portion */
	cmpl	PCPU(CPU_LOCKID), %eax	/* do we hold it? */
	je	bad_slock		/* yes, thats not good... */
	addl	$4, %esp		/* clear the stack */
wait:
	cmpl	$0, (%edx)		/* wait to empty */
	jne	wait			/* still set... */
	jmp	setlock			/* empty again, try once more */
gotit:
	ret

	ALIGN_TEXT
bad_slock:
	/* %eax (current lock) is already on the stack */
	pushl	%edx
	pushl	PCPU(CPUID)
	pushl	$bsl1
	call	_panic

bsl1:	.asciz	"rslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"

#endif /* SL_DEBUG */


/*
 * int
 * s_lock_try(struct simplelock *lkp)
 * {
 * 	return (!test_and_set(&lkp->lock_data));
 * }
 */
#ifndef SL_DEBUG

ENTRY(s_lock_try)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$1, %ecx

	xchgl	%ecx, (%eax)
	testl	%ecx, %ecx
	setz	%al			/* 1 if previous value was 0 */
	movzbl	%al, %eax		/* convert to an int */

	ret

#else /* SL_DEBUG */

ENTRY(s_lock_try)
	movl	4(%esp), %edx		/* get the address of the lock */
	movl	PCPU(CPU_LOCKID), %ecx	/* add cpu id portion */
	incl	%ecx			/* add lock portion */

	xorl	%eax, %eax
	lock
	cmpxchgl %ecx, (%edx)
	setz	%al			/* 1 if previous value was 0 */
	movzbl	%al, %eax		/* convert to an int */

	ret

#endif /* SL_DEBUG */


/*
 * void
 * s_unlock(struct simplelock *lkp)
 * {
 * 	lkp->lock_data = 0;
 * }
 */
ENTRY(s_unlock)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$0, (%eax)
	ret

#if 0

/*
 * XXX CRUFTY SS_LOCK IMPLEMENTATION REMOVED XXX
 *
 * These versions of simple_lock block interrupts,
 * making it suitable for regions accessed by both top and bottom levels.
 * This is done by saving the current value of the cpu flags in a per-cpu
 * global, and disabling interrupts when the lock is taken.  When the
 * lock is released, interrupts might be enabled, depending upon the saved
 * cpu flags.
 * Because of this, it must ONLY be used for SHORT, deterministic paths!
 *
 * Note:
 * It would appear to be "bad behaviour" to blindly store a value in
 * ss_eflags, as this could destroy the previous contents.  But since ss_eflags
 * is a per-cpu variable, and its fatal to attempt to acquire a simplelock
 * that you already hold, we get away with it.  This needs to be cleaned
 * up someday...
 */

/*
 * void ss_lock(struct simplelock *lkp)
 */
#ifndef SL_DEBUG

ENTRY(ss_lock)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$1, %ecx		/* value for a held lock */
ssetlock:
	pushfl
	cli
	xchgl	%ecx, (%eax)		/* compete */
	testl	%ecx, %ecx
	jz	sgotit			/* it was clear, return */
	popfl				/* previous value while waiting */
swait:
	cmpl	$0, (%eax)		/* wait to empty */
	jne	swait			/* still set... */
	jmp	ssetlock		/* empty again, try once more */
sgotit:
	popl	PCPU(SS_EFLAGS)		/* save the old eflags */
	ret

#else /* SL_DEBUG */

ENTRY(ss_lock)
	movl	4(%esp), %edx		/* get the address of the lock */
ssetlock:
	movl	PCPU(CPU_LOCKID), %ecx	/* add cpu id portion */
	incl	%ecx			/* add lock portion */
	pushfl
	cli
	movl	$0, %eax
	lock
	cmpxchgl %ecx, (%edx)		/* compete */
	jz	sgotit			/* it was clear, return */
	pushl	%eax			/* save what we xchanged */
	decl	%eax			/* remove lock portion */
	cmpl	PCPU(CPU_LOCKID), %eax	/* do we hold it? */
	je	sbad_slock		/* yes, thats not good... */
	addl	$4, %esp		/* clear the stack */
	popfl
swait:
	cmpl	$0, (%edx)		/* wait to empty */
	jne	swait			/* still set... */
	jmp	ssetlock		/* empty again, try once more */
sgotit:
	popl	PCPU(SS_EFLAGS)		/* save the old task priority */
sgotit2:
	ret

	ALIGN_TEXT
sbad_slock:
	/* %eax (current lock) is already on the stack */
	pushl	%edx
	pushl	PCPU(CPUID)
	pushl	$sbsl1
	call	_panic

sbsl1:	.asciz	"rsslock: cpu: %d, addr: 0x%08x, lock: 0x%08x"

#endif /* SL_DEBUG */

/*
 * void ss_unlock(struct simplelock *lkp)
 */
ENTRY(ss_unlock)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$0, (%eax)		/* clear the simple lock */
	testl	$PSL_I, PCPU(SS_EFLAGS)
	jz	ss_unlock2
	sti
ss_unlock2:
	ret

#endif

/*
 * These versions of simple_lock does not contain calls to profiling code.
 * Thus they can be called from the profiling code.
 */

/*
 * void s_lock_np(struct simplelock *lkp)
 */
NON_GPROF_ENTRY(s_lock_np)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$1, %ecx
1:
	xchgl	%ecx, (%eax)
	testl	%ecx, %ecx
	jz	3f
2:
	cmpl	$0, (%eax)		/* wait to empty */
	jne	2b			/* still set... */
	jmp	1b			/* empty again, try once more */
3:
	NON_GPROF_RET

/*
 * void s_unlock_np(struct simplelock *lkp)
 */
NON_GPROF_ENTRY(s_unlock_np)
	movl	4(%esp), %eax		/* get the address of the lock */
	movl	$0, (%eax)
	NON_GPROF_RET
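The Note above ENTRY(s_lock) in the deleted file describes a classic test-and-test-and-set spin: after a failed xchgl, the CPU spins on plain loads until the lock looks free, then retries the atomic exchange, so waiting costs cache-local reads rather than bus-locked writes. A minimal C sketch of the same idea, using GCC __atomic builtins; this is an illustration, not code from the commit:

/* Test-and-test-and-set spin lock; illustration only, not commit code. */
struct sl_sketch {
	volatile int	lock_data;
};

static void
sl_sketch_lock(struct sl_sketch *lkp)
{
	for (;;) {
		/* the atomic exchange (the xchgl above) */
		if (__atomic_exchange_n(&lkp->lock_data, 1,
		    __ATOMIC_ACQUIRE) == 0)
			return;		/* it was clear, we own it */
		/* spin on plain reads until the lock looks free */
		while (__atomic_load_n(&lkp->lock_data, __ATOMIC_RELAXED))
			;
	}
}

static void
sl_sketch_unlock(struct sl_sketch *lkp)
{
	__atomic_store_n(&lkp->lock_data, 0, __ATOMIC_RELEASE);
}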
@ -233,7 +233,7 @@
#define	_Giant	Giant
#define	_idle	idle
#define	_imen	imen
#define	_imen_lock	imen_lock
#define	_imen_mtx	imen_mtx
#define	_in_vm86call	in_vm86call
#define	_init386	init386
#define	_init_secondary	init_secondary
@ -282,8 +282,6 @@
#define	_reg_u_sub	reg_u_sub
#define	_rel_mplock	rel_mplock
#define	_round_reg	round_reg
#define	_s_lock	s_lock
#define	_s_unlock	s_unlock
#define	_sched_ithd	sched_ithd
#define	_sched_lock	sched_lock
#define	_set_precision_flag_down	set_precision_flag_down
@ -296,8 +294,6 @@
#define	_smp_rendezvous_action	smp_rendezvous_action
#define	_softclock	softclock
#define	_spending	spending
#define	_ss_lock	ss_lock
#define	_ss_unlock	ss_unlock
#define	_started_cpus	started_cpus
#define	_stopped_cpus	stopped_cpus
#define	_svr4_sigcode	svr4_sigcode
@ -39,13 +39,8 @@
/*
 * Protects the IO APIC and apic_imen as a critical region.
 */
#define IMASK_LOCK \
	pushl	$_imen_lock ;		/* address of lock */ \
	call	_s_lock ;		/* MP-safe */ \
	addl	$4, %esp

#define IMASK_UNLOCK \
	movl	$0, _imen_lock
#define IMASK_LOCK	MTX_ENTER(_imen_mtx, MTX_SPIN)
#define IMASK_UNLOCK	MTX_EXIT(_imen_mtx, MTX_SPIN)

#else /* SMP */

@ -67,8 +62,8 @@
 * XXX should rc (RISCom/8) use this?
 */
#ifdef USE_COMLOCK
#define COM_LOCK()	s_lock(&com_lock)
#define COM_UNLOCK()	s_unlock(&com_lock)
#define COM_LOCK()	mtx_enter(&com_mtx, MTX_SPIN)
#define COM_UNLOCK()	mtx_exit(&com_mtx, MTX_SPIN)
#else
#define COM_LOCK()
#define COM_UNLOCK()
@ -81,46 +76,11 @@

#endif /* SMP */

/*
 * Simple spin lock.
 * It is an error to hold one of these locks while a process is sleeping.
 */
struct simplelock {
	volatile int	lock_data;
};

/* functions in simplelock.s */
void	s_lock_init		__P((struct simplelock *));
void	s_lock			__P((struct simplelock *));
int	s_lock_try		__P((struct simplelock *));
void	ss_lock			__P((struct simplelock *));
void	ss_unlock		__P((struct simplelock *));
void	s_lock_np		__P((struct simplelock *));
void	s_unlock_np		__P((struct simplelock *));

/* inline simplelock functions */
static __inline void
s_unlock(struct simplelock *lkp)
{
	lkp->lock_data = 0;
}

/* global data in mp_machdep.c */
extern struct simplelock	imen_lock;
extern struct simplelock	com_lock;
extern struct simplelock	mcount_lock;
extern struct simplelock	panic_lock;

#if !defined(SIMPLELOCK_DEBUG) && MAXCPU > 1
/*
 * This set of defines turns on the real functions in i386/isa/apic_ipl.s.
 */
#define	simple_lock_init(alp)	s_lock_init(alp)
#define	simple_lock(alp)	s_lock(alp)
#define	simple_lock_try(alp)	s_lock_try(alp)
#define	simple_unlock(alp)	s_unlock(alp)

#endif /* !SIMPLELOCK_DEBUG && MAXCPU > 1 */
extern struct mtx	imen_mtx;
extern struct mtx	com_mtx;
extern struct mtx	mcount_mtx;
extern struct mtx	panic_mtx;

#endif /* LOCORE */
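Taken together, the hunks above show the whole conversion this commit performs: struct simplelock, the s_lock family, and the simple_lock() wrapper macros disappear in favor of struct mtx. A condensed sketch of the idiom, with an invented lock name ("foo") purely for illustration:

/* The conversion idiom; "foo" is an illustrative name, not commit code. */
static struct mtx foo_mtx;

static void
foo_init(void)
{
	/* MTX_DEF for code that may block; MTX_SPIN (as for imen_mtx and
	 * com_mtx above) for locks taken from interrupt context. */
	mtx_init(&foo_mtx, "foo", MTX_DEF);
}

static void
foo_operation(void)
{
	mtx_enter(&foo_mtx, MTX_DEF);	/* was: s_lock(&foo_lock) */
	/* ... critical section ... */
	mtx_exit(&foo_mtx, MTX_DEF);	/* was: s_unlock(&foo_lock) */
}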
@ -238,7 +238,7 @@ typedef struct BASETABLE_ENTRY {
#define MP_ANNOUNCE_POST	0x19

/* used to hold the AP's until we are ready to release them */
struct simplelock	ap_boot_lock;
struct mtx		ap_boot_mtx;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;
@ -318,6 +318,9 @@ SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
 * Local data and functions.
 */

/* Set to 1 once we're ready to let the APs out of the pen. */
static volatile int aps_ready = 0;

static int	mp_capable;
static u_int	boot_address;
static u_int	base_memory;
@ -345,36 +348,40 @@ static void release_aps(void *dummy);
 */

/* critical region around IO APIC, apic_imen */
struct simplelock	imen_lock;
struct mtx		imen_mtx;

/* lock region used by kernel profiling */
struct simplelock	mcount_lock;
struct mtx		mcount_mtx;

#ifdef USE_COMLOCK
/* locks com (tty) data/hardware accesses: a FASTINTR() */
struct simplelock	com_lock;
struct mtx		com_mtx;
#endif /* USE_COMLOCK */

/* lock around the MP rendezvous */
static struct simplelock smp_rv_lock;
static struct mtx smp_rv_mtx;

/* only 1 CPU can panic at a time :) */
struct simplelock panic_lock;
struct mtx panic_mtx;

static void
init_locks(void)
{
	s_lock_init(&mcount_lock);
	/*
	 * XXX The mcount mutex probably needs to be statically initialized,
	 * since it will be used even in the function calls that get us to this
	 * point.
	 */
	mtx_init(&mcount_mtx, "mcount", MTX_DEF);

	s_lock_init(&imen_lock);
	s_lock_init(&smp_rv_lock);
	s_lock_init(&panic_lock);
	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
	mtx_init(&panic_mtx, "panic", MTX_DEF);

#ifdef USE_COMLOCK
	s_lock_init(&com_lock);
	mtx_init(&com_mtx, "com", MTX_SPIN);
#endif /* USE_COMLOCK */

	s_lock_init(&ap_boot_lock);
	mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
}

/*
@ -655,9 +662,6 @@ mp_enable(u_int boot_addr)
	/* initialize all SMP locks */
	init_locks();

	/* obtain the ap_boot_lock */
	s_lock(&ap_boot_lock);

	/* start each Application Processor */
	start_all_aps(boot_addr);
}
@ -2247,8 +2251,12 @@ ap_init(void)
{
	u_int	apic_id;

	/* spin until all the AP's are ready */
	while (!aps_ready)
		/* spin */ ;

	/* lock against other AP's that are waking up */
	s_lock(&ap_boot_lock);
	mtx_enter(&ap_boot_mtx, MTX_SPIN);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();
@ -2297,7 +2305,7 @@ ap_init(void)
	}

	/* let other AP's wake up now */
	s_unlock(&ap_boot_lock);
	mtx_exit(&ap_boot_mtx, MTX_SPIN);

	/* wait until all the AP's are up */
	while (smp_started == 0)
@ -2851,10 +2859,9 @@ smp_rendezvous(void (* setup_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	u_int	efl;

	/* obtain rendezvous lock */
	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */
	mtx_enter(&smp_rv_mtx, MTX_SPIN);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
@ -2864,27 +2871,22 @@ smp_rendezvous(void (* setup_func)(void *),
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* disable interrupts on this CPU, save interrupt status */
	efl = read_eflags();
	write_eflags(efl & ~PSL_I);

	/* signal other processors, which will enter the IPI with interrupts off */
	/*
	 * signal other processors, which will enter the IPI with interrupts off
	 */
	all_but_self_ipi(XRENDEZVOUS_OFFSET);

	/* call executor function */
	smp_rendezvous_action();

	/* restore interrupt flag */
	write_eflags(efl);

	/* release lock */
	s_unlock(&smp_rv_lock);
	mtx_exit(&smp_rv_mtx, MTX_SPIN);
}

void
release_aps(void *dummy __unused)
{
	s_unlock(&ap_boot_lock);
	atomic_store_rel_int(&aps_ready, 1);
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);
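For context, smp_rendezvous() runs the three callbacks on every CPU in lockstep, and a NULL callback skips that phase. A hedged usage sketch; the wrapper below is invented for illustration, and only cpu_invltlb() and the smp_rendezvous() signature come from the diff above:

/* Illustrative caller only; not part of this commit. */
static void
flush_tlb_action(void *arg __unused)
{
	cpu_invltlb();		/* runs on every CPU during the rendezvous */
}

static void
example_tlb_shootdown(void)
{
	/* no setup or teardown phase is needed, so pass NULL for those */
	smp_rendezvous(NULL, flush_tlb_action, NULL, NULL);
}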
@ -66,8 +66,8 @@
#ifdef SMP
#define	MCOUNT_ENTER(s)	{ s = read_eflags(); \
			  __asm __volatile("cli" : : : "memory"); \
			  s_lock_np(&mcount_lock); }
#define	MCOUNT_EXIT(s)	{ s_unlock_np(&mcount_lock); write_eflags(s); }
			  mtx_enter(&mcount_mtx, MTX_DEF); }
#define	MCOUNT_EXIT(s)	{ mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); }
#else
#define	MCOUNT_ENTER(s)	{ s = read_eflags(); disable_intr(); }
#define	MCOUNT_EXIT(s)	(write_eflags(s))
@ -108,50 +108,16 @@ globaldata_find(int cpuno)
	return cpuno_to_globaldata[cpuno];
}

/* Implementation of simplelocks */

void
s_lock_init(struct simplelock *lkp)
{
	lkp->lock_data = 0;
}

void
s_lock(struct simplelock *lkp)
{
	for (;;) {
		if (s_lock_try(lkp))
			return;

		/*
		 * Spin until clear.
		 */
		while (lkp->lock_data)
			;
	}
}

int
s_lock_try(struct simplelock *lkp)
{
	return 1; /* XXX needed? */
}

void
s_unlock(struct simplelock *lkp)
{
	ia64_st_rel_32(&lkp->lock_data, 0);
}

/* Other stuff */

/* lock around the MP rendezvous */
static struct mtx smp_rv_lock;
static struct mtx smp_rv_mtx;

static void
init_locks(void)
{
	mtx_init(&smp_rv_lock, "smp_rendezvous", MTX_SPIN);

	mtx_init(&smp_rv_mtx, "smp_rendezvous", MTX_SPIN);
}

void
@ -660,8 +626,9 @@ smp_rendezvous(void (* setup_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	/* obtain rendezvous lock */
	mtx_enter(&smp_rv_lock, MTX_SPIN); /* XXX sleep here? NOWAIT flag? */
	mtx_enter(&smp_rv_mtx, MTX_SPIN);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
@ -678,7 +645,7 @@ smp_rendezvous(void (* setup_func)(void *),
	smp_rendezvous_action();

	/* release lock */
	mtx_exit(&smp_rv_lock, MTX_SPIN);
	mtx_exit(&smp_rv_mtx, MTX_SPIN);
}

/*
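One detail worth noting in the ia64 hunk above: the old s_unlock released the lock with ia64_st_rel_32(), a store with release semantics, which guarantees that writes made inside the critical section become visible before the lock appears free. The same idea in portable C11 atomics, as a sketch rather than code from this commit:

#include <stdatomic.h>

/* Unlock with release semantics, like ia64_st_rel_32(); sketch only. */
static void
sketch_unlock(atomic_int *lock_data)
{
	atomic_store_explicit(lock_data, 0, memory_order_release);
}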
@ -29,26 +29,6 @@
#ifndef _MACHINE_LOCK_H_
#define _MACHINE_LOCK_H_


/*
 * Simple spin lock.
 * It is an error to hold one of these locks while a process is sleeping.
 */
struct simplelock {
	volatile int	lock_data;
};

/* functions in mp_machdep.c */
void	s_lock_init __P((struct simplelock *));
void	s_lock __P((struct simplelock *));
int	s_lock_try __P((struct simplelock *));
void	s_unlock __P((struct simplelock *));

#define	simple_lock_init(alp)	s_lock_init(alp)
#define	simple_lock(alp)	s_lock(alp)
#define	simple_lock_try(alp)	s_lock_try(alp)
#define	simple_unlock(alp)	s_unlock(alp)

#define COM_LOCK()
#define COM_UNLOCK()
@ -47,8 +47,7 @@
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/stat.h>

#include <machine/mutex.h>
#include <sys/mutex.h>

#include <isofs/cd9660/iso.h>
#include <isofs/cd9660/cd9660_node.h>
@ -60,9 +59,7 @@
static struct iso_node **isohashtbl;
static u_long isohash;
#define	INOHASH(device, inum)	((minor(device) + ((inum)>>12)) & isohash)
#ifndef NULL_SIMPLELOCKS
static struct simplelock cd9660_ihash_slock;
#endif
static struct mtx cd9660_ihash_mtx;

static void cd9660_ihashrem __P((struct iso_node *));
static unsigned cd9660_chars2ui __P((unsigned char *begin, int len));
@ -76,7 +73,7 @@ cd9660_init(vfsp)
{

	isohashtbl = hashinit(desiredvnodes, M_ISOFSMNT, &isohash);
	simple_lock_init(&cd9660_ihash_slock);
	mtx_init(&cd9660_ihash_mtx, "cd9660_ihash", MTX_DEF);
	return (0);
}

@ -105,18 +102,18 @@ cd9660_ihashget(dev, inum)
	struct vnode *vp;

loop:
	simple_lock(&cd9660_ihash_slock);
	mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
	for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) {
		if (inum == ip->i_number && dev == ip->i_dev) {
			vp = ITOV(ip);
			mtx_enter(&vp->v_interlock, MTX_DEF);
			simple_unlock(&cd9660_ihash_slock);
			mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
				goto loop;
			return (vp);
		}
	}
	simple_unlock(&cd9660_ihash_slock);
	mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
	return (NULL);
}

@ -130,14 +127,14 @@ cd9660_ihashins(ip)
	struct proc *p = curproc;	/* XXX */
	struct iso_node **ipp, *iq;

	simple_lock(&cd9660_ihash_slock);
	mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
	ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)];
	if ((iq = *ipp) != NULL)
		iq->i_prev = &ip->i_next;
	ip->i_next = iq;
	ip->i_prev = ipp;
	*ipp = ip;
	simple_unlock(&cd9660_ihash_slock);
	mtx_exit(&cd9660_ihash_mtx, MTX_DEF);

	lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
}
@ -151,7 +148,7 @@ cd9660_ihashrem(ip)
{
	register struct iso_node *iq;

	simple_lock(&cd9660_ihash_slock);
	mtx_enter(&cd9660_ihash_mtx, MTX_DEF);
	if ((iq = ip->i_next) != NULL)
		iq->i_prev = ip->i_prev;
	*ip->i_prev = iq;
@ -159,7 +156,7 @@ cd9660_ihashrem(ip)
	ip->i_next = NULL;
	ip->i_prev = NULL;
#endif
	simple_unlock(&cd9660_ihash_slock);
	mtx_exit(&cd9660_ihash_mtx, MTX_DEF);
}

/*
@ -188,7 +185,7 @@ cd9660_inactive(ap)
	 * so that it can be reused immediately.
	 */
	if (ip->inode.iso_mode == 0)
		vrecycle(vp, (struct simplelock *)0, p);
		vrecycle(vp, NULL, p);
	return error;
}
@ -54,12 +54,6 @@
 * Locks provide shared/exclusive sychronization.
 */

#ifdef SIMPLELOCK_DEBUG
#define COUNT(p, x) if (p) (p)->p_locks += (x)
#else
#define COUNT(p, x)
#endif

#define LOCK_WAIT_TIME 100
#define LOCK_SAMPLE_WAIT 7

@ -137,9 +131,7 @@ shareunlock(struct lock *lkp, int decr) {
}

/*
 * This is the waitloop optimization, and note for this to work
 * simple_lock and simple_unlock should be subroutines to avoid
 * optimization troubles.
 * This is the waitloop optimization.
 */
static int
apause(struct lock *lkp, int flags)
@ -280,7 +272,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
			if (error)
				break;
			sharelock(lkp, 1);
			COUNT(p, 1);
			break;
		}
		/*
@ -288,7 +279,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
		 * An alternative would be to fail with EDEADLK.
		 */
		sharelock(lkp, 1);
		COUNT(p, 1);
		/* fall into downgrade */

	case LK_DOWNGRADE:
@ -310,7 +300,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
		 */
		if (lkp->lk_flags & LK_WANT_UPGRADE) {
			shareunlock(lkp, 1);
			COUNT(p, -1);
			error = EBUSY;
			break;
		}
@ -328,7 +317,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
		if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
			panic("lockmgr: upgrade exclusive lock");
		shareunlock(lkp, 1);
		COUNT(p, -1);
		/*
		 * If we are just polling, check to see if we will block.
		 */
@ -360,7 +348,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
			lkp->lk_lineno = line;
			lkp->lk_lockername = name;
#endif
			COUNT(p, 1);
			break;
		}
		/*
@ -382,7 +369,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
				panic("lockmgr: locking against myself");
			if ((extflags & LK_CANRECURSE) != 0) {
				lkp->lk_exclusivecount++;
				COUNT(p, 1);
				break;
			}
		}
@ -418,7 +404,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	case LK_RELEASE:
@ -429,9 +414,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
				    pid, "exclusive lock holder",
				    lkp->lk_lockholder);
			}
			if (lkp->lk_lockholder != LK_KERNPROC) {
				COUNT(p, -1);
			}
			if (lkp->lk_exclusivecount == 1) {
				lkp->lk_flags &= ~LK_HAVE_EXCL;
				lkp->lk_lockholder = LK_NOPROC;
@ -439,10 +421,8 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
			} else {
				lkp->lk_exclusivecount--;
			}
		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
		} else if (lkp->lk_flags & LK_SHARE_NONZERO)
			shareunlock(lkp, 1);
			COUNT(p, -1);
		}
		if (lkp->lk_flags & LK_WAIT_NONZERO)
			wakeup((void *)lkp);
		break;
@ -468,7 +448,6 @@ debuglockmgr(lkp, flags, interlkp, p, name, file, line)
		lkp->lk_lineno = line;
		lkp->lk_lockername = name;
#endif
		COUNT(p, 1);
		break;

	default:
@ -627,100 +606,3 @@ lockmgr_printinfo(lkp)
	if (lkp->lk_waitcount > 0)
		printf(" with %d pending", lkp->lk_waitcount);
}

#if defined(SIMPLELOCK_DEBUG) && (MAXCPU == 1 || defined(COMPILING_LINT))
#include <sys/kernel.h>
#include <sys/sysctl.h>

static int lockpausetime = 0;
SYSCTL_INT(_debug, OID_AUTO, lockpausetime, CTLFLAG_RW, &lockpausetime, 0, "");

static int simplelockrecurse;

/*
 * Simple lock functions so that the debugger can see from whence
 * they are being called.
 */
void
simple_lock_init(alp)
	struct simplelock *alp;
{

	alp->lock_data = 0;
}

void
_simple_lock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 1) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_lock: lock held", id, l);
		printf("%s:%d: simple_lock: lock held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_lock");
			/*BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_lock: lock held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "slock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
}

int
_simple_lock_try(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (alp->lock_data)
		return (0);
	if (simplelockrecurse)
		return (1);
	alp->lock_data = 1;
	if (curproc)
		curproc->p_simple_locks++;
	return (1);
}

void
_simple_unlock(alp, id, l)
	struct simplelock *alp;
	const char *id;
	int l;
{

	if (simplelockrecurse)
		return;
	if (alp->lock_data == 0) {
		if (lockpausetime == -1)
			panic("%s:%d: simple_unlock: lock not held", id, l);
		printf("%s:%d: simple_unlock: lock not held\n", id, l);
		if (lockpausetime == 1) {
			Debugger("simple_unlock");
			/* BACKTRACE(curproc); */
		} else if (lockpausetime > 1) {
			printf("%s:%d: simple_unlock: lock not held...", id, l);
			tsleep(&lockpausetime, PCATCH | PPAUSE, "sunlock",
			    lockpausetime * hz);
			printf(" continuing\n");
		}
	}
	alp->lock_data = 0;
	if (curproc)
		curproc->p_simple_locks--;
}
#elif defined(SIMPLELOCK_DEBUG)
#error "SIMPLELOCK_DEBUG is not compatible with SMP!"
#endif /* SIMPLELOCK_DEBUG && MAXCPU == 1 */
@ -1099,6 +1099,12 @@ static char *spin_order_list[] = {
	/*
	 * leaf locks
	 */
#ifdef __i386__
	"ap boot",
	"imen",
#endif
	"com",
	"smp rendezvous",
	NULL
};
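For readers unfamiliar with this table: spin_order_list encodes the order in which spin mutexes may be nested, and the witness code checks acquisitions against it. The entries added here ("ap boot", "imen", "com", "smp rendezvous") sit in the leaf section, meaning no further spin lock is expected to be taken while one of them is held, which matches how the converted code uses them: short, self-contained critical sections.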
@ -540,7 +540,7 @@ panic(const char *fmt, ...)

#ifdef SMP
	/* Only 1 CPU can panic at a time */
	s_lock(&panic_lock);
	mtx_enter(&panic_mtx, MTX_DEF);
#endif

	bootopt = RB_AUTOBOOT | RB_DUMP;
@ -62,6 +62,7 @@
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
@ -75,9 +76,7 @@
static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
#ifndef NULL_SIMPLELOCKS
static	struct simplelock rman_lock; /* mutex to protect rman_head */
#endif
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource *r,
				       struct resource **whohas);
static	int int_rman_deactivate_resource(struct resource *r);
@ -91,7 +90,7 @@ rman_init(struct rman *rm)
	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		simple_lock_init(&rman_lock);
		mtx_init(&rman_mtx, "rman head", MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
@ -100,14 +99,14 @@ rman_init(struct rman *rm)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
	if (rm->rm_slock == 0)
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT);
	if (rm->rm_mtx == 0)
		return ENOMEM;
	simple_lock_init(rm->rm_slock);
	mtx_init(rm->rm_mtx, "rman", MTX_DEF);

	simple_lock(&rman_lock);
	mtx_enter(&rman_mtx, MTX_DEF);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	simple_unlock(&rman_lock);
	mtx_exit(&rman_mtx, MTX_DEF);
	return 0;
}

@ -130,7 +129,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
	r->r_dev = 0;
	r->r_rm = rm;

	simple_lock(rm->rm_slock);
	mtx_enter(rm->rm_mtx, MTX_DEF);
	for (s = TAILQ_FIRST(&rm->rm_list);
	     s && s->r_end < r->r_start;
	     s = TAILQ_NEXT(s, r_link))
@ -142,7 +141,7 @@ rman_manage_region(struct rman *rm, u_long start, u_long end)
		TAILQ_INSERT_BEFORE(s, r, r_link);
	}

	simple_unlock(rm->rm_slock);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return 0;
}

@ -151,10 +150,10 @@ rman_fini(struct rman *rm)
{
	struct resource *r;

	simple_lock(rm->rm_slock);
	mtx_enter(rm->rm_mtx, MTX_DEF);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			simple_unlock(rm->rm_slock);
			mtx_exit(rm->rm_mtx, MTX_DEF);
			return EBUSY;
		}
	}
@ -168,11 +167,12 @@ rman_fini(struct rman *rm)
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	simple_unlock(rm->rm_slock);
	simple_lock(&rman_lock);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	mtx_enter(&rman_mtx, MTX_DEF);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	simple_unlock(&rman_lock);
	free(rm->rm_slock, M_RMAN);
	mtx_exit(&rman_mtx, MTX_DEF);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}
@ -193,7 +193,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	simple_lock(rm->rm_slock);
	mtx_enter(rm->rm_mtx, MTX_DEF);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
@ -370,7 +370,7 @@ rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		}
	}

	simple_unlock(rm->rm_slock);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return (rv);
}

@ -417,9 +417,9 @@ rman_activate_resource(struct resource *r)
	struct rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	mtx_enter(rm->rm_mtx, MTX_DEF);
	rv = int_rman_activate_resource(rm, r, &whohas);
	simple_unlock(rm->rm_slock);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return rv;
}

@ -432,28 +432,28 @@ rman_await_resource(struct resource *r, int pri, int timo)

	rm = r->r_rm;
	for (;;) {
		simple_lock(rm->rm_slock);
		mtx_enter(rm->rm_mtx, MTX_DEF);
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with simplelock */
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == 0)
			panic("rman_await_resource");
		/*
		 * splhigh hopefully will prevent a race between
		 * simple_unlock and tsleep where a process
		 * mtx_exit and tsleep where a process
		 * could conceivably get in and release the resource
		 * before we have a chance to sleep on it.
		 */
		s = splhigh();
		whohas->r_flags |= RF_WANTED;
		simple_unlock(rm->rm_slock);
		mtx_exit(rm->rm_mtx, MTX_DEF);
		rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
		if (rv) {
			splx(s);
			return rv;
		}
		simple_lock(rm->rm_slock);
		mtx_enter(rm->rm_mtx, MTX_DEF);
		splx(s);
	}
}
@ -478,9 +478,9 @@ rman_deactivate_resource(struct resource *r)
	struct rman *rm;

	rm = r->r_rm;
	simple_lock(rm->rm_slock);
	mtx_enter(rm->rm_mtx, MTX_DEF);
	int_rman_deactivate_resource(r);
	simple_unlock(rm->rm_slock);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return 0;
}

@ -576,9 +576,9 @@ rman_release_resource(struct resource *r)
	int	rv;
	struct	rman *rm = r->r_rm;

	simple_lock(rm->rm_slock);
	mtx_enter(rm->rm_mtx, MTX_DEF);
	rv = int_rman_release_resource(rm, r);
	simple_unlock(rm->rm_slock);
	mtx_exit(rm->rm_mtx, MTX_DEF);
	return (rv);
}
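The rman_await_resource() hunk preserves a delicate ordering: RF_WANTED is set and splhigh() entered before the mutex is dropped and tsleep() is called, so nothing can release the resource in the window between mtx_exit() and tsleep() and leave the sleeper waiting for a wakeup that already happened. Distilled into a sketch with invented names, not code from the commit:

/* Lost-wakeup avoidance as in rman_await_resource(); illustrative only. */
static int
await_sketch(struct mtx *m, volatile int *flags, void *wchan, int pri, int timo)
{
	int rv, s;

	mtx_enter(m, MTX_DEF);
	s = splhigh();			/* close the mtx_exit/tsleep window */
	*flags |= RF_WANTED;		/* publish intent while still locked */
	mtx_exit(m, MTX_DEF);
	rv = tsleep(wchan, pri, "rmwait", timo);
	splx(s);
	return (rv);
}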
@ -152,26 +152,25 @@ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
struct mtx mountlist_mtx;

/* For any iteration/modification of mnt_vnodelist */
struct simplelock mntvnode_slock;
struct mtx mntvnode_mtx;

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

#ifndef NULL_SIMPLELOCKS
/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct simplelock mntid_slock;
static struct mtx mntid_mtx;

/* For any iteration/modification of vnode_free_list */
static struct simplelock vnode_free_list_slock;
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct simplelock spechash_slock;
#endif
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;
@ -250,11 +249,11 @@ vntblinit(void *dummy __unused)

	desiredvnodes = maxproc + cnt.v_page_count / 4;
	mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
	simple_lock_init(&mntvnode_slock);
	simple_lock_init(&mntid_slock);
	simple_lock_init(&spechash_slock);
	mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	simple_lock_init(&vnode_free_list_slock);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
	/*
	 * Initialize the filesystem syncer.
@ -423,7 +422,7 @@ vfs_getnewfsid(mp)
	fsid_t tfsid;
	int mtype;

	simple_lock(&mntid_slock);
	mtx_enter(&mntid_mtx, MTX_DEF);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
@ -436,7 +435,7 @@ vfs_getnewfsid(mp)
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	simple_unlock(&mntid_slock);
	mtx_exit(&mntid_mtx, MTX_DEF);
}

/*
@ -539,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
	 */

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	mtx_enter(&vnode_free_list_mtx, MTX_DEF);

	if (wantfreevnodes && freevnodes < wantfreevnodes) {
		vp = NULL;
@ -579,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
		vp->v_flag |= VDOOMED;
		vp->v_flag &= ~VFREE;
		freevnodes--;
		simple_unlock(&vnode_free_list_slock);
		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
		cache_purge(vp);
		vp->v_lease = NULL;
		if (vp->v_type != VBAD) {
@ -610,11 +609,12 @@ getnewvnode(tag, mp, vops, vpp)
		vp->v_clen = 0;
		vp->v_socket = 0;
	} else {
		simple_unlock(&vnode_free_list_slock);
		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
		vp = (struct vnode *) zalloc(vnode_zone);
		bzero((char *) vp, sizeof *vp);
		mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
		vp->v_dd = vp;
		mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
@ -646,7 +646,7 @@ insmntque(vp, mp)
	register struct mount *mp;
{

	simple_lock(&mntvnode_slock);
	mtx_enter(&mntvnode_mtx, MTX_DEF);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
@ -656,11 +656,11 @@ insmntque(vp, mp)
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		simple_unlock(&mntvnode_slock);
		mtx_exit(&mntvnode_mtx, MTX_DEF);
		return;
	}
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
	simple_unlock(&mntvnode_slock);
	mtx_exit(&mntvnode_mtx, MTX_DEF);
}

/*
@ -1402,9 +1402,9 @@ addalias(nvp, dev)

	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
	nvp->v_rdev = dev;
	simple_lock(&spechash_slock);
	mtx_enter(&spechash_mtx, MTX_DEF);
	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
	simple_unlock(&spechash_slock);
	mtx_exit(&spechash_mtx, MTX_DEF);
}

/*
@ -1628,7 +1628,7 @@ vflush(mp, skipvp, flags)
	struct vnode *vp, *nvp;
	int busy = 0;

	simple_lock(&mntvnode_slock);
	mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		/*
@ -1667,9 +1667,9 @@ vflush(mp, skipvp, flags)
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			simple_unlock(&mntvnode_slock);
			mtx_exit(&mntvnode_mtx, MTX_DEF);
			vgonel(vp, p);
			simple_lock(&mntvnode_slock);
			mtx_enter(&mntvnode_mtx, MTX_DEF);
			continue;
		}

@ -1679,7 +1679,7 @@ vflush(mp, skipvp, flags)
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			simple_unlock(&mntvnode_slock);
			mtx_exit(&mntvnode_mtx, MTX_DEF);
			if (vp->v_type != VCHR) {
				vgonel(vp, p);
			} else {
@ -1687,7 +1687,7 @@ vflush(mp, skipvp, flags)
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			simple_lock(&mntvnode_slock);
			mtx_enter(&mntvnode_mtx, MTX_DEF);
			continue;
		}
#ifdef DIAGNOSTIC
@ -1697,7 +1697,7 @@ vflush(mp, skipvp, flags)
		mtx_exit(&vp->v_interlock, MTX_DEF);
		busy++;
	}
	simple_unlock(&mntvnode_slock);
	mtx_exit(&mntvnode_mtx, MTX_DEF);
	if (busy)
		return (EBUSY);
	return (0);
@ -1842,9 +1842,9 @@ vop_revoke(ap)
	}
	dev = vp->v_rdev;
	for (;;) {
		simple_lock(&spechash_slock);
		mtx_enter(&spechash_mtx, MTX_DEF);
		vq = SLIST_FIRST(&dev->si_hlist);
		simple_unlock(&spechash_slock);
		mtx_exit(&spechash_mtx, MTX_DEF);
		if (!vq)
			break;
		vgone(vq);
@ -1859,14 +1859,14 @@ vop_revoke(ap)
int
vrecycle(vp, inter_lkp, p)
	struct vnode *vp;
	struct simplelock *inter_lkp;
	struct mtx *inter_lkp;
	struct proc *p;
{

	mtx_enter(&vp->v_interlock, MTX_DEF);
	if (vp->v_usecount == 0) {
		if (inter_lkp) {
			simple_unlock(inter_lkp);
			mtx_exit(inter_lkp, MTX_DEF);
		}
		vgonel(vp, p);
		return (1);
@ -1926,10 +1926,10 @@ vgonel(vp, p)
	 * if it is on one.
	 */
	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
		simple_lock(&spechash_slock);
		mtx_enter(&spechash_mtx, MTX_DEF);
		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
		freedev(vp->v_rdev);
		simple_unlock(&spechash_slock);
		mtx_exit(&spechash_mtx, MTX_DEF);
		vp->v_rdev = NULL;
	}

@ -1945,14 +1945,14 @@ vgonel(vp, p)
	 */
	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
		s = splbio();
		simple_lock(&vnode_free_list_slock);
		mtx_enter(&vnode_free_list_mtx, MTX_DEF);
		if (vp->v_flag & VFREE)
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		else
			freevnodes++;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		simple_unlock(&vnode_free_list_slock);
		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
		splx(s);
	}

@ -1971,15 +1971,15 @@ vfinddev(dev, type, vpp)
{
	struct vnode *vp;

	simple_lock(&spechash_slock);
	mtx_enter(&spechash_mtx, MTX_DEF);
	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			simple_unlock(&spechash_slock);
			mtx_exit(&spechash_mtx, MTX_DEF);
			return (1);
		}
	}
	simple_unlock(&spechash_slock);
	mtx_exit(&spechash_mtx, MTX_DEF);
	return (0);
}

@ -1994,10 +1994,10 @@ vcount(vp)
	int count;

	count = 0;
	simple_lock(&spechash_slock);
	mtx_enter(&spechash_mtx, MTX_DEF);
	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
		count += vq->v_usecount;
	simple_unlock(&spechash_slock);
	mtx_exit(&spechash_mtx, MTX_DEF);
	return (count);
}

@ -2204,7 +2204,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
			continue;
		}
again:
		simple_lock(&mntvnode_slock);
		mtx_enter(&mntvnode_mtx, MTX_DEF);
		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
		     vp != NULL;
		     vp = nvp) {
@ -2214,17 +2214,17 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
			 * recycled onto the same filesystem.
			 */
			if (vp->v_mount != mp) {
				simple_unlock(&mntvnode_slock);
				mtx_exit(&mntvnode_mtx, MTX_DEF);
				goto again;
			}
			nvp = LIST_NEXT(vp, v_mntvnodes);
			simple_unlock(&mntvnode_slock);
			mtx_exit(&mntvnode_mtx, MTX_DEF);
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
				return (error);
			simple_lock(&mntvnode_slock);
			mtx_enter(&mntvnode_mtx, MTX_DEF);
		}
		simple_unlock(&mntvnode_slock);
		mtx_exit(&mntvnode_mtx, MTX_DEF);
		mtx_enter(&mountlist_mtx, MTX_DEF);
		nmp = TAILQ_NEXT(mp, mnt_list);
		vfs_unbusy(mp, p);
@ -2633,7 +2633,7 @@ vfree(vp)
	int s;

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@ -2641,7 +2641,7 @@ vfree(vp)
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;
	simple_unlock(&vnode_free_list_slock);
	mtx_exit(&vnode_free_list_mtx, MTX_DEF);
	vp->v_flag &= ~VAGE;
	vp->v_flag |= VFREE;
	splx(s);
@ -2657,11 +2657,11 @@ vbusy(vp)
	int s;

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	simple_unlock(&vnode_free_list_slock);
	mtx_exit(&vnode_free_list_mtx, MTX_DEF);
	vp->v_flag &= ~(VFREE|VAGE);
	splx(s);
}
@ -2680,7 +2680,7 @@ vn_pollrecord(vp, p, events)
	struct proc *p;
	short events;
{
	simple_lock(&vp->v_pollinfo.vpi_lock);
	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
@ -2692,12 +2692,12 @@ vn_pollrecord(vp, p, events)
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		simple_unlock(&vp->v_pollinfo.vpi_lock);
		mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
	simple_unlock(&vp->v_pollinfo.vpi_lock);
	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	return 0;
}

@ -2712,7 +2712,7 @@ vn_pollevent(vp, events)
	struct vnode *vp;
	short events;
{
	simple_lock(&vp->v_pollinfo.vpi_lock);
	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
@ -2729,7 +2729,7 @@ vn_pollevent(vp, events)
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	simple_unlock(&vp->v_pollinfo.vpi_lock);
	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}

/*
@ -2741,12 +2741,12 @@ void
vn_pollgone(vp)
	struct vnode *vp;
{
	simple_lock(&vp->v_pollinfo.vpi_lock);
	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	simple_unlock(&vp->v_pollinfo.vpi_lock);
	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
@ -152,26 +152,25 @@ struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
|
||||
struct mtx mountlist_mtx;
|
||||
|
||||
/* For any iteration/modification of mnt_vnodelist */
|
||||
struct simplelock mntvnode_slock;
|
||||
struct mtx mntvnode_mtx;
|
||||
|
||||
/*
|
||||
* Cache for the mount type id assigned to NFS. This is used for
|
||||
* special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
|
||||
*/
|
||||
int nfs_mount_type = -1;
|
||||
|
||||
#ifndef NULL_SIMPLELOCKS
|
||||
/* To keep more than one thread at a time from running vfs_getnewfsid */
|
||||
static struct simplelock mntid_slock;
|
||||
static struct mtx mntid_mtx;
|
||||
|
||||
/* For any iteration/modification of vnode_free_list */
|
||||
static struct simplelock vnode_free_list_slock;
|
||||
static struct mtx vnode_free_list_mtx;
|
||||
|
||||
/*
|
||||
* For any iteration/modification of dev->si_hlist (linked through
|
||||
* v_specnext)
|
||||
*/
|
||||
static struct simplelock spechash_slock;
|
||||
#endif
|
||||
static struct mtx spechash_mtx;
|
||||
|
||||
/* Publicly exported FS */
|
||||
struct nfs_public nfs_pub;
@ -250,11 +249,11 @@ vntblinit(void *dummy __unused)
 
 	desiredvnodes = maxproc + cnt.v_page_count / 4;
 	mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
-	simple_lock_init(&mntvnode_slock);
-	simple_lock_init(&mntid_slock);
-	simple_lock_init(&spechash_slock);
+	mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
+	mtx_init(&mntid_mtx, "mntid", MTX_DEF);
+	mtx_init(&spechash_mtx, "spechash", MTX_DEF);
 	TAILQ_INIT(&vnode_free_list);
-	simple_lock_init(&vnode_free_list_slock);
+	mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
 	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
 	/*
 	 * Initialize the filesystem syncer.
@ -423,7 +422,7 @@ vfs_getnewfsid(mp)
 	fsid_t tfsid;
 	int mtype;
 
-	simple_lock(&mntid_slock);
+	mtx_enter(&mntid_mtx, MTX_DEF);
 	mtype = mp->mnt_vfc->vfc_typenum;
 	tfsid.val[1] = mtype;
 	mtype = (mtype & 0xFF) << 24;
@ -436,7 +435,7 @@ vfs_getnewfsid(mp)
 	}
 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
-	simple_unlock(&mntid_slock);
+	mtx_exit(&mntid_mtx, MTX_DEF);
 }
 
 /*
@ -539,7 +538,7 @@ getnewvnode(tag, mp, vops, vpp)
 	 */
 
 	s = splbio();
-	simple_lock(&vnode_free_list_slock);
+	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
 
 	if (wantfreevnodes && freevnodes < wantfreevnodes) {
 		vp = NULL;
@ -579,7 +578,7 @@ getnewvnode(tag, mp, vops, vpp)
 		vp->v_flag |= VDOOMED;
 		vp->v_flag &= ~VFREE;
 		freevnodes--;
-		simple_unlock(&vnode_free_list_slock);
+		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
 		cache_purge(vp);
 		vp->v_lease = NULL;
 		if (vp->v_type != VBAD) {
@ -610,11 +609,12 @@ getnewvnode(tag, mp, vops, vpp)
 		vp->v_clen = 0;
 		vp->v_socket = 0;
 	} else {
-		simple_unlock(&vnode_free_list_slock);
+		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
 		vp = (struct vnode *) zalloc(vnode_zone);
 		bzero((char *) vp, sizeof *vp);
 		mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
 		vp->v_dd = vp;
+		mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
 		cache_purge(vp);
 		LIST_INIT(&vp->v_cache_src);
 		TAILQ_INIT(&vp->v_cache_dst);
@ -646,7 +646,7 @@ insmntque(vp, mp)
 	register struct mount *mp;
 {
 
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
 	/*
 	 * Delete from old mount point vnode list, if on one.
 	 */
@ -656,11 +656,11 @@ insmntque(vp, mp)
 	 * Insert into list of vnodes for the new mount point, if available.
 	 */
 	if ((vp->v_mount = mp) == NULL) {
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		return;
 	}
 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 }
 
 /*
@ -1402,9 +1402,9 @@ addalias(nvp, dev)
 
 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
 	nvp->v_rdev = dev;
-	simple_lock(&spechash_slock);
+	mtx_enter(&spechash_mtx, MTX_DEF);
 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
-	simple_unlock(&spechash_slock);
+	mtx_exit(&spechash_mtx, MTX_DEF);
 }
 
 /*
@ -1628,7 +1628,7 @@ vflush(mp, skipvp, flags)
 	struct vnode *vp, *nvp;
 	int busy = 0;
 
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
 		/*
@ -1667,9 +1667,9 @@ vflush(mp, skipvp, flags)
 		 * vnode data structures and we are done.
 		 */
 		if (vp->v_usecount == 0) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			vgonel(vp, p);
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			continue;
 		}
 
@ -1679,7 +1679,7 @@ vflush(mp, skipvp, flags)
 		 * all other files, just kill them.
 		 */
 		if (flags & FORCECLOSE) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			if (vp->v_type != VCHR) {
 				vgonel(vp, p);
 			} else {
@ -1687,7 +1687,7 @@ vflush(mp, skipvp, flags)
 				vp->v_op = spec_vnodeop_p;
 				insmntque(vp, (struct mount *) 0);
 			}
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			continue;
 		}
 #ifdef DIAGNOSTIC
@ -1697,7 +1697,7 @@ vflush(mp, skipvp, flags)
 		mtx_exit(&vp->v_interlock, MTX_DEF);
 		busy++;
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 	if (busy)
 		return (EBUSY);
 	return (0);
@ -1842,9 +1842,9 @@ vop_revoke(ap)
 	}
 	dev = vp->v_rdev;
 	for (;;) {
-		simple_lock(&spechash_slock);
+		mtx_enter(&spechash_mtx, MTX_DEF);
 		vq = SLIST_FIRST(&dev->si_hlist);
-		simple_unlock(&spechash_slock);
+		mtx_exit(&spechash_mtx, MTX_DEF);
 		if (!vq)
 			break;
 		vgone(vq);
@ -1859,14 +1859,14 @@ vop_revoke(ap)
 int
 vrecycle(vp, inter_lkp, p)
 	struct vnode *vp;
-	struct simplelock *inter_lkp;
+	struct mtx *inter_lkp;
 	struct proc *p;
 {
 
 	mtx_enter(&vp->v_interlock, MTX_DEF);
 	if (vp->v_usecount == 0) {
 		if (inter_lkp) {
-			simple_unlock(inter_lkp);
+			mtx_exit(inter_lkp, MTX_DEF);
 		}
 		vgonel(vp, p);
 		return (1);
@ -1926,10 +1926,10 @@ vgonel(vp, p)
 	 * if it is on one.
 	 */
 	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
-		simple_lock(&spechash_slock);
+		mtx_enter(&spechash_mtx, MTX_DEF);
 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
 		freedev(vp->v_rdev);
-		simple_unlock(&spechash_slock);
+		mtx_exit(&spechash_mtx, MTX_DEF);
 		vp->v_rdev = NULL;
 	}
 
@ -1945,14 +1945,14 @@ vgonel(vp, p)
 	 */
 	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
 		s = splbio();
-		simple_lock(&vnode_free_list_slock);
+		mtx_enter(&vnode_free_list_mtx, MTX_DEF);
 		if (vp->v_flag & VFREE)
 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 		else
 			freevnodes++;
 		vp->v_flag |= VFREE;
 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
-		simple_unlock(&vnode_free_list_slock);
+		mtx_exit(&vnode_free_list_mtx, MTX_DEF);
 		splx(s);
 	}
 
@ -1971,15 +1971,15 @@ vfinddev(dev, type, vpp)
 {
 	struct vnode *vp;
 
-	simple_lock(&spechash_slock);
+	mtx_enter(&spechash_mtx, MTX_DEF);
 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
 		if (type == vp->v_type) {
 			*vpp = vp;
-			simple_unlock(&spechash_slock);
+			mtx_exit(&spechash_mtx, MTX_DEF);
 			return (1);
 		}
 	}
-	simple_unlock(&spechash_slock);
+	mtx_exit(&spechash_mtx, MTX_DEF);
 	return (0);
 }
 
@ -1994,10 +1994,10 @@ vcount(vp)
 	int count;
 
 	count = 0;
-	simple_lock(&spechash_slock);
+	mtx_enter(&spechash_mtx, MTX_DEF);
 	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
 		count += vq->v_usecount;
-	simple_unlock(&spechash_slock);
+	mtx_exit(&spechash_mtx, MTX_DEF);
 	return (count);
 }
 
@ -2204,7 +2204,7 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
 			continue;
 		}
again:
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
 		     vp != NULL;
 		     vp = nvp) {
@ -2214,17 +2214,17 @@ sysctl_vnode(SYSCTL_HANDLER_ARGS)
 			 * recycled onto the same filesystem.
 			 */
 			if (vp->v_mount != mp) {
-				simple_unlock(&mntvnode_slock);
+				mtx_exit(&mntvnode_mtx, MTX_DEF);
 				goto again;
 			}
 			nvp = LIST_NEXT(vp, v_mntvnodes);
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
 			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
 				return (error);
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 		}
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		mtx_enter(&mountlist_mtx, MTX_DEF);
 		nmp = TAILQ_NEXT(mp, mnt_list);
 		vfs_unbusy(mp, p);
@ -2633,7 +2633,7 @@ vfree(vp)
 	int s;
 
 	s = splbio();
-	simple_lock(&vnode_free_list_slock);
+	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
 	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
 	if (vp->v_flag & VAGE) {
 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
@ -2641,7 +2641,7 @@ vfree(vp)
 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
 	}
 	freevnodes++;
-	simple_unlock(&vnode_free_list_slock);
+	mtx_exit(&vnode_free_list_mtx, MTX_DEF);
 	vp->v_flag &= ~VAGE;
 	vp->v_flag |= VFREE;
 	splx(s);
@ -2657,11 +2657,11 @@ vbusy(vp)
 	int s;
 
 	s = splbio();
-	simple_lock(&vnode_free_list_slock);
+	mtx_enter(&vnode_free_list_mtx, MTX_DEF);
 	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
 	freevnodes--;
-	simple_unlock(&vnode_free_list_slock);
+	mtx_exit(&vnode_free_list_mtx, MTX_DEF);
 	vp->v_flag &= ~(VFREE|VAGE);
 	splx(s);
 }
@ -2680,7 +2680,7 @@ vn_pollrecord(vp, p, events)
 	struct proc *p;
 	short events;
 {
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 	if (vp->v_pollinfo.vpi_revents & events) {
 		/*
 		 * This leaves events we are not interested
@ -2692,12 +2692,12 @@ vn_pollrecord(vp, p, events)
 		events &= vp->v_pollinfo.vpi_revents;
 		vp->v_pollinfo.vpi_revents &= ~events;
 
-		simple_unlock(&vp->v_pollinfo.vpi_lock);
+		mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 		return events;
 	}
 	vp->v_pollinfo.vpi_events |= events;
 	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 	return 0;
 }
 
@ -2712,7 +2712,7 @@ vn_pollevent(vp, events)
 	struct vnode *vp;
 	short events;
 {
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 	if (vp->v_pollinfo.vpi_events & events) {
 		/*
 		 * We clear vpi_events so that we don't
@ -2729,7 +2729,7 @@ vn_pollevent(vp, events)
 		vp->v_pollinfo.vpi_revents |= events;
 		selwakeup(&vp->v_pollinfo.vpi_selinfo);
 	}
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 }
 
 /*
@ -2741,12 +2741,12 @@ void
 vn_pollgone(vp)
 	struct vnode *vp;
 {
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 	if (vp->v_pollinfo.vpi_events) {
 		vp->v_pollinfo.vpi_events = 0;
 		selwakeup(&vp->v_pollinfo.vpi_selinfo);
 	}
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
}
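Every hunk above follows the same shape: a simple_lock()/simple_unlock() pair becomes mtx_enter()/mtx_exit() on a struct mtx, and the mutex is still dropped around anything that can block (vgonel(), vget(), SYSCTL_OUT()) and re-taken before the list walk continues. A minimal sketch of that drop-and-reacquire pattern, using the mtx API exactly as this commit uses it, but with hypothetical names (mylist_mtx, struct myitem, my_blocking_op()) that are not from the tree:

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/mutex.h>

struct myitem {
        LIST_ENTRY(myitem) m_link;
};

static LIST_HEAD(, myitem) mylist;      /* protected by mylist_mtx */
static struct mtx mylist_mtx;

void my_blocking_op(struct myitem *);   /* assumed elsewhere; may sleep */

static void
mylist_scan(void)
{
        struct myitem *it, *next;

        mtx_enter(&mylist_mtx, MTX_DEF);
        for (it = LIST_FIRST(&mylist); it != NULL; it = next) {
                next = LIST_NEXT(it, m_link);
                /* Never hold the list mutex across a blocking call. */
                mtx_exit(&mylist_mtx, MTX_DEF);
                my_blocking_op(it);
                mtx_enter(&mylist_mtx, MTX_DEF);
        }
        mtx_exit(&mylist_mtx, MTX_DEF);
}

Because the list can change while the mutex is dropped, the real loops above re-check vp->v_mount after reacquiring and restart with "goto loop" or "goto again" when the snapshot may be stale.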
@ -833,9 +833,9 @@ filt_vnattach(struct knote *kn)
 	if ((vp)->v_tag != VT_UFS)
 		return (EOPNOTSUPP);
 
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 	SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 
 	return (0);
 }
@ -845,10 +845,10 @@ filt_vndetach(struct knote *kn)
 {
 	struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
 
-	simple_lock(&vp->v_pollinfo.vpi_lock);
+	mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 	SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
 	    kn, knote, kn_selnext);
-	simple_unlock(&vp->v_pollinfo.vpi_lock);
+	mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF);
 }
 
 static int
@ -56,12 +56,11 @@
 #include <sys/bio.h>
 #include <sys/buf.h>
 #include <sys/vnode.h>
+#include <sys/mutex.h>
 
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
 
-#include <machine/mutex.h>
-
 #include <msdosfs/bpb.h>
 #include <msdosfs/msdosfsmount.h>
 #include <msdosfs/direntry.h>
@ -74,9 +73,7 @@ static struct denode **dehashtbl;
 static u_long dehash;			/* size of hash table - 1 */
 #define	DEHASH(dev, dcl, doff)	(dehashtbl[(minor(dev) + (dcl) + (doff) / \
				sizeof(struct direntry)) & dehash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock dehash_slock;
-#endif
+static struct mtx dehash_mtx;
 
 union _qcvt {
 	quad_t qcvt;
@ -107,7 +104,7 @@ msdosfs_init(vfsp)
 	struct vfsconf *vfsp;
 {
 	dehashtbl = hashinit(desiredvnodes/2, M_MSDOSFSMNT, &dehash);
-	simple_lock_init(&dehash_slock);
+	mtx_init(&dehash_mtx, "msdosfs dehash", MTX_DEF);
 	return (0);
 }
 
@ -118,6 +115,7 @@ msdosfs_uninit(vfsp)
 
 	if (dehashtbl)
 		free(dehashtbl, M_MSDOSFSMNT);
+	mtx_destroy(&dehash_mtx);
 	return (0);
 }
 
@ -132,7 +130,7 @@ msdosfs_hashget(dev, dirclust, diroff)
 	struct vnode *vp;
 
loop:
-	simple_lock(&dehash_slock);
+	mtx_enter(&dehash_mtx, MTX_DEF);
 	for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) {
 		if (dirclust == dep->de_dirclust
 		    && diroff == dep->de_diroffset
@ -140,13 +138,13 @@ msdosfs_hashget(dev, dirclust, diroff)
 		    && dep->de_refcnt != 0) {
 			vp = DETOV(dep);
 			mtx_enter(&vp->v_interlock, MTX_DEF);
-			simple_unlock(&dehash_slock);
+			mtx_exit(&dehash_mtx, MTX_DEF);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (dep);
 		}
 	}
-	simple_unlock(&dehash_slock);
+	mtx_exit(&dehash_mtx, MTX_DEF);
 	return (NULL);
 }
 
@ -156,7 +154,7 @@ msdosfs_hashins(dep)
 {
 	struct denode **depp, *deq;
 
-	simple_lock(&dehash_slock);
+	mtx_enter(&dehash_mtx, MTX_DEF);
 	depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset);
 	deq = *depp;
 	if (deq)
@ -164,7 +162,7 @@ msdosfs_hashins(dep)
 	dep->de_next = deq;
 	dep->de_prev = depp;
 	*depp = dep;
-	simple_unlock(&dehash_slock);
+	mtx_exit(&dehash_mtx, MTX_DEF);
 }
 
 static void
@ -173,7 +171,7 @ msdosfs_hashrem(dep)
 {
 	struct denode *deq;
 
-	simple_lock(&dehash_slock);
+	mtx_enter(&dehash_mtx, MTX_DEF);
 	deq = dep->de_next;
 	if (deq)
 		deq->de_prev = dep->de_prev;
@ -182,7 +180,7 @@ msdosfs_hashrem(dep)
 	dep->de_next = NULL;
 	dep->de_prev = NULL;
 #endif
-	simple_unlock(&dehash_slock);
+	mtx_exit(&dehash_mtx, MTX_DEF);
 }
 
 /*
@ -721,6 +719,6 @@ msdosfs_inactive(ap)
 		    dep->de_Name[0]);
 #endif
 	if (dep->de_Name[0] == SLOT_DELETED)
-		vrecycle(vp, (struct simplelock *)0, p);
+		vrecycle(vp, NULL, p);
 	return (error);
}
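A simplelock needed no teardown, but the mutexes introduced here do: msdosfs_init() gains an mtx_init() and msdosfs_uninit() a matching mtx_destroy(). A sketch of that pairing under hypothetical "myfs" names:

#include <sys/param.h>
#include <sys/mutex.h>

static struct mtx myfs_hash_mtx;        /* illustrative module-lifetime lock */

static int
myfs_init(void)
{
        mtx_init(&myfs_hash_mtx, "myfs hash", MTX_DEF);
        return (0);
}

static int
myfs_uninit(void)
{
        /* A no-op with a simplelock; required for a struct mtx. */
        mtx_destroy(&myfs_hash_mtx);
        return (0);
}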
@ -61,8 +61,7 @@
 #include <sys/fcntl.h>
 #include <sys/malloc.h>
 #include <sys/stat.h>			/* defines ALLPERMS */
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <msdosfs/bpb.h>
 #include <msdosfs/bootsect.h>
@ -863,7 +862,7 @@ msdosfs_sync(mp, waitfor, cred, p)
 	/*
 	 * Write back each (modified) denode.
 	 */
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
 		/*
@ -883,10 +882,10 @@ msdosfs_sync(mp, waitfor, cred, p)
 			mtx_exit(&vp->v_interlock, MTX_DEF);
 			continue;
 		}
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
 		if (error) {
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			if (error == ENOENT)
 				goto loop;
 			continue;
@ -896,9 +895,9 @@ msdosfs_sync(mp, waitfor, cred, p)
 			allerror = error;
 		VOP_UNLOCK(vp, 0, p);
 		vrele(vp);
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 
 	/*
 	 * Flush filesystem control info.
@ -43,6 +43,7 @@
 #include <sys/vnode.h>
 #include <sys/malloc.h>
 #include <sys/mount.h>
+#include <sys/mutex.h>
 
 #include <ntfs/ntfs.h>
 #include <ntfs/ntfs_inode.h>
@ -56,9 +57,7 @@ MALLOC_DEFINE(M_NTFSNTHASH, "NTFS nthash", "NTFS ntnode hash tables");
 static LIST_HEAD(nthashhead, ntnode) *ntfs_nthashtbl;
 static u_long ntfs_nthash;		/* size of hash table - 1 */
 #define	NTNOHASH(device, inum)	(&ntfs_nthashtbl[(minor(device) + (inum)) & ntfs_nthash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock ntfs_nthash_slock;
-#endif
+static struct mtx ntfs_nthash_mtx;
 struct lock ntfs_hashlock;
 
 /*
@ -70,7 +69,7 @@ ntfs_nthashinit()
 	lockinit(&ntfs_hashlock, PINOD, "ntfs_nthashlock", 0, 0);
 	ntfs_nthashtbl = HASHINIT(desiredvnodes, M_NTFSNTHASH, M_WAITOK,
 	    &ntfs_nthash);
-	simple_lock_init(&ntfs_nthash_slock);
+	mtx_init(&ntfs_nthash_mtx, "ntfs nthash", MTX_DEF);
 }
 
 /*
@ -80,6 +79,7 @@ void
 ntfs_nthashdestroy(void)
 {
 	lockdestroy(&ntfs_hashlock);
+	mtx_destroy(&ntfs_nthash_mtx);
 }
 
 /*
@ -93,11 +93,11 @@ ntfs_nthashlookup(dev, inum)
 {
 	struct ntnode *ip;
 
-	simple_lock(&ntfs_nthash_slock);
+	mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
 	for (ip = NTNOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
-	simple_unlock(&ntfs_nthash_slock);
+	mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
 
 	return (ip);
 }
@ -111,11 +111,11 @@ ntfs_nthashins(ip)
 {
 	struct nthashhead *ipp;
 
-	simple_lock(&ntfs_nthash_slock);
+	mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
 	ipp = NTNOHASH(ip->i_dev, ip->i_number);
 	LIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
-	simple_unlock(&ntfs_nthash_slock);
+	mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
 }
 
 /*
@ -125,7 +125,7 @@ void
 ntfs_nthashrem(ip)
 	struct ntnode *ip;
 {
-	simple_lock(&ntfs_nthash_slock);
+	mtx_enter(&ntfs_nthash_mtx, MTX_DEF);
 	if (ip->i_flag & IN_HASHED) {
 		ip->i_flag &= ~IN_HASHED;
 		LIST_REMOVE(ip, i_hash);
@ -134,5 +134,5 @@ ntfs_nthashrem(ip)
 		ip->i_hash.le_prev = NULL;
 #endif
 	}
-	simple_unlock(&ntfs_nthash_slock);
+	mtx_exit(&ntfs_nthash_mtx, MTX_DEF);
}
@ -28,20 +28,6 @@
 #ifndef _MACHINE_LOCK_H_
 #define	_MACHINE_LOCK_H_
 
-/*
- * Simple spin lock.
- * It is an error to hold one of these locks while a process is sleeping.
- */
-struct simplelock {
-	volatile u_int	lock_data;
-};
-
-/* functions in mp_machdep.c */
-void	s_lock_init __P((struct simplelock *));
-void	s_lock __P((struct simplelock *));
-int	s_lock_try __P((struct simplelock *));
-void	s_unlock_np __P((struct simplelock *));
-
 #define	COM_LOCK()
 #define	COM_UNLOCK()
 #define	COM_DISABLE_INTR()	COM_LOCK()
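The deleted header shows how minimal a simplelock was: essentially one volatile word plus spin routines, with no owner tracking or sleep support. After this commit, a field or global of type struct simplelock simply becomes a struct mtx, as in this sketch (struct and field names are hypothetical):

#include <sys/param.h>
#include <sys/mutex.h>

struct mything {
        struct mtx t_lock;      /* was: struct simplelock t_lock; */
        int t_count;
};

static void
mything_bump(struct mything *t)
{
        mtx_enter(&t->t_lock, MTX_DEF);  /* was: s_lock(&t->t_lock); */
        t->t_count++;
        mtx_exit(&t->t_lock, MTX_DEF);   /* was: s_unlock(&t->t_lock); */
}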
@ -228,79 +228,20 @@ globaldata_find(int cpuno)
 	return cpuno_to_globaldata[cpuno];
 }
 
-/* Implementation of simplelocks */
-
-/*
- * Atomically swap the value of *p with val. Return the old value of *p.
- */
-static __inline int
-atomic_xchg(volatile u_int *p, u_int val)
-{
-	u_int32_t oldval, temp;
-
-	__asm__ __volatile__ (
-	    : "=&r"(oldval), "=r"(temp), "=m" (*p)
-	    : "m"(*p), "r"(val)
-	    : "memory");
-	return oldval;
-}
-
-void
-s_lock_init(struct simplelock *lkp)
-{
-
-	lkp->lock_data = 0;
-}
-
-void
-s_lock(struct simplelock *lkp)
-{
-
-	for (;;) {
-		if (s_lock_try(lkp))
-			return;
-
-		/*
-		 * Spin until clear.
-		 */
-		while (lkp->lock_data)
-			;
-	}
-}
-
-int
-s_lock_try(struct simplelock *lkp)
-{
-	u_int32_t oldval, temp;
-
-	__asm__ __volatile__ (
-	    : "=&r"(oldval), "=r"(temp), "=m" (lkp->lock_data)
-	    : "m"(lkp->lock_data)
-	    : "memory");
-
-	if (!oldval) {
-		/*
-		 * It was clear, return success.
-		 */
-		return 1;
-	}
-	return 0;
-}
-
 /* Other stuff */
 
 /* lock around the MP rendezvous */
-static struct simplelock smp_rv_lock;
+static struct mtx smp_rv_mtx;
 
 /* only 1 CPU can panic at a time :) */
-struct simplelock panic_lock;
+struct mtx panic_mtx;
 
 static void
 init_locks(void)
 {
 
-	s_lock_init(&smp_rv_lock);
-	s_lock_init(&panic_lock);
+	mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN);
+	mtx_init(&panic_mtx, "panic", MTX_DEF);
 }
 
 void
@ -436,14 +377,9 @@ smp_rendezvous(void (* setup_func)(void *),
 	       void (* teardown_func)(void *),
 	       void *arg)
 {
-	int s;
-
-	/* disable interrupts on this CPU, save interrupt status */
-	s = save_intr();
-	disable_intr();
-
 	/* obtain rendezvous lock */
-	s_lock(&smp_rv_lock);	/* XXX sleep here? NOWAIT flag? */
+	mtx_enter(&smp_rv_mtx, MTX_SPIN);
 
 	/* set static function pointers */
 	smp_rv_setup_func = setup_func;
@ -457,10 +393,7 @@ smp_rendezvous(void (* setup_func)(void *),
 	smp_rendezvous_action();
 
 	/* release lock */
-	s_unlock(&smp_rv_lock);
-
-	/* restore interrupt flag */
-	restore_intr(s);
+	mtx_exit(&smp_rv_mtx, MTX_SPIN);
 }
 
 static u_int64_t
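Worth noting in the rendezvous hunk: the explicit save_intr()/disable_intr()/restore_intr() calls are deleted along with the simplelock, since a spin mutex is expected to save and disable local interrupt state on entry and restore it on exit. A sketch of the resulting shape, with the rendezvous details elided and my_rv_mtx standing in for the real lock:

#include <sys/param.h>
#include <sys/mutex.h>

static struct mtx my_rv_mtx;    /* illustrative stand-in for smp_rv_mtx */

static void
rendezvous_sketch(void)
{
        /*
         * Entering an MTX_SPIN mutex takes over the interrupt bracketing
         * that save_intr()/disable_intr() used to do by hand.
         */
        mtx_enter(&my_rv_mtx, MTX_SPIN);
        /* ... publish function pointers, run the rendezvous action ... */
        mtx_exit(&my_rv_mtx, MTX_SPIN);
}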
@ -48,7 +48,7 @@ struct mtx;
 /*
  * The general lock structure.  Provides for multiple shared locks,
  * upgrading from shared to exclusive, and sleeping until the lock
- * can be gained. The simple locks are defined in <machine/param.h>.
+ * can be gained.
  */
 struct lock {
 	struct mtx *lk_interlock;	/* lock on remaining fields */
@ -221,22 +221,4 @@ void lockmgr_printinfo __P((struct lock *));
 int	lockstatus __P((struct lock *, struct proc *));
 int	lockcount __P((struct lock *));
 
-#ifdef SIMPLELOCK_DEBUG
-void	_simple_unlock __P((struct simplelock *alp, const char *, int));
-#define	simple_unlock(alp)	_simple_unlock(alp, __FILE__, __LINE__)
-int	_simple_lock_try __P((struct simplelock *alp, const char *, int));
-#define	simple_lock_try(alp)	_simple_lock_try(alp, __FILE__, __LINE__)
-void	_simple_lock __P((struct simplelock *alp, const char *, int));
-#define	simple_lock(alp)	_simple_lock(alp, __FILE__, __LINE__)
-void	simple_lock_init __P((struct simplelock *alp));
-#else /* !SIMPLELOCK_DEBUG */
-#if MAXCPU == 1	/* no multiprocessor locking is necessary */
-#define	NULL_SIMPLELOCKS
-#define	simple_lock_init(alp)
-#define	simple_lock(alp)
-#define	simple_lock_try(alp)	(1)	/* always succeeds */
-#define	simple_unlock(alp)
-#endif /* MAXCPU == 1 */
-#endif /* !SIMPLELOCK_DEBUG */
-
 #endif /* !_LOCK_H_ */
@ -106,7 +106,7 @@ struct resource {
 
 struct rman {
 	struct	resource_head	rm_list;
-	struct	simplelock *rm_slock;	/* mutex used to protect rm_list */
+	struct	mtx *rm_mtx;		/* mutex used to protect rm_list */
 	TAILQ_ENTRY(rman)	rm_link; /* link in list of all rmans */
 	u_long	rm_start;	/* index of globally first entry */
 	u_long	rm_end;		/* index of globally last entry */
@ -78,8 +78,8 @@ struct namecache;
 
 /*
  * Reading or writing any of these items requires holding the appropriate lock.
- * v_freelist is locked by the global vnode_free_list simple lock.
- * v_mntvnodes is locked by the global mntvnodes simple lock.
+ * v_freelist is locked by the global vnode_free_list mutex.
+ * v_mntvnodes is locked by the global mntvnodes mutex.
  * v_flag, v_usecount, v_holdcount and v_writecount are
  * locked by the v_interlock mutex.
  * v_pollinfo is locked by the lock contained inside it.
@ -124,7 +124,7 @@ struct vnode {
 	struct vnode *v_dd;		/* .. vnode */
 	u_long	v_ddid;			/* .. capability identifier */
 	struct {
-		struct	simplelock vpi_lock;	/* lock to protect below */
+		struct	mtx vpi_lock;		/* lock to protect below */
 		struct	selinfo vpi_selinfo;	/* identity of poller(s) */
 		short	vpi_events;	/* what they are looking for */
 		short	vpi_revents;	/* what has happened */
@ -369,7 +369,7 @@ extern struct vnodeop_desc *vnodeop_descs[];
 /*
  * Interlock for scanning list of vnodes attached to a mountpoint
 */
-extern struct simplelock mntvnode_slock;
+extern struct mtx mntvnode_mtx;
 
 /*
  * This macro is very helpful in defining those offsets in the vdesc struct.
@ -576,7 +576,7 @@ int	vinvalbuf __P((struct vnode *vp, int save, struct ucred *cred,
 int	vtruncbuf __P((struct vnode *vp, struct ucred *cred, struct proc *p,
 	    off_t length, int blksize));
 void	vprint __P((char *label, struct vnode *vp));
-int	vrecycle __P((struct vnode *vp, struct simplelock *inter_lkp,
+int	vrecycle __P((struct vnode *vp, struct mtx *inter_lkp,
 	    struct proc *p));
 int	vn_close __P((struct vnode *vp,
 	    int flags, struct ucred *cred, struct proc *p));
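The vrecycle() prototype change reaches every caller: code that holds a vnode-list mutex passes it so vrecycle() can release it before calling vgonel(), and code with nothing held now passes NULL instead of a cast null simplelock pointer (both variants appear in the ffs and ufs hunks below). A caller-side sketch, with mylist_mtx as a hypothetical list lock:

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

static struct mtx mylist_mtx;   /* hypothetical list lock */

static void
recycle_sketch(struct vnode *vp, struct proc *p)
{
        /* Holding a list mutex: vrecycle() drops it when it recycles. */
        mtx_enter(&mylist_mtx, MTX_DEF);
        if (vrecycle(vp, &mylist_mtx, p))
                return;         /* returned with mylist_mtx released */
        mtx_exit(&mylist_mtx, MTX_DEF);

        /* Nothing held: pass NULL, not (struct simplelock *)0. */
        (void)vrecycle(vp, NULL, p);
}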
@ -50,8 +50,7 @@
 #include <sys/fcntl.h>
 #include <sys/disklabel.h>
 #include <sys/malloc.h>
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <ufs/ufs/extattr.h>
 #include <ufs/ufs/quota.h>
@ -455,23 +454,23 @@ ffs_reload(mp, cred, p)
 	}
 
loop:
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
 		if (vp->v_mount != mp) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			goto loop;
 		}
 		nvp = vp->v_mntvnodes.le_next;
 		/*
 		 * Step 4: invalidate all inactive vnodes.
 		 */
-		if (vrecycle(vp, &mntvnode_slock, p))
+		if (vrecycle(vp, &mntvnode_mtx, p))
 			goto loop;
 		/*
 		 * Step 5: invalidate all cached file data.
 		 */
 		mtx_enter(&vp->v_interlock, MTX_DEF);
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
 			goto loop;
 		}
@ -493,9 +492,9 @@ ffs_reload(mp, cred, p)
 		ip->i_effnlink = ip->i_nlink;
 		brelse(bp);
 		vput(vp);
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 	return (0);
 }
 
@ -938,7 +937,7 @@ ffs_sync(mp, waitfor, cred, p)
 		wait = 1;
 		lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
 	}
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
loop:
 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
 		/*
@ -957,9 +956,9 @@ ffs_sync(mp, waitfor, cred, p)
 			continue;
 		}
 		if (vp->v_type != VCHR) {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			if ((error = vget(vp, lockreq, p)) != 0) {
-				simple_lock(&mntvnode_slock);
+				mtx_enter(&mntvnode_mtx, MTX_DEF);
 				if (error == ENOENT)
 					goto loop;
 				continue;
@ -968,15 +967,15 @@ ffs_sync(mp, waitfor, cred, p)
 				allerror = error;
 			VOP_UNLOCK(vp, 0, p);
 			vrele(vp);
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 		} else {
-			simple_unlock(&mntvnode_slock);
+			mtx_exit(&mntvnode_mtx, MTX_DEF);
 			mtx_exit(&vp->v_interlock, MTX_DEF);
 			UFS_UPDATE(vp, wait);
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 		}
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 	/*
 	 * Force stale file system control information to be flushed.
 	 */
@ -985,7 +984,7 @@ ffs_sync(mp, waitfor, cred, p)
 			allerror = error;
 		/* Flushed work items may create new vnodes to clean */
 		if (count) {
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			goto loop;
 		}
 	}
@ -40,8 +40,7 @@
 #include <sys/lock.h>
 #include <sys/vnode.h>
 #include <sys/malloc.h>
-
-#include <machine/mutex.h>
+#include <sys/mutex.h>
 
 #include <ufs/ufs/quota.h>
 #include <ufs/ufs/inode.h>
@ -54,9 +53,7 @@ static MALLOC_DEFINE(M_UFSIHASH, "UFS ihash", "UFS Inode hash tables");
 static LIST_HEAD(ihashhead, inode) *ihashtbl;
 static u_long ihash;		/* size of hash table - 1 */
 #define	INOHASH(device, inum)	(&ihashtbl[(minor(device) + (inum)) & ihash])
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock ufs_ihash_slock;
-#endif
+static struct mtx ufs_ihash_mtx;
 
 /*
  * Initialize inode hash table.
@ -66,7 +63,7 @@ ufs_ihashinit()
 {
 
 	ihashtbl = hashinit(desiredvnodes, M_UFSIHASH, &ihash);
-	simple_lock_init(&ufs_ihash_slock);
+	mtx_init(&ufs_ihash_mtx, "ufs ihash", MTX_DEF);
 }
 
 /*
@ -80,11 +77,11 @@ ufs_ihashlookup(dev, inum)
 {
 	struct inode *ip;
 
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next)
 		if (inum == ip->i_number && dev == ip->i_dev)
 			break;
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 
 	if (ip)
 		return (ITOV(ip));
@ -105,18 +102,18 @@ ufs_ihashget(dev, inum)
 	struct vnode *vp;
 
loop:
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	for (ip = INOHASH(dev, inum)->lh_first; ip; ip = ip->i_hash.le_next) {
 		if (inum == ip->i_number && dev == ip->i_dev) {
 			vp = ITOV(ip);
 			mtx_enter(&vp->v_interlock, MTX_DEF);
-			simple_unlock(&ufs_ihash_slock);
+			mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
 				goto loop;
 			return (vp);
 		}
 	}
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 	return (NULL);
 }
 
@ -133,11 +130,11 @@ ufs_ihashins(ip)
 	/* lock the inode, then put it on the appropriate hash list */
 	lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p);
 
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	ipp = INOHASH(ip->i_dev, ip->i_number);
 	LIST_INSERT_HEAD(ipp, ip, i_hash);
 	ip->i_flag |= IN_HASHED;
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
 }
 
 /*
@ -147,7 +144,7 @@ void
 ufs_ihashrem(ip)
 	struct inode *ip;
 {
-	simple_lock(&ufs_ihash_slock);
+	mtx_enter(&ufs_ihash_mtx, MTX_DEF);
 	if (ip->i_flag & IN_HASHED) {
 		ip->i_flag &= ~IN_HASHED;
 		LIST_REMOVE(ip, i_hash);
@ -156,5 +153,5 @@ ufs_ihashrem(ip)
 		ip->i_hash.le_prev = NULL;
 #endif
 	}
-	simple_unlock(&ufs_ihash_slock);
+	mtx_exit(&ufs_ihash_mtx, MTX_DEF);
}
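ufs_ihashget() is the clearest instance of the interlock handoff used by every hash in this commit: take the hash mutex, find the vnode, take its v_interlock before dropping the hash mutex, and let vget() consume the interlock via LK_INTERLOCK, restarting the scan if vget() fails because the vnode was reclaimed meanwhile. A condensed sketch, with the chain search elided and myhash_mtx as an illustrative name:

#include <sys/param.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

static struct mtx myhash_mtx;

static struct vnode *
myhash_get(struct vnode *vp, struct proc *p)    /* vp: the entry "found" */
{
loop:
        mtx_enter(&myhash_mtx, MTX_DEF);
        /* ... walk the hash chain; assume it yields vp ... */
        mtx_enter(&vp->v_interlock, MTX_DEF);
        mtx_exit(&myhash_mtx, MTX_DEF);
        /*
         * vget() inherits and releases the interlock; failure means
         * the vnode went away while we slept, so rescan.
         */
        if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p))
                goto loop;
        return (vp);
}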
@ -46,6 +46,7 @@
 #include <sys/vnode.h>
 #include <sys/mount.h>
 #include <sys/malloc.h>
+#include <sys/mutex.h>
 
 #include <ufs/ufs/extattr.h>
 #include <ufs/ufs/quota.h>
@ -108,7 +109,7 @@ ufs_inactive(ap)
 	 * so that it can be reused immediately.
 	 */
 	if (ip->i_mode == 0)
-		vrecycle(vp, (struct simplelock *)0, p);
+		vrecycle(vp, NULL, p);
 	return (error);
 }
@ -666,7 +666,7 @@ qsync(mp)
 	 * Search vnodes associated with this mount point,
 	 * synchronizing any modified dquot structures.
 	 */
-	simple_lock(&mntvnode_slock);
+	mtx_enter(&mntvnode_mtx, MTX_DEF);
again:
 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nextvp) {
 		if (vp->v_mount != mp)
@ -675,10 +675,10 @@ qsync(mp)
 		if (vp->v_type == VNON)
 			continue;
 		mtx_enter(&vp->v_interlock, MTX_DEF);
-		simple_unlock(&mntvnode_slock);
+		mtx_exit(&mntvnode_mtx, MTX_DEF);
 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
 		if (error) {
-			simple_lock(&mntvnode_slock);
+			mtx_enter(&mntvnode_mtx, MTX_DEF);
 			if (error == ENOENT)
 				goto again;
 			continue;
@ -689,11 +689,11 @@ qsync(mp)
 				dqsync(vp, dq);
 		}
 		vput(vp);
-		simple_lock(&mntvnode_slock);
+		mtx_enter(&mntvnode_mtx, MTX_DEF);
 		if (vp->v_mntvnodes.le_next != nextvp)
 			goto again;
 	}
-	simple_unlock(&mntvnode_slock);
+	mtx_exit(&mntvnode_mtx, MTX_DEF);
 	return (0);
 }
@ -75,6 +75,7 @@
 #include <sys/vmmeter.h>
 #include <sys/mman.h>
 #include <sys/mount.h>
+#include <sys/mutex.h>
 
 #include <vm/vm.h>
 #include <vm/vm_param.h>
@ -118,9 +119,7 @@ static void vm_object_qcollapse __P((vm_object_t object));
 */
 
 struct object_q vm_object_list;
-#ifndef NULL_SIMPLELOCKS
-static struct simplelock vm_object_list_lock;
-#endif
+static struct mtx vm_object_list_mtx;
 static long vm_object_count;		/* count of all objects */
 vm_object_t kernel_object;
 vm_object_t kmem_object;
@ -189,7 +188,7 @@ void
 vm_object_init()
 {
 	TAILQ_INIT(&vm_object_list);
-	simple_lock_init(&vm_object_list_lock);
+	mtx_init(&vm_object_list_mtx, "vm object_list", MTX_DEF);
 	vm_object_count = 0;
 
 	kernel_object = &kernel_object_store;
@ -459,9 +458,9 @@ vm_object_terminate(object)
 	/*
 	 * Remove the object from the global object list.
 	 */
-	simple_lock(&vm_object_list_lock);
+	mtx_enter(&vm_object_list_mtx, MTX_DEF);
 	TAILQ_REMOVE(&vm_object_list, object, object_list);
-	simple_unlock(&vm_object_list_lock);
+	mtx_exit(&vm_object_list_mtx, MTX_DEF);
 
 	wakeup(object);