Mirror of https://git.FreeBSD.org/src.git (synced 2024-12-11 09:50:12 +00:00)
AST: rework
Make most AST handlers dynamically registered.  This allows subsystem-specific handler source to live in the subsystem files, instead of making subr_trap.c aware of it.  For instance, signal delivery code on return to userspace is now moved to kern_sig.c.

It also allows some handlers to be designated as the cleanup (kclear) type, which are called both at AST and on thread/process exit.  For instance, ast(), exit1(), and the NFS server no longer need to be aware of UFS softdep processing.

The dynamic registration also allows third-party modules to register AST handlers if needed.  There is one caveat with loadable modules: the code does not make any effort to ensure that the module is not unloaded before all threads have passed through its AST handler.  In fact, this is already the present behavior for hwpmc.ko and ufs.ko.  I do not think it is worth the effort and the runtime overhead to try to fix it.

Reviewed by:	markj
Tested by:	emaste (arm64), pho
Discussed with:	jhb
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D35888
commit c6d31b8306
parent 4a5ec55af6
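For context, here is a minimal sketch of how a third-party module could use the interface this commit introduces, based on the ast_register()/ast_sched()/ast_deregister() declarations and the TDA_MOD1 "for third party use" slot added to sys/proc.h further down in the diff. The module name, handler body, and SYSINIT ordering are illustrative assumptions, not part of this commit.

/*
 * Hypothetical example module (not part of this change).  It registers an
 * AST handler in the TDA_MOD1 slot that sys/proc.h reserves for third-party
 * use, schedules it on a thread, and deregisters it on unload.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>

static void
example_ast(struct thread *td, int tda __unused)
{
	/* Runs on the thread's return to userspace once scheduled. */
	printf("example: AST fired for pid %d\n", td->td_proc->p_pid);
}

static void
example_init(void *arg __unused)
{
	/* ASTR_ASTF_REQUIRED: only call when TDAI(TDA_MOD1) is set in td_ast. */
	ast_register(TDA_MOD1, ASTR_ASTF_REQUIRED, 0, example_ast);
}
SYSINIT(example_ast, SI_SUB_INTRINSIC, SI_ORDER_ANY, example_init, NULL);

/* Elsewhere in the module: request the handler for a given thread. */
static void
example_poke(struct thread *td)
{
	ast_sched(td, TDA_MOD1);
}

/*
 * On module unload.  As the commit message notes, deregistration does not
 * drain threads that may still be executing the handler.
 */
static void
example_fini(void *arg __unused)
{
	ast_deregister(TDA_MOD1);
}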
@@ -585,7 +585,7 @@ fast_syscall_common:
 	jnz	4f
 	/* Check for and handle AST's on return to userland. */
 	movq	PCPU(CURTHREAD),%rax
-	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
+	cmpl	$0,TD_AST(%rax)
 	jne	3f
 	call	handle_ibrs_exit
 	callq	*mds_handler
@@ -1141,7 +1141,7 @@ doreti_ast:
 	 */
 	cli
 	movq	PCPU(CURTHREAD),%rax
-	testl	$TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax)
+	cmpl	$0,TD_AST(%rax)
 	je	doreti_exit
 	sti
 	movq	%rsp,%rdi	/* pass a pointer to the trapframe */
@@ -82,6 +82,7 @@ ASSYM(MD_EFIRT_TMP, offsetof(struct mdthread, md_efirt_tmp));
 
 ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
 ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_AST, offsetof(struct thread, td_ast));
 ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
 ASSYM(TD_PFLAGS, offsetof(struct thread, td_pflags));
 ASSYM(TD_PROC, offsetof(struct thread, td_proc));
@@ -90,9 +91,6 @@ ASSYM(TD_MD, offsetof(struct thread, td_md));
 ASSYM(TD_MD_PCB, offsetof(struct thread, td_md.md_pcb));
 ASSYM(TD_MD_STACK_BASE, offsetof(struct thread, td_md.md_stack_base));
 
-ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
-ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
-
 ASSYM(TDP_CALLCHAIN, TDP_CALLCHAIN);
 ASSYM(TDP_KTHREAD, TDP_KTHREAD);
 
@@ -365,13 +365,10 @@ vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
 static int __inline
 vcpu_should_yield(struct vm *vm, int vcpu)
 {
+	struct thread *td;
 
-	if (curthread->td_flags & (TDF_ASTPENDING | TDF_NEEDRESCHED))
-		return (1);
-	else if (curthread->td_owepreempt)
-		return (1);
-	else
-		return (0);
+	td = curthread;
+	return (td->td_ast != 0 || td->td_owepreempt != 0);
 }
 #endif
 
@@ -1331,7 +1331,7 @@ vm_handle_rendezvous(struct vm *vm, int vcpuid)
 		RENDEZVOUS_CTR0(vm, vcpuid, "Wait for rendezvous completion");
 		mtx_sleep(&vm->rendezvous_func, &vm->rendezvous_mtx, 0,
 		    "vmrndv", hz);
-		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
+		if (td_ast_pending(td, TDA_SUSPEND)) {
 			mtx_unlock(&vm->rendezvous_mtx);
 			error = thread_check_susp(td, true);
 			if (error != 0)
@@ -1421,7 +1421,7 @@ vm_handle_hlt(struct vm *vm, int vcpuid, bool intr_disabled, bool *retu)
 		msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
 		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
 		vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
-		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
+		if (td_ast_pending(td, TDA_SUSPEND)) {
 			vcpu_unlock(vcpu);
 			error = thread_check_susp(td, false);
 			if (error != 0)
@@ -1593,7 +1593,7 @@ vm_handle_suspend(struct vm *vm, int vcpuid, bool *retu)
 		vcpu_require_state_locked(vm, vcpuid, VCPU_SLEEPING);
 		msleep_spin(vcpu, &vcpu->mtx, "vmsusp", hz);
 		vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
-		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
+		if (td_ast_pending(td, TDA_SUSPEND)) {
 			vcpu_unlock(vcpu);
 			error = thread_check_susp(td, false);
 			vcpu_lock(vcpu);
@ -175,8 +175,7 @@ _C_LABEL(dtrace_invop_jump_addr):
|
||||
bne 2f; /* Nope, get out now */ \
|
||||
bic r4, r4, #(PSR_I|PSR_F); \
|
||||
1: GET_CURTHREAD_PTR(r5); \
|
||||
ldr r1, [r5, #(TD_FLAGS)]; \
|
||||
and r1, r1, #(TDF_ASTPENDING|TDF_NEEDRESCHED); \
|
||||
ldr r1, [r5, #(TD_AST)]; \
|
||||
teq r1, #0; \
|
||||
beq 2f; /* Nope. Just bail */ \
|
||||
msr cpsr_c, r4; /* Restore interrupts */ \
|
||||
|
@ -89,6 +89,7 @@ ASSYM(IP_DST, offsetof(struct ip, ip_dst));
|
||||
|
||||
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
|
||||
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
|
||||
ASSYM(TD_AST, offsetof(struct thread, td_ast));
|
||||
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
|
||||
ASSYM(TD_MD, offsetof(struct thread, td_md));
|
||||
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
|
||||
@ -116,8 +117,6 @@ ASSYM(PAGE_SIZE, PAGE_SIZE);
|
||||
#ifdef PMAP_INCLUDE_PTE_SYNC
|
||||
ASSYM(PMAP_INCLUDE_PTE_SYNC, 1);
|
||||
#endif
|
||||
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
|
||||
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
|
||||
|
||||
ASSYM(MAXCOMLEN, MAXCOMLEN);
|
||||
ASSYM(MAXCPU, MAXCPU);
|
||||
|
@ -175,15 +175,13 @@ __FBSDID("$FreeBSD$");
|
||||
*/
|
||||
msr daifset, #(DAIF_INTR)
|
||||
|
||||
/* Read the current thread flags */
|
||||
/* Read the current thread AST mask */
|
||||
ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */
|
||||
ldr x2, [x1, #TD_FLAGS]
|
||||
add x1, x1, #(TD_AST)
|
||||
ldr x1, [x1]
|
||||
|
||||
/* Check if we have either bits set */
|
||||
mov x3, #((TDF_ASTPENDING|TDF_NEEDRESCHED) >> 8)
|
||||
lsl x3, x3, #8
|
||||
and x2, x2, x3
|
||||
cbz x2, 2f
|
||||
/* Check if we have a non-zero AST mask */
|
||||
cbz x1, 2f
|
||||
|
||||
/* Restore interrupts */
|
||||
msr daif, x19
|
||||
|
@ -48,9 +48,6 @@ ASSYM(BP_KERN_L0PT, offsetof(struct arm64_bootparams, kern_l0pt));
|
||||
ASSYM(BP_KERN_TTBR0, offsetof(struct arm64_bootparams, kern_ttbr0));
|
||||
ASSYM(BP_BOOT_EL, offsetof(struct arm64_bootparams, boot_el));
|
||||
|
||||
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
|
||||
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
|
||||
|
||||
ASSYM(PCPU_SIZE, sizeof(struct pcpu));
|
||||
ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
|
||||
ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
|
||||
@ -71,6 +68,7 @@ ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
|
||||
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
|
||||
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
|
||||
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
|
||||
ASSYM(TD_AST, offsetof(struct thread, td_ast));
|
||||
ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
|
||||
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
|
||||
ASSYM(TD_MD_CANARY, offsetof(struct thread, td_md.md_canary));
|
||||
|
@ -458,9 +458,7 @@ linux_epoll_wait_ts(struct thread *td, int epfd, struct epoll_event *events,
|
||||
* usermode and TDP_OLDMASK is cleared, restoring old
|
||||
* sigmask.
|
||||
*/
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_SIGSUSPEND);
|
||||
}
|
||||
|
||||
coargs.leventlist = events;
|
||||
|
@ -323,7 +323,7 @@ linux_clone_thread(struct thread *td, struct l_clone_args *args)
|
||||
sched_fork_thread(td, newtd);
|
||||
thread_unlock(td);
|
||||
if (P_SHOULDSTOP(p))
|
||||
newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
|
||||
ast_sched(newtd, TDA_SUSPEND);
|
||||
|
||||
if (p->p_ptevents & PTRACE_LWP)
|
||||
newtd->td_dbgflags |= TDB_BORN;
|
||||
|
@ -130,7 +130,7 @@ put_task_struct(struct task_struct *task)
|
||||
#define yield() kern_yield(PRI_UNCHANGED)
|
||||
#define sched_yield() sched_relinquish(curthread)
|
||||
|
||||
#define need_resched() (curthread->td_flags & TDF_NEEDRESCHED)
|
||||
#define need_resched() td_ast_pending(curthread, TDA_SCHED)
|
||||
|
||||
static inline int
|
||||
cond_resched_lock(spinlock_t *lock)
|
||||
|
@ -430,10 +430,11 @@ pmc_soft_intr(struct pmckern_soft *ks)
|
||||
}
|
||||
|
||||
if (user_mode) {
|
||||
/* If in user mode setup AST to process
|
||||
/*
|
||||
* If in user mode setup AST to process
|
||||
* callchain out of interrupt context.
|
||||
*/
|
||||
curthread->td_flags |= TDF_ASTPENDING;
|
||||
ast_sched(curthread, TDA_HWPMC);
|
||||
}
|
||||
} else
|
||||
pc->soft_values[ri]++;
|
||||
@ -446,6 +447,15 @@ pmc_soft_intr(struct pmckern_soft *ks)
|
||||
return (processed);
|
||||
}
|
||||
|
||||
static void
|
||||
ast_hwpmc(struct thread *td, int tda __unused)
|
||||
{
|
||||
/* Handle Software PMC callchain capture. */
|
||||
if (PMC_IS_PENDING_CALLCHAIN(td))
|
||||
PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT,
|
||||
(void *)td->td_frame);
|
||||
}
|
||||
|
||||
void
|
||||
pmc_soft_initialize(struct pmc_mdep *md)
|
||||
{
|
||||
@ -477,6 +487,8 @@ pmc_soft_initialize(struct pmc_mdep *md)
|
||||
pcd->pcd_stop_pmc = soft_stop_pmc;
|
||||
|
||||
md->pmd_npmc += SOFT_NPMCS;
|
||||
|
||||
ast_register(TDA_HWPMC, ASTR_UNCOND, 0, ast_hwpmc);
|
||||
}
|
||||
|
||||
void
|
||||
@ -493,6 +505,7 @@ pmc_soft_finalize(struct pmc_mdep *md)
|
||||
KASSERT(md->pmd_classdep[PMC_CLASS_INDEX_SOFT].pcd_class ==
|
||||
PMC_CLASS_SOFT, ("[soft,%d] class mismatch", __LINE__));
|
||||
#endif
|
||||
ast_deregister(TDA_HWPMC);
|
||||
free(soft_pcpu, M_PMC);
|
||||
soft_pcpu = NULL;
|
||||
}
|
||||
|
@ -327,7 +327,7 @@ nfssvc_program(struct svc_req *rqst, SVCXPRT *xprt)
|
||||
svc_freereq(rqst);
|
||||
|
||||
out:
|
||||
td_softdep_cleanup(curthread);
|
||||
ast_kclear(curthread);
|
||||
NFSEXITCODE(0);
|
||||
}
|
||||
|
||||
|
@ -96,6 +96,24 @@ g_waitidle(void)
|
||||
curthread->td_pflags &= ~TDP_GEOM;
|
||||
}
|
||||
|
||||
static void
|
||||
ast_geom(struct thread *td __unused, int tda __unused)
|
||||
{
|
||||
/*
|
||||
* If this thread tickled GEOM, we need to wait for the giggling to
|
||||
* stop before we return to userland.
|
||||
*/
|
||||
g_waitidle();
|
||||
}
|
||||
|
||||
static void
|
||||
geom_event_init(void *arg __unused)
|
||||
{
|
||||
ast_register(TDA_GEOM, ASTR_ASTF_REQUIRED | ASTR_TDP | ASTR_KCLEAR,
|
||||
TDP_GEOM, ast_geom);
|
||||
}
|
||||
SYSINIT(geom_event, SI_SUB_INTRINSIC, SI_ORDER_ANY, geom_event_init, NULL);
|
||||
|
||||
struct g_attrchanged_args {
|
||||
struct g_provider *pp;
|
||||
const char *attr;
|
||||
@ -353,9 +371,7 @@ g_post_event_ep_va(g_event_t *func, void *arg, int wuflag,
|
||||
mtx_unlock(&g_eventlock);
|
||||
wakeup(&g_wait_event);
|
||||
curthread->td_pflags |= TDP_GEOM;
|
||||
thread_lock(curthread);
|
||||
curthread->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(curthread);
|
||||
ast_sched(curthread, TDA_GEOM);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -470,7 +470,7 @@ doreti_ast:
|
||||
*/
|
||||
cli
|
||||
movl PCPU(CURTHREAD),%eax
|
||||
testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%eax)
|
||||
cmpl $0,TD_AST(%eax)
|
||||
je doreti_exit
|
||||
sti
|
||||
pushl %esp /* pass a pointer to the trapframe */
|
||||
|
@ -85,6 +85,7 @@ ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
|
||||
ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
|
||||
|
||||
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
|
||||
ASSYM(TD_AST, offsetof(struct thread, td_ast));
|
||||
ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
|
||||
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
|
||||
ASSYM(TD_PFLAGS, offsetof(struct thread, td_pflags));
|
||||
@ -96,9 +97,6 @@ ASSYM(TDP_CALLCHAIN, TDP_CALLCHAIN);
|
||||
ASSYM(P_MD, offsetof(struct proc, p_md));
|
||||
ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));
|
||||
|
||||
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
|
||||
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
|
||||
|
||||
ASSYM(TD0_KSTACK_PAGES, TD0_KSTACK_PAGES);
|
||||
ASSYM(PAGE_SIZE, PAGE_SIZE);
|
||||
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
|
||||
|
@ -385,6 +385,38 @@ DPCPU_DEFINE_STATIC(int, pcputicks); /* Per-CPU version of ticks. */
|
||||
static int devpoll_run = 0;
|
||||
#endif
|
||||
|
||||
static void
|
||||
ast_oweupc(struct thread *td, int tda __unused)
|
||||
{
|
||||
if ((td->td_proc->p_flag & P_PROFIL) == 0)
|
||||
return;
|
||||
addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
|
||||
td->td_profil_ticks = 0;
|
||||
td->td_pflags &= ~TDP_OWEUPC;
|
||||
}
|
||||
|
||||
static void
|
||||
ast_alrm(struct thread *td, int tda __unused)
|
||||
{
|
||||
struct proc *p;
|
||||
|
||||
p = td->td_proc;
|
||||
PROC_LOCK(p);
|
||||
kern_psignal(p, SIGVTALRM);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
|
||||
static void
|
||||
ast_prof(struct thread *td, int tda __unused)
|
||||
{
|
||||
struct proc *p;
|
||||
|
||||
p = td->td_proc;
|
||||
PROC_LOCK(p);
|
||||
kern_psignal(p, SIGPROF);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize clock frequencies and start both clocks running.
|
||||
*/
|
||||
@ -408,6 +440,10 @@ initclocks(void *dummy __unused)
|
||||
profhz = i;
|
||||
psratio = profhz / i;
|
||||
|
||||
ast_register(TDA_OWEUPC, ASTR_ASTF_REQUIRED, 0, ast_oweupc);
|
||||
ast_register(TDA_ALRM, ASTR_ASTF_REQUIRED, 0, ast_alrm);
|
||||
ast_register(TDA_PROF, ASTR_ASTF_REQUIRED, 0, ast_prof);
|
||||
|
||||
#ifdef SW_WATCHDOG
|
||||
/* Enable hardclock watchdog now, even if a hardware watchdog exists. */
|
||||
watchdog_attach();
|
||||
@ -423,30 +459,27 @@ static __noinline void
|
||||
hardclock_itimer(struct thread *td, struct pstats *pstats, int cnt, int usermode)
|
||||
{
|
||||
struct proc *p;
|
||||
int flags;
|
||||
int ast;
|
||||
|
||||
flags = 0;
|
||||
ast = 0;
|
||||
p = td->td_proc;
|
||||
if (usermode &&
|
||||
timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value)) {
|
||||
PROC_ITIMLOCK(p);
|
||||
if (itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL],
|
||||
tick * cnt) == 0)
|
||||
flags |= TDF_ALRMPEND | TDF_ASTPENDING;
|
||||
ast |= TDAI(TDA_ALRM);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value)) {
|
||||
PROC_ITIMLOCK(p);
|
||||
if (itimerdecr(&pstats->p_timer[ITIMER_PROF],
|
||||
tick * cnt) == 0)
|
||||
flags |= TDF_PROFPEND | TDF_ASTPENDING;
|
||||
ast |= TDAI(TDA_PROF);
|
||||
PROC_ITIMUNLOCK(p);
|
||||
}
|
||||
if (flags != 0) {
|
||||
thread_lock(td);
|
||||
td->td_flags |= flags;
|
||||
thread_unlock(td);
|
||||
}
|
||||
if (ast != 0)
|
||||
ast_sched_mask(td, ast);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -1782,8 +1782,8 @@ kqueue_release(struct kqueue *kq, int locked)
|
||||
KQ_UNLOCK(kq);
|
||||
}
|
||||
|
||||
void
|
||||
kqueue_drain_schedtask(void)
|
||||
static void
|
||||
ast_kqueue(struct thread *td, int tda __unused)
|
||||
{
|
||||
taskqueue_quiesce(taskqueue_kqueue_ctx);
|
||||
}
|
||||
@ -1791,8 +1791,6 @@ kqueue_drain_schedtask(void)
|
||||
static void
|
||||
kqueue_schedtask(struct kqueue *kq)
|
||||
{
|
||||
struct thread *td;
|
||||
|
||||
KQ_OWNED(kq);
|
||||
KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
|
||||
("scheduling kqueue task while draining"));
|
||||
@ -1800,10 +1798,7 @@ kqueue_schedtask(struct kqueue *kq)
|
||||
if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
|
||||
taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
|
||||
kq->kq_state |= KQ_TASKSCHED;
|
||||
td = curthread;
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING | TDF_KQTICKLED;
|
||||
thread_unlock(td);
|
||||
ast_sched(curthread, TDA_KQUEUE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2813,6 +2808,7 @@ knote_init(void)
|
||||
|
||||
knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL,
|
||||
NULL, NULL, UMA_ALIGN_PTR, 0);
|
||||
ast_register(TDA_KQUEUE, ASTR_ASTF_REQUIRED, 0, ast_kqueue);
|
||||
}
|
||||
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL);
|
||||
|
||||
|
@ -252,9 +252,11 @@ exit1(struct thread *td, int rval, int signo)
|
||||
}
|
||||
|
||||
/*
|
||||
* Deref SU mp, since the thread does not return to userspace.
|
||||
* Process deferred operations, designated with ASTF_KCLEAR.
|
||||
* For instance, we need to deref SU mp, since the thread does
|
||||
* not return to userspace, and wait for geom to stabilize.
|
||||
*/
|
||||
td_softdep_cleanup(td);
|
||||
ast_kclear(td);
|
||||
|
||||
/*
|
||||
* MUST abort all other threads before proceeding past here.
|
||||
@ -405,13 +407,6 @@ exit1(struct thread *td, int rval, int signo)
|
||||
pdescfree(td);
|
||||
fdescfree(td);
|
||||
|
||||
/*
|
||||
* If this thread tickled GEOM, we need to wait for the giggling to
|
||||
* stop before we return to userland
|
||||
*/
|
||||
if (td->td_pflags & TDP_GEOM)
|
||||
g_waitidle();
|
||||
|
||||
/*
|
||||
* Remove ourself from our leader's peer list and wake our leader.
|
||||
*/
|
||||
|
@ -499,7 +499,7 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
|
||||
* to avoid calling thread_lock() again.
|
||||
*/
|
||||
if ((fr->fr_flags & RFPPWAIT) != 0)
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
ast_sched_locked(td, TDA_VFORK);
|
||||
thread_unlock(td);
|
||||
|
||||
/*
|
||||
@ -814,8 +814,8 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
fork_rfppwait(struct thread *td)
|
||||
static void
|
||||
ast_vfork(struct thread *td, int tda __unused)
|
||||
{
|
||||
struct proc *p, *p2;
|
||||
|
||||
@ -1181,3 +1181,11 @@ fork_return(struct thread *td, struct trapframe *frame)
|
||||
ktrsysret(SYS_fork, 0, 0);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
fork_init(void *arg __unused)
|
||||
{
|
||||
ast_register(TDA_VFORK, ASTR_ASTF_REQUIRED | ASTR_TDP, TDP_RFPPWAIT,
|
||||
ast_vfork);
|
||||
}
|
||||
SYSINIT(fork, SI_SUB_INTRINSIC, SI_ORDER_ANY, fork_init, NULL);
|
||||
|
@ -209,6 +209,12 @@ ktrace_assert(struct thread *td)
|
||||
KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
|
||||
}
|
||||
|
||||
static void
|
||||
ast_ktrace(struct thread *td, int tda __unused)
|
||||
{
|
||||
KTRUSERRET(td);
|
||||
}
|
||||
|
||||
static void
|
||||
ktrace_init(void *dummy)
|
||||
{
|
||||
@ -223,6 +229,8 @@ ktrace_init(void *dummy)
|
||||
M_ZERO);
|
||||
STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
|
||||
}
|
||||
ast_register(TDA_KTRACE, ASTR_UNCOND, 0, ast_ktrace);
|
||||
|
||||
}
|
||||
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
|
||||
|
||||
@ -370,9 +378,7 @@ ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
|
||||
mtx_lock(&ktrace_mtx);
|
||||
STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
|
||||
mtx_unlock(&ktrace_mtx);
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_KTRACE);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -246,8 +246,7 @@ proc_dtor(void *mem, int size, void *arg)
|
||||
#endif
|
||||
/* Free all OSD associated to this thread. */
|
||||
osd_thread_exit(td);
|
||||
td_softdep_cleanup(td);
|
||||
MPASS(td->td_su == NULL);
|
||||
ast_kclear(td);
|
||||
|
||||
/* Make sure all thread destructors are executed */
|
||||
EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
|
||||
|
@ -1098,12 +1098,17 @@ racct_move(struct racct *dest, struct racct *src)
|
||||
RACCT_UNLOCK();
|
||||
}
|
||||
|
||||
void
|
||||
racct_proc_throttled(struct proc *p)
|
||||
static void
|
||||
ast_racct(struct thread *td, int tda __unused)
|
||||
{
|
||||
struct proc *p;
|
||||
|
||||
ASSERT_RACCT_ENABLED();
|
||||
|
||||
p = td->td_proc;
|
||||
if (p->p_throttled == 0)
|
||||
return;
|
||||
|
||||
PROC_LOCK(p);
|
||||
while (p->p_throttled != 0) {
|
||||
msleep(p->p_racct, &p->p_mtx, 0, "racct",
|
||||
@ -1144,24 +1149,24 @@ racct_proc_throttle(struct proc *p, int timeout)
|
||||
|
||||
FOREACH_THREAD_IN_PROC(p, td) {
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
ast_sched_locked(td, TDA_RACCT);
|
||||
|
||||
switch (TD_GET_STATE(td)) {
|
||||
case TDS_RUNQ:
|
||||
/*
|
||||
* If the thread is on the scheduler run-queue, we can
|
||||
* not just remove it from there. So we set the flag
|
||||
* TDF_NEEDRESCHED for the thread, so that once it is
|
||||
* TDA_SCHED for the thread, so that once it is
|
||||
* running, it is taken off the cpu as soon as possible.
|
||||
*/
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
break;
|
||||
case TDS_RUNNING:
|
||||
/*
|
||||
* If the thread is running, we request a context
|
||||
* switch for it by setting the TDF_NEEDRESCHED flag.
|
||||
* switch for it by setting the TDA_SCHED flag.
|
||||
*/
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
#ifdef SMP
|
||||
cpuid = td->td_oncpu;
|
||||
if ((cpuid != NOCPU) && (td != curthread))
|
||||
@ -1355,6 +1360,8 @@ racct_init(void)
|
||||
|
||||
racct_zone = uma_zcreate("racct", sizeof(struct racct),
|
||||
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
|
||||
ast_register(TDA_RACCT, ASTR_ASTF_REQUIRED, 0, ast_racct);
|
||||
|
||||
/*
|
||||
* XXX: Move this somewhere.
|
||||
*/
|
||||
|
@ -274,6 +274,79 @@ static int sigproptbl[NSIG] = {
|
||||
|
||||
sigset_t fastblock_mask;
|
||||
|
||||
static void
|
||||
ast_sig(struct thread *td, int tda)
|
||||
{
|
||||
struct proc *p;
|
||||
int sig;
|
||||
bool resched_sigs;
|
||||
|
||||
p = td->td_proc;
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (p->p_numthreads == 1 && (tda & (TDAI(TDA_SIG) |
|
||||
TDAI(TDA_AST))) == 0) {
|
||||
PROC_LOCK(p);
|
||||
thread_lock(td);
|
||||
/*
|
||||
* Note that TDA_SIG should be re-read from
|
||||
* td_ast, since signal might have been delivered
|
||||
* after we cleared td_flags above. This is one of
|
||||
* the reason for looping check for AST condition.
|
||||
* See comment in userret() about P_PPWAIT.
|
||||
*/
|
||||
if ((p->p_flag & P_PPWAIT) == 0 &&
|
||||
(td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
|
||||
if (SIGPENDING(td) && ((tda | td->td_ast) &
|
||||
(TDAI(TDA_SIG) | TDAI(TDA_AST))) == 0) {
|
||||
thread_unlock(td); /* fix dumps */
|
||||
panic(
|
||||
"failed2 to set signal flags for ast p %p "
|
||||
"td %p tda %#x td_ast %#x fl %#x",
|
||||
p, td, tda, td->td_ast, td->td_flags);
|
||||
}
|
||||
}
|
||||
thread_unlock(td);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Check for signals. Unlocked reads of p_pendingcnt or
|
||||
* p_siglist might cause process-directed signal to be handled
|
||||
* later.
|
||||
*/
|
||||
if ((tda & TDA_SIG) != 0 || p->p_pendingcnt > 0 ||
|
||||
!SIGISEMPTY(p->p_siglist)) {
|
||||
sigfastblock_fetch(td);
|
||||
PROC_LOCK(p);
|
||||
mtx_lock(&p->p_sigacts->ps_mtx);
|
||||
while ((sig = cursig(td)) != 0) {
|
||||
KASSERT(sig >= 0, ("sig %d", sig));
|
||||
postsig(sig);
|
||||
}
|
||||
mtx_unlock(&p->p_sigacts->ps_mtx);
|
||||
PROC_UNLOCK(p);
|
||||
resched_sigs = true;
|
||||
} else {
|
||||
resched_sigs = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle deferred update of the fast sigblock value, after
|
||||
* the postsig() loop was performed.
|
||||
*/
|
||||
sigfastblock_setpend(td, resched_sigs);
|
||||
}
|
||||
|
||||
static void
|
||||
ast_sigsuspend(struct thread *td, int tda __unused)
|
||||
{
|
||||
MPASS((td->td_pflags & TDP_OLDMASK) != 0);
|
||||
td->td_pflags &= ~TDP_OLDMASK;
|
||||
kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
sigqueue_start(void)
|
||||
{
|
||||
@ -285,6 +358,9 @@ sigqueue_start(void)
|
||||
p31b_setcfg(CTL_P1003_1B_SIGQUEUE_MAX, max_pending_per_proc);
|
||||
SIGFILLSET(fastblock_mask);
|
||||
SIG_CANTMASK(fastblock_mask);
|
||||
ast_register(TDA_SIG, ASTR_UNCOND, 0, ast_sig);
|
||||
ast_register(TDA_SIGSUSPEND, ASTR_ASTF_REQUIRED | ASTR_TDP,
|
||||
TDP_OLDMASK, ast_sigsuspend);
|
||||
}
|
||||
|
||||
ksiginfo_t *
|
||||
@ -644,11 +720,8 @@ signotify(struct thread *td)
|
||||
|
||||
PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
|
||||
|
||||
if (SIGPENDING(td)) {
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
}
|
||||
if (SIGPENDING(td))
|
||||
ast_sched(td, TDA_SIG);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1544,6 +1617,7 @@ kern_sigsuspend(struct thread *td, sigset_t mask)
|
||||
kern_sigprocmask(td, SIG_SETMASK, &mask, &td->td_oldsigmask,
|
||||
SIGPROCMASK_PROC_LOCKED);
|
||||
td->td_pflags |= TDP_OLDMASK;
|
||||
ast_sched(td, TDA_SIGSUSPEND);
|
||||
|
||||
/*
|
||||
* Process signals now. Otherwise, we can get spurious wakeup
|
||||
@ -2587,7 +2661,7 @@ sig_suspend_threads(struct thread *td, struct proc *p, int sending)
|
||||
wakeup_swapper = 0;
|
||||
FOREACH_THREAD_IN_PROC(p, td2) {
|
||||
thread_lock(td2);
|
||||
td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
|
||||
ast_sched_locked(td2, TDA_SUSPEND);
|
||||
if ((TD_IS_SLEEPING(td2) || TD_IS_SWAPPED(td2)) &&
|
||||
(td2->td_flags & TDF_SINTR)) {
|
||||
if (td2->td_flags & TDF_SBDRY) {
|
||||
@ -2608,7 +2682,7 @@ sig_suspend_threads(struct thread *td, struct proc *p, int sending)
|
||||
thread_suspend_one(td2);
|
||||
} else if (!TD_IS_SUSPENDED(td2)) {
|
||||
if (sending || td != td2)
|
||||
td2->td_flags |= TDF_ASTPENDING;
|
||||
ast_sched_locked(td2, TDA_AST);
|
||||
#ifdef SMP
|
||||
if (TD_IS_RUNNING(td2) && td2 != td)
|
||||
forward_signal(td2);
|
||||
@ -3268,7 +3342,7 @@ sig_ast_checksusp(struct thread *td)
|
||||
p = td->td_proc;
|
||||
PROC_LOCK_ASSERT(p, MA_OWNED);
|
||||
|
||||
if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
|
||||
if (!td_ast_pending(td, TDA_SUSPEND))
|
||||
return (0);
|
||||
|
||||
ret = thread_suspend_check(1);
|
||||
@ -3286,7 +3360,7 @@ sig_ast_needsigchk(struct thread *td)
|
||||
p = td->td_proc;
|
||||
PROC_LOCK_ASSERT(p, MA_OWNED);
|
||||
|
||||
if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
|
||||
if (!td_ast_pending(td, TDA_SIG))
|
||||
return (0);
|
||||
|
||||
ps = p->p_sigacts;
|
||||
@ -3332,7 +3406,7 @@ sig_intr(void)
|
||||
int ret;
|
||||
|
||||
td = curthread;
|
||||
if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
|
||||
if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
|
||||
return (0);
|
||||
|
||||
p = td->td_proc;
|
||||
@ -3354,7 +3428,7 @@ curproc_sigkilled(void)
|
||||
bool res;
|
||||
|
||||
td = curthread;
|
||||
if ((td->td_flags & TDF_NEEDSIGCHK) == 0)
|
||||
if (!td_ast_pending(td, TDA_SIG))
|
||||
return (false);
|
||||
|
||||
p = td->td_proc;
|
||||
@ -4224,9 +4298,7 @@ sigfastblock_resched(struct thread *td, bool resched)
|
||||
reschedule_signals(p, td->td_sigmask, 0);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING | TDF_NEEDSIGCHK;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_SIG);
|
||||
}
|
||||
|
||||
int
|
||||
|
@ -632,11 +632,27 @@ loadav(void *arg)
|
||||
loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
|
||||
}
|
||||
|
||||
/* ARGSUSED */
|
||||
static void
|
||||
synch_setup(void *dummy)
|
||||
ast_scheduler(struct thread *td, int tda __unused)
|
||||
{
|
||||
#ifdef KTRACE
|
||||
if (KTRPOINT(td, KTR_CSW))
|
||||
ktrcsw(1, 1, __func__);
|
||||
#endif
|
||||
thread_lock(td);
|
||||
sched_prio(td, td->td_user_pri);
|
||||
mi_switch(SW_INVOL | SWT_NEEDRESCHED);
|
||||
#ifdef KTRACE
|
||||
if (KTRPOINT(td, KTR_CSW))
|
||||
ktrcsw(0, 1, __func__);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
synch_setup(void *dummy __unused)
|
||||
{
|
||||
callout_init(&loadav_callout, 1);
|
||||
ast_register(TDA_SCHED, ASTR_ASTF_REQUIRED, 0, ast_scheduler);
|
||||
|
||||
/* Kick off timeout driven events by calling first time. */
|
||||
loadav(NULL);
|
||||
|
@ -257,7 +257,7 @@ thread_create(struct thread *td, struct rtprio *rtp,
|
||||
sched_fork_thread(td, newtd);
|
||||
thread_unlock(td);
|
||||
if (P_SHOULDSTOP(p))
|
||||
newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
|
||||
ast_sched(newtd, TDA_SUSPEND);
|
||||
if (p->p_ptevents & PTRACE_LWP)
|
||||
newtd->td_dbgflags |= TDB_BORN;
|
||||
|
||||
|
@ -87,11 +87,11 @@ __FBSDID("$FreeBSD$");
|
||||
#ifdef __amd64__
|
||||
_Static_assert(offsetof(struct thread, td_flags) == 0x108,
|
||||
"struct thread KBI td_flags");
|
||||
_Static_assert(offsetof(struct thread, td_pflags) == 0x110,
|
||||
_Static_assert(offsetof(struct thread, td_pflags) == 0x114,
|
||||
"struct thread KBI td_pflags");
|
||||
_Static_assert(offsetof(struct thread, td_frame) == 0x4a8,
|
||||
_Static_assert(offsetof(struct thread, td_frame) == 0x4b0,
|
||||
"struct thread KBI td_frame");
|
||||
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
|
||||
_Static_assert(offsetof(struct thread, td_emuldata) == 0x6c0,
|
||||
"struct thread KBI td_emuldata");
|
||||
_Static_assert(offsetof(struct proc, p_flag) == 0xb8,
|
||||
"struct proc KBI p_flag");
|
||||
@ -107,11 +107,11 @@ _Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
|
||||
#ifdef __i386__
|
||||
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
|
||||
"struct thread KBI td_flags");
|
||||
_Static_assert(offsetof(struct thread, td_pflags) == 0xa4,
|
||||
_Static_assert(offsetof(struct thread, td_pflags) == 0xa8,
|
||||
"struct thread KBI td_pflags");
|
||||
_Static_assert(offsetof(struct thread, td_frame) == 0x308,
|
||||
_Static_assert(offsetof(struct thread, td_frame) == 0x30c,
|
||||
"struct thread KBI td_frame");
|
||||
_Static_assert(offsetof(struct thread, td_emuldata) == 0x34c,
|
||||
_Static_assert(offsetof(struct thread, td_emuldata) == 0x350,
|
||||
"struct thread KBI td_emuldata");
|
||||
_Static_assert(offsetof(struct proc, p_flag) == 0x6c,
|
||||
"struct proc KBI p_flag");
|
||||
@ -406,8 +406,7 @@ thread_dtor(void *mem, int size, void *arg)
|
||||
#endif
|
||||
/* Free all OSD associated to this thread. */
|
||||
osd_thread_exit(td);
|
||||
td_softdep_cleanup(td);
|
||||
MPASS(td->td_su == NULL);
|
||||
ast_kclear(td);
|
||||
seltdfini(td);
|
||||
}
|
||||
|
||||
@ -479,6 +478,21 @@ proc_linkup(struct proc *p, struct thread *td)
|
||||
thread_link(td, p);
|
||||
}
|
||||
|
||||
static void
|
||||
ast_suspend(struct thread *td, int tda __unused)
|
||||
{
|
||||
struct proc *p;
|
||||
|
||||
p = td->td_proc;
|
||||
/*
|
||||
* We need to check to see if we have to exit or wait due to a
|
||||
* single threading requirement or some other STOP condition.
|
||||
*/
|
||||
PROC_LOCK(p);
|
||||
thread_suspend_check(0);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
|
||||
extern int max_threads_per_proc;
|
||||
|
||||
/*
|
||||
@ -544,6 +558,7 @@ threadinit(void)
|
||||
callout_init(&thread_reap_callout, 1);
|
||||
callout_reset(&thread_reap_callout, 5 * hz,
|
||||
thread_reap_callout_cb, NULL);
|
||||
ast_register(TDA_SUSPEND, ASTR_ASTF_REQUIRED, 0, ast_suspend);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1246,7 +1261,7 @@ thread_single(struct proc *p, int mode)
|
||||
if (td2 == td)
|
||||
continue;
|
||||
thread_lock(td2);
|
||||
td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
|
||||
ast_sched_locked(td2, TDA_SUSPEND);
|
||||
if (TD_IS_INHIBITED(td2)) {
|
||||
wakeup_swapper |= weed_inhib(mode, td2, p);
|
||||
#ifdef SMP
|
||||
@ -1492,10 +1507,10 @@ thread_check_susp(struct thread *td, bool sleep)
|
||||
int error;
|
||||
|
||||
/*
|
||||
* The check for TDF_NEEDSUSPCHK is racy, but it is enough to
|
||||
* The check for TDA_SUSPEND is racy, but it is enough to
|
||||
* eventually break the lockstep loop.
|
||||
*/
|
||||
if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
|
||||
if (!td_ast_pending(td, TDA_SUSPEND))
|
||||
return (0);
|
||||
error = 0;
|
||||
p = td->td_proc;
|
||||
@ -1526,7 +1541,7 @@ thread_suspend_switch(struct thread *td, struct proc *p)
|
||||
}
|
||||
PROC_UNLOCK(p);
|
||||
thread_lock(td);
|
||||
td->td_flags &= ~TDF_NEEDSUSPCHK;
|
||||
ast_unsched_locked(td, TDA_SUSPEND);
|
||||
TD_SET_SUSPENDED(td);
|
||||
sched_sleep(td, 0);
|
||||
PROC_SUNLOCK(p);
|
||||
@ -1547,7 +1562,7 @@ thread_suspend_one(struct thread *td)
|
||||
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
||||
KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
|
||||
p->p_suspcount++;
|
||||
td->td_flags &= ~TDF_NEEDSUSPCHK;
|
||||
ast_unsched_locked(td, TDA_SUSPEND);
|
||||
TD_SET_SUSPENDED(td);
|
||||
sched_sleep(td, 0);
|
||||
}
|
||||
|
@ -307,7 +307,7 @@ maybe_resched(struct thread *td)
|
||||
|
||||
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
||||
if (td->td_priority < curthread->td_priority)
|
||||
curthread->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(curthread, TDA_SCHED);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -755,8 +755,10 @@ sched_clock_tick(struct thread *td)
|
||||
SCHED_STAT_INC(ithread_demotions);
|
||||
sched_prio(td, td->td_base_pri + RQ_PPQ);
|
||||
}
|
||||
} else
|
||||
td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
|
||||
} else {
|
||||
td->td_flags |= TDF_SLICEEND;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
}
|
||||
}
|
||||
|
||||
stat = DPCPU_PTR(idlestat);
|
||||
@ -971,7 +973,7 @@ sched_lend_user_prio(struct thread *td, u_char prio)
|
||||
if (td->td_priority > td->td_user_pri)
|
||||
sched_prio(td, td->td_user_pri);
|
||||
else if (td->td_priority != td->td_user_pri)
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1022,7 +1024,8 @@ sched_switch(struct thread *td, int flags)
|
||||
td->td_lastcpu = td->td_oncpu;
|
||||
preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
|
||||
(flags & SW_PREEMPT) != 0;
|
||||
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
|
||||
td->td_flags &= ~TDF_SLICEEND;
|
||||
ast_unsched_locked(td, TDA_SCHED);
|
||||
td->td_owepreempt = 0;
|
||||
td->td_oncpu = NOCPU;
|
||||
|
||||
@ -1279,7 +1282,7 @@ kick_other_cpu(int pri, int cpuid)
|
||||
}
|
||||
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
|
||||
|
||||
pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(pcpu->pc_curthread, TDA_SCHED);
|
||||
ipi_cpu(cpuid, IPI_AST);
|
||||
return;
|
||||
}
|
||||
@ -1843,7 +1846,7 @@ sched_affinity(struct thread *td)
|
||||
if (THREAD_CAN_SCHED(td, td->td_oncpu))
|
||||
return;
|
||||
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
if (td != curthread)
|
||||
ipi_cpu(cpu, IPI_AST);
|
||||
break;
|
||||
|
@ -874,7 +874,8 @@ sched_balance_group(struct cpu_group *cg)
|
||||
if (td->td_lock == TDQ_LOCKPTR(tdq) &&
|
||||
(td->td_flags & TDF_IDLETD) == 0 &&
|
||||
THREAD_CAN_MIGRATE(td)) {
|
||||
td->td_flags |= TDF_NEEDRESCHED | TDF_PICKCPU;
|
||||
td->td_flags |= TDF_PICKCPU;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
if (high != curcpu)
|
||||
ipi_cpu(high, IPI_AST);
|
||||
}
|
||||
@ -1998,7 +1999,7 @@ sched_lend_user_prio(struct thread *td, u_char prio)
|
||||
if (td->td_priority > td->td_user_pri)
|
||||
sched_prio(td, td->td_user_pri);
|
||||
else if (td->td_priority != td->td_user_pri)
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2211,7 +2212,8 @@ sched_switch(struct thread *td, int flags)
|
||||
td->td_lastcpu = td->td_oncpu;
|
||||
preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
|
||||
(flags & SW_PREEMPT) != 0;
|
||||
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_PICKCPU | TDF_SLICEEND);
|
||||
td->td_flags &= ~(TDF_PICKCPU | TDF_SLICEEND);
|
||||
ast_unsched_locked(td, TDA_SCHED);
|
||||
td->td_owepreempt = 0;
|
||||
atomic_store_char(&tdq->tdq_owepreempt, 0);
|
||||
if (!TD_IS_IDLETHREAD(td))
|
||||
@ -2644,8 +2646,10 @@ sched_clock(struct thread *td, int cnt)
|
||||
SCHED_STAT_INC(ithread_demotions);
|
||||
sched_prio(td, td->td_base_pri + RQ_PPQ);
|
||||
}
|
||||
} else
|
||||
td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
|
||||
} else {
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
td->td_flags |= TDF_SLICEEND;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2720,7 +2724,7 @@ sched_setpreempt(int pri)
|
||||
|
||||
cpri = ctd->td_priority;
|
||||
if (pri < cpri)
|
||||
ctd->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(ctd, TDA_SCHED);
|
||||
if (KERNEL_PANICKED() || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
|
||||
return;
|
||||
if (!sched_shouldpreempt(pri, cpri, 0))
|
||||
@ -2892,7 +2896,7 @@ sched_affinity(struct thread *td)
|
||||
* target thread is not running locally send an ipi to force
|
||||
* the issue.
|
||||
*/
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
ast_sched_locked(td, TDA_SCHED);
|
||||
if (td != curthread)
|
||||
ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
|
||||
#endif
|
||||
|
@ -130,9 +130,7 @@ addupc_intr(struct thread *td, uintfptr_t pc, u_int ticks)
|
||||
td->td_profil_addr = pc;
|
||||
td->td_profil_ticks = ticks;
|
||||
td->td_pflags |= TDP_OWEUPC;
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_OWEUPC);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -452,7 +452,7 @@ sleepq_check_ast_sc_locked(struct thread *td, struct sleepqueue_chain *sc)
|
||||
* thread. If not, we can switch immediately.
|
||||
*/
|
||||
thread_lock(td);
|
||||
if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0)
|
||||
if (!td_ast_pending(td, TDA_SIG) && !td_ast_pending(td, TDA_SUSPEND))
|
||||
return (0);
|
||||
|
||||
thread_unlock(td);
|
||||
|
@ -186,7 +186,7 @@ forward_signal(struct thread *td)
|
||||
int id;
|
||||
|
||||
/*
|
||||
* signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
|
||||
* signotify() has already set TDA_AST and TDA_SIG on td_ast for
|
||||
* this thread, so all we need to do is poke it if it is currently
|
||||
* executing so that it executes ast().
|
||||
*/
|
||||
|
@ -89,8 +89,6 @@ __FBSDID("$FreeBSD$");
|
||||
|
||||
#include <security/mac/mac_framework.h>
|
||||
|
||||
void (*softdep_ast_cleanup)(struct thread *);
|
||||
|
||||
/*
|
||||
* Define the code needed before returning to user mode, for trap and
|
||||
* syscall.
|
||||
@ -118,15 +116,14 @@ userret(struct thread *td, struct trapframe *frame)
|
||||
PROC_LOCK(p);
|
||||
thread_lock(td);
|
||||
if ((p->p_flag & P_PPWAIT) == 0 &&
|
||||
(td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
|
||||
if (SIGPENDING(td) && (td->td_flags &
|
||||
(TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
|
||||
(TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
|
||||
thread_unlock(td);
|
||||
panic(
|
||||
"failed to set signal flags for ast p %p td %p fl %x",
|
||||
p, td, td->td_flags);
|
||||
}
|
||||
(td->td_pflags & TDP_SIGFASTBLOCK) == 0 &&
|
||||
SIGPENDING(td) && !td_ast_pending(td, TDA_AST) &&
|
||||
!td_ast_pending(td, TDA_SIG)) {
|
||||
thread_unlock(td);
|
||||
panic(
|
||||
"failed to set signal flags for ast p %p "
|
||||
"td %p td_ast %#x fl %#x",
|
||||
p, td, td->td_ast, td->td_flags);
|
||||
}
|
||||
thread_unlock(td);
|
||||
PROC_UNLOCK(p);
|
||||
@ -205,184 +202,195 @@ userret(struct thread *td, struct trapframe *frame)
|
||||
#endif
|
||||
}
|
||||
|
||||
static void
|
||||
ast_prep(struct thread *td, int tda __unused)
|
||||
{
|
||||
VM_CNT_INC(v_trap);
|
||||
td->td_pticks = 0;
|
||||
if (td->td_cowgen != atomic_load_int(&td->td_proc->p_cowgen))
|
||||
thread_cow_update(td);
|
||||
|
||||
}
|
||||
|
||||
struct ast_entry {
|
||||
int ae_flags;
|
||||
int ae_tdp;
|
||||
void (*ae_f)(struct thread *td, int ast);
|
||||
};
|
||||
|
||||
_Static_assert(TDAI(TDA_MAX) <= UINT_MAX, "Too many ASTs");
|
||||
|
||||
static struct ast_entry ast_entries[TDA_MAX] __read_mostly = {
|
||||
[TDA_AST] = { .ae_f = ast_prep, .ae_flags = ASTR_UNCOND},
|
||||
};
|
||||
|
||||
void
|
||||
ast_register(int ast, int flags, int tdp,
|
||||
void (*f)(struct thread *, int asts))
|
||||
{
|
||||
struct ast_entry *ae;
|
||||
|
||||
MPASS(ast < TDA_MAX);
|
||||
MPASS((flags & ASTR_TDP) == 0 || ((flags & ASTR_ASTF_REQUIRED) != 0
|
||||
&& __bitcount(tdp) == 1));
|
||||
ae = &ast_entries[ast];
|
||||
MPASS(ae->ae_f == NULL);
|
||||
ae->ae_flags = flags;
|
||||
ae->ae_tdp = tdp;
|
||||
atomic_interrupt_fence();
|
||||
ae->ae_f = f;
|
||||
}
|
||||
|
||||
/*
|
||||
* XXXKIB Note that the deregistration of an AST handler does not
|
||||
* drain threads possibly executing it, which affects unloadable
|
||||
* modules. The issue is either handled by the subsystem using
|
||||
* handlers, or simply ignored. Fixing the problem is considered not
|
||||
* worth the overhead.
|
||||
*/
|
||||
void
|
||||
ast_deregister(int ast)
|
||||
{
|
||||
struct ast_entry *ae;
|
||||
|
||||
MPASS(ast < TDA_MAX);
|
||||
ae = &ast_entries[ast];
|
||||
MPASS(ae->ae_f != NULL);
|
||||
ae->ae_f = NULL;
|
||||
atomic_interrupt_fence();
|
||||
ae->ae_flags = 0;
|
||||
ae->ae_tdp = 0;
|
||||
}
|
||||
|
||||
void
|
||||
ast_sched_locked(struct thread *td, int tda)
|
||||
{
|
||||
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
||||
MPASS(tda < TDA_MAX);
|
||||
|
||||
td->td_ast |= TDAI(tda);
|
||||
}
|
||||
|
||||
void
|
||||
ast_unsched_locked(struct thread *td, int tda)
|
||||
{
|
||||
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
||||
MPASS(tda < TDA_MAX);
|
||||
|
||||
td->td_ast &= ~TDAI(tda);
|
||||
}
|
||||
|
||||
void
|
||||
ast_sched(struct thread *td, int tda)
|
||||
{
|
||||
thread_lock(td);
|
||||
ast_sched_locked(td, tda);
|
||||
thread_unlock(td);
|
||||
}
|
||||
|
||||
void
|
||||
ast_sched_mask(struct thread *td, int ast)
|
||||
{
|
||||
thread_lock(td);
|
||||
td->td_ast |= ast;
|
||||
thread_unlock(td);
|
||||
}
|
||||
|
||||
static bool
|
||||
ast_handler_calc_tdp_run(struct thread *td, const struct ast_entry *ae)
|
||||
{
|
||||
return ((ae->ae_flags & ASTR_TDP) == 0 ||
|
||||
(td->td_pflags & ae->ae_tdp) != 0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Process an asynchronous software trap.
|
||||
* This is relatively easy.
|
||||
* This function will return with preemption disabled.
|
||||
*/
|
||||
static void
|
||||
ast_handler(struct thread *td, struct trapframe *framep, bool dtor)
|
||||
{
|
||||
struct ast_entry *ae;
|
||||
void (*f)(struct thread *td, int asts);
|
||||
int a, td_ast;
|
||||
bool run;
|
||||
|
||||
if (framep != NULL) {
|
||||
kmsan_mark(framep, sizeof(*framep), KMSAN_STATE_INITED);
|
||||
td->td_frame = framep;
|
||||
}
|
||||
|
||||
if (__predict_true(!dtor)) {
|
||||
WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
|
||||
mtx_assert(&Giant, MA_NOTOWNED);
|
||||
THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
|
||||
|
||||
/*
|
||||
* This updates the td_ast for the checks below in one
|
||||
* atomic operation with turning off all scheduled AST's.
|
||||
* If another AST is triggered while we are handling the
|
||||
* AST's saved in td_ast, the td_ast is again non-zero and
|
||||
* ast() will be called again.
|
||||
*/
|
||||
thread_lock(td);
|
||||
td_ast = td->td_ast;
|
||||
td->td_ast = 0;
|
||||
thread_unlock(td);
|
||||
} else {
|
||||
/*
|
||||
* The td thread's td_lock is not guaranteed to exist,
|
||||
* the thread might be not initialized enough when it's
|
||||
* destructor is called. It is safe to read and
|
||||
* update td_ast without locking since the thread is
|
||||
* not runnable or visible to other threads.
|
||||
*/
|
||||
td_ast = td->td_ast;
|
||||
td->td_ast = 0;
|
||||
}
|
||||
|
||||
CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, td->td_proc->p_pid,
|
||||
td->td_proc->p_comm);
|
||||
KASSERT(framep == NULL || TRAPF_USERMODE(framep),
|
||||
("ast in kernel mode"));
|
||||
|
||||
for (a = 0; a < nitems(ast_entries); a++) {
|
||||
ae = &ast_entries[a];
|
||||
f = ae->ae_f;
|
||||
if (f == NULL)
|
||||
continue;
|
||||
atomic_interrupt_fence();
|
||||
|
||||
run = false;
|
||||
if (__predict_false(framep == NULL)) {
|
||||
if ((ae->ae_flags & ASTR_KCLEAR) != 0)
|
||||
run = ast_handler_calc_tdp_run(td, ae);
|
||||
} else {
|
||||
if ((ae->ae_flags & ASTR_UNCOND) != 0)
|
||||
run = true;
|
||||
else if ((ae->ae_flags & ASTR_ASTF_REQUIRED) != 0 &&
|
||||
(td_ast & TDAI(a)) != 0)
|
||||
run = ast_handler_calc_tdp_run(td, ae);
|
||||
}
|
||||
if (run)
|
||||
f(td, td_ast);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
ast(struct trapframe *framep)
|
||||
{
|
||||
struct thread *td;
|
||||
struct proc *p;
|
||||
int flags, sig;
|
||||
bool resched_sigs;
|
||||
|
||||
kmsan_mark(framep, sizeof(*framep), KMSAN_STATE_INITED);
|
||||
|
||||
td = curthread;
|
||||
p = td->td_proc;
|
||||
|
||||
CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
|
||||
p->p_comm);
|
||||
KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
|
||||
WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
|
||||
mtx_assert(&Giant, MA_NOTOWNED);
|
||||
THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
|
||||
td->td_frame = framep;
|
||||
td->td_pticks = 0;
|
||||
|
||||
/*
|
||||
* This updates the td_flag's for the checks below in one
|
||||
* "atomic" operation with turning off the astpending flag.
|
||||
* If another AST is triggered while we are handling the
|
||||
* AST's saved in flags, the astpending flag will be set and
|
||||
* ast() will be called again.
|
||||
*/
|
||||
thread_lock(td);
|
||||
flags = td->td_flags;
|
||||
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
|
||||
TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND |
|
||||
TDF_KQTICKLED);
|
||||
thread_unlock(td);
|
||||
VM_CNT_INC(v_trap);
|
||||
|
||||
if (td->td_cowgen != atomic_load_int(&p->p_cowgen))
|
||||
thread_cow_update(td);
|
||||
if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
|
||||
addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
|
||||
td->td_profil_ticks = 0;
|
||||
td->td_pflags &= ~TDP_OWEUPC;
|
||||
}
|
||||
#ifdef HWPMC_HOOKS
|
||||
/* Handle Software PMC callchain capture. */
|
||||
if (PMC_IS_PENDING_CALLCHAIN(td))
|
||||
PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (void *) framep);
|
||||
#endif
|
||||
if ((td->td_pflags & TDP_RFPPWAIT) != 0)
|
||||
fork_rfppwait(td);
|
||||
if (flags & TDF_ALRMPEND) {
|
||||
PROC_LOCK(p);
|
||||
kern_psignal(p, SIGVTALRM);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
if (flags & TDF_PROFPEND) {
|
||||
PROC_LOCK(p);
|
||||
kern_psignal(p, SIGPROF);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
#ifdef MAC
|
||||
if (flags & TDF_MACPEND)
|
||||
mac_thread_userret(td);
|
||||
#endif
|
||||
if (flags & TDF_NEEDRESCHED) {
|
||||
#ifdef KTRACE
|
||||
if (KTRPOINT(td, KTR_CSW))
|
||||
ktrcsw(1, 1, __func__);
|
||||
#endif
|
||||
thread_lock(td);
|
||||
sched_prio(td, td->td_user_pri);
|
||||
mi_switch(SW_INVOL | SWT_NEEDRESCHED);
|
||||
#ifdef KTRACE
|
||||
if (KTRPOINT(td, KTR_CSW))
|
||||
ktrcsw(0, 1, __func__);
|
||||
#endif
|
||||
}
|
||||
|
||||
td_softdep_cleanup(td);
|
||||
MPASS(td->td_su == NULL);
|
||||
|
||||
/*
|
||||
* If this thread tickled GEOM, we need to wait for the giggling to
|
||||
* stop before we return to userland
|
||||
*/
|
||||
if (__predict_false(td->td_pflags & TDP_GEOM))
|
||||
g_waitidle();
|
||||
|
||||
#ifdef DIAGNOSTIC
|
||||
if (p->p_numthreads == 1 && (flags & TDF_NEEDSIGCHK) == 0) {
|
||||
PROC_LOCK(p);
|
||||
thread_lock(td);
|
||||
/*
|
||||
* Note that TDF_NEEDSIGCHK should be re-read from
|
||||
* td_flags, since signal might have been delivered
|
||||
* after we cleared td_flags above. This is one of
|
||||
* the reason for looping check for AST condition.
|
||||
* See comment in userret() about P_PPWAIT.
|
||||
*/
|
||||
if ((p->p_flag & P_PPWAIT) == 0 &&
|
||||
(td->td_pflags & TDP_SIGFASTBLOCK) == 0) {
|
||||
if (SIGPENDING(td) && (td->td_flags &
|
||||
(TDF_NEEDSIGCHK | TDF_ASTPENDING)) !=
|
||||
(TDF_NEEDSIGCHK | TDF_ASTPENDING)) {
|
||||
thread_unlock(td); /* fix dumps */
|
||||
panic(
|
||||
"failed2 to set signal flags for ast p %p td %p fl %x %x",
|
||||
p, td, flags, td->td_flags);
|
||||
}
|
||||
}
|
||||
thread_unlock(td);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Check for signals. Unlocked reads of p_pendingcnt or
|
||||
* p_siglist might cause process-directed signal to be handled
|
||||
* later.
|
||||
*/
|
||||
if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
|
||||
!SIGISEMPTY(p->p_siglist)) {
|
||||
sigfastblock_fetch(td);
|
||||
PROC_LOCK(p);
|
||||
mtx_lock(&p->p_sigacts->ps_mtx);
|
||||
while ((sig = cursig(td)) != 0) {
|
||||
KASSERT(sig >= 0, ("sig %d", sig));
|
||||
postsig(sig);
|
||||
}
|
||||
mtx_unlock(&p->p_sigacts->ps_mtx);
|
||||
PROC_UNLOCK(p);
|
||||
resched_sigs = true;
|
||||
} else {
|
||||
resched_sigs = false;
|
||||
}
|
||||
|
||||
if ((flags & TDF_KQTICKLED) != 0)
|
||||
kqueue_drain_schedtask();
|
||||
|
||||
/*
|
||||
* Handle deferred update of the fast sigblock value, after
|
||||
* the postsig() loop was performed.
|
||||
*/
|
||||
sigfastblock_setpend(td, resched_sigs);
|
||||
|
||||
#ifdef KTRACE
|
||||
KTRUSERRET(td);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* We need to check to see if we have to exit or wait due to a
|
||||
* single threading requirement or some other STOP condition.
|
||||
*/
|
||||
if (flags & TDF_NEEDSUSPCHK) {
|
||||
PROC_LOCK(p);
|
||||
thread_suspend_check(0);
|
||||
PROC_UNLOCK(p);
|
||||
}
|
||||
|
||||
if (td->td_pflags & TDP_OLDMASK) {
|
||||
td->td_pflags &= ~TDP_OLDMASK;
|
||||
kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
|
||||
}
|
||||
|
||||
#ifdef RACCT
|
||||
if (__predict_false(racct_enable && p->p_throttled != 0))
|
||||
racct_proc_throttled(p);
|
||||
#endif
|
||||
|
||||
ast_handler(td, framep, false);
|
||||
userret(td, framep);
|
||||
}
|
||||
|
||||
void
|
||||
ast_kclear(struct thread *td)
|
||||
{
|
||||
ast_handler(td, NULL, td != curthread);
|
||||
}
|
||||
|
||||
const char *
|
||||
syscallname(struct proc *p, u_int code)
|
||||
{
|
||||
|
@ -1052,9 +1052,7 @@ kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
|
||||
* usermode and TDP_OLDMASK is cleared, restoring old
|
||||
* sigmask.
|
||||
*/
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_SIGSUSPEND);
|
||||
}
|
||||
error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
|
||||
return (error);
|
||||
@ -1533,9 +1531,7 @@ kern_poll_kfds(struct thread *td, struct pollfd *kfds, u_int nfds,
|
||||
* usermode and TDP_OLDMASK is cleared, restoring old
|
||||
* sigmask.
|
||||
*/
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_SIGSUSPEND);
|
||||
}
|
||||
|
||||
seltdinit(td);
|
||||
|
@ -1072,9 +1072,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
|
||||
CTR2(KTR_PTRACE, "PT_SUSPEND: tid %d (pid %d)", td2->td_tid,
|
||||
p->p_pid);
|
||||
td2->td_dbgflags |= TDB_SUSPEND;
|
||||
thread_lock(td2);
|
||||
td2->td_flags |= TDF_NEEDSUSPCHK;
|
||||
thread_unlock(td2);
|
||||
ast_sched(td2, TDA_SUSPEND);
|
||||
break;
|
||||
|
||||
case PT_RESUME:
|
||||
|
@ -801,11 +801,9 @@ CNAME(trapexit):
|
||||
bf 17,1f /* branch if PSL_PR is false */
|
||||
|
||||
GET_CPUINFO(%r3) /* get per-CPU pointer */
|
||||
lwz %r4, TD_FLAGS(%r2) /* get thread flags value
|
||||
lwz %r4, TD_AST(%r2) /* get thread ast value
|
||||
* (r2 is curthread) */
|
||||
lis %r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
|
||||
ori %r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
|
||||
and. %r4,%r4,%r5
|
||||
cmpwi %r4, 0
|
||||
beq 1f
|
||||
mfmsr %r3 /* re-enable interrupts */
|
||||
ori %r3,%r3,PSL_EE@l
|
||||
|
@ -871,10 +871,8 @@ CNAME(trapexit):
|
||||
bf 17,1f /* branch if PSL_PR is false */
|
||||
|
||||
GET_CPUINFO(%r3) /* get per-CPU pointer */
|
||||
lwz %r4, TD_FLAGS(%r13) /* get thread flags value */
|
||||
lis %r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
|
||||
ori %r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
|
||||
and. %r4,%r4,%r5
|
||||
lwz %r4,TD_AST(%r13) /* get thread ast value */
|
||||
cmpwi %r4,0
|
||||
beq 1f
|
||||
mfmsr %r3 /* re-enable interrupts */
|
||||
ori %r3,%r3,PSL_EE@l
|
||||
|
@ -1025,10 +1025,8 @@ CNAME(trapexit):
|
||||
|
||||
GET_CPUINFO(%r3)
|
||||
LOAD %r4, PC_CURTHREAD(%r3)
|
||||
lwz %r4, TD_FLAGS(%r4)
|
||||
lis %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@h
|
||||
ori %r5, %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@l
|
||||
and. %r4, %r4, %r5
|
||||
lwz %r4, TD_AST(%r4)
|
||||
cmpwi %r4, 0
|
||||
beq 1f
|
||||
|
||||
/* re-enable interrupts before calling ast() */
|
||||
|
@ -231,9 +231,7 @@ ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace));
|
||||
ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
|
||||
|
||||
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
|
||||
|
||||
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
|
||||
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
|
||||
ASSYM(TD_AST, offsetof(struct thread, td_ast));
|
||||
|
||||
ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
|
||||
|
||||
|
@ -190,10 +190,8 @@ __FBSDID("$FreeBSD$");
|
||||
csrci sstatus, (SSTATUS_SIE)
|
||||
|
||||
ld a1, PC_CURTHREAD(tp)
|
||||
lw a2, TD_FLAGS(a1)
|
||||
lw a2, TD_AST(a1)
|
||||
|
||||
li a3, (TDF_ASTPENDING|TDF_NEEDRESCHED)
|
||||
and a2, a2, a3
|
||||
beqz a2, 2f
|
||||
|
||||
/* Restore interrupts */
|
||||
|
@ -62,8 +62,6 @@ ASSYM(KERNBASE, KERNBASE);
|
||||
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
|
||||
ASSYM(VM_MAX_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS);
|
||||
ASSYM(VM_EARLY_DTB_ADDRESS, VM_EARLY_DTB_ADDRESS);
|
||||
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
|
||||
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
|
||||
|
||||
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
|
||||
ASSYM(PCB_SIZE, sizeof(struct pcb));
|
||||
@ -82,6 +80,7 @@ ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
|
||||
|
||||
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
|
||||
ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
|
||||
ASSYM(TD_AST, offsetof(struct thread, td_ast));
|
||||
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
|
||||
ASSYM(TD_FRAME, offsetof(struct thread, td_frame));
|
||||
ASSYM(TD_MD, offsetof(struct thread, td_md));
|
||||
|
@ -532,9 +532,7 @@ maybe_demote(struct mac_lomac *subjlabel, struct mac_lomac *objlabel,
|
||||
subj->mac_lomac.ml_rangelow = objlabel->ml_single;
|
||||
subj->mac_lomac.ml_rangehigh = objlabel->ml_single;
|
||||
subj->mac_lomac.ml_flags |= MAC_LOMAC_FLAG_UPDATE;
|
||||
thread_lock(curthread);
|
||||
curthread->td_flags |= TDF_ASTPENDING | TDF_MACPEND;
|
||||
thread_unlock(curthread);
|
||||
ast_sched(curthread, TDA_MAC);
|
||||
|
||||
/*
|
||||
* Avoid memory allocation while holding a mutex; cache the label.
|
||||
@ -594,13 +592,25 @@ try_relabel(struct mac_lomac *from, struct mac_lomac *to)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
ast_mac(struct thread *td, int tda __unused)
|
||||
{
|
||||
mac_thread_userret(td);
|
||||
}
|
||||
|
||||
/*
|
||||
* Policy module operations.
|
||||
*/
|
||||
static void
|
||||
lomac_init(struct mac_policy_conf *conf)
|
||||
lomac_init(struct mac_policy_conf *conf __unused)
|
||||
{
|
||||
ast_register(TDA_MAC, ASTR_ASTF_REQUIRED, 0, ast_mac);
|
||||
}
|
||||
|
||||
static void
|
||||
lomac_fini(struct mac_policy_conf *conf __unused)
|
||||
{
|
||||
ast_deregister(TDA_MAC);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2898,6 +2908,7 @@ lomac_vnode_setlabel_extattr(struct ucred *cred, struct vnode *vp,
|
||||
static struct mac_policy_ops lomac_ops =
|
||||
{
|
||||
.mpo_init = lomac_init,
|
||||
.mpo_destroy = lomac_fini,
|
||||
|
||||
.mpo_bpfdesc_check_receive = lomac_bpfdesc_check_receive,
|
||||
.mpo_bpfdesc_create = lomac_bpfdesc_create,
|
||||
|
@ -263,6 +263,7 @@ struct thread {
|
||||
/* Cleared during fork1() */
|
||||
#define td_startzero td_flags
|
||||
int td_flags; /* (t) TDF_* flags. */
|
||||
int td_ast; /* (t) TDA_* indicators */
|
||||
int td_inhibitors; /* (t) Why can not run. */
|
||||
int td_pflags; /* (k) Private thread (TDP_*) flags. */
|
||||
int td_pflags2; /* (k) Private thread (TDP2_*) flags. */
|
||||
@ -457,13 +458,13 @@ do { \
|
||||
#define TDF_KTH_SUSP 0x00000100 /* kthread is suspended */
|
||||
#define TDF_ALLPROCSUSP 0x00000200 /* suspended by SINGLE_ALLPROC */
|
||||
#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
|
||||
#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
|
||||
#define TDF_KQTICKLED 0x00001000 /* AST drain kqueue taskqueue */
|
||||
#define TDF_UNUSED1 0x00000800 /* Available */
|
||||
#define TDF_UNUSED2 0x00001000 /* Available */
|
||||
#define TDF_SBDRY 0x00002000 /* Stop only on usermode boundary. */
|
||||
#define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */
|
||||
#define TDF_NEEDSUSPCHK 0x00008000 /* Thread may need to suspend. */
|
||||
#define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */
|
||||
#define TDF_NEEDSIGCHK 0x00020000 /* Thread may need signal delivery. */
|
||||
#define TDF_UNUSED3 0x00008000 /* Available */
|
||||
#define TDF_UNUSED4 0x00010000 /* Available */
|
||||
#define TDF_UNUSED5 0x00020000 /* Available */
|
||||
#define TDF_NOLOAD 0x00040000 /* Ignore during load avg calculations. */
|
||||
#define TDF_SERESTART 0x00080000 /* ERESTART on stop attempts. */
|
||||
#define TDF_THRWAKEUP 0x00100000 /* Libthr thread must not suspend itself. */
|
||||
@ -474,9 +475,36 @@ do { \
|
||||
#define TDF_SCHED1 0x02000000 /* Reserved for scheduler private use */
|
||||
#define TDF_SCHED2 0x04000000 /* Reserved for scheduler private use */
|
||||
#define TDF_SCHED3 0x08000000 /* Reserved for scheduler private use */
|
||||
#define TDF_ALRMPEND 0x10000000 /* Pending SIGVTALRM needs to be posted. */
|
||||
#define TDF_PROFPEND 0x20000000 /* Pending SIGPROF needs to be posted. */
|
||||
#define TDF_MACPEND 0x40000000 /* AST-based MAC event pending. */
|
||||
#define TDF_UNUSED6 0x10000000 /* Available */
|
||||
#define TDF_UNUSED7 0x20000000 /* Available */
|
||||
#define TDF_UNUSED8 0x40000000 /* Available */
|
||||
#define TDF_UNUSED9 0x80000000 /* Available */
|
||||
|
||||
enum {
|
||||
TDA_AST = 0, /* Special: call all non-flagged AST handlers */
|
||||
TDA_OWEUPC,
|
||||
TDA_HWPMC,
|
||||
TDA_VFORK,
|
||||
TDA_ALRM,
|
||||
TDA_PROF,
|
||||
TDA_MAC,
|
||||
TDA_SCHED,
|
||||
TDA_UFS,
|
||||
TDA_GEOM,
|
||||
TDA_KQUEUE,
|
||||
TDA_RACCT,
|
||||
TDA_MOD1, /* For third party use, before signals are */
|
||||
TAD_MOD2, /* processed .. */
|
||||
TDA_SIG,
|
||||
TDA_KTRACE,
|
||||
TDA_SUSPEND,
|
||||
TDA_SIGSUSPEND,
|
||||
TDA_MOD3, /* .. and after */
|
||||
TAD_MOD4,
|
||||
TDA_MAX,
|
||||
};
|
||||
#define TDAI(tda) (1U << (tda))
|
||||
#define td_ast_pending(td, tda) ((td->td_ast & TDAI(tda)) != 0)
|
||||
|
||||
/* Userland debug flags */
|
||||
#define TDB_SUSPEND 0x00000001 /* Thread is suspended by debugger */
|
||||
@ -1111,7 +1139,23 @@ struct fork_req {
|
||||
|
||||
int pget(pid_t pid, int flags, struct proc **pp);
|
||||
|
||||
/* ast_register() flags */
|
||||
#define ASTR_ASTF_REQUIRED 0x0001 /* td_ast TDAI(TDA_X) flag set is
|
||||
required for call */
|
||||
#define ASTR_TDP 0x0002 /* td_pflags flag set is required */
|
||||
#define ASTR_KCLEAR 0x0004 /* call me on ast_kclear() */
|
||||
#define ASTR_UNCOND 0x0008 /* call me always */
|
||||
|
||||
void ast(struct trapframe *framep);
|
||||
void ast_kclear(struct thread *td);
|
||||
void ast_register(int ast, int ast_flags, int tdp,
|
||||
void (*f)(struct thread *td, int asts));
|
||||
void ast_deregister(int tda);
|
||||
void ast_sched_locked(struct thread *td, int tda);
|
||||
void ast_sched_mask(struct thread *td, int ast);
|
||||
void ast_sched(struct thread *td, int tda);
|
||||
void ast_unsched_locked(struct thread *td, int tda);
|
||||
|
||||
struct thread *choosethread(void);
|
||||
int cr_cansee(struct ucred *u1, struct ucred *u2);
|
||||
int cr_canseesocket(struct ucred *cred, struct socket *so);
|
||||
@ -1124,7 +1168,6 @@ int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp,
|
||||
int enterthispgrp(struct proc *p, struct pgrp *pgrp);
|
||||
void faultin(struct proc *p);
|
||||
int fork1(struct thread *, struct fork_req *);
|
||||
void fork_rfppwait(struct thread *);
|
||||
void fork_exit(void (*)(void *, struct trapframe *), void *,
|
||||
struct trapframe *);
|
||||
void fork_return(struct thread *, struct trapframe *);
|
||||
@ -1296,15 +1339,6 @@ td_get_sched(struct thread *td)
|
||||
return ((struct td_sched *)&td[1]);
|
||||
}
|
||||
|
||||
extern void (*softdep_ast_cleanup)(struct thread *);
|
||||
static __inline void
|
||||
td_softdep_cleanup(struct thread *td)
|
||||
{
|
||||
|
||||
if (td->td_su != NULL && softdep_ast_cleanup != NULL)
|
||||
softdep_ast_cleanup(td);
|
||||
}
|
||||
|
||||
#define PROC_ID_PID 0
|
||||
#define PROC_ID_GROUP 1
|
||||
#define PROC_ID_SESSION 2
|
||||
|
@ -196,7 +196,6 @@ void racct_proc_exit(struct proc *p);
|
||||
void racct_proc_ucred_changed(struct proc *p, struct ucred *oldcred,
|
||||
struct ucred *newcred);
|
||||
void racct_move(struct racct *dest, struct racct *src);
|
||||
void racct_proc_throttled(struct proc *p);
|
||||
void racct_proc_throttle(struct proc *p, int timeout);
|
||||
|
||||
#else
|
||||
|
@ -869,7 +869,7 @@ static void pause_timer(void *);
|
||||
static int request_cleanup(struct mount *, int);
|
||||
static int softdep_request_cleanup_flush(struct mount *, struct ufsmount *);
|
||||
static void schedule_cleanup(struct mount *);
|
||||
static void softdep_ast_cleanup_proc(struct thread *);
|
||||
static void softdep_ast_cleanup_proc(struct thread *, int);
|
||||
static struct ufsmount *softdep_bp_to_mp(struct buf *bp);
|
||||
static int process_worklist_item(struct mount *, int, int);
|
||||
static void process_removes(struct vnode *);
|
||||
@ -2546,7 +2546,8 @@ softdep_initialize(void)
|
||||
bioops.io_complete = softdep_disk_write_complete;
|
||||
bioops.io_deallocate = softdep_deallocate_dependencies;
|
||||
bioops.io_countdeps = softdep_count_dependencies;
|
||||
softdep_ast_cleanup = softdep_ast_cleanup_proc;
|
||||
ast_register(TDA_UFS, ASTR_KCLEAR | ASTR_ASTF_REQUIRED, 0,
|
||||
softdep_ast_cleanup_proc);
|
||||
|
||||
/* Initialize the callout with an mtx. */
|
||||
callout_init_mtx(&softdep_callout, &lk, 0);
|
||||
@ -2565,7 +2566,7 @@ softdep_uninitialize(void)
|
||||
bioops.io_complete = NULL;
|
||||
bioops.io_deallocate = NULL;
|
||||
bioops.io_countdeps = NULL;
|
||||
softdep_ast_cleanup = NULL;
|
||||
ast_deregister(TDA_UFS);
|
||||
|
||||
callout_drain(&softdep_callout);
|
||||
}
|
||||
@ -13818,13 +13819,11 @@ schedule_cleanup(struct mount *mp)
|
||||
vfs_rel(td->td_su);
|
||||
vfs_ref(mp);
|
||||
td->td_su = mp;
|
||||
thread_lock(td);
|
||||
td->td_flags |= TDF_ASTPENDING;
|
||||
thread_unlock(td);
|
||||
ast_sched(td, TDA_UFS);
|
||||
}
|
||||
|
||||
static void
|
||||
softdep_ast_cleanup_proc(struct thread *td)
|
||||
softdep_ast_cleanup_proc(struct thread *td, int ast __unused)
|
||||
{
|
||||
struct mount *mp;
|
||||
struct ufsmount *ump;