
Clean up thread accounting. Don't reset a thread's timeslice when it
blocks; it only gets reset when it yields.

Properly set a thread's default stack guardsize.

Reviewed by:	davidxu
Daniel Eischen 2003-07-18 02:46:55 +00:00
parent 596ea21c7f
commit a735c7a6ea
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=117715
6 changed files with 54 additions and 76 deletions
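Before the per-file diffs, a minimal self-contained model of the accounting change may help: tick charges are folded into a single helper (thr_accounting() in the diffs below), a thread that merely blocked keeps whatever slice it has accumulated, and the slice is only restarted when the thread yields. The struct, the on_yield()/on_unblock() wrappers, and the TIMESLICE_USEC/CLOCK_RES_USEC values below are illustrative stand-ins, not the library's definitions; only the helper's logic mirrors the committed code.

/*
 * Standalone model of the consolidated timeslice accounting.  The field
 * names (slice_usec, tm_uticks, tm_sticks) and the helper's logic mirror
 * the diffs below; everything else here is an illustrative placeholder.
 */
#include <stdio.h>

#define	TIMESLICE_USEC	20000	/* assumed quantum, in microseconds */
#define	CLOCK_RES_USEC	10000	/* assumed statclock resolution */
#define	SCHED_FIFO	1
#define	SCHED_RR	2

struct fake_thread {
	int	sched_policy;	/* stands in for attr.sched_policy */
	long	slice_usec;	/* -1 means "restart the time slice" */
	long	tm_uticks;	/* user ticks since last accounting */
	long	tm_sticks;	/* system ticks since last accounting */
};

/* Same shape as thr_accounting() below: charge ticks, detect overrun. */
static void
fake_accounting(struct fake_thread *t)
{
	if ((t->slice_usec != -1) &&
	    (t->slice_usec <= TIMESLICE_USEC) &&
	    (t->sched_policy != SCHED_FIFO)) {
		t->slice_usec += (t->tm_uticks + t->tm_sticks) * CLOCK_RES_USEC;
		/* Check for time quantum exceeded: */
		if (t->slice_usec > TIMESLICE_USEC)
			t->slice_usec = -1;
	}
	t->tm_uticks = 0;
	t->tm_sticks = 0;
}

/* Yield path: restart the slice, then clear the pending ticks. */
static void
on_yield(struct fake_thread *t)
{
	t->slice_usec = -1;
	fake_accounting(t);
}

/* Unblock path: charge the elapsed ticks but keep the running slice. */
static void
on_unblock(struct fake_thread *t)
{
	fake_accounting(t);
}

int
main(void)
{
	struct fake_thread t = { SCHED_RR, 0, 1, 0 };

	on_unblock(&t);		/* blocked: the slice keeps growing */
	printf("after unblock: slice_usec=%ld\n", t.slice_usec);

	t.tm_uticks = 1;
	on_yield(&t);		/* yielded: the slice is restarted */
	printf("after yield:   slice_usec=%ld\n", t.slice_usec);
	return (0);
}

Run standalone, the model prints a growing slice_usec after the unblock path and -1 after the yield path, which is the behaviour the commit message describes.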

View File

@@ -53,6 +53,7 @@ _pthread_attr_init(pthread_attr_t *attr)
 		/* Initialise the attribute object with the defaults: */
 		memcpy(pattr, &_pthread_attr_default,
 		    sizeof(struct pthread_attr));
+		pattr->guardsize_attr = _thr_guard_default;
 
 		/* Return a pointer to the attribute object: */
 		*attr = pattr;
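The hunk above is the guardsize half of the commit: the static default-attribute preset gains an explicit /* guardsize */0 (in a later hunk) and _pthread_attr_init() now fills in _thr_guard_default at run time. A minimal way to observe the effect from an application, assuming a POSIX-conformant pthreads build; the value printed depends on _thr_guard_default, which is typically the page size (an assumption, not something stated in the diff):

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
	pthread_attr_t attr;
	size_t guardsize;

	/* With the change above, a default-initialised attribute carries
	   the real guard size instead of the preset's 0. */
	pthread_attr_init(&attr);
	pthread_attr_getguardsize(&attr, &guardsize);
	printf("default guardsize: %zu bytes\n", guardsize);
	pthread_attr_destroy(&attr);
	return (0);
}

With the pre-change code the same program would presumably have reported the preset's value of 0.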

View File

@@ -152,6 +152,22 @@ static int	thr_timedout(struct pthread *thread, struct timespec *curtime);
 static void	thr_unlink(struct pthread *thread);
 
+static void __inline
+thr_accounting(struct pthread *thread)
+{
+	if ((thread->slice_usec != -1) &&
+	    (thread->slice_usec <= TIMESLICE_USEC) &&
+	    (thread->attr.sched_policy != SCHED_FIFO)) {
+		thread->slice_usec += (thread->tmbx.tm_uticks
+		    + thread->tmbx.tm_sticks) * _clock_res_usec;
+		/* Check for time quantum exceeded: */
+		if (thread->slice_usec > TIMESLICE_USEC)
+			thread->slice_usec = -1;
+	}
+	thread->tmbx.tm_uticks = 0;
+	thread->tmbx.tm_sticks = 0;
+}
+
 /*
  * This is called after a fork().
  * No locks need to be taken here since we are guaranteed to be
@@ -581,7 +597,8 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
 	curthread->need_switchout = 1;	/* The thread yielded on its own. */
 	curthread->critical_yield = 0;	/* No need to yield anymore. */
 	curthread->slice_usec = -1;	/* Restart the time slice. */
+	thr_accounting(curthread);
 
 	/* Thread can unlock the scheduler lock. */
 	curthread->lock_switch = 1;
@@ -637,12 +654,6 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
 	 */
 	td->kse = curkse;
 
-	/*
-	 * Reset accounting.
-	 */
-	td->tmbx.tm_uticks = 0;
-	td->tmbx.tm_sticks = 0;
-
 	/*
 	 * Reset the time slice if this thread is running
 	 * for the first time or running again after using
@@ -1027,12 +1038,6 @@ kse_sched_multi(struct kse *curkse)
 	 */
 	curthread->kse = curkse;
 
-	/*
-	 * Reset accounting.
-	 */
-	curthread->tmbx.tm_uticks = 0;
-	curthread->tmbx.tm_sticks = 0;
-
 	/*
 	 * Reset the time slice if this thread is running for the first
 	 * time or running again after using its full time slice allocation.
@@ -1416,6 +1421,7 @@ kse_check_completed(struct kse *kse)
 		    (thread->name == NULL) ? "none" : thread->name);
 		thread->blocked = 0;
 		if (thread != kse->k_curthread) {
+			thr_accounting(thread);
 			if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
 				THR_SET_STATE(thread, PS_SUSPENDED);
 			else
@@ -1545,11 +1551,6 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
 		thread->active = 0;
 		thread->need_switchout = 0;
 		/* This thread must have blocked in the kernel. */
-		/* thread->slice_usec = -1;*/	/* restart timeslice */
-		if ((thread->slice_usec != -1) &&
-		    (thread->attr.sched_policy != SCHED_FIFO))
-			thread->slice_usec += (thread->tmbx.tm_uticks
-			    + thread->tmbx.tm_sticks) * _clock_res_usec;
 		/*
 		 * Check for pending signals for this thread to
 		 * see if we need to interrupt it in the kernel.
@@ -1623,24 +1624,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
 		KSE_WAITQ_INSERT(kse, thread);
 		break;
 	}
-	if (thread->state != PS_RUNNING) {
-		/* Restart the time slice: */
-		thread->slice_usec = -1;
-	} else {
-		if (thread->need_switchout != 0)
-			/*
-			 * The thread yielded on its own;
-			 * restart the timeslice.
-			 */
-			thread->slice_usec = -1;
-		else if ((thread->slice_usec != -1) &&
-		    (thread->attr.sched_policy != SCHED_FIFO)) {
-			thread->slice_usec += (thread->tmbx.tm_uticks
-			    + thread->tmbx.tm_sticks) * _clock_res_usec;
-			/* Check for time quantum exceeded: */
-			if (thread->slice_usec > TIMESLICE_USEC)
-				thread->slice_usec = -1;
-		}
+	thr_accounting(thread);
+	if (thread->state == PS_RUNNING) {
 		if (thread->slice_usec == -1) {
 			/*
 			 * The thread exceeded its time quantum or

View File

@@ -888,7 +888,7 @@ do { \
 	_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
 #define	THR_RUNQ_REMOVE(thrd)			\
 	_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
-#define	THR_RUNQ_FIRST()			\
+#define	THR_RUNQ_FIRST(thrd)			\
 	_pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
 
 /*
@@ -947,6 +947,9 @@ do { \
 	(void)_kse_critical_enter();			\
 	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg);	\
 } while (0)
+#define	THR_UNLOCK_SWITCH(curthr) do {			\
+	KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
+} while (0)
 
 #define THR_CRITICAL_ENTER(thr)	(thr)->critical_count++
 #define THR_CRITICAL_LEAVE(thr)	do {		\
@@ -989,7 +992,7 @@ SCLASS struct pthread_attr _pthread_attr_default
 SCLASS_PRESET({
 	SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
 	THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
-	NULL, NULL, THR_STACK_DEFAULT
+	NULL, NULL, THR_STACK_DEFAULT, /* guardsize */0
 });
 
 /* Default mutex attributes: */

View File

@@ -53,6 +53,7 @@ _pthread_attr_init(pthread_attr_t *attr)
 		/* Initialise the attribute object with the defaults: */
 		memcpy(pattr, &_pthread_attr_default,
 		    sizeof(struct pthread_attr));
+		pattr->guardsize_attr = _thr_guard_default;
 
 		/* Return a pointer to the attribute object: */
 		*attr = pattr;

View File

@@ -152,6 +152,22 @@ static int	thr_timedout(struct pthread *thread, struct timespec *curtime);
 static void	thr_unlink(struct pthread *thread);
 
+static void __inline
+thr_accounting(struct pthread *thread)
+{
+	if ((thread->slice_usec != -1) &&
+	    (thread->slice_usec <= TIMESLICE_USEC) &&
+	    (thread->attr.sched_policy != SCHED_FIFO)) {
+		thread->slice_usec += (thread->tmbx.tm_uticks
+		    + thread->tmbx.tm_sticks) * _clock_res_usec;
+		/* Check for time quantum exceeded: */
+		if (thread->slice_usec > TIMESLICE_USEC)
+			thread->slice_usec = -1;
+	}
+	thread->tmbx.tm_uticks = 0;
+	thread->tmbx.tm_sticks = 0;
+}
+
 /*
  * This is called after a fork().
  * No locks need to be taken here since we are guaranteed to be
@@ -581,7 +597,8 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
 	curthread->need_switchout = 1;	/* The thread yielded on its own. */
 	curthread->critical_yield = 0;	/* No need to yield anymore. */
 	curthread->slice_usec = -1;	/* Restart the time slice. */
+	thr_accounting(curthread);
 
 	/* Thread can unlock the scheduler lock. */
 	curthread->lock_switch = 1;
@@ -637,12 +654,6 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
 	 */
 	td->kse = curkse;
 
-	/*
-	 * Reset accounting.
-	 */
-	td->tmbx.tm_uticks = 0;
-	td->tmbx.tm_sticks = 0;
-
 	/*
 	 * Reset the time slice if this thread is running
 	 * for the first time or running again after using
@@ -1027,12 +1038,6 @@ kse_sched_multi(struct kse *curkse)
 	 */
 	curthread->kse = curkse;
 
-	/*
-	 * Reset accounting.
-	 */
-	curthread->tmbx.tm_uticks = 0;
-	curthread->tmbx.tm_sticks = 0;
-
 	/*
 	 * Reset the time slice if this thread is running for the first
 	 * time or running again after using its full time slice allocation.
@@ -1416,6 +1421,7 @@ kse_check_completed(struct kse *kse)
 		    (thread->name == NULL) ? "none" : thread->name);
 		thread->blocked = 0;
 		if (thread != kse->k_curthread) {
+			thr_accounting(thread);
 			if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
 				THR_SET_STATE(thread, PS_SUSPENDED);
 			else
@@ -1545,11 +1551,6 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
 		thread->active = 0;
 		thread->need_switchout = 0;
 		/* This thread must have blocked in the kernel. */
-		/* thread->slice_usec = -1;*/	/* restart timeslice */
-		if ((thread->slice_usec != -1) &&
-		    (thread->attr.sched_policy != SCHED_FIFO))
-			thread->slice_usec += (thread->tmbx.tm_uticks
-			    + thread->tmbx.tm_sticks) * _clock_res_usec;
 		/*
 		 * Check for pending signals for this thread to
 		 * see if we need to interrupt it in the kernel.
@@ -1623,24 +1624,8 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
 		KSE_WAITQ_INSERT(kse, thread);
 		break;
 	}
-	if (thread->state != PS_RUNNING) {
-		/* Restart the time slice: */
-		thread->slice_usec = -1;
-	} else {
-		if (thread->need_switchout != 0)
-			/*
-			 * The thread yielded on its own;
-			 * restart the timeslice.
-			 */
-			thread->slice_usec = -1;
-		else if ((thread->slice_usec != -1) &&
-		    (thread->attr.sched_policy != SCHED_FIFO)) {
-			thread->slice_usec += (thread->tmbx.tm_uticks
-			    + thread->tmbx.tm_sticks) * _clock_res_usec;
-			/* Check for time quantum exceeded: */
-			if (thread->slice_usec > TIMESLICE_USEC)
-				thread->slice_usec = -1;
-		}
+	thr_accounting(thread);
+	if (thread->state == PS_RUNNING) {
 		if (thread->slice_usec == -1) {
 			/*
 			 * The thread exceeded its time quantum or

View File

@@ -888,7 +888,7 @@ do { \
 	_pq_insert_tail(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
 #define	THR_RUNQ_REMOVE(thrd)			\
 	_pq_remove(&(thrd)->kseg->kg_schedq.sq_runq, thrd)
-#define	THR_RUNQ_FIRST()			\
+#define	THR_RUNQ_FIRST(thrd)			\
 	_pq_first(&(thrd)->kseg->kg_schedq.sq_runq)
 
 /*
@@ -947,6 +947,9 @@ do { \
 	(void)_kse_critical_enter();			\
 	KSE_SCHED_LOCK((curthr)->kse, (curthr)->kseg);	\
 } while (0)
+#define	THR_UNLOCK_SWITCH(curthr) do {			\
+	KSE_SCHED_UNLOCK((curthr)->kse, (curthr)->kseg);\
+} while (0)
 
 #define THR_CRITICAL_ENTER(thr)	(thr)->critical_count++
 #define THR_CRITICAL_LEAVE(thr)	do {		\
@@ -989,7 +992,7 @@ SCLASS struct pthread_attr _pthread_attr_default
 SCLASS_PRESET({
 	SCHED_RR, 0, TIMESLICE_USEC, THR_DEFAULT_PRIORITY,
 	THR_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL,
-	NULL, NULL, THR_STACK_DEFAULT
+	NULL, NULL, THR_STACK_DEFAULT, /* guardsize */0
 });
 
 /* Default mutex attributes: */