/*-
 * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file implements the ULE scheduler.  ULE supports independent CPU
 * run queues and fine grain locking.  It has superior interactive
 * performance under load even on uni-processor systems.
 *
 * etymology:
 *   ULE is the last three letters in schedule.  It owes its name to a
 * generic user created for a scheduling system by Paul Mikesell at
 * Isilon Systems and a general lack of creativity on the part of the author.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <machine/cpu.h>
#include <machine/smp.h>

#ifndef PREEMPTION
#error "SCHED_ULE requires options PREEMPTION"
#endif

#define KTR_ULE 0

/*
 * Thread scheduler specific section.  All fields are protected
 * by the thread lock.
 */
struct td_sched {
        TAILQ_ENTRY(td_sched) ts_procq; /* Run queue. */
        struct thread   *ts_thread;     /* Active associated thread. */
        struct runq     *ts_runq;       /* Run-queue we're queued on. */
        short           ts_flags;       /* TSF_* flags. */
        u_char          ts_rqindex;     /* Run queue index. */
        u_char          ts_cpu;         /* CPU that we have affinity for. */
        int             ts_slice;       /* Ticks of slice remaining. */
        u_int           ts_slptime;     /* Number of ticks we vol. slept */
        u_int           ts_runtime;     /* Number of ticks we were running */
        /* The following variables are only used for pctcpu calculation */
        int             ts_ltick;       /* Last tick that we were running on */
        int             ts_ftick;       /* First tick that we were running on */
        int             ts_ticks;       /* Tick count */
#ifdef SMP
        int             ts_rltick;      /* Real last tick, for affinity. */
#endif
};
/* flags kept in ts_flags */
#define TSF_BOUND       0x0001          /* Thread can not migrate. */
#define TSF_XFERABLE    0x0002          /* Thread was added as transferable. */

static struct td_sched td_sched0;

/*
 * Cpu percentage computation macros and defines.
 *
 * SCHED_TICK_SECS:     Number of seconds to average the cpu usage across.
 * SCHED_TICK_TARG:     Number of hz ticks to average the cpu usage across.
 * SCHED_TICK_MAX:      Maximum number of ticks before scaling back.
 * SCHED_TICK_SHIFT:    Shift factor to avoid rounding away results.
 * SCHED_TICK_HZ:       Compute the number of hz ticks for a given ticks count.
 * SCHED_TICK_TOTAL:    Gives the amount of time we've been recording ticks.
 */
#define SCHED_TICK_SECS         10
#define SCHED_TICK_TARG         (hz * SCHED_TICK_SECS)
#define SCHED_TICK_MAX          (SCHED_TICK_TARG + hz)
#define SCHED_TICK_SHIFT        10
#define SCHED_TICK_HZ(ts)       ((ts)->ts_ticks >> SCHED_TICK_SHIFT)
#define SCHED_TICK_TOTAL(ts)    (max((ts)->ts_ltick - (ts)->ts_ftick, hz))
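
/*
 * Worked example (illustrative, assuming hz = 1000): ts_ticks is kept scaled
 * by 2^SCHED_TICK_SHIFT, so SCHED_TICK_HZ() recovers the number of hz ticks
 * the thread actually ran.  If a thread ran for 2500 of the 10000 ticks
 * recorded between ts_ftick and ts_ltick, SCHED_TICK_HZ() is ~2500 and
 * SCHED_TICK_TOTAL() is 10000, i.e. roughly 25% cpu over the window.
 */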

/*
 * These macros determine priorities for non-interactive threads.  They are
 * assigned a priority based on their recent cpu utilization as expressed
 * by the ratio of ticks to the tick total.  NHALF priorities at the start
 * and end of the MIN to MAX timeshare range are only reachable with negative
 * or positive nice respectively.
 *
 * PRI_RANGE:   Priority range for utilization dependent priorities.
 * PRI_NRESV:   Number of nice values.
 * PRI_TICKS:   Compute a priority in PRI_RANGE from the ticks count and total.
 * PRI_NICE:    Determines the part of the priority inherited from nice.
 */
#define SCHED_PRI_NRESV         (PRIO_MAX - PRIO_MIN)
#define SCHED_PRI_NHALF         (SCHED_PRI_NRESV / 2)
#define SCHED_PRI_MIN           (PRI_MIN_TIMESHARE + SCHED_PRI_NHALF)
#define SCHED_PRI_MAX           (PRI_MAX_TIMESHARE - SCHED_PRI_NHALF)
#define SCHED_PRI_RANGE         (SCHED_PRI_MAX - SCHED_PRI_MIN)
#define SCHED_PRI_TICKS(ts)                                             \
    (SCHED_TICK_HZ((ts)) /                                              \
    (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
#define SCHED_PRI_NICE(nice)    (nice)
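
/*
 * Illustrative sketch of SCHED_PRI_TICKS(): with the stock PRIO_MIN/PRIO_MAX
 * of -20/20, SCHED_PRI_NRESV is 40 and SCHED_PRI_NHALF is 20, so the
 * utilization-dependent band is the timeshare range minus 20 slots at each
 * end.  A thread that consumed the entire averaging window maps to the
 * weakest (numerically highest) end of that band, one that barely ran maps
 * near the strongest end, and SCHED_PRI_NICE() then shifts the result by the
 * nice value.
 */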

/*
 * These determine the interactivity of a process.  Interactivity differs from
 * cpu utilization in that it expresses the voluntary time slept vs time ran
 * while cpu utilization includes all time not running.  This more accurately
 * models the intent of the thread.
 *
 * SLP_RUN_MAX:         Maximum amount of sleep time + run time we'll
 *                      accumulate before throttling back.
 * SLP_RUN_FORK:        Maximum slp+run time to inherit at fork time.
 * INTERACT_MAX:        Maximum interactivity value.  Smaller is better.
 * INTERACT_THRESH:     Threshold for placement on the current runq.
 */
#define SCHED_SLP_RUN_MAX       ((hz * 5) << SCHED_TICK_SHIFT)
#define SCHED_SLP_RUN_FORK      ((hz / 2) << SCHED_TICK_SHIFT)
#define SCHED_INTERACT_MAX      (100)
#define SCHED_INTERACT_HALF     (SCHED_INTERACT_MAX / 2)
#define SCHED_INTERACT_THRESH   (30)
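
/*
 * Illustrative summary of how the threshold is used: the interactivity score
 * is derived from the ratio of voluntary sleep time to run time and falls in
 * [0, SCHED_INTERACT_MAX], smaller being more interactive.  Threads scoring
 * below SCHED_INTERACT_THRESH are treated as interactive and given realtime
 * queue placement; the rest are scheduled from the timeshare calendar queue
 * according to their cpu utilization.  The placement itself happens in the
 * priority code later in this file.
 */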

/*
 * tickincr:            Converts a stathz tick into a hz domain scaled by
 *                      the shift factor.  Without the shift the error rate
 *                      due to rounding would be unacceptably high.
 * realstathz:          stathz is sometimes 0 and run off of hz.
 * sched_slice:         Runtime of each thread before rescheduling.
 * preempt_thresh:      Priority threshold for preemption and remote IPIs.
 */
static int sched_interact = SCHED_INTERACT_THRESH;
static int realstathz;
static int tickincr;
static int sched_slice;
static int preempt_thresh = PRI_MIN_KERN;

/*
 * tdq - per processor runqs and statistics.  All fields are protected by the
 * tdq_lock.  The load and lowpri may be accessed without the lock to avoid
 * excess locking in sched_pickcpu().
 */
struct tdq {
        struct mtx      *tdq_lock;              /* Pointer to group lock. */
        struct runq     tdq_realtime;           /* real-time run queue. */
        struct runq     tdq_timeshare;          /* timeshare run queue. */
        struct runq     tdq_idle;               /* Queue of IDLE threads. */
        int             tdq_load;               /* Aggregate load. */
        u_char          tdq_idx;                /* Current insert index. */
        u_char          tdq_ridx;               /* Current removal index. */
#ifdef SMP
        u_char          tdq_lowpri;             /* Lowest priority thread. */
        int             tdq_transferable;       /* Transferable thread count. */
        LIST_ENTRY(tdq) tdq_siblings;           /* Next in tdq group. */
        struct tdq_group *tdq_group;            /* Our processor group. */
#else
        int             tdq_sysload;            /* For loadavg, !ITHD load. */
#endif
} __aligned(64);

#ifdef SMP
/*
 * tdq groups are groups of processors which can cheaply share threads.  When
 * one processor in the group goes idle it will check the runqs of the other
 * processors in its group prior to halting and waiting for an interrupt.
 * These groups are suitable for SMT (Simultaneous Multi-Threading) and not
 * NUMA.  In a NUMA environment we'd want an idle bitmap per group and a
 * two-tiered load balancer.
 */
struct tdq_group {
        struct mtx      tdg_lock;       /* Protects all fields below. */
        int             tdg_cpus;       /* Count of CPUs in this tdq group. */
        cpumask_t       tdg_cpumask;    /* Mask of cpus in this group. */
        cpumask_t       tdg_idlemask;   /* Idle cpus in this group. */
        cpumask_t       tdg_mask;       /* Bit mask for first cpu. */
        int             tdg_load;       /* Total load of this group. */
        int             tdg_transferable; /* Transferable load of this group. */
        LIST_HEAD(, tdq) tdg_members;   /* Linked list of all members. */
        char            tdg_name[16];   /* lock name. */
} __aligned(64);

#define SCHED_AFFINITY_DEFAULT  (max(1, hz / 300))
#define SCHED_AFFINITY(ts)      ((ts)->ts_rltick > ticks - affinity)
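
/*
 * Illustrative note: with the default of max(1, hz / 300) (roughly 3 ticks
 * at hz = 1000), SCHED_AFFINITY() considers a thread to still have cache
 * affinity for ts_cpu if it last ran there within the last 'affinity' ticks,
 * and sched_pickcpu() will prefer that cpu while the window holds.
 */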

/*
 * Run-time tunables.
 */
static int rebalance = 1;
static int balance_secs = 1;
static int pick_pri = 1;
static int affinity;
static int tryself = 1;
static int steal_htt = 0;
static int steal_idle = 1;
static int steal_thresh = 2;
static int topology = 0;

/*
 * One thread queue per processor.
 */
static volatile cpumask_t tdq_idle;
static int tdg_maxid;
static struct tdq       tdq_cpu[MAXCPU];
static struct tdq_group tdq_groups[MAXCPU];
static struct callout balco;
static struct callout gbalco;

#define TDQ_SELF()      (&tdq_cpu[PCPU_GET(cpuid)])
#define TDQ_CPU(x)      (&tdq_cpu[(x)])
#define TDQ_ID(x)       ((int)((x) - tdq_cpu))
#define TDQ_GROUP(x)    (&tdq_groups[(x)])
#define TDG_ID(x)       ((int)((x) - tdq_groups))
#else   /* !SMP */
static struct tdq       tdq_cpu;
static struct mtx       tdq_lock;

#define TDQ_ID(x)       (0)
#define TDQ_SELF()      (&tdq_cpu)
#define TDQ_CPU(x)      (&tdq_cpu)
#endif

#define TDQ_LOCK_ASSERT(t, type)        mtx_assert(TDQ_LOCKPTR((t)), (type))
#define TDQ_LOCK(t)             mtx_lock_spin(TDQ_LOCKPTR((t)))
#define TDQ_LOCK_FLAGS(t, f)    mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
#define TDQ_UNLOCK(t)           mtx_unlock_spin(TDQ_LOCKPTR((t)))
#define TDQ_LOCKPTR(t)          ((t)->tdq_lock)
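
/*
 * Typical (illustrative) usage pattern for these wrappers:
 *
 *      struct tdq *tdq = TDQ_CPU(cpu);
 *
 *      TDQ_LOCK(tdq);
 *      TDQ_LOCK_ASSERT(tdq, MA_OWNED);
 *      ... manipulate tdq run queues and counters ...
 *      TDQ_UNLOCK(tdq);
 */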

static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct td_sched *);

/* Operations on per processor queues */
static struct td_sched * tdq_choose(struct tdq *);
static void tdq_setup(struct tdq *);
static void tdq_load_add(struct tdq *, struct td_sched *);
static void tdq_load_rem(struct tdq *, struct td_sched *);
static __inline void tdq_runq_add(struct tdq *, struct td_sched *, int);
static __inline void tdq_runq_rem(struct tdq *, struct td_sched *);
void tdq_print(int cpu);
static void runq_print(struct runq *rq);
static void tdq_add(struct tdq *, struct thread *, int);
#ifdef SMP
static void tdq_move(struct tdq *, struct tdq *);
static int tdq_idled(struct tdq *);
static void tdq_notify(struct td_sched *);
static struct td_sched *tdq_steal(struct tdq *, int);
static struct td_sched *runq_steal(struct runq *);
static int sched_pickcpu(struct td_sched *, int);
static void sched_balance(void *);
static void sched_balance_groups(void *);
static void sched_balance_group(struct tdq_group *);
static void sched_balance_pair(struct tdq *, struct tdq *);
static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
static inline struct mtx *thread_block_switch(struct thread *);
static inline void thread_unblock_switch(struct thread *, struct mtx *);
static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);

#define THREAD_CAN_MIGRATE(td)  ((td)->td_pinned == 0)
#endif

static void sched_setup(void *dummy);
SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL)

static void sched_initticks(void *dummy);
SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks, NULL)

/*
 * Print the threads waiting on a run-queue.
 */
static void
runq_print(struct runq *rq)
{
        struct rqhead *rqh;
        struct td_sched *ts;
        int pri;
        int j;
        int i;

        for (i = 0; i < RQB_LEN; i++) {
                printf("\t\trunq bits %d 0x%zx\n",
                    i, rq->rq_status.rqb_bits[i]);
                for (j = 0; j < RQB_BPW; j++)
                        if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
                                pri = j + (i << RQB_L2BPW);
                                rqh = &rq->rq_queues[pri];
                                TAILQ_FOREACH(ts, rqh, ts_procq) {
                                        printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
                                            ts->ts_thread, ts->ts_thread->td_proc->p_comm, ts->ts_thread->td_priority, ts->ts_rqindex, pri);
                                }
                        }
        }
}

/*
 * Print the status of a per-cpu thread queue.  Should be a ddb show cmd.
 */
void
tdq_print(int cpu)
{
        struct tdq *tdq;

        tdq = TDQ_CPU(cpu);

        printf("tdq %d:\n", TDQ_ID(tdq));
        printf("\tlockptr %p\n", TDQ_LOCKPTR(tdq));
        printf("\tload: %d\n", tdq->tdq_load);
        printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
        printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
        printf("\trealtime runq:\n");
        runq_print(&tdq->tdq_realtime);
        printf("\ttimeshare runq:\n");
        runq_print(&tdq->tdq_timeshare);
        printf("\tidle runq:\n");
        runq_print(&tdq->tdq_idle);
#ifdef SMP
        printf("\tload transferable: %d\n", tdq->tdq_transferable);
        printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
        printf("\tgroup: %d\n", TDG_ID(tdq->tdq_group));
        printf("\tLock name: %s\n", tdq->tdq_group->tdg_name);
#endif
}

#define TS_RQ_PPQ       (((PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE) + 1) / RQ_NQS)
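
/*
 * Illustrative note: TS_RQ_PPQ is the number of timeshare priorities that
 * share one run-queue bucket.  tdq_runq_add() below divides a timeshare
 * priority by TS_RQ_PPQ and then offsets it by tdq_idx, so the timeshare
 * queue behaves as a circular calendar: the insert index advances over time
 * and lower-priority threads land farther ahead of the current removal
 * index (tdq_ridx).
 */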
/*
 * Add a thread to the actual run-queue.  Keeps transferable counts up to
 * date with what is actually on the run-queue.  Selects the correct
 * queue position for timeshare threads.
 */
static __inline void
tdq_runq_add(struct tdq *tdq, struct td_sched *ts, int flags)
{
        TDQ_LOCK_ASSERT(tdq, MA_OWNED);
        THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
#ifdef SMP
        if (THREAD_CAN_MIGRATE(ts->ts_thread)) {
                tdq->tdq_transferable++;
                tdq->tdq_group->tdg_transferable++;
                ts->ts_flags |= TSF_XFERABLE;
        }
#endif
        if (ts->ts_runq == &tdq->tdq_timeshare) {
                u_char pri;

                pri = ts->ts_thread->td_priority;
                KASSERT(pri <= PRI_MAX_TIMESHARE && pri >= PRI_MIN_TIMESHARE,
                        ("Invalid priority %d on timeshare runq", pri));
                /*
                 * This queue contains only priorities between MIN and MAX
                 * realtime.  Use the whole queue to represent these values.
                 */
                if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
                        pri = (pri - PRI_MIN_TIMESHARE) / TS_RQ_PPQ;
                        pri = (pri + tdq->tdq_idx) % RQ_NQS;
                        /*
                         * This effectively shortens the queue by one so we
                         * can have a one slot difference between idx and
                         * ridx while we wait for threads to drain.
                         */
                        if (tdq->tdq_ridx != tdq->tdq_idx &&
                            pri == tdq->tdq_ridx)
                                pri = (unsigned char)(pri - 1) % RQ_NQS;
                } else
                        pri = tdq->tdq_ridx;
                runq_add_pri(ts->ts_runq, ts, pri, flags);
        } else
                runq_add(ts->ts_runq, ts, flags);
}

/*
 * Remove a thread from a run-queue.  This typically happens when a thread
 * is selected to run.  Running threads are not on the queue and the
 * transferable count does not reflect them.
 */
static __inline void
tdq_runq_rem(struct tdq *tdq, struct td_sched *ts)
{
        TDQ_LOCK_ASSERT(tdq, MA_OWNED);
        KASSERT(ts->ts_runq != NULL,
            ("tdq_runq_remove: thread %p null ts_runq", ts->ts_thread));
#ifdef SMP
        if (ts->ts_flags & TSF_XFERABLE) {
                tdq->tdq_transferable--;
                tdq->tdq_group->tdg_transferable--;
                ts->ts_flags &= ~TSF_XFERABLE;
        }
#endif
        if (ts->ts_runq == &tdq->tdq_timeshare) {
                if (tdq->tdq_idx != tdq->tdq_ridx)
                        runq_remove_idx(ts->ts_runq, ts, &tdq->tdq_ridx);
                else
                        runq_remove_idx(ts->ts_runq, ts, NULL);
                /*
                 * For timeshare threads we update the priority here so
                 * the priority reflects the time we've been sleeping.
                 */
                ts->ts_ltick = ticks;
                sched_pctcpu_update(ts);
                sched_priority(ts->ts_thread);
        } else
                runq_remove(ts->ts_runq, ts);
}

/*
 * Load is maintained for all threads RUNNING and ON_RUNQ.  Add the load
 * for this thread to the referenced thread queue.
 */
static void
tdq_load_add(struct tdq *tdq, struct td_sched *ts)
{
        int class;

        TDQ_LOCK_ASSERT(tdq, MA_OWNED);
        THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
        class = PRI_BASE(ts->ts_thread->td_pri_class);
        tdq->tdq_load++;
        CTR2(KTR_SCHED, "cpu %d load: %d", TDQ_ID(tdq), tdq->tdq_load);
        if (class != PRI_ITHD &&
            (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
                tdq->tdq_group->tdg_load++;
#else
                tdq->tdq_sysload++;
#endif
}
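
/*
 * Note (summary of the checks above): interrupt threads and threads marked
 * P_NOLOAD still contribute to the per-queue tdq_load used for scheduling
 * decisions, but they are excluded from the group/system load that feeds
 * the load average.
 */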

/*
 * Remove the load from a thread that is transitioning to a sleep state or
 * exiting.
 */
static void
tdq_load_rem(struct tdq *tdq, struct td_sched *ts)
{
        int class;

        THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
        TDQ_LOCK_ASSERT(tdq, MA_OWNED);
        class = PRI_BASE(ts->ts_thread->td_pri_class);
        if (class != PRI_ITHD &&
            (ts->ts_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
                tdq->tdq_group->tdg_load--;
#else
                tdq->tdq_sysload--;
#endif
        KASSERT(tdq->tdq_load != 0,
            ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
        tdq->tdq_load--;
        CTR1(KTR_SCHED, "load: %d", tdq->tdq_load);
        ts->ts_runq = NULL;
}

#ifdef SMP
/*
 * sched_balance is a simple CPU load balancing algorithm.  It operates by
 * finding the least loaded and most loaded cpu and equalizing their load
 * by migrating some processes.
 *
 * Dealing only with two CPUs at a time has two advantages.  Firstly, most
 * installations will only have 2 cpus.  Secondly, load balancing too much at
 * once can have an unpleasant effect on the system.  The scheduler rarely has
 * enough information to make perfect decisions.  So this algorithm chooses
 * simplicity and more gradual effects on load in larger systems.
 */
static void
sched_balance(void *arg)
{
        struct tdq_group *high;
        struct tdq_group *low;
        struct tdq_group *tdg;
        int cnt;
        int i;

        callout_reset(&balco, max(hz / 2, random() % (hz * balance_secs)),
            sched_balance, NULL);
        if (smp_started == 0 || rebalance == 0)
                return;
        low = high = NULL;
        i = random() % (tdg_maxid + 1);
        for (cnt = 0; cnt <= tdg_maxid; cnt++) {
                tdg = TDQ_GROUP(i);
                /*
                 * Find the CPU with the highest load that has some
                 * threads to transfer.
                 */
                if ((high == NULL || tdg->tdg_load > high->tdg_load)
                    && tdg->tdg_transferable)
                        high = tdg;
                if (low == NULL || tdg->tdg_load < low->tdg_load)
                        low = tdg;
                if (++i > tdg_maxid)
                        i = 0;
        }
        if (low != NULL && high != NULL && high != low)
                sched_balance_pair(LIST_FIRST(&high->tdg_members),
                    LIST_FIRST(&low->tdg_members));
}
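
/*
 * Illustrative note: the callout above re-arms itself for at least hz/2
 * ticks and at most balance_secs seconds, with the randomized component
 * presumably keeping successive balancer runs from falling into lock step
 * with other periodic work.
 */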

/*
 * Balance load between CPUs in a group.  Will only migrate within the group.
 */
static void
sched_balance_groups(void *arg)
{
        int i;

        callout_reset(&gbalco, max(hz / 2, random() % (hz * balance_secs)),
            sched_balance_groups, NULL);
        if (smp_started == 0 || rebalance == 0)
                return;
        for (i = 0; i <= tdg_maxid; i++)
                sched_balance_group(TDQ_GROUP(i));
}

/*
 * Finds the greatest imbalance between two tdqs in a group.
 */
static void
sched_balance_group(struct tdq_group *tdg)
{
        struct tdq *tdq;
        struct tdq *high;
        struct tdq *low;
        int load;

        if (tdg->tdg_transferable == 0)
                return;
        low = NULL;
        high = NULL;
        LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
                load = tdq->tdq_load;
                if (high == NULL || load > high->tdq_load)
                        high = tdq;
                if (low == NULL || load < low->tdq_load)
                        low = tdq;
        }
        if (high != NULL && low != NULL && high != low)
                sched_balance_pair(high, low);
}

/*
 * Lock two thread queues using their address to maintain lock order.
 */
static void
tdq_lock_pair(struct tdq *one, struct tdq *two)
{
        if (one < two) {
                TDQ_LOCK(one);
                TDQ_LOCK_FLAGS(two, MTX_DUPOK);
        } else {
                TDQ_LOCK(two);
                TDQ_LOCK_FLAGS(one, MTX_DUPOK);
        }
}
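
/*
 * Illustrative note: ordering the two spin locks by ascending tdq address
 * gives every caller the same global acquisition order, which is what keeps
 * two CPUs balancing against each other from deadlocking; MTX_DUPOK is
 * needed because both locks belong to the same lock class.
 */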

/*
 * Transfer load between two imbalanced thread queues.
 */
static void
sched_balance_pair(struct tdq *high, struct tdq *low)
{
        int transferable;
        int high_load;
        int low_load;
        int move;
        int diff;
        int i;

        tdq_lock_pair(high, low);
        /*
         * If we're transferring within a group we have to use this specific
         * tdq's transferable count, otherwise we can steal from other members
         * of the group.
         */
        if (high->tdq_group == low->tdq_group) {
                transferable = high->tdq_transferable;
                high_load = high->tdq_load;
                low_load = low->tdq_load;
        } else {
                transferable = high->tdq_group->tdg_transferable;
                high_load = high->tdq_group->tdg_load;
                low_load = low->tdq_group->tdg_load;
        }
        /*
         * Determine what the imbalance is and then adjust that to how many
         * threads we actually have to give up (transferable).
         */
        if (transferable != 0) {
                diff = high_load - low_load;
                move = diff / 2;
                if (diff & 0x1)
                        move++;
                move = min(move, transferable);
                for (i = 0; i < move; i++)
                        tdq_move(high, low);
        }
        TDQ_UNLOCK(high);
        TDQ_UNLOCK(low);
        return;
}
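
/*
 * Worked example (illustrative): with a transferable count of 4 and loads
 * of 7 and 2, the imbalance is 5, so move = 3 (the odd difference rounds
 * up), clamped to the 4 transferable threads; tdq_move() is then called
 * three times to shift threads from the busy queue to the idle one.
 */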

/*
 * Move a thread from one thread queue to another.
 */
static void
tdq_move(struct tdq *from, struct tdq *to)
{
        struct td_sched *ts;
        struct thread *td;
        struct tdq *tdq;
        int cpu;

        tdq = from;
        cpu = TDQ_ID(to);
        ts = tdq_steal(tdq, 1);
        if (ts == NULL) {
                struct tdq_group *tdg;

                tdg = tdq->tdq_group;
                LIST_FOREACH(tdq, &tdg->tdg_members, tdq_siblings) {
                        if (tdq == from || tdq->tdq_transferable == 0)
                                continue;
                        ts = tdq_steal(tdq, 1);
                        break;
                }
                if (ts == NULL)
                        return;
        }
        if (tdq == to)
                return;
        td = ts->ts_thread;
        /*
         * Although the run queue is locked the thread may be blocked.  Lock
         * it to clear this.
         */
        thread_lock(td);
        /* Drop recursive lock on from. */
        TDQ_UNLOCK(from);
        sched_rem(td);
        ts->ts_cpu = cpu;
        td->td_lock = TDQ_LOCKPTR(to);
        tdq_add(to, td, SRQ_YIELDING);
        tdq_notify(ts);
}
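
/*
 * Note (summary): reassigning td_lock to the destination queue's lock is the
 * hand-off step of the per-container thread locking scheme; once it is done
 * the thread is protected by the destination tdq's lock, the recursive
 * reference to the source lock having already been dropped above.
 */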

/*
 * This tdq has idled.  Try to steal a thread from another cpu and switch
 * to it.
 */
static int
tdq_idled(struct tdq *tdq)
{
        struct tdq_group *tdg;
        struct tdq *steal;
        struct td_sched *ts;
        struct thread *td;
        int highload;
        int highcpu;
        int load;
        int cpu;

        /* We don't want to be preempted while we're iterating over tdqs */
        spinlock_enter();
        tdg = tdq->tdq_group;
        /*
         * If we're in a cpu group, try and steal threads from another cpu in
         * the group before idling.
         */
        if (steal_htt && tdg->tdg_cpus > 1 && tdg->tdg_transferable) {
                LIST_FOREACH(steal, &tdg->tdg_members, tdq_siblings) {
                        if (steal == tdq || steal->tdq_transferable == 0)
                                continue;
                        TDQ_LOCK(steal);
                        ts = tdq_steal(steal, 0);
                        if (ts)
                                goto steal;
                        TDQ_UNLOCK(steal);
                }
        }
        for (;;) {
                if (steal_idle == 0)
                        break;
                highcpu = 0;
                highload = 0;
                for (cpu = 0; cpu <= mp_maxid; cpu++) {
                        if (CPU_ABSENT(cpu))
                                continue;
                        steal = TDQ_CPU(cpu);
                        load = TDQ_CPU(cpu)->tdq_transferable;
                        if (load < highload)
                                continue;
                        highload = load;
                        highcpu = cpu;
                }
                if (highload < steal_thresh)
                        break;
                steal = TDQ_CPU(highcpu);
                TDQ_LOCK(steal);
                if (steal->tdq_transferable >= steal_thresh &&
                    (ts = tdq_steal(steal, 1)) != NULL)
                        goto steal;
                TDQ_UNLOCK(steal);
                break;
        }
        spinlock_exit();
        return (1);
steal:
        td = ts->ts_thread;
        thread_lock(td);
        spinlock_exit();
        MPASS(td->td_lock == TDQ_LOCKPTR(steal));
        TDQ_UNLOCK(steal);
        sched_rem(td);
        sched_setcpu(ts, PCPU_GET(cpuid), SRQ_YIELDING);
        tdq_add(tdq, td, SRQ_YIELDING);
        MPASS(td->td_lock == curthread->td_lock);
        mi_switch(SW_VOL, NULL);
        thread_unlock(curthread);

        return (0);
}
|
|
|
|
|
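/*
 * Illustrative sketch (not part of the kernel source): the lockless
 * hand-off described in the commit log above (kseq_notify() pushing onto
 * a per-cpu singly linked list, kseq_assign() draining it) is an atomic
 * LIFO push on the sending cpu and a single atomic swap on the receiving
 * cpu.  The user-space C11 version below uses hypothetical names
 * (struct work, notify_push, assign_drain) and a send_ipi() stub that
 * stands in for ipi_selected().
 */
#if 0	/* example only */
#include <stdatomic.h>
#include <stddef.h>

struct work {
	struct work	*next;		/* link in the assign list */
	int		 item;		/* payload, stands in for a td_sched */
};

/* Head of one cpu's lockless assign list; NULL when empty. */
static _Atomic(struct work *) assign_head;

/* Stand-in for ipi_selected(). */
static void
send_ipi(void)
{
}

/*
 * Sender: push one item with a CAS loop and raise an IPI only when the
 * list was previously empty, so a burst of hand-offs costs one IPI.
 */
static void
notify_push(struct work *w)
{
	struct work *old;

	do {
		old = atomic_load(&assign_head);
		w->next = old;
	} while (!atomic_compare_exchange_weak(&assign_head, &old, w));
	if (old == NULL)
		send_ipi();
}

/*
 * Receiver: detach the whole list with one atomic exchange and process
 * each item, which is what kseq_assign() does via sched_add().
 */
static void
assign_drain(void (*process)(struct work *))
{
	struct work *w, *next;

	w = atomic_exchange(&assign_head, (struct work *)NULL);
	for (; w != NULL; w = next) {
		next = w->next;
		process(w);
	}
}
#endif	/* example only */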
2007-07-17 22:53:23 +00:00
|
|
|
/*
|
|
|
|
* Notify a remote cpu of new work. Sends an IPI if criteria are met.
|
|
|
|
*/
|
2003-10-31 11:16:04 +00:00
|
|
|
static void
|
2007-01-19 21:56:08 +00:00
|
|
|
tdq_notify(struct td_sched *ts)
|
2003-10-31 11:16:04 +00:00
|
|
|
{
|
2007-01-25 23:51:59 +00:00
|
|
|
struct thread *ctd;
|
2003-10-31 11:16:04 +00:00
|
|
|
struct pcpu *pcpu;
|
2007-01-25 23:51:59 +00:00
|
|
|
int cpri;
|
|
|
|
int pri;
|
2007-01-19 21:56:08 +00:00
|
|
|
int cpu;
|
2003-10-31 11:16:04 +00:00
|
|
|
|
2007-01-19 21:56:08 +00:00
|
|
|
cpu = ts->ts_cpu;
|
2007-01-25 23:51:59 +00:00
|
|
|
pri = ts->ts_thread->td_priority;
|
2007-01-19 21:56:08 +00:00
|
|
|
pcpu = pcpu_find(cpu);
|
2007-01-25 23:51:59 +00:00
|
|
|
ctd = pcpu->pc_curthread;
|
|
|
|
cpri = ctd->td_priority;
|
2007-01-20 09:03:43 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If our priority is not better than the current priority there is
|
|
|
|
* nothing to do.
|
|
|
|
*/
|
2007-01-25 23:51:59 +00:00
|
|
|
if (pri > cpri)
|
2007-01-20 09:03:43 +00:00
|
|
|
return;
|
2003-10-31 11:16:04 +00:00
|
|
|
/*
|
2007-01-25 23:51:59 +00:00
|
|
|
* Always IPI idle.
|
|
|
|
*/
|
|
|
|
if (cpri > PRI_MIN_IDLE)
|
|
|
|
goto sendipi;
|
|
|
|
/*
|
|
|
|
	 * If we're realtime or better and there is timeshare or worse running,
|
|
|
|
* send an IPI.
|
|
|
|
*/
|
|
|
|
if (pri < PRI_MAX_REALTIME && cpri > PRI_MAX_REALTIME)
|
|
|
|
goto sendipi;
|
|
|
|
/*
|
|
|
|
* Otherwise only IPI if we exceed the threshold.
|
2003-10-31 11:16:04 +00:00
|
|
|
*/
|
2007-07-17 22:53:23 +00:00
|
|
|
if (pri > preempt_thresh)
|
2007-01-05 23:45:38 +00:00
|
|
|
return;
|
2007-01-25 23:51:59 +00:00
|
|
|
sendipi:
|
|
|
|
ctd->td_flags |= TDF_NEEDRESCHED;
|
2007-07-17 22:53:23 +00:00
|
|
|
ipi_selected(1 << cpu, IPI_PREEMPT);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Steals load from a timeshare queue. Honors the rotating queue head
|
|
|
|
* index.
|
|
|
|
*/
|
|
|
|
static struct td_sched *
|
|
|
|
runq_steal_from(struct runq *rq, u_char start)
|
|
|
|
{
|
|
|
|
struct td_sched *ts;
|
|
|
|
struct rqbits *rqb;
|
|
|
|
struct rqhead *rqh;
|
|
|
|
int first;
|
|
|
|
int bit;
|
|
|
|
int pri;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
rqb = &rq->rq_status;
|
|
|
|
	bit = start & (RQB_BPW - 1);
|
|
|
|
pri = 0;
|
|
|
|
first = 0;
|
|
|
|
again:
|
|
|
|
for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
|
|
|
|
if (rqb->rqb_bits[i] == 0)
|
|
|
|
continue;
|
|
|
|
if (bit != 0) {
|
|
|
|
for (pri = bit; pri < RQB_BPW; pri++)
|
|
|
|
if (rqb->rqb_bits[i] & (1ul << pri))
|
|
|
|
break;
|
|
|
|
if (pri >= RQB_BPW)
|
|
|
|
continue;
|
|
|
|
} else
|
|
|
|
pri = RQB_FFS(rqb->rqb_bits[i]);
|
|
|
|
pri += (i << RQB_L2BPW);
|
|
|
|
rqh = &rq->rq_queues[pri];
|
|
|
|
TAILQ_FOREACH(ts, rqh, ts_procq) {
|
|
|
|
if (first && THREAD_CAN_MIGRATE(ts->ts_thread))
|
|
|
|
return (ts);
|
|
|
|
first = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (start != 0) {
|
|
|
|
start = 0;
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (NULL);
|
2003-10-31 11:16:04 +00:00
|
|
|
}
|
|
|
|
|
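/*
 * Illustrative sketch (not part of the kernel source): runq_steal_from()
 * above honors the rotating timeshare head by scanning the priority
 * bitmap from 'start' and wrapping to the beginning when nothing is
 * found past it.  The standalone helper below shows only that
 * wrap-around scan over a plain bit array; the names (first_set_from,
 * EX_NWORDS) and sizes are made up for the example.
 */
#if 0	/* example only */
#include <limits.h>

#define	EX_NWORDS	4

static int
first_set_from(const unsigned long bits[EX_NWORDS], int start)
{
	int bpw, pass, begin, word, bit;

	bpw = (int)(sizeof(unsigned long) * CHAR_BIT);
	for (pass = 0; pass < 2; pass++) {
		/* First pass begins at 'start'; second pass wraps to 0. */
		begin = (pass == 0) ? start : 0;
		for (word = begin / bpw; word < EX_NWORDS; word++) {
			if (bits[word] == 0)
				continue;
			/* Resume mid-word only in the word holding 'begin'. */
			bit = (word == begin / bpw) ? begin % bpw : 0;
			for (; bit < bpw; bit++)
				if (bits[word] & (1UL << bit))
					return (word * bpw + bit);
		}
	}
	return (-1);
}
#endif	/* example only */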
2007-07-17 22:53:23 +00:00
|
|
|
/*
|
|
|
|
* Steals load from a standard linear queue.
|
|
|
|
*/
|
2006-12-06 06:34:57 +00:00
|
|
|
static struct td_sched *
|
2003-10-31 11:16:04 +00:00
|
|
|
runq_steal(struct runq *rq)
|
|
|
|
{
|
|
|
|
struct rqhead *rqh;
|
|
|
|
struct rqbits *rqb;
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *ts;
|
2003-10-31 11:16:04 +00:00
|
|
|
int word;
|
|
|
|
int bit;
|
|
|
|
|
|
|
|
rqb = &rq->rq_status;
|
|
|
|
for (word = 0; word < RQB_LEN; word++) {
|
|
|
|
if (rqb->rqb_bits[word] == 0)
|
|
|
|
continue;
|
|
|
|
for (bit = 0; bit < RQB_BPW; bit++) {
|
2003-12-07 09:57:51 +00:00
|
|
|
if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
|
2003-10-31 11:16:04 +00:00
|
|
|
continue;
|
|
|
|
rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
|
2007-07-19 20:03:15 +00:00
|
|
|
TAILQ_FOREACH(ts, rqh, ts_procq)
|
|
|
|
if (THREAD_CAN_MIGRATE(ts->ts_thread))
|
2006-12-06 06:34:57 +00:00
|
|
|
return (ts);
|
2003-10-31 11:16:04 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
/*
|
|
|
|
* Attempt to steal a thread in priority order from a thread queue.
|
|
|
|
*/
|
2006-12-06 06:34:57 +00:00
|
|
|
static struct td_sched *
|
|
|
|
tdq_steal(struct tdq *tdq, int stealidle)
|
2003-10-31 11:16:04 +00:00
|
|
|
{
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *ts;
|
2003-10-31 11:16:04 +00:00
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
TDQ_LOCK_ASSERT(tdq, MA_OWNED);
|
2007-01-04 08:56:25 +00:00
|
|
|
if ((ts = runq_steal(&tdq->tdq_realtime)) != NULL)
|
2006-12-06 06:34:57 +00:00
|
|
|
return (ts);
|
2007-07-17 22:53:23 +00:00
|
|
|
if ((ts = runq_steal_from(&tdq->tdq_timeshare, tdq->tdq_ridx)) != NULL)
|
2006-12-06 06:34:57 +00:00
|
|
|
return (ts);
|
2003-12-11 03:57:10 +00:00
|
|
|
if (stealidle)
|
2006-12-29 10:37:07 +00:00
|
|
|
return (runq_steal(&tdq->tdq_idle));
|
2003-12-11 03:57:10 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
/*
|
|
|
|
* Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
|
|
|
|
* current lock and returns with the assigned queue locked. If this is
|
|
|
|
* via sched_switch() we leave the thread in a blocked state as an
|
|
|
|
* optimization.
|
|
|
|
*/
|
|
|
|
static inline struct tdq *
|
|
|
|
sched_setcpu(struct td_sched *ts, int cpu, int flags)
|
2003-12-11 03:57:10 +00:00
|
|
|
{
|
2007-07-17 22:53:23 +00:00
|
|
|
struct thread *td;
|
|
|
|
struct tdq *tdq;
|
2003-12-11 03:57:10 +00:00
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
THREAD_LOCK_ASSERT(ts->ts_thread, MA_OWNED);
|
|
|
|
|
|
|
|
tdq = TDQ_CPU(cpu);
|
|
|
|
td = ts->ts_thread;
|
|
|
|
ts->ts_cpu = cpu;
|
2007-08-03 23:38:46 +00:00
|
|
|
|
|
|
|
/* If the lock matches just return the queue. */
|
2007-07-17 22:53:23 +00:00
|
|
|
if (td->td_lock == TDQ_LOCKPTR(tdq))
|
|
|
|
return (tdq);
|
|
|
|
#ifdef notyet
|
2007-01-19 21:56:08 +00:00
|
|
|
/*
|
2007-07-17 22:53:23 +00:00
|
|
|
	 * If the thread isn't running, its lockptr is a
|
|
|
|
* turnstile or a sleepqueue. We can just lock_set without
|
|
|
|
* blocking.
|
2007-01-19 21:56:08 +00:00
|
|
|
*/
|
2007-07-17 22:53:23 +00:00
|
|
|
if (TD_CAN_RUN(td)) {
|
|
|
|
TDQ_LOCK(tdq);
|
|
|
|
thread_lock_set(td, TDQ_LOCKPTR(tdq));
|
|
|
|
return (tdq);
|
|
|
|
}
|
|
|
|
#endif
|
2007-01-19 21:56:08 +00:00
|
|
|
/*
|
2007-07-17 22:53:23 +00:00
|
|
|
	 * The hard case is migration: we need to block the thread first to
|
|
|
|
	 * prevent lock order reversals with other CPUs' locks.
|
2007-01-19 21:56:08 +00:00
|
|
|
*/
|
2007-07-17 22:53:23 +00:00
|
|
|
thread_lock_block(td);
|
|
|
|
TDQ_LOCK(tdq);
|
2007-08-03 23:38:46 +00:00
|
|
|
thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
|
2007-07-17 22:53:23 +00:00
|
|
|
return (tdq);
|
2007-01-19 21:56:08 +00:00
|
|
|
}
|
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
/*
|
|
|
|
* Find the thread queue running the lowest priority thread.
|
|
|
|
*/
|
2007-01-19 21:56:08 +00:00
|
|
|
static int
|
2007-07-17 22:53:23 +00:00
|
|
|
tdq_lowestpri(void)
|
2007-01-19 21:56:08 +00:00
|
|
|
{
|
2007-07-17 22:53:23 +00:00
|
|
|
struct tdq *tdq;
|
2007-01-19 21:56:08 +00:00
|
|
|
int lowpri;
|
|
|
|
int lowcpu;
|
|
|
|
int lowload;
|
|
|
|
int load;
|
2007-07-17 22:53:23 +00:00
|
|
|
int cpu;
|
|
|
|
int pri;
|
|
|
|
|
|
|
|
lowload = 0;
|
|
|
|
lowpri = lowcpu = 0;
|
|
|
|
for (cpu = 0; cpu <= mp_maxid; cpu++) {
|
|
|
|
if (CPU_ABSENT(cpu))
|
|
|
|
continue;
|
|
|
|
tdq = TDQ_CPU(cpu);
|
|
|
|
pri = tdq->tdq_lowpri;
|
|
|
|
load = TDQ_CPU(cpu)->tdq_load;
|
|
|
|
CTR4(KTR_ULE,
|
|
|
|
"cpu %d pri %d lowcpu %d lowpri %d",
|
|
|
|
cpu, pri, lowcpu, lowpri);
|
|
|
|
if (pri < lowpri)
|
|
|
|
continue;
|
|
|
|
if (lowpri && lowpri == pri && load > lowload)
|
|
|
|
continue;
|
|
|
|
lowpri = pri;
|
|
|
|
lowcpu = cpu;
|
|
|
|
lowload = load;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (lowcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the thread queue with the least load.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
tdq_lowestload(void)
|
|
|
|
{
|
|
|
|
struct tdq *tdq;
|
|
|
|
int lowload;
|
|
|
|
int lowpri;
|
|
|
|
int lowcpu;
|
|
|
|
int load;
|
|
|
|
int cpu;
|
|
|
|
int pri;
|
|
|
|
|
|
|
|
lowcpu = 0;
|
|
|
|
lowload = TDQ_CPU(0)->tdq_load;
|
|
|
|
lowpri = TDQ_CPU(0)->tdq_lowpri;
|
|
|
|
for (cpu = 1; cpu <= mp_maxid; cpu++) {
|
|
|
|
if (CPU_ABSENT(cpu))
|
|
|
|
continue;
|
|
|
|
tdq = TDQ_CPU(cpu);
|
|
|
|
load = tdq->tdq_load;
|
|
|
|
pri = tdq->tdq_lowpri;
|
|
|
|
CTR4(KTR_ULE, "cpu %d load %d lowcpu %d lowload %d",
|
|
|
|
cpu, load, lowcpu, lowload);
|
|
|
|
if (load > lowload)
|
|
|
|
continue;
|
|
|
|
if (load == lowload && pri < lowpri)
|
|
|
|
continue;
|
|
|
|
lowcpu = cpu;
|
|
|
|
lowload = load;
|
|
|
|
lowpri = pri;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (lowcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Pick the destination cpu for sched_add(). Respects affinity and makes
|
|
|
|
* a determination based on load or priority of available processors.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
sched_pickcpu(struct td_sched *ts, int flags)
|
|
|
|
{
|
|
|
|
struct tdq *tdq;
|
2007-01-19 21:56:08 +00:00
|
|
|
int self;
|
|
|
|
int pri;
|
|
|
|
int cpu;
|
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
cpu = self = PCPU_GET(cpuid);
|
2007-01-19 21:56:08 +00:00
|
|
|
if (smp_started == 0)
|
|
|
|
return (self);
|
2007-07-19 20:03:15 +00:00
|
|
|
/*
|
|
|
|
* Don't migrate a running thread from sched_switch().
|
|
|
|
*/
|
|
|
|
if (flags & SRQ_OURSELF) {
|
|
|
|
CTR1(KTR_ULE, "YIELDING %d",
|
|
|
|
curthread->td_priority);
|
|
|
|
return (self);
|
|
|
|
}
|
2007-01-19 21:56:08 +00:00
|
|
|
pri = ts->ts_thread->td_priority;
|
2007-07-17 22:53:23 +00:00
|
|
|
cpu = ts->ts_cpu;
|
2007-01-19 21:56:08 +00:00
|
|
|
/*
|
|
|
|
* Regardless of affinity, if the last cpu is idle send it there.
|
|
|
|
*/
|
2007-07-17 22:53:23 +00:00
|
|
|
tdq = TDQ_CPU(cpu);
|
|
|
|
if (tdq->tdq_lowpri > PRI_MIN_IDLE) {
|
2007-01-25 19:14:11 +00:00
|
|
|
CTR5(KTR_ULE,
|
2007-01-19 21:56:08 +00:00
|
|
|
"ts_cpu %d idle, ltick %d ticks %d pri %d curthread %d",
|
|
|
|
ts->ts_cpu, ts->ts_rltick, ticks, pri,
|
2007-07-17 22:53:23 +00:00
|
|
|
tdq->tdq_lowpri);
|
2007-01-19 21:56:08 +00:00
|
|
|
return (ts->ts_cpu);
|
2004-12-26 22:56:08 +00:00
|
|
|
}
|
2007-01-19 21:56:08 +00:00
|
|
|
/*
|
|
|
|
* If we have affinity, try to place it on the cpu we last ran on.
|
|
|
|
*/
|
2007-07-17 22:53:23 +00:00
|
|
|
if (SCHED_AFFINITY(ts) && tdq->tdq_lowpri > pri) {
|
2007-01-25 19:14:11 +00:00
|
|
|
CTR5(KTR_ULE,
|
2007-01-19 21:56:08 +00:00
|
|
|
"affinity for %d, ltick %d ticks %d pri %d curthread %d",
|
|
|
|
ts->ts_cpu, ts->ts_rltick, ticks, pri,
|
2007-07-17 22:53:23 +00:00
|
|
|
tdq->tdq_lowpri);
|
2007-01-19 21:56:08 +00:00
|
|
|
return (ts->ts_cpu);
|
2004-12-26 22:56:08 +00:00
|
|
|
}
|
2003-12-11 03:57:10 +00:00
|
|
|
/*
|
2007-01-19 21:56:08 +00:00
|
|
|
* Look for an idle group.
|
2003-12-11 03:57:10 +00:00
|
|
|
*/
|
2007-01-25 19:14:11 +00:00
|
|
|
CTR1(KTR_ULE, "tdq_idle %X", tdq_idle);
|
2007-01-19 21:56:08 +00:00
|
|
|
cpu = ffs(tdq_idle);
|
|
|
|
if (cpu)
|
2007-07-17 22:53:23 +00:00
|
|
|
return (--cpu);
|
2007-07-19 20:03:15 +00:00
|
|
|
/*
|
|
|
|
	 * If there are no idle cores, see if we can run the thread locally.  This may
|
|
|
|
* improve locality among sleepers and wakers when there is shared data.
|
|
|
|
*/
|
|
|
|
if (tryself && pri < curthread->td_priority) {
|
|
|
|
CTR1(KTR_ULE, "tryself %d",
|
2007-01-19 21:56:08 +00:00
|
|
|
curthread->td_priority);
|
|
|
|
return (self);
|
2003-12-11 03:57:10 +00:00
|
|
|
}
|
2004-08-10 07:52:21 +00:00
|
|
|
/*
|
2007-01-19 21:56:08 +00:00
|
|
|
* Now search for the cpu running the lowest priority thread with
|
|
|
|
* the least load.
|
2004-08-10 07:52:21 +00:00
|
|
|
*/
|
2007-07-17 22:53:23 +00:00
|
|
|
if (pick_pri)
|
|
|
|
cpu = tdq_lowestpri();
|
|
|
|
else
|
|
|
|
cpu = tdq_lowestload();
|
|
|
|
return (cpu);
|
2003-10-31 11:16:04 +00:00
|
|
|
}
|
2003-12-11 03:57:10 +00:00
|
|
|
|
2003-10-31 11:16:04 +00:00
|
|
|
#endif /* SMP */
|
2003-02-03 05:30:07 +00:00
|
|
|
|
2003-07-08 06:19:40 +00:00
|
|
|
/*
|
2003-10-31 11:16:04 +00:00
|
|
|
* Pick the highest priority task we have and return it.
|
2003-07-08 06:19:40 +00:00
|
|
|
*/
|
2006-12-06 06:34:57 +00:00
|
|
|
static struct td_sched *
|
|
|
|
tdq_choose(struct tdq *tdq)
|
2003-02-03 05:30:07 +00:00
|
|
|
{
|
2006-12-06 06:34:57 +00:00
|
|
|
struct td_sched *ts;
|
2003-02-03 05:30:07 +00:00
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
TDQ_LOCK_ASSERT(tdq, MA_OWNED);
|
2007-01-04 08:56:25 +00:00
|
|
|
ts = runq_choose(&tdq->tdq_realtime);
|
2007-06-15 19:33:58 +00:00
|
|
|
if (ts != NULL)
|
2007-01-04 08:56:25 +00:00
|
|
|
return (ts);
|
2007-01-04 12:16:19 +00:00
|
|
|
ts = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
|
2007-01-04 08:56:25 +00:00
|
|
|
if (ts != NULL) {
|
2007-06-15 19:33:58 +00:00
|
|
|
KASSERT(ts->ts_thread->td_priority >= PRI_MIN_TIMESHARE,
|
2007-01-04 08:56:25 +00:00
|
|
|
("tdq_choose: Invalid priority on timeshare queue %d",
|
|
|
|
ts->ts_thread->td_priority));
|
|
|
|
return (ts);
|
|
|
|
}
|
|
|
|
|
|
|
|
ts = runq_choose(&tdq->tdq_idle);
|
|
|
|
if (ts != NULL) {
|
|
|
|
KASSERT(ts->ts_thread->td_priority >= PRI_MIN_IDLE,
|
|
|
|
("tdq_choose: Invalid priority on idle queue %d",
|
|
|
|
ts->ts_thread->td_priority));
|
2006-12-06 06:34:57 +00:00
|
|
|
return (ts);
|
2003-02-03 05:30:07 +00:00
|
|
|
}
|
|
|
|
|
2007-01-04 08:56:25 +00:00
|
|
|
return (NULL);
|
2003-04-02 06:46:43 +00:00
|
|
|
}
|
2003-01-29 07:00:51 +00:00
|
|
|
|
2007-07-17 22:53:23 +00:00
|
|
|
/*
|
|
|
|
* Initialize a thread queue.
|
|
|
|
*/
|
2003-01-29 07:00:51 +00:00
|
|
|
static void
|
2006-12-06 06:34:57 +00:00
|
|
|
tdq_setup(struct tdq *tdq)
|
2003-01-29 07:00:51 +00:00
|
|
|
{
|
2007-07-17 22:53:23 +00:00
|
|
|
|
2007-08-03 23:38:46 +00:00
|
|
|
if (bootverbose)
|
|
|
|
printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
|
2007-01-04 08:56:25 +00:00
|
|
|
runq_init(&tdq->tdq_realtime);
|
|
|
|
runq_init(&tdq->tdq_timeshare);
|
2006-12-29 10:37:07 +00:00
|
|
|
runq_init(&tdq->tdq_idle);
|
|
|
|
tdq->tdq_load = 0;
|
2003-01-29 07:00:51 +00:00
|
|
|
}
|
|
|
|
|
2007-08-03 23:38:46 +00:00
|
|
|
#ifdef SMP
|
2003-01-26 05:23:15 +00:00
|
|
|
static void
|
2007-08-03 23:38:46 +00:00
|
|
|
tdg_setup(struct tdq_group *tdg)
|
2003-01-26 05:23:15 +00:00
|
|
|
{
|
2007-08-03 23:38:46 +00:00
|
|
|
if (bootverbose)
|
|
|
|
printf("ULE: setup cpu group %d\n", TDG_ID(tdg));
|
|
|
|
snprintf(tdg->tdg_name, sizeof(tdg->tdg_name),
|
|
|
|
"sched lock %d", (int)TDG_ID(tdg));
|
|
|
|
mtx_init(&tdg->tdg_lock, tdg->tdg_name, "sched lock",
|
|
|
|
MTX_SPIN | MTX_RECURSE);
|
|
|
|
LIST_INIT(&tdg->tdg_members);
|
|
|
|
tdg->tdg_load = 0;
|
|
|
|
tdg->tdg_transferable = 0;
|
|
|
|
tdg->tdg_cpus = 0;
|
|
|
|
tdg->tdg_mask = 0;
|
|
|
|
tdg->tdg_cpumask = 0;
|
|
|
|
tdg->tdg_idlemask = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
tdg_add(struct tdq_group *tdg, struct tdq *tdq)
|
|
|
|
{
|
|
|
|
if (tdg->tdg_mask == 0)
|
|
|
|
tdg->tdg_mask |= 1 << TDQ_ID(tdq);
|
|
|
|
tdg->tdg_cpumask |= 1 << TDQ_ID(tdq);
|
|
|
|
tdg->tdg_cpus++;
|
|
|
|
tdq->tdq_group = tdg;
|
|
|
|
tdq->tdq_lock = &tdg->tdg_lock;
|
|
|
|
LIST_INSERT_HEAD(&tdg->tdg_members, tdq, tdq_siblings);
|
|
|
|
if (bootverbose)
|
|
|
|
printf("ULE: adding cpu %d to group %d: cpus %d mask 0x%X\n",
|
|
|
|
TDQ_ID(tdq), TDG_ID(tdg), tdg->tdg_cpus, tdg->tdg_cpumask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
sched_setup_topology(void)
|
|
|
|
{
|
|
|
|
struct tdq_group *tdg;
|
|
|
|
struct cpu_group *cg;
|
2007-07-17 22:53:23 +00:00
|
|
|
int balance_groups;
|
2007-08-03 23:38:46 +00:00
|
|
|
struct tdq *tdq;
|
2003-01-26 05:23:15 +00:00
|
|
|
int i;
|
2007-08-03 23:38:46 +00:00
|
|
|
int j;
|
2003-03-04 02:45:59 +00:00
|
|
|
|
2007-08-03 23:38:46 +00:00
|
|
|
topology = 1;
|
2003-12-12 07:33:51 +00:00
|
|
|
balance_groups = 0;
|
2007-08-03 23:38:46 +00:00
|
|
|
for (i = 0; i < smp_topology->ct_count; i++) {
|
|
|
|
cg = &smp_topology->ct_group[i];
|
|
|
|
tdg = &tdq_groups[i];
|
|
|
|
/*
|
|
|
|
* Initialize the group.
|
|
|
|
*/
|
|
|
|
tdg_setup(tdg);
|
|
|
|
/*
|
|
|
|
* Find all of the group members and add them.
|
|
|
|
*/
|
|
|
|
for (j = 0; j < MAXCPU; j++) {
|
|
|
|
if ((cg->cg_mask & (1 << j)) != 0) {
|
|
|
|
tdq = TDQ_CPU(j);
|
|
|
|
tdq_setup(tdq);
|
|
|
|
tdg_add(tdg, tdq);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (tdg->tdg_cpus > 1)
|
|
|
|
balance_groups = 1;
|
2003-12-11 03:57:10 +00:00
|
|
|
}
|
2007-08-03 23:38:46 +00:00
|
|
|
tdg_maxid = smp_topology->ct_count - 1;
|
|
|
|
if (balance_groups)
|
|
|
|
sched_balance_groups(NULL);
|
|
|
|
}
|
2003-12-11 03:57:10 +00:00
|
|
|
|
2007-08-03 23:38:46 +00:00
|
|
|
static void
sched_setup_smp(void)
{
        struct tdq_group *tdg;
        struct tdq *tdq;
        int cpus;
        int i;

        for (cpus = 0, i = 0; i < MAXCPU; i++) {
                if (CPU_ABSENT(i))
                        continue;
                tdq = &tdq_cpu[i];
                tdg = &tdq_groups[i];
                /*
                 * Setup a tdq group with one member.
                 */
                tdg_setup(tdg);
                tdq_setup(tdq);
                tdg_add(tdg, tdq);
                cpus++;
        }
        tdg_maxid = cpus - 1;
}

/*
 * Fake a topology with one group containing all CPUs.
 */
static void
sched_fake_topo(void)
{
#ifdef SCHED_FAKE_TOPOLOGY
        static struct cpu_top top;
        static struct cpu_group group;

        top.ct_count = 1;
        top.ct_group = &group;
        group.cg_mask = all_cpus;
        group.cg_count = mp_ncpus;
        group.cg_children = 0;
        smp_topology = &top;
#endif
}
#endif

/*
 * Setup the thread queues and initialize the topology based on MD
 * information.
 */
static void
sched_setup(void *dummy)
{
        struct tdq *tdq;

        tdq = TDQ_SELF();
#ifdef SMP
        /*
         * Initialize long-term cpu balancing algorithm.
         */
        callout_init(&balco, CALLOUT_MPSAFE);
        callout_init(&gbalco, CALLOUT_MPSAFE);
        sched_fake_topo();
        /*
         * Setup tdqs based on a topology configuration or vanilla SMP based
         * on mp_maxid.
         */
        if (smp_topology == NULL)
                sched_setup_smp();
        else
                sched_setup_topology();
        sched_balance(NULL);
#else
        tdq_setup(tdq);
        mtx_init(&tdq_lock, "sched lock", "sched lock", MTX_SPIN | MTX_RECURSE);
        tdq->tdq_lock = &tdq_lock;
#endif
        /*
         * To avoid divide-by-zero, set realstathz to a dummy value in case
         * sched_clock() is called before sched_initticks().
         */
        realstathz = hz;
        sched_slice = (realstathz/10);  /* ~100ms */
        tickincr = 1 << SCHED_TICK_SHIFT;

        /* Add thread0's load since it's running. */
        TDQ_LOCK(tdq);
        thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
        tdq_load_add(tdq, &td_sched0);
        TDQ_UNLOCK(tdq);
}

/*
 * This routine determines the tickincr after stathz and hz are set up.
 */
/* ARGSUSED */
static void
sched_initticks(void *dummy)
{
        int incr;

        realstathz = stathz ? stathz : hz;
        sched_slice = (realstathz/10);  /* ~100ms */

        /*
         * tickincr is shifted out by 10 to avoid rounding errors due to
         * hz not being evenly divisible by stathz on all platforms.
         */
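        /*
         * Worked example (illustrative values only; hz and stathz vary by
         * platform): with hz = 1000 and stathz = 128, the computation below
         * gives incr = (1000 << 10) / 128 = 8000, a fixed-point encoding of
         * hz/stathz = 7.8125 (8000 / 1024).
         */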
        incr = (hz << SCHED_TICK_SHIFT) / realstathz;
        /*
         * This does not work for values of stathz that are more than
         * 1 << SCHED_TICK_SHIFT * hz.  In practice this does not happen.
         */
        if (incr == 0)
                incr = 1;
        tickincr = incr;
#ifdef SMP
        /*
         * Set steal_thresh to log2(mp_ncpus) but no greater than 4.  This
         * prevents excess thrashing on large machines and excess idle on
         * smaller machines.
         */
        steal_thresh = min(ffs(mp_ncpus) - 1, 4);
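        /*
         * For example (pure arithmetic on the line above): with
         * mp_ncpus = 8, ffs(8) - 1 = 3, so steal_thresh becomes 3; with
         * mp_ncpus = 32 the raw value of 5 is capped at 4.
         */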
        affinity = SCHED_AFFINITY_DEFAULT;
#endif
}

/*
 * This is the core of the interactivity algorithm.  Determines a score based
 * on past behavior.  It is the ratio of sleep time to run time scaled to
 * a [0, 100] integer.  This is the voluntary sleep time of a process, which
 * differs from the cpu usage because it does not account for time spent
 * waiting on a run-queue.  Would be prettier if we had floating point.
 */
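/*
 * Worked example (assuming SCHED_INTERACT_HALF is the midpoint of the
 * [0, 100] scale described above, i.e. 50): a thread that has slept four
 * times as long as it has run takes the slptime > runtime branch below and
 * scores roughly 50 / 4 = 12 (interactive), while a thread that has run
 * four times as long as it has slept scores roughly 50 + (50 - 12) = 88
 * (a cpu hog).
 */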
static int
sched_interact_score(struct thread *td)
{
        struct td_sched *ts;
        int div;

        ts = td->td_sched;
        /*
         * The score is only needed if this is likely to be an interactive
         * task.  Don't go through the expense of computing it if there's
         * no chance.
         */
        if (sched_interact <= SCHED_INTERACT_HALF &&
            ts->ts_runtime >= ts->ts_slptime)
                return (SCHED_INTERACT_HALF);

        if (ts->ts_runtime > ts->ts_slptime) {
                div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
                return (SCHED_INTERACT_HALF +
                    (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
        }
        if (ts->ts_slptime > ts->ts_runtime) {
                div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
                return (ts->ts_runtime / div);
        }
        /* runtime == slptime */
        if (ts->ts_runtime)
                return (SCHED_INTERACT_HALF);

        /*
         * This can happen if slptime and runtime are 0.
         */
        return (0);

}

/*
 * Scale the scheduling priority according to the "interactivity" of this
 * process.
 */
static void
sched_priority(struct thread *td)
{
        int score;
        int pri;

        if (td->td_pri_class != PRI_TIMESHARE)
                return;
        /*
         * If the score is interactive we place the thread in the realtime
         * queue with a priority that is less than kernel and interrupt
         * priorities.  These threads are not subject to nice restrictions.
         *
         * Scores greater than this are placed on the normal timeshare queue
         * where the priority is partially decided by the most recent cpu
         * utilization and the rest is decided by nice value.
         */
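        /*
         * Illustrative mapping (assuming the default interactivity
         * threshold, sched_interact, of 30): a score of 0 maps to
         * PRI_MIN_REALTIME, a score of 15 lands roughly halfway through
         * the realtime range, and scores of 30 or more fall through to
         * the timeshare calculation in the else branch below.
         */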
        score = sched_interact_score(td);
        if (score < sched_interact) {
                pri = PRI_MIN_REALTIME;
                pri += ((PRI_MAX_REALTIME - PRI_MIN_REALTIME) / sched_interact)
                    * score;
                KASSERT(pri >= PRI_MIN_REALTIME && pri <= PRI_MAX_REALTIME,
                    ("sched_priority: invalid interactive priority %d score %d",
                    pri, score));
        } else {
                pri = SCHED_PRI_MIN;
                if (td->td_sched->ts_ticks)
                        pri += SCHED_PRI_TICKS(td->td_sched);
                pri += SCHED_PRI_NICE(td->td_proc->p_nice);
                KASSERT(pri >= PRI_MIN_TIMESHARE && pri <= PRI_MAX_TIMESHARE,
                    ("sched_priority: invalid priority %d: nice %d, "
                    "ticks %d ftick %d ltick %d tick pri %d",
                    pri, td->td_proc->p_nice, td->td_sched->ts_ticks,
                    td->td_sched->ts_ftick, td->td_sched->ts_ltick,
                    SCHED_PRI_TICKS(td->td_sched)));
        }
        sched_user_prio(td, pri);

        return;
}

/*
 * This routine enforces a maximum limit on the amount of scheduling history
 * kept.  It is called after either the slptime or runtime is adjusted.  This
 * function is ugly due to integer math.
 */
static void
sched_interact_update(struct thread *td)
{
        struct td_sched *ts;
        u_int sum;

        ts = td->td_sched;
        sum = ts->ts_runtime + ts->ts_slptime;
        if (sum < SCHED_SLP_RUN_MAX)
                return;
        /*
         * This only happens from two places:
         * 1) We have added an unusual amount of run time from fork_exit.
         * 2) We have added an unusual amount of sleep time from sched_sleep().
         */
        if (sum > SCHED_SLP_RUN_MAX * 2) {
                if (ts->ts_runtime > ts->ts_slptime) {
                        ts->ts_runtime = SCHED_SLP_RUN_MAX;
                        ts->ts_slptime = 1;
                } else {
                        ts->ts_slptime = SCHED_SLP_RUN_MAX;
                        ts->ts_runtime = 1;
                }
                return;
        }
        /*
         * If we have exceeded SCHED_SLP_RUN_MAX by more than 1/5th then the
         * 4/5 scaling below will not bring us back into range.  Dividing by
         * two here forces us back towards
         * [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX].
         */
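        /*
         * For example, a sum of 1.5 * SCHED_SLP_RUN_MAX takes the branch
         * below and is halved to 0.75 * SCHED_SLP_RUN_MAX, while a sum of
         * 1.15 * SCHED_SLP_RUN_MAX skips it and is scaled by 4/5 further
         * down to 0.92 * SCHED_SLP_RUN_MAX.
         */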
        if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
                ts->ts_runtime /= 2;
                ts->ts_slptime /= 2;
                return;
        }
        ts->ts_runtime = (ts->ts_runtime / 5) * 4;
        ts->ts_slptime = (ts->ts_slptime / 5) * 4;
}

/*
 * Scale back the interactivity history when a child thread is created.  The
 * history is inherited from the parent but the thread may behave totally
 * differently.  For example, a shell spawning a compiler process.  We want
 * to learn that the compiler is behaving badly very quickly.
 */
static void
sched_interact_fork(struct thread *td)
{
        int ratio;
        int sum;

        sum = td->td_sched->ts_runtime + td->td_sched->ts_slptime;
        if (sum > SCHED_SLP_RUN_FORK) {
                ratio = sum / SCHED_SLP_RUN_FORK;
                td->td_sched->ts_runtime /= ratio;
                td->td_sched->ts_slptime /= ratio;
        }
}

/*
 * Called from proc0_init() to setup the scheduler fields.
 */
void
schedinit(void)
{

        /*
         * Set up the scheduler specific parts of proc0.
         */
        proc0.p_sched = NULL; /* XXX */
        thread0.td_sched = &td_sched0;
        td_sched0.ts_ltick = ticks;
        td_sched0.ts_ftick = ticks;
        td_sched0.ts_thread = &thread0;
}

/*
 * This is only somewhat accurate since given many processes of the same
 * priority they will switch when their slices run out, which will be
 * at most sched_slice stathz ticks.
 */
int
sched_rr_interval(void)
{

        /* Convert sched_slice to hz */
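        /*
         * For example (typical but not guaranteed values): with stathz = 128,
         * sched_slice = 128 / 10 = 12 stathz ticks, so at hz = 1000 this
         * returns 1000 / (128 / 12) = 100 hz ticks, roughly 100ms.
         */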
        return (hz/(realstathz/sched_slice));
}

/*
 * Update the percent cpu tracking information when it is requested or
 * the total history exceeds the maximum.  We keep a sliding history of
 * tick counts that slowly decays.  This is less precise than the 4BSD
 * mechanism since it happens with less regular and frequent events.
 */
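/*
 * Sketch of the decay performed below: if the thread last ran within the
 * SCHED_TICK_TARG window, ts_ticks is rescaled so that the same rate of
 * ticks is expressed over a window exactly SCHED_TICK_TARG wide; if it has
 * not run within the window at all, the history is simply zeroed.
 */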
static void
sched_pctcpu_update(struct td_sched *ts)
{

        if (ts->ts_ticks == 0)
                return;
        if (ticks - (hz / 10) < ts->ts_ltick &&
            SCHED_TICK_TOTAL(ts) < SCHED_TICK_MAX)
                return;
        /*
         * Adjust counters and watermark for pctcpu calc.
         */
        if (ts->ts_ltick > ticks - SCHED_TICK_TARG)
                ts->ts_ticks = (ts->ts_ticks / (ticks - ts->ts_ftick)) *
                    SCHED_TICK_TARG;
        else
                ts->ts_ticks = 0;
        ts->ts_ltick = ticks;
        ts->ts_ftick = ts->ts_ltick - SCHED_TICK_TARG;
}

/*
 * Adjust the priority of a thread.  Move it to the appropriate run-queue
 * if necessary.  This is the back-end for several priority related
 * functions.
 */
static void
sched_thread_priority(struct thread *td, u_char prio)
{
        struct td_sched *ts;

        CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
            td, td->td_proc->p_comm, td->td_priority, prio, curthread,
            curthread->td_proc->p_comm);
        ts = td->td_sched;
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        if (td->td_priority == prio)
                return;

        if (TD_ON_RUNQ(td) && prio < td->td_priority) {
                /*
                 * If the priority has been elevated due to priority
                 * propagation, we may have to move ourselves to a new
                 * queue.  This could be optimized to not re-add in some
                 * cases.
                 */
                sched_rem(td);
                td->td_priority = prio;
                sched_add(td, SRQ_BORROWING);
        } else {
#ifdef SMP
                struct tdq *tdq;

                tdq = TDQ_CPU(ts->ts_cpu);
                if (prio < tdq->tdq_lowpri)
                        tdq->tdq_lowpri = prio;
#endif
                td->td_priority = prio;
        }
}

/*
 * Update a thread's priority when it is lent another thread's
 * priority.
 */
void
sched_lend_prio(struct thread *td, u_char prio)
{

        td->td_flags |= TDF_BORROWING;
        sched_thread_priority(td, prio);
}

/*
 * Restore a thread's priority when priority propagation is
 * over.  The prio argument is the minimum priority the thread
 * needs to have to satisfy other possible priority lending
 * requests.  If the thread's regular priority is less
 * important than prio, the thread will keep a priority boost
 * of prio.
 */
void
sched_unlend_prio(struct thread *td, u_char prio)
{
        u_char base_pri;

        if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
            td->td_base_pri <= PRI_MAX_TIMESHARE)
                base_pri = td->td_user_pri;
        else
                base_pri = td->td_base_pri;
        if (prio >= base_pri) {
                td->td_flags &= ~TDF_BORROWING;
                sched_thread_priority(td, base_pri);
        } else
                sched_lend_prio(td, prio);
}

/*
 * Standard entry for setting the priority to an absolute value.
 */
void
sched_prio(struct thread *td, u_char prio)
{
        u_char oldprio;

        /* First, update the base priority. */
        td->td_base_pri = prio;

        /*
         * If the thread is borrowing another thread's priority, don't
         * ever lower the priority.
         */
        if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
                return;

        /* Change the real priority. */
        oldprio = td->td_priority;
        sched_thread_priority(td, prio);

        /*
         * If the thread is on a turnstile, then let the turnstile update
         * its state.
         */
        if (TD_ON_LOCK(td) && oldprio != prio)
                turnstile_adjust(td, oldprio);
}

/*
 * Set the base user priority, does not affect current running priority.
 */
void
sched_user_prio(struct thread *td, u_char prio)
{
        u_char oldprio;

        td->td_base_user_pri = prio;
        if (td->td_flags & TDF_UBORROWING && td->td_user_pri <= prio)
                return;
        oldprio = td->td_user_pri;
        td->td_user_pri = prio;

        if (TD_ON_UPILOCK(td) && oldprio != prio)
                umtx_pi_adjust(td, oldprio);
}

void
sched_lend_user_prio(struct thread *td, u_char prio)
{
        u_char oldprio;

        td->td_flags |= TDF_UBORROWING;

        oldprio = td->td_user_pri;
        td->td_user_pri = prio;

        if (TD_ON_UPILOCK(td) && oldprio != prio)
                umtx_pi_adjust(td, oldprio);
}

void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
        u_char base_pri;

        base_pri = td->td_base_user_pri;
        if (prio >= base_pri) {
                td->td_flags &= ~TDF_UBORROWING;
                sched_user_prio(td, base_pri);
        } else
                sched_lend_user_prio(td, prio);
}

/*
 * Add the thread passed as 'newtd' to the run queue before selecting
 * the next thread to run.  This is only used for KSE.
 */
static void
sched_switchin(struct tdq *tdq, struct thread *td)
{
#ifdef SMP
        spinlock_enter();
        TDQ_UNLOCK(tdq);
        thread_lock(td);
        spinlock_exit();
        sched_setcpu(td->td_sched, TDQ_ID(tdq), SRQ_YIELDING);
#else
        td->td_lock = TDQ_LOCKPTR(tdq);
#endif
        tdq_add(tdq, td, SRQ_YIELDING);
        MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
}

/*
 * Handle migration from sched_switch().  This happens only for
 * cpu binding.
 */
static struct mtx *
sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
{
        struct tdq *tdn;

        tdn = TDQ_CPU(td->td_sched->ts_cpu);
#ifdef SMP
        /*
         * Do the lock dance required to avoid LOR.  We grab an extra
         * spinlock nesting to prevent preemption while we're
         * not holding either run-queue lock.
         */
        spinlock_enter();
        thread_block_switch(td);        /* This releases the lock on tdq. */
        TDQ_LOCK(tdn);
        tdq_add(tdn, td, flags);
        tdq_notify(td->td_sched);
        /*
         * After we unlock tdn the new cpu still can't switch into this
         * thread until we've unblocked it in cpu_switch().  The lock
         * pointers may match in the case of HTT cores.  Don't unlock here
         * or we can deadlock when the other CPU runs the IPI handler.
         */
        if (TDQ_LOCKPTR(tdn) != TDQ_LOCKPTR(tdq)) {
                TDQ_UNLOCK(tdn);
                TDQ_LOCK(tdq);
        }
        spinlock_exit();
#endif
        return (TDQ_LOCKPTR(tdn));
}

/*
 * Block a thread for switching.  Similar to thread_block() but does not
 * bump the spin count.
 */
static inline struct mtx *
thread_block_switch(struct thread *td)
{
        struct mtx *lock;

        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
        mtx_unlock_spin(lock);

        return (lock);
}

/*
 * Release a thread that was blocked with thread_block_switch().
 */
static inline void
thread_unblock_switch(struct thread *td, struct mtx *mtx)
{
        atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
            (uintptr_t)mtx);
}

/*
 * Switch threads.  This function has to handle threads coming in while
 * blocked for some reason, running, or idle.  It also must deal with
 * migrating a thread from one queue to another as running threads may
 * be assigned elsewhere via binding.
 */
void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
        struct tdq *tdq;
        struct td_sched *ts;
        struct mtx *mtx;
        int srqflag;
        int cpuid;

        THREAD_LOCK_ASSERT(td, MA_OWNED);

        cpuid = PCPU_GET(cpuid);
        tdq = TDQ_CPU(cpuid);
        ts = td->td_sched;
        mtx = td->td_lock;
#ifdef SMP
        ts->ts_rltick = ticks;
        if (newtd && newtd->td_priority < tdq->tdq_lowpri)
                tdq->tdq_lowpri = newtd->td_priority;
#endif
        td->td_lastcpu = td->td_oncpu;
        td->td_oncpu = NOCPU;
        td->td_flags &= ~TDF_NEEDRESCHED;
        td->td_owepreempt = 0;
        /*
         * The lock pointer in an idle thread should never change.  Reset it
         * to CAN_RUN as well.
         */
        if (TD_IS_IDLETHREAD(td)) {
                MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
                TD_SET_CAN_RUN(td);
        } else if (TD_IS_RUNNING(td)) {
                MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
                tdq_load_rem(tdq, ts);
                srqflag = (flags & SW_PREEMPT) ?
                    SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
                    SRQ_OURSELF|SRQ_YIELDING;
                if (ts->ts_cpu == cpuid)
                        tdq_add(tdq, td, srqflag);
                else
                        mtx = sched_switch_migrate(tdq, td, srqflag);
        } else {
                /* This thread must be going to sleep. */
                TDQ_LOCK(tdq);
                mtx = thread_block_switch(td);
                tdq_load_rem(tdq, ts);
        }
        /*
         * We enter here with the thread blocked and assigned to the
         * appropriate cpu run-queue or sleep-queue and with the current
         * thread-queue locked.
         */
        TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
        /*
         * If KSE assigned a new thread just add it here and let choosethread
         * select the best one.
         */
        if (newtd != NULL)
                sched_switchin(tdq, newtd);
        newtd = choosethread();
        /*
         * Call the MD code to switch contexts if necessary.
         */
        if (td != newtd) {
#ifdef HWPMC_HOOKS
                if (PMC_PROC_IS_USING_PMCS(td->td_proc))
                        PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif
                cpu_switch(td, newtd, mtx);
                /*
                 * We may return from cpu_switch on a different cpu.  However,
                 * we always return with td_lock pointing to the current cpu's
                 * run queue lock.
                 */
                cpuid = PCPU_GET(cpuid);
                tdq = TDQ_CPU(cpuid);
                TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
#ifdef HWPMC_HOOKS
                if (PMC_PROC_IS_USING_PMCS(td->td_proc))
                        PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
#endif
        } else
                thread_unblock_switch(td, mtx);
        /*
         * Assert that all went well and return.
         */
#ifdef SMP
        /* We should always get here with the lowest priority td possible */
        tdq->tdq_lowpri = td->td_priority;
#endif
        TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
        MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
        td->td_oncpu = cpuid;
}

/*
 * Adjust thread priorities as a result of a nice request.
 */
void
sched_nice(struct proc *p, int nice)
{
        struct thread *td;

        PROC_LOCK_ASSERT(p, MA_OWNED);
        PROC_SLOCK_ASSERT(p, MA_OWNED);

        p->p_nice = nice;
        FOREACH_THREAD_IN_PROC(p, td) {
thread_lock(td);
|
2006-10-26 21:42:22 +00:00
|
|
|
sched_priority(td);
|
2007-01-04 08:56:25 +00:00
|
|
|
sched_prio(td, td->td_base_user_pri);
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
thread_unlock(td);
|
2004-06-16 00:26:31 +00:00
|
|
|
}
|
2003-01-26 05:23:15 +00:00
|
|
|
}
/*
 * Record the sleep time for the interactivity scorer.
 */
void
sched_sleep(struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);

	td->td_slptick = ticks;
}

/*
 * Schedule a thread to resume execution and record how long it voluntarily
 * slept.  We also update the pctcpu, interactivity, and priority.
 */
void
sched_wakeup(struct thread *td)
{
	struct td_sched *ts;
	int slptick;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	/*
	 * If we slept for more than a tick update our interactivity and
	 * priority.
	 */
	slptick = td->td_slptick;
	td->td_slptick = 0;
	if (slptick && slptick != ticks) {
		u_int hzticks;

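		/*
		 * Shifting the hz tick count by SCHED_TICK_SHIFT puts the
		 * sleep interval on the same fixed-point scale as ts_runtime,
		 * so sched_interact_update() compares like units.
		 */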
		hzticks = (ticks - slptick) << SCHED_TICK_SHIFT;
		ts->ts_slptime += hzticks;
		sched_interact_update(td);
		sched_pctcpu_update(ts);
		sched_priority(td);
	}
	/* Reset the slice value after we sleep. */
	ts->ts_slice = sched_slice;
	sched_add(td, SRQ_BORING);
}

/*
 * Penalize the parent for creating a new child and initialize the child's
 * priority.
 */
void
sched_fork(struct thread *td, struct thread *child)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sched_fork_thread(td, child);
	/*
	 * Penalize the parent and child for forking.
	 */
	sched_interact_fork(child);
	sched_priority(child);
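	/*
	 * Charging the parent one tick of run time for the fork makes
	 * processes that fork frequently drift toward a less interactive
	 * score.
	 */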
	td->td_sched->ts_runtime += tickincr;
	sched_interact_update(td);
	sched_priority(td);
}

/*
 * Fork a new thread, may be within the same process.
 */
void
sched_fork_thread(struct thread *td, struct thread *child)
{
	struct td_sched *ts;
	struct td_sched *ts2;

	/*
	 * Initialize child.
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sched_newthread(child);
	child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
	ts = td->td_sched;
	ts2 = child->td_sched;
	ts2->ts_cpu = ts->ts_cpu;
	ts2->ts_runq = NULL;
	/*
	 * Grab our parent's cpu estimation information and priority.
	 */
	ts2->ts_ticks = ts->ts_ticks;
	ts2->ts_ltick = ts->ts_ltick;
	ts2->ts_ftick = ts->ts_ftick;
	child->td_user_pri = td->td_user_pri;
	child->td_base_user_pri = td->td_base_user_pri;
	/*
	 * And update interactivity score.
	 */
	ts2->ts_slptime = ts->ts_slptime;
	ts2->ts_runtime = ts->ts_runtime;
	ts2->ts_slice = 1;	/* Attempt to quickly learn interactivity. */
}

/*
 * Adjust the priority class of a thread.
 */
void
sched_class(struct thread *td, int class)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	if (td->td_pri_class == class)
		return;

#ifdef SMP
	/*
	 * On SMP if we're on the RUNQ we must adjust the transferable
	 * count because we could be changing to or from an interrupt
	 * class.
	 */
	if (TD_ON_RUNQ(td)) {
		struct tdq *tdq;

		tdq = TDQ_CPU(td->td_sched->ts_cpu);
		if (THREAD_CAN_MIGRATE(td)) {
			tdq->tdq_transferable--;
			tdq->tdq_group->tdg_transferable--;
		}
		td->td_pri_class = class;
		if (THREAD_CAN_MIGRATE(td)) {
			tdq->tdq_transferable++;
			tdq->tdq_group->tdg_transferable++;
		}
	}
#endif
	td->td_pri_class = class;
}

/*
 * Return some of the child's priority and interactivity to the parent.
 */
void
sched_exit(struct proc *p, struct thread *child)
{
	struct thread *td;

	CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	td = FIRST_THREAD_IN_PROC(p);
	sched_exit_thread(td, child);
}

/*
 * Penalize another thread for the time spent on this one.  This helps to
 * worsen the priority and interactivity of processes which schedule batch
 * jobs such as make.  This has little effect on the make process itself but
 * causes new processes spawned by it to receive worse scores immediately.
 */
void
sched_exit_thread(struct thread *td, struct thread *child)
{

	CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
	    child, child->td_proc->p_comm, child->td_priority);

#ifdef KSE
	/*
	 * KSE forks and exits so often that this penalty causes short-lived
	 * threads to always be non-interactive.  This causes mozilla to
	 * crawl under load.
	 */
	if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
		return;
#endif
	/*
	 * Give the child's runtime to the parent without returning the
	 * sleep time as a penalty to the parent.  This causes shells that
	 * launch expensive things to mark their children as expensive.
	 */
	thread_lock(td);
	td->td_sched->ts_runtime += child->td_sched->ts_runtime;
	sched_interact_update(td);
	sched_priority(td);
	thread_unlock(td);
}

/*
 * Fix priorities on return to user-space.  Priorities may be elevated due
 * to static priorities in msleep() or similar.
 */
void
sched_userret(struct thread *td)
{
	/*
	 * XXX we cheat slightly on the locking here to avoid locking in
	 * the usual case.  Setting td_priority here is essentially an
	 * incomplete workaround for not setting it properly elsewhere.
	 * Now that some interrupt handlers are threads, not setting it
	 * properly elsewhere can clobber it in the window between setting
	 * it here and returning to user mode, so don't waste time setting
	 * it perfectly here.
	 */
	KASSERT((td->td_flags & TDF_BORROWING) == 0,
	    ("thread with borrowed priority returning to userland"));
	if (td->td_priority != td->td_user_pri) {
		thread_lock(td);
		td->td_priority = td->td_user_pri;
		td->td_base_pri = td->td_user_pri;
		thread_unlock(td);
	}
}

/*
 * Handle a stathz tick.  This is really only relevant for timeshare
 * threads.
 */
void
sched_clock(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	tdq = TDQ_SELF();
	/*
	 * Advance the insert index once for each tick to ensure that all
	 * threads get a chance to run.
	 */
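	/*
	 * The timeshare queue is used as a circular calendar queue: threads
	 * are inserted relative to tdq_idx and removed starting at tdq_ridx,
	 * which only advances once its bucket drains, so a queued thread
	 * cannot be starved by later arrivals.
	 */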
	if (tdq->tdq_idx == tdq->tdq_ridx) {
		tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
		if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
			tdq->tdq_ridx = tdq->tdq_idx;
	}
	ts = td->td_sched;
	/*
	 * We only do slicing code for TIMESHARE threads.
	 */
	if (td->td_pri_class != PRI_TIMESHARE)
		return;
	/*
	 * We used a tick; charge it to the thread so that we can compute our
	 * interactivity.
	 */
	td->td_sched->ts_runtime += tickincr;
	sched_interact_update(td);
	/*
	 * We used up one time slice.
	 */
	if (--ts->ts_slice > 0)
		return;
	/*
	 * We're out of time, recompute priorities and requeue.
	 */
	sched_priority(td);
	td->td_flags |= TDF_NEEDRESCHED;
}

/*
 * Called once per hz tick.  Used for cpu utilization information.  This
 * is easier than trying to scale based on stathz.
 */
void
sched_tick(void)
{
	struct td_sched *ts;

	ts = curthread->td_sched;
	/* Adjust ticks for pctcpu */
	ts->ts_ticks += 1 << SCHED_TICK_SHIFT;
	ts->ts_ltick = ticks;
	/*
	 * Update if we've exceeded our desired tick threshold by over one
	 * second.
	 */
	if (ts->ts_ftick + SCHED_TICK_MAX < ts->ts_ltick)
		sched_pctcpu_update(ts);
}

/*
 * Return whether the current CPU has runnable tasks.  Used for in-kernel
 * cooperative idle threads.
 */
int
sched_runnable(void)
{
	struct tdq *tdq;
	int load;

	load = 1;

	tdq = TDQ_SELF();
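	/*
	 * The idle thread does not contribute to the load, so any load at
	 * all means there is work.  For any other caller the current thread
	 * itself accounts for one unit of load and is discounted.
	 */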
	if ((curthread->td_flags & TDF_IDLETD) != 0) {
		if (tdq->tdq_load > 0)
			goto out;
	} else
		if (tdq->tdq_load - 1 > 0)
			goto out;
	load = 0;
out:
	return (load);
}

/*
 * Choose the highest priority thread to run.  The thread is removed from
 * the run-queue while running however the load remains.  For SMP we set
 * the tdq in the global idle bitmask if it idles here.
 */
struct thread *
sched_choose(void)
{
#ifdef SMP
	struct tdq_group *tdg;
#endif
	struct td_sched *ts;
	struct tdq *tdq;

	tdq = TDQ_SELF();
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	ts = tdq_choose(tdq);
	if (ts) {
		tdq_runq_rem(tdq, ts);
		return (ts->ts_thread);
	}
#ifdef SMP
	/*
	 * We only set the idled bit when all of the cpus in the group are
	 * idle.  Otherwise we could get into a situation where a thread bounces
	 * back and forth between two idle cores on separate physical CPUs.
	 */
	tdg = tdq->tdq_group;
	tdg->tdg_idlemask |= PCPU_GET(cpumask);
	if (tdg->tdg_idlemask == tdg->tdg_cpumask)
		atomic_set_int(&tdq_idle, tdg->tdg_mask);
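	/*
	 * Nothing is runnable here, so the best priority this queue can
	 * offer is that of the idle thread.
	 */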
	tdq->tdq_lowpri = PRI_MAX_IDLE;
#endif
	return (PCPU_GET(idlethread));
}

/*
 * Set owepreempt if necessary.  Preemption never happens directly in ULE,
 * we always request it once we exit a critical section.
 */
static inline void
sched_setpreempt(struct thread *td)
{
	struct thread *ctd;
	int cpri;
	int pri;

	ctd = curthread;
	pri = td->td_priority;
	cpri = ctd->td_priority;
	if (td->td_priority < ctd->td_priority)
		curthread->td_flags |= TDF_NEEDRESCHED;
	if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
		return;
	/*
	 * Always preempt IDLE threads.  Otherwise only if the preempting
	 * thread is an ithread.
	 */
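	/*
	 * Lower numeric values are higher priorities here: the new thread
	 * must be at or above the preempt_thresh cutoff, or the running
	 * thread must be an idle-class thread, before we ask for preemption.
	 */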
	if (pri > preempt_thresh && cpri < PRI_MIN_IDLE)
		return;
	ctd->td_owepreempt = 1;
	return;
}

/*
 * Add a thread to a thread queue.  Initializes priority, slice, runq, and
 * adds it to the appropriate queue.  This is the internal function called
 * when the tdq is predetermined.
 */
void
tdq_add(struct tdq *tdq, struct thread *td, int flags)
{
	struct td_sched *ts;
	int class;
#ifdef SMP
	int cpumask;
#endif

	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	KASSERT((td->td_inhibitors == 0),
	    ("sched_add: trying to run inhibited thread"));
	KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
	    ("sched_add: bad thread state"));
	KASSERT(td->td_flags & TDF_INMEM,
	    ("sched_add: thread swapped out"));

	ts = td->td_sched;
	class = PRI_BASE(td->td_pri_class);
	TD_SET_RUNQ(td);
	if (ts->ts_slice == 0)
		ts->ts_slice = sched_slice;
	/*
	 * Pick the run queue based on priority.
	 */
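	/*
	 * Interrupt and realtime priorities share tdq_realtime, timeshare
	 * priorities go to the circular tdq_timeshare calendar queue, and
	 * idle-class priorities fall through to tdq_idle.
	 */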
	if (td->td_priority <= PRI_MAX_REALTIME)
		ts->ts_runq = &tdq->tdq_realtime;
	else if (td->td_priority <= PRI_MAX_TIMESHARE)
		ts->ts_runq = &tdq->tdq_timeshare;
	else
		ts->ts_runq = &tdq->tdq_idle;
#ifdef SMP
	cpumask = 1 << ts->ts_cpu;
	/*
	 * If we had been idle, clear our bit in the group and potentially
	 * the global bitmap.
	 */
	if ((class != PRI_IDLE && class != PRI_ITHD) &&
	    (tdq->tdq_group->tdg_idlemask & cpumask) != 0) {
		/*
		 * Check to see if our group is unidling, and if so, remove it
		 * from the global idle mask.
		 */
		if (tdq->tdq_group->tdg_idlemask ==
		    tdq->tdq_group->tdg_cpumask)
			atomic_clear_int(&tdq_idle, tdq->tdq_group->tdg_mask);
		/*
		 * Now remove ourselves from the group specific idle mask.
		 */
		tdq->tdq_group->tdg_idlemask &= ~cpumask;
	}
	if (td->td_priority < tdq->tdq_lowpri)
		tdq->tdq_lowpri = td->td_priority;
#endif
	tdq_runq_add(tdq, ts, flags);
	tdq_load_add(tdq, ts);
}

/*
 * Select the target thread queue and add a thread to it.  Request
 * preemption or IPI a remote processor if required.
 */
void
sched_add(struct thread *td, int flags)
{
	struct td_sched *ts;
	struct tdq *tdq;
#ifdef SMP
	int cpuid;
	int cpu;
#endif
	CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	/*
	 * Recalculate the priority before we select the target cpu or
	 * run-queue.
	 */
	if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_priority(td);
#ifdef SMP
	cpuid = PCPU_GET(cpuid);
	/*
	 * Pick the destination cpu and if it isn't ours transfer to the
	 * target cpu.
	 */
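	/*
	 * Migratable interrupt-priority threads stay on the current cpu,
	 * where the interrupt most likely fired; bound threads must use
	 * their assigned cpu; everything else goes through sched_pickcpu().
	 */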
	if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_MIGRATE(td))
		cpu = cpuid;
	else if (!THREAD_CAN_MIGRATE(td))
		cpu = ts->ts_cpu;
	else
		cpu = sched_pickcpu(ts, flags);
	tdq = sched_setcpu(ts, cpu, flags);
	tdq_add(tdq, td, flags);
	if (cpu != cpuid) {
		tdq_notify(ts);
		return;
	}
#else
	tdq = TDQ_SELF();
	TDQ_LOCK(tdq);
	/*
	 * Now that the thread is moving to the run-queue, set the lock
	 * to the scheduler's lock.
	 */
	thread_lock_set(td, TDQ_LOCKPTR(tdq));
	tdq_add(tdq, td, flags);
#endif
	if (!(flags & SRQ_YIELDING))
		sched_setpreempt(td);
}

/*
 * Remove a thread from a run-queue without running it.  This is used
 * when we're stealing a thread from a remote queue.  Otherwise all threads
 * exit by calling sched_exit_thread() and sched_throw() themselves.
 */
void
sched_rem(struct thread *td)
{
	struct tdq *tdq;
	struct td_sched *ts;

	CTR5(KTR_SCHED, "sched_rem: %p(%s) prio %d by %p(%s)",
	    td, td->td_proc->p_comm, td->td_priority, curthread,
	    curthread->td_proc->p_comm);
	ts = td->td_sched;
	tdq = TDQ_CPU(ts->ts_cpu);
	TDQ_LOCK_ASSERT(tdq, MA_OWNED);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	KASSERT(TD_ON_RUNQ(td),
	    ("sched_rem: thread not on run queue"));
	tdq_runq_rem(tdq, ts);
	tdq_load_rem(tdq, ts);
	TD_SET_CAN_RUN(td);
}

/*
 * Fetch cpu utilization information.  Updates on demand.
 */
fixpt_t
sched_pctcpu(struct thread *td)
{
	fixpt_t pctcpu;
	struct td_sched *ts;

	pctcpu = 0;
	ts = td->td_sched;
	if (ts == NULL)
		return (0);

	thread_lock(td);
	if (ts->ts_ticks) {
		int rtick;

		sched_pctcpu_update(ts);
		/* How many rtick per second ? */
		rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
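		/*
		 * Scale rtick/hz into the FSCALE-based fixed-point fraction
		 * that the rest of the kernel expects for %cpu values.
		 */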
|
|
|
|
pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
|
2003-01-26 05:23:15 +00:00
|
|
|
}
|
Commit 1/14 of sched_lock decomposition.
- Move all scheduler locking into the schedulers utilizing a technique
similar to solaris's container locking.
- A per-process spinlock is now used to protect the queue of threads,
thread count, suspension count, p_sflags, and other process
related scheduling fields.
- The new thread lock is actually a pointer to a spinlock for the
container that the thread is currently owned by. The container may
be a turnstile, sleepqueue, or run queue.
- thread_lock() is now used to protect access to thread related scheduling
fields. thread_unlock() unlocks the lock and thread_set_lock()
implements the transition from one lock to another.
- A new "blocked_lock" is used in cases where it is not safe to hold the
actual thread's lock yet we must prevent access to the thread.
- sched_throw() and sched_fork_exit() are introduced to allow the
schedulers to fix-up locking at these points.
- Add some minor infrastructure for optionally exporting scheduler
statistics that were invaluable in solving performance problems with
this patch. Generally these statistics allow you to differentiate
between different causes of context switches.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-04 23:50:30 +00:00
|
|
|
thread_unlock(td);
|
2003-01-26 05:23:15 +00:00
|
|
|
|
|
|
|
return (pctcpu);
|
|
|
|
}

/*
 * Bind a thread to a target cpu.
 */
void
sched_bind(struct thread *td, int cpu)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
	ts = td->td_sched;
	if (ts->ts_flags & TSF_BOUND)
		sched_unbind(td);
	ts->ts_flags |= TSF_BOUND;
#ifdef SMP
	sched_pin();
	if (PCPU_GET(cpuid) == cpu)
		return;
	ts->ts_cpu = cpu;
	/* When we return from mi_switch we'll be on the correct cpu. */
	mi_switch(SW_VOL, NULL);
#endif
}

/*
 * Release a bound thread.
 */
void
sched_unbind(struct thread *td)
{
	struct td_sched *ts;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	ts = td->td_sched;
	if ((ts->ts_flags & TSF_BOUND) == 0)
		return;
	ts->ts_flags &= ~TSF_BOUND;
#ifdef SMP
	sched_unpin();
#endif
}

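/*
 * Return whether the thread is currently bound to a single cpu.
 */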
int
sched_is_bound(struct thread *td)
{
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	return (td->td_sched->ts_flags & TSF_BOUND);
}

/*
 * Basic yield call.
 */
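/*
 * A timeshare thread is dropped to the bottom of the timeshare priority
 * range before the switch so that other runnable timeshare threads get
 * to run first.
 */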
void
sched_relinquish(struct thread *td)
{
	thread_lock(td);
	if (td->td_pri_class == PRI_TIMESHARE)
		sched_prio(td, PRI_MAX_TIMESHARE);
	SCHED_STAT_INC(switch_relinquish);
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
}

/*
 * Return the total system load.
 */
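/*
 * On SMP this is the sum of the per-group run-queue loads; on UP it is
 * the single run queue's system load count.
 */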
int
sched_load(void)
{
#ifdef SMP
	int total;
	int i;

	total = 0;
	for (i = 0; i <= tdg_maxid; i++)
		total += TDQ_GROUP(i)->tdg_load;
	return (total);
#else
	return (TDQ_SELF()->tdq_sysload);
#endif
}
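/*
 * Sizes used when allocating proc and thread structures; ULE appends
 * its per-thread state (struct td_sched) to each thread and keeps no
 * per-process scheduler data.
 */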
int
sched_sizeof_proc(void)
{
	return (sizeof(struct proc));
}

int
sched_sizeof_thread(void)
{
	return (sizeof(struct thread) + sizeof(struct td_sched));
}

/*
 * The actual idle process.
 */
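/*
 * On SMP, tdq_idled() gives the cpu a chance to steal work from another
 * run queue; the cpu is halted only when it reports that the cpu
 * remains idle.
 */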
void
sched_idletd(void *dummy)
{
	struct thread *td;
	struct tdq *tdq;

	td = curthread;
	tdq = TDQ_SELF();
	mtx_assert(&Giant, MA_NOTOWNED);
	/* ULE relies on preemption for idle interruption. */
	for (;;) {
#ifdef SMP
		if (tdq_idled(tdq))
			cpu_idle();
#else
		cpu_idle();
#endif
	}
}

/*
 * A CPU is entering for the first time or a thread is exiting.
 */
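/*
 * When td is NULL the cpu is coming up for the first time and there is
 * no previous thread to release; otherwise the exiting thread's load is
 * removed before switching away for good.
 */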
void
sched_throw(struct thread *td)
{
	struct tdq *tdq;

	tdq = TDQ_SELF();
	if (td == NULL) {
		/* Correct spinlock nesting and acquire the correct lock. */
		TDQ_LOCK(tdq);
		spinlock_exit();
	} else {
		MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
		tdq_load_rem(tdq, td->td_sched);
	}
	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
	PCPU_SET(switchtime, cpu_ticks());
	PCPU_SET(switchticks, ticks);
	cpu_throw(td, choosethread());	/* doesn't return */
}

/*
 * This is called from fork_exit().  Just acquire the correct locks and
 * let fork do the rest of the work.
 */
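/*
 * The run-queue lock is held across the switch into the new thread, so
 * the thread records itself as the lock owner and sets td_oncpu here
 * before the thread lock can be asserted or released on its behalf.
 */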
void
sched_fork_exit(struct thread *td)
{
	struct td_sched *ts;
	struct tdq *tdq;
	int cpuid;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with the scheduler lock held.
	 */
	cpuid = PCPU_GET(cpuid);
	tdq = TDQ_CPU(cpuid);
	ts = td->td_sched;
	if (TD_IS_IDLETHREAD(td))
		td->td_lock = TDQ_LOCKPTR(tdq);
	MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
	td->td_oncpu = cpuid;
	TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)td;
	THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
}

static SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0,
    "Scheduler");
SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
    "Scheduler name");
SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
    "Slice size for timeshare threads");
SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
    "Interactivity score threshold");
SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW, &preempt_thresh,
    0, "Min priority for preemption, lower priorities have greater precedence");
#ifdef SMP
SYSCTL_INT(_kern_sched, OID_AUTO, pick_pri, CTLFLAG_RW, &pick_pri, 0,
    "Pick the target cpu based on priority rather than load.");
SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
    "Number of hz ticks to keep thread affinity for");
SYSCTL_INT(_kern_sched, OID_AUTO, tryself, CTLFLAG_RW, &tryself, 0, "");
SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
    "Enables the long-term load balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, balance_secs, CTLFLAG_RW, &balance_secs, 0,
    "Average frequency in seconds to run the long-term balancer");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_htt, CTLFLAG_RW, &steal_htt, 0,
    "Steals work from another hyper-threaded core on idle");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
    "Attempts to steal work from other cores before idling");
SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
    "Minimum load on remote cpu before we'll steal");
SYSCTL_INT(_kern_sched, OID_AUTO, topology, CTLFLAG_RD, &topology, 0,
    "True when a topology has been specified by the MD code.");
#endif
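/*
 * The knobs above appear under the kern.sched sysctl tree and can be
 * inspected or tuned at run time, for example (illustrative only):
 *	sysctl kern.sched.name
 *	sysctl kern.sched.slice
 */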

/* ps compat. All cpu percentages from ULE are weighted. */
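/*
 * 4BSD exports a real %cpu decay constant through kern.ccpu; ULE has no
 * equivalent, so zero is published only to keep ps(1) and other
 * consumers working.
 */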
static int ccpu = 0;
SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");

#define KERN_SWITCH_INCLUDE 1
#include "kern/kern_switch.c"