From fa88511615b024b53c7c2db01bc4ed710e186390 Mon Sep 17 00:00:00 2001
From: Julian Elischer <julian@FreeBSD.org>
Date: Wed, 16 Jun 2004 00:26:31 +0000
Subject: [PATCH] Nice is a property of a process as a whole. I mistakenly
 moved it to the ksegrp when breaking up the process structure. Put it back
 in the proc structure.

---
 sys/fs/specfs/spec_vnops.c  |  4 +--
 sys/i386/ibcs2/ibcs2_misc.c |  4 +--
 sys/kern/init_main.c        |  2 +-
 sys/kern/kern_clock.c       |  2 +-
 sys/kern/kern_proc.c        |  2 +-
 sys/kern/kern_resource.c    | 44 +++++++-----------------------
 sys/kern/sched_4bsd.c       | 13 +++++----
 sys/kern/sched_ule.c        | 54 ++++++++++++++++++++-----------------
 sys/sys/proc.h              |  2 +-
 sys/sys/sched.h             |  2 +-
 sys/ufs/ffs/ffs_snapshot.c  |  8 +++---
 sys/vm/vm_glue.c            |  2 +-
 sys/vm/vm_pageout.c         |  5 +---
 13 files changed, 63 insertions(+), 81 deletions(-)

diff --git a/sys/fs/specfs/spec_vnops.c b/sys/fs/specfs/spec_vnops.c
index 9d53ca531180..a90233ee4a7d 100644
--- a/sys/fs/specfs/spec_vnops.c
+++ b/sys/fs/specfs/spec_vnops.c
@@ -470,11 +470,11 @@ spec_xstrategy(struct vnode *vp, struct buf *bp)
 	/*
 	 * Slow down disk requests for niced processes.
 	 */
-	if (doslowdown && td && td->td_ksegrp->kg_nice > 0) {
+	if (doslowdown && td && td->td_proc->p_nice > 0) {
 		mtx_lock(&strategy_mtx);
 		msleep(&strategy_mtx, &strategy_mtx,
 		    PPAUSE | PCATCH | PDROP, "ioslow",
-		    td->td_ksegrp->kg_nice);
+		    td->td_proc->p_nice);
 	}
 	/*
 	 * Collect statistics on synchronous and asynchronous read
diff --git a/sys/i386/ibcs2/ibcs2_misc.c b/sys/i386/ibcs2/ibcs2_misc.c
index c3edf1569e2c..4f5cdab6fa7b 100644
--- a/sys/i386/ibcs2/ibcs2_misc.c
+++ b/sys/i386/ibcs2/ibcs2_misc.c
@@ -951,10 +951,10 @@ ibcs2_nice(td, uap)
 
 	sa.which = PRIO_PROCESS;
 	sa.who = 0;
-	sa.prio = td->td_ksegrp->kg_nice + uap->incr;
+	sa.prio = td->td_proc->p_nice + uap->incr;
 	if ((error = setpriority(td, &sa)) != 0)
 		return EPERM;
-	td->td_retval[0] = td->td_ksegrp->kg_nice;
+	td->td_retval[0] = td->td_proc->p_nice;
 	return 0;
 }
 
diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c
index 30c0d86d3bce..757f8aeddf3d 100644
--- a/sys/kern/init_main.c
+++ b/sys/kern/init_main.c
@@ -382,8 +382,8 @@ proc0_init(void *dummy __unused)
 	p->p_flag = P_SYSTEM;
 	p->p_sflag = PS_INMEM;
 	p->p_state = PRS_NORMAL;
+	p->p_nice = NZERO;
 	td->td_state = TDS_RUNNING;
-	kg->kg_nice = NZERO;
 	kg->kg_pri_class = PRI_TIMESHARE;
 	kg->kg_user_pri = PUSER;
 	td->td_priority = PVM;
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 5d5628355d5e..d346e236efe2 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -392,7 +392,7 @@ statclock(frame)
 		if (p->p_flag & P_SA)
 			thread_statclock(1);
 		p->p_uticks++;
-		if (td->td_ksegrp->kg_nice > NZERO)
+		if (p->p_nice > NZERO)
 			cp_time[CP_NICE]++;
 		else
 			cp_time[CP_USER]++;
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 74fa5afd98b3..daf51b034fbf 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -740,6 +740,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
 		kp->ki_sflag = p->p_sflag;
 		kp->ki_swtime = p->p_swtime;
 		kp->ki_pid = p->p_pid;
+		kp->ki_nice = p->p_nice;
 		kg = td->td_ksegrp;
 		ke = td->td_kse;
 		bintime2timeval(&p->p_runtime, &tv);
@@ -751,7 +752,6 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
 		kp->ki_slptime = kg->kg_slptime;
 		kp->ki_pri.pri_user = kg->kg_user_pri;
 		kp->ki_pri.pri_class = kg->kg_pri_class;
-		kp->ki_nice = kg->kg_nice;
 
 		/* Things in the thread */
 		kp->ki_wchan = td->td_wchan;
diff --git a/sys/kern/kern_resource.c b/sys/kern/kern_resource.c
index dd9ad4dc1e2d..8881920ce024 100644
--- a/sys/kern/kern_resource.c
+++ b/sys/kern/kern_resource.c
@@ -88,7 +88,6 @@ getpriority(td, uap)
 	struct thread *td;
 	register struct getpriority_args *uap;
 {
-	struct ksegrp *kg;
 	struct proc *p;
 	int error, low;
 
@@ -98,16 +97,13 @@ getpriority(td, uap)
 
 	case PRIO_PROCESS:
 		if (uap->who == 0)
-			low = td->td_ksegrp->kg_nice;
+			low = td->td_proc->p_nice;
 		else {
 			p = pfind(uap->who);
 			if (p == NULL)
 				break;
 			if (p_cansee(td, p) == 0) {
-				FOREACH_KSEGRP_IN_PROC(p, kg) {
-					if (kg->kg_nice < low)
-						low = kg->kg_nice;
-				}
+				low = p->p_nice;
 			}
 			PROC_UNLOCK(p);
 		}
@@ -131,10 +127,8 @@ getpriority(td, uap)
 		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
 			PROC_LOCK(p);
 			if (!p_cansee(td, p)) {
-				FOREACH_KSEGRP_IN_PROC(p, kg) {
-					if (kg->kg_nice < low)
-						low = kg->kg_nice;
-				}
+				if (p->p_nice < low)
+					low = p->p_nice;
 			}
 			PROC_UNLOCK(p);
 		}
@@ -150,10 +144,8 @@ getpriority(td, uap)
 			PROC_LOCK(p);
 			if (!p_cansee(td, p) &&
 			    p->p_ucred->cr_uid == uap->who) {
-				FOREACH_KSEGRP_IN_PROC(p, kg) {
-					if (kg->kg_nice < low)
-						low = kg->kg_nice;
-				}
+				if (p->p_nice < low)
+					low = p->p_nice;
 			}
 			PROC_UNLOCK(p);
 		}
@@ -260,19 +252,13 @@ setpriority(td, uap)
 }
 
 /* 
- * Set "nice" for a process.  Doesn't really understand threaded processes
- * well but does try.  Has the unfortunate side effect of making all the NICE
- * values for a process's ksegrps the same.  This suggests that
- * NICE values should be stored as a process nice and deltas for the ksegrps.
- * (but not yet).
+ * Set "nice" for a (whole) process.
  */
 static int
 donice(struct thread *td, struct proc *p, int n)
 {
-	struct ksegrp *kg;
-	int error, low;
+	int error;
 
-	low = PRIO_MAX + 1;
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	if ((error = p_cansched(td, p)))
 		return (error);
@@ -280,20 +266,10 @@ donice(struct thread *td, struct proc *p, int n)
 		n = PRIO_MAX;
 	if (n < PRIO_MIN)
 		n = PRIO_MIN;
-	/* 
-	 * Only allow nicing if to more than the lowest nice.
-	 * E.g., for nices of 4,3,2 allow nice to 3 but not 1
-	 */
-	FOREACH_KSEGRP_IN_PROC(p, kg) {
-		if (kg->kg_nice < low)
-			low = kg->kg_nice;
-	}
- 	if (n < low && suser(td) != 0)
+ 	if (n <  p->p_nice && suser(td) != 0)
 		return (EACCES);
 	mtx_lock_spin(&sched_lock);
-	FOREACH_KSEGRP_IN_PROC(p, kg) {
-		sched_nice(kg, n);
-	}
+	sched_nice(p, n);
 	mtx_unlock_spin(&sched_lock);
 	return (0);
 }
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 4d3c939f4a3f..b339b5851043 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -439,7 +439,7 @@ resetpriority(struct ksegrp *kg)
 
 	if (kg->kg_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
-		    NICE_WEIGHT * (kg->kg_nice - PRIO_MIN);
+		    NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
 		newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
 		    PRI_MAX_TIMESHARE);
 		kg->kg_user_pri = newpriority;
@@ -583,13 +583,16 @@ sched_fork_thread(struct thread *td, struct thread *child)
 }
 
 void
-sched_nice(struct ksegrp *kg, int nice)
+sched_nice(struct proc *p, int nice)
 {
+	struct ksegrp *kg;
 
-	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	mtx_assert(&sched_lock, MA_OWNED);
-	kg->kg_nice = nice;
-	resetpriority(kg);
+	p->p_nice = nice;
+	FOREACH_KSEGRP_IN_PROC(p, kg) {
+		resetpriority(kg);
+	}
 }
 
 void
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index e83e7a704062..e7333fcc5fce 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -366,9 +366,9 @@ kseq_load_add(struct kseq *kseq, struct kse *ke)
 		CTR6(KTR_ULE,
 		    "Add kse %p to %p (slice: %d, pri: %d, nice: %d(%d))",
 		    ke, ke->ke_runq, ke->ke_slice, ke->ke_thread->td_priority,
-		    ke->ke_ksegrp->kg_nice, kseq->ksq_nicemin);
+		    ke->ke_proc->p_nice, kseq->ksq_nicemin);
 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
-		kseq_nice_add(kseq, ke->ke_ksegrp->kg_nice);
+		kseq_nice_add(kseq, ke->ke_proc->p_nice);
 }
 
 static void
@@ -388,7 +388,7 @@ kseq_load_rem(struct kseq *kseq, struct kse *ke)
 	kseq->ksq_load--;
 	ke->ke_runq = NULL;
 	if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
-		kseq_nice_rem(kseq, ke->ke_ksegrp->kg_nice);
+		kseq_nice_rem(kseq, ke->ke_proc->p_nice);
 }
 
 static void
@@ -929,7 +929,7 @@ sched_priority(struct ksegrp *kg)
 
 	pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
 	pri += SCHED_PRI_BASE;
-	pri += kg->kg_nice;
+	pri += kg->kg_proc->p_nice;
 
 	if (pri > PRI_MAX_TIMESHARE)
 		pri = PRI_MAX_TIMESHARE;
@@ -980,13 +980,13 @@ sched_slice(struct kse *ke)
 	if (!SCHED_INTERACTIVE(kg)) {
 		int nice;
 
-		nice = kg->kg_nice + (0 - kseq->ksq_nicemin);
+		nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
 		if (kseq->ksq_load_timeshare == 0 ||
-		    kg->kg_nice < kseq->ksq_nicemin)
+		    kg->kg_proc->p_nice < kseq->ksq_nicemin)
 			ke->ke_slice = SCHED_SLICE_MAX;
 		else if (nice <= SCHED_SLICE_NTHRESH)
 			ke->ke_slice = SCHED_SLICE_NICE(nice);
-		else if (kg->kg_nice == 0)
+		else if (kg->kg_proc->p_nice == 0)
 			ke->ke_slice = SCHED_SLICE_MIN;
 		else
 			ke->ke_slice = 0;
@@ -995,7 +995,7 @@ sched_slice(struct kse *ke)
 
 	CTR6(KTR_ULE,
 	    "Sliced %p(%d) (nice: %d, nicemin: %d, load: %d, interactive: %d)",
-	    ke, ke->ke_slice, kg->kg_nice, kseq->ksq_nicemin,
+	    ke, ke->ke_slice, kg->kg_proc->p_nice, kseq->ksq_nicemin,
 	    kseq->ksq_load_timeshare, SCHED_INTERACTIVE(kg));
 
 	return;
@@ -1167,29 +1167,35 @@ sched_switch(struct thread *td)
 }
 
 void
-sched_nice(struct ksegrp *kg, int nice)
+sched_nice(struct proc *p, int nice)
 {
+	struct ksegrp *kg;
 	struct kse *ke;
 	struct thread *td;
 	struct kseq *kseq;
 
-	PROC_LOCK_ASSERT(kg->kg_proc, MA_OWNED);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	mtx_assert(&sched_lock, MA_OWNED);
 	/*
 	 * We need to adjust the nice counts for running KSEs.
 	 */
-	if (kg->kg_pri_class == PRI_TIMESHARE)
-		FOREACH_KSE_IN_GROUP(kg, ke) {
-			if (ke->ke_runq == NULL)
-				continue;
-			kseq = KSEQ_CPU(ke->ke_cpu);
-			kseq_nice_rem(kseq, kg->kg_nice);
-			kseq_nice_add(kseq, nice);
+	FOREACH_KSEGRP_IN_PROC(p, kg) {
+		if (kg->kg_pri_class == PRI_TIMESHARE) {
+			FOREACH_KSE_IN_GROUP(kg, ke) {
+				if (ke->ke_runq == NULL)
+					continue;
+				kseq = KSEQ_CPU(ke->ke_cpu);
+				kseq_nice_rem(kseq, p->p_nice);
+				kseq_nice_add(kseq, nice);
+			}
 		}
-	kg->kg_nice = nice;
-	sched_priority(kg);
-	FOREACH_THREAD_IN_GROUP(kg, td)
-		td->td_flags |= TDF_NEEDRESCHED;
+	}
+	p->p_nice = nice;
+	FOREACH_KSEGRP_IN_PROC(p, kg) {
+		sched_priority(kg);
+		FOREACH_THREAD_IN_GROUP(kg, td)
+			td->td_flags |= TDF_NEEDRESCHED;
+	}
 }
 
 void
@@ -1246,6 +1252,7 @@ sched_fork(struct proc *p, struct proc *p1)
 
 	mtx_assert(&sched_lock, MA_OWNED);
 
+	p1->p_nice = p->p_nice;
 	sched_fork_ksegrp(FIRST_KSEGRP_IN_PROC(p), FIRST_KSEGRP_IN_PROC(p1));
 	sched_fork_kse(FIRST_KSE_IN_PROC(p), FIRST_KSE_IN_PROC(p1));
 	sched_fork_thread(FIRST_THREAD_IN_PROC(p), FIRST_THREAD_IN_PROC(p1));
@@ -1273,7 +1280,6 @@ sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child)
 	child->kg_slptime = kg->kg_slptime;
 	child->kg_runtime = kg->kg_runtime;
 	child->kg_user_pri = kg->kg_user_pri;
-	child->kg_nice = kg->kg_nice;
 	sched_interact_fork(child);
 	kg->kg_runtime += tickincr << 10;
 	sched_interact_update(kg);
@@ -1327,11 +1333,11 @@ sched_class(struct ksegrp *kg, int class)
 #endif
 		if (oclass == PRI_TIMESHARE) {
 			kseq->ksq_load_timeshare--;
-			kseq_nice_rem(kseq, kg->kg_nice);
+			kseq_nice_rem(kseq, kg->kg_proc->p_nice);
 		}
 		if (nclass == PRI_TIMESHARE) {
 			kseq->ksq_load_timeshare++;
-			kseq_nice_add(kseq, kg->kg_nice);
+			kseq_nice_add(kseq, kg->kg_proc->p_nice);
 		}
 	}
 
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index a418bec4db98..a7499d61830f 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -509,7 +509,6 @@ struct ksegrp {
 #define	kg_startcopy	kg_endzero
 	u_char		kg_pri_class;	/* (j) Scheduling class. */
 	u_char		kg_user_pri;	/* (j) User pri from estcpu and nice. */
-	signed char	kg_nice;	/* (c + j) Process "nice" value. */
 #define	kg_endcopy kg_numthreads
 	int		kg_numthreads;	/* (j) Num threads in total. */
 	int		kg_kses;	/* (j) Num KSEs in group. */
@@ -597,6 +596,7 @@ struct proc {
 	struct sysentvec *p_sysent;	/* (b) Syscall dispatch info. */
 	struct pargs	*p_args;	/* (c) Process arguments. */
 	rlim_t		p_cpulimit;	/* (j) Current CPU limit in seconds. */
+	signed char	p_nice;		/* (c + j) Process "nice" value. */
 /* End area that is copied on creation. */
 #define	p_endcopy	p_xstat
 
diff --git a/sys/sys/sched.h b/sys/sys/sched.h
index 9e29fb40d075..8a7e84e11f84 100644
--- a/sys/sys/sched.h
+++ b/sys/sys/sched.h
@@ -55,7 +55,7 @@ void	sched_fork(struct proc *p, struct proc *child);
 void	sched_class(struct ksegrp *kg, int class);
 void	sched_exit_ksegrp(struct ksegrp *kg, struct ksegrp *child);
 void	sched_fork_ksegrp(struct ksegrp *kg, struct ksegrp *child);
-void	sched_nice(struct ksegrp *kg, int nice);
+void	sched_nice(struct proc *p, int nice);
 
 /*
  * Threads are switched in and out, block on resources, have temporary
diff --git a/sys/ufs/ffs/ffs_snapshot.c b/sys/ufs/ffs/ffs_snapshot.c
index b11b8675381b..adfb41b45b1c 100644
--- a/sys/ufs/ffs/ffs_snapshot.c
+++ b/sys/ufs/ffs/ffs_snapshot.c
@@ -301,11 +301,11 @@ ffs_snapshot(mp, snapfile)
 	 *
 	 * Recind nice scheduling while running with the filesystem suspended.
 	 */
-	if (td->td_ksegrp->kg_nice > 0) {
+	if (td->td_proc->p_nice > 0) {
 		PROC_LOCK(td->td_proc);
 		mtx_lock_spin(&sched_lock);
-		saved_nice = td->td_ksegrp->kg_nice;
-		sched_nice(td->td_ksegrp, 0);
+		saved_nice = td->td_proc->p_nice;
+		sched_nice(td->td_proc, 0);
 		mtx_unlock_spin(&sched_lock);
 		PROC_UNLOCK(td->td_proc);
 	}
@@ -665,7 +665,7 @@ ffs_snapshot(mp, snapfile)
 	if (saved_nice > 0) {
 		PROC_LOCK(td->td_proc);
 		mtx_lock_spin(&sched_lock);
-		sched_nice(td->td_ksegrp, saved_nice);
+		sched_nice(td->td_proc, saved_nice);
 		mtx_unlock_spin(&sched_lock);
 		PROC_UNLOCK(td->td_proc);
 	}
diff --git a/sys/vm/vm_glue.c b/sys/vm/vm_glue.c
index b625615f9351..b34f6296f4bc 100644
--- a/sys/vm/vm_glue.c
+++ b/sys/vm/vm_glue.c
@@ -824,7 +824,7 @@ scheduler(dummy)
 				kg = td->td_ksegrp;
 				pri = p->p_swtime + kg->kg_slptime;
 				if ((p->p_sflag & PS_SWAPINREQ) == 0) {
-					pri -= kg->kg_nice * 8;
+					pri -= p->p_nice * 8;
 				}
 
 				/*
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index b7ac7a45b513..640770dab624 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1225,12 +1225,9 @@ vm_pageout_scan(int pass)
 		}
 		sx_sunlock(&allproc_lock);
 		if (bigproc != NULL) {
-			struct ksegrp *kg;
 			killproc(bigproc, "out of swap space");
 			mtx_lock_spin(&sched_lock);
-			FOREACH_KSEGRP_IN_PROC(bigproc, kg) {
-				sched_nice(kg, PRIO_MIN); /* XXXKSE ??? */
-			}
+			sched_nice(bigproc, PRIO_MIN);
 			mtx_unlock_spin(&sched_lock);
 			PROC_UNLOCK(bigproc);
 			wakeup(&cnt.v_free_count);