From 8a9d4d98b174c92c050fb4b4d3f54a4edc631291 Mon Sep 17 00:00:00 2001
From: Bruce Evans
Date: Sat, 27 Nov 1999 12:32:27 +0000
Subject: [PATCH] Moved scheduling-related code to kern_synch.c so that it is
 easier to fix and extend. The new function containing the code is named
 schedclock() as in NetBSD, but it has slightly different semantics (it
 already handles incrementation of p->p_cpticks, and it should handle any
 calling frequency).

Agreed with in principle by: dufault
---
 sys/amd64/amd64/mp_machdep.c |  9 +--------
 sys/amd64/amd64/mptable.c    |  9 +--------
 sys/amd64/include/mptable.h  |  9 +--------
 sys/i386/i386/mp_machdep.c   |  9 +--------
 sys/i386/i386/mptable.c      |  9 +--------
 sys/i386/include/mptable.h   |  9 +--------
 sys/kern/kern_clock.c        | 22 +---------------------
 sys/kern/kern_synch.c        | 26 ++++++++++++++++++++++++++
 sys/kern/kern_tc.c           | 22 +---------------------
 sys/kern/subr_smp.c          |  9 +--------
 sys/sys/proc.h               |  1 +
 11 files changed, 36 insertions(+), 98 deletions(-)

diff --git a/sys/amd64/amd64/mp_machdep.c b/sys/amd64/amd64/mp_machdep.c
index d1019733b41b..b565ceee45e9 100644
--- a/sys/amd64/amd64/mp_machdep.c
+++ b/sys/amd64/amd64/mp_machdep.c
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/amd64/amd64/mptable.c b/sys/amd64/amd64/mptable.c
index d1019733b41b..b565ceee45e9 100644
--- a/sys/amd64/amd64/mptable.c
+++ b/sys/amd64/amd64/mptable.c
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/amd64/include/mptable.h b/sys/amd64/include/mptable.h
index d1019733b41b..b565ceee45e9 100644
--- a/sys/amd64/include/mptable.h
+++ b/sys/amd64/include/mptable.h
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index d1019733b41b..b565ceee45e9 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/i386/i386/mptable.c b/sys/i386/i386/mptable.c
index d1019733b41b..b565ceee45e9 100644
--- a/sys/i386/i386/mptable.c
+++ b/sys/i386/i386/mptable.c
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/i386/include/mptable.h b/sys/i386/include/mptable.h
index d1019733b41b..b565ceee45e9 100644
--- a/sys/i386/include/mptable.h
+++ b/sys/i386/include/mptable.h
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index 2d8a3506e17a..a8d492df2320 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -446,28 +446,8 @@ statclock(frame)
 	 * programs: the amount of time in each cpu state.
 	 */
 
-	/*
-	 * We adjust the priority of the current process. The priority of
-	 * a process gets worse as it accumulates CPU time. The cpu usage
-	 * estimator (p_estcpu) is increased here. The formula for computing
-	 * priorities (in kern_synch.c) will compute a different value each
-	 * time p_estcpu increases by 4. The cpu usage estimator ramps up
-	 * quite quickly when the process is running (linearly), and decays
-	 * away exponentially, at a rate which is proportionally slower when
-	 * the system is busy. The basic principal is that the system will
-	 * 90% forget that the process used a lot of CPU time in 5 * loadav
-	 * seconds. This causes the system to favor processes which haven't
-	 * run much recently, and to round-robin among other processes.
-	 */
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 4811669f79b5..f14980bc9daf 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -899,3 +899,29 @@ sched_setup(dummy)
 	schedcpu(NULL);
 }
 
+/*
+ * We adjust the priority of the current process. The priority of
+ * a process gets worse as it accumulates CPU time. The cpu usage
+ * estimator (p_estcpu) is increased here. The formula for computing
+ * priorities (in kern_synch.c) will compute a different value each
+ * time p_estcpu increases by 4. The cpu usage estimator ramps up
+ * quite quickly when the process is running (linearly), and decays
+ * away exponentially, at a rate which is proportionally slower when
+ * the system is busy. The basic principal is that the system will
+ * 90% forget that the process used a lot of CPU time in 5 * loadav
+ * seconds. This causes the system to favor processes which haven't
+ * run much recently, and to round-robin among other processes.
+ */
+void
+schedclock(p)
+	struct proc *p;
+{
+	p->p_cpticks++;
+	if (++p->p_estcpu == 0)
+		p->p_estcpu--;
+	if ((p->p_estcpu & 3) == 0) {
+		resetpriority(p);
+		if (p->p_priority >= PUSER)
+			p->p_priority = p->p_usrpri;
+	}
+}
diff --git a/sys/kern/kern_tc.c b/sys/kern/kern_tc.c
index 2d8a3506e17a..a8d492df2320 100644
--- a/sys/kern/kern_tc.c
+++ b/sys/kern/kern_tc.c
@@ -446,28 +446,8 @@ statclock(frame)
 	 * programs: the amount of time in each cpu state.
 	 */
 
-	/*
-	 * We adjust the priority of the current process. The priority of
-	 * a process gets worse as it accumulates CPU time. The cpu usage
-	 * estimator (p_estcpu) is increased here. The formula for computing
-	 * priorities (in kern_synch.c) will compute a different value each
-	 * time p_estcpu increases by 4. The cpu usage estimator ramps up
-	 * quite quickly when the process is running (linearly), and decays
-	 * away exponentially, at a rate which is proportionally slower when
-	 * the system is busy. The basic principal is that the system will
-	 * 90% forget that the process used a lot of CPU time in 5 * loadav
-	 * seconds. This causes the system to favor processes which haven't
-	 * run much recently, and to round-robin among other processes.
-	 */
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index d1019733b41b..b565ceee45e9 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -2323,14 +2323,7 @@ forwarded_statclock(int id, int pscnt, int *astmap)
 			cp_time[CP_INTR]++;
 	}
 	if (p != NULL) {
-		p->p_cpticks++;
-		if (++p->p_estcpu == 0)
-			p->p_estcpu--;
-		if ((p->p_estcpu & 3) == 0) {
-			resetpriority(p);
-			if (p->p_priority >= PUSER)
-				p->p_priority = p->p_usrpri;
-		}
+		schedclock(p);
 
 		/* Update resource usage integrals and maximums. */
 		if ((pstats = p->p_stats) != NULL &&
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index bd68a739f0b8..de6ccca240a6 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -403,6 +403,7 @@ void	procinit __P((void));
 int	p_trespass __P((struct proc *p1, struct proc *p2));
 void	resetpriority __P((struct proc *));
 int	roundrobin_interval __P((void));
+void	schedclock __P((struct proc *));
 void	setrunnable __P((struct proc *));
 void	setrunqueue __P((struct proc *));
 void	sleepinit __P((void));
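
The following standalone sketch is not part of the commit; it mimics the schedclock() logic introduced above so the tick-accounting behavior can be observed in userland. It shows p_estcpu growing by one per statclock tick, saturating instead of wrapping, and the priority being recomputed only when (p_estcpu & 3) == 0, i.e. on every fourth increment. The struct fake_proc layout, the PUSER value, and reset_priority_stub() are illustrative stand-ins for the kernel's struct proc, PUSER, and resetpriority(), not their real definitions.

/*
 * Minimal userland sketch of the schedclock() heuristic (assumptions:
 * fake_proc, PUSER value, and reset_priority_stub() are stand-ins).
 */
#include <stdio.h>

#define PUSER	50			/* illustrative user-priority floor */

struct fake_proc {
	unsigned char	p_estcpu;	/* cpu usage estimator */
	unsigned int	p_cpticks;	/* statclock ticks of cpu time */
	int		p_priority;	/* current priority */
	int		p_usrpri;	/* user-mode priority */
};

/* Stand-in for resetpriority(): priority worsens as the estimator grows. */
static void
reset_priority_stub(struct fake_proc *p)
{
	p->p_usrpri = PUSER + p->p_estcpu / 4;
}

static void
schedclock_sketch(struct fake_proc *p)
{
	p->p_cpticks++;
	if (++p->p_estcpu == 0)		/* saturate instead of wrapping to 0 */
		p->p_estcpu--;
	if ((p->p_estcpu & 3) == 0) {	/* recompute on every 4th increment */
		reset_priority_stub(p);
		if (p->p_priority >= PUSER)
			p->p_priority = p->p_usrpri;
	}
}

int
main(void)
{
	struct fake_proc p = { 0, 0, PUSER, PUSER };
	int tick;

	for (tick = 1; tick <= 12; tick++) {
		schedclock_sketch(&p);
		printf("tick %2d: estcpu=%3u priority=%3d\n",
		    tick, (unsigned)p.p_estcpu, p.p_priority);
	}
	return (0);
}

Compiled with any C compiler, the sketch prints one line per simulated tick; the priority value only changes on ticks 4, 8, and 12, which is the "different value each time p_estcpu increases by 4" behavior the moved comment describes.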