From d7f687fc9b6513875325f8aad4c9a92444984b96 Mon Sep 17 00:00:00 2001
From: Jeff Roberson
Date: Sun, 2 Mar 2008 07:39:22 +0000
Subject: [PATCH] Add cpuset, an api for thread to cpu binding and cpu resource grouping and assignment.

- Add a reference to a struct cpuset in each thread that is inherited from
  the thread that created it.
- Release the reference when the thread is destroyed.
- Add prototypes for syscalls and macros for manipulating cpusets in
  sys/cpuset.h
- Add syscalls to create, get, and set new numbered cpusets:
  cpuset(), cpuset_{get,set}id()
- Add syscalls for getting and setting affinity masks for cpusets or
  individual threads: cpuset_{get,set}affinity()
- Add types for the 'level' and 'which' parameters for the cpuset.  This
  will permit expansion of the api to cover cpu masks for other objects
  identifiable with an id_t integer.  For example, IRQs and Jails may be
  coming soon.
- The root set 0 contains all valid cpus.  All threads initially belong to
  cpuset 1.  This permits migrating all threads off of certain cpus to
  reserve them for special applications.

Sponsored by:	Nokia
Discussed with:	arch, rwatson, brooks, davidxu, deischen
Reviewed by:	antoine
---
 lib/libc/sys/Symbol.map  |  15 +
 sys/conf/files           |   1 +
 sys/kern/init_main.c     |   2 +
 sys/kern/kern_cpuset.c   | 907 +++++++++++++++++++++++++++++++++++++++
 sys/kern/kern_thread.c   |   6 +-
 sys/kern/syscalls.master |  13 +
 sys/sys/_types.h         |   3 +
 sys/sys/cpuset.h         | 146 ++++++
 sys/sys/proc.h           |   3 +-
 sys/sys/types.h          |   4 +
 10 files changed, 1098 insertions(+), 2 deletions(-)
 create mode 100644 sys/kern/kern_cpuset.c
 create mode 100644 sys/sys/cpuset.h

diff --git a/lib/libc/sys/Symbol.map b/lib/libc/sys/Symbol.map index 59d5190087f0..7ca8ff3d99f1 100644 --- a/lib/libc/sys/Symbol.map +++ b/lib/libc/sys/Symbol.map @@ -66,6 +66,11 @@ FBSD_1.0 { clock_settime; close; connect; + cpuset; + cpuset_getid; + cpuset_setid; + cpuset_getaffinity; + cpuset_setaffinity; dup; dup2; eaccess; @@ -450,6 +455,16 @@ FBSDprivate_1.0 { __sys_close; _connect; __sys_connect; + __cpuset; + __sys_cpuset; + __cpuset_getid; + __sys_cpuset_getid; + __cpuset_setid; + __sys_cpuset_setid; + __cpuset_getaffinity; + __sys_cpuset_getaffinity; + __cpuset_setaffinity; + __sys_cpuset_setaffinity; _dup; __sys_dup; _dup2; diff --git a/sys/conf/files b/sys/conf/files index 59640aefe1b9..2f5a3585f8ad 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -1440,6 +1440,7 @@ kern/kern_clock.c standard kern/kern_condvar.c standard kern/kern_conf.c standard kern/kern_cpu.c standard +kern/kern_cpuset.c standard kern/kern_context.c standard kern/kern_descrip.c standard kern/kern_environment.c standard diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 299acbecf75a..eba81cb19945 100644 --- a/sys/kern/init_main.c +++ b/sys/kern/init_main.c @@ -73,6 +73,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include @@ -430,6 +431,7 @@ proc0_init(void *dummy __unused) td->td_base_pri = PUSER; td->td_oncpu = 0; td->td_flags = TDF_INMEM|TDP_KTHREAD; + td->td_cpuset = cpuset_thread0(); p->p_peers = 0; p->p_leader = p; diff --git a/sys/kern/kern_cpuset.c b/sys/kern/kern_cpuset.c new file mode 100644 index 000000000000..b262dc12593b --- /dev/null +++ b/sys/kern/kern_cpuset.c @@ -0,0 +1,907 @@ +/*- + * Copyright (c) 2008, Jeffrey Roberson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * cpusets provide a mechanism for creating and manipulating sets of + * processors for the purpose of constraining the scheduling of threads to + * specific processors. + * + * Each process belongs to an identified set, by default this is set 1. Each + * thread may further restrict the cpus it may run on to a subset of this + * named set. This creates an anonymous set which other threads and processes + * may not join by number. + * + * The named set is referred to herein as the 'base' set to avoid ambiguity. + * This set is usually a child of a 'root' set while the anonymous set may + * simply be referred to as a mask. In the syscall api these are referred to + * as the ROOT, CPUSET, and MASK levels where CPUSET is called 'base' here. + * + * Threads inherit their set from their creator whether it be anonymous or + * not. This means that anonymous sets are immutable because they may be + * shared. To modify an anonymous set a new set is created with the desired + * mask and the same parent as the existing anonymous set. This gives the + * illusion of each thread having a private mask.A + * + * Via the syscall apis a user may ask to retrieve or modify the root, base, + * or mask that is discovered via a pid, tid, or setid. Modifying a set + * modifies all numbered and anonymous child sets to comply with the new mask. + * Modifying a pid or tid's mask applies only to that tid but must still + * exist within the assigned parent set. + * + * A thread may not be assigned to a a group seperate from other threads in + * the process. This is to remove ambiguity when the setid is queried with + * a pid argument. There is no other technical limitation. + * + * This somewhat complex arrangement is intended to make it easy for + * applications to query available processors and bind their threads to + * specific processors while also allowing administrators to dynamically + * reprovision by changing sets which apply to groups of processes. + * + * A simple application should not concern itself with sets at all and + * rather apply masks to its own threads via CPU_WHICH_TID and a -1 id + * meaning 'curthread'. 
It may query availble cpus for that tid with a + * getaffinity call using (CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, ...). + */ +static uma_zone_t cpuset_zone; +static struct mtx cpuset_lock; +static struct setlist cpuset_ids; +struct cpuset *cpuset_zero; +static struct unrhdr *cpuset_unr; + +/* + * Acquire a reference to a cpuset, all pointers must be tracked with refs. + */ +struct cpuset * +cpuset_ref(struct cpuset *set) +{ + + refcount_acquire(&set->cs_ref); + return (set); +} + +/* + * Release a reference in a context where it is safe to allocte. + */ +void +cpuset_rel(struct cpuset *set) +{ + cpusetid_t id; + + if (refcount_release(&set->cs_ref) == 0) + return; + mtx_lock_spin(&cpuset_lock); + LIST_REMOVE(set, cs_siblings); + id = set->cs_id; + if (id != CPUSET_INVALID) + LIST_REMOVE(set, cs_link); + mtx_unlock_spin(&cpuset_lock); + cpuset_rel(set->cs_parent); + uma_zfree(cpuset_zone, set); + if (id != CPUSET_INVALID) + free_unr(cpuset_unr, id); +} + +/* + * Deferred release must be used when in a context that is not safe to + * allocate/free. This places any unreferenced sets on the list 'head'. + */ +static void +cpuset_rel_defer(struct setlist *head, struct cpuset *set) +{ + + if (refcount_release(&set->cs_ref) == 0) + return; + mtx_lock_spin(&cpuset_lock); + LIST_REMOVE(set, cs_siblings); + if (set->cs_id != CPUSET_INVALID) + LIST_REMOVE(set, cs_link); + LIST_INSERT_HEAD(head, set, cs_link); + mtx_unlock_spin(&cpuset_lock); +} + +/* + * Complete a deferred release. Removes the set from the list provided to + * cpuset_rel_defer. + */ +static void +cpuset_rel_complete(struct cpuset *set) +{ + LIST_REMOVE(set, cs_link); + cpuset_rel(set->cs_parent); + uma_zfree(cpuset_zone, set); +} + +/* + * Find a set based on an id. Returns it with a ref. + */ +static struct cpuset * +cpuset_lookup(cpusetid_t setid) +{ + struct cpuset *set; + + if (setid == CPUSET_INVALID) + return (NULL); + mtx_lock_spin(&cpuset_lock); + LIST_FOREACH(set, &cpuset_ids, cs_link) + if (set->cs_id == setid) + break; + if (set) + cpuset_ref(set); + mtx_unlock_spin(&cpuset_lock); + return (set); +} + +/* + * Create a set in the space provided in 'set' with the provided parameters. + * The set is returned with a single ref. May return EDEADLK if the set + * will have no valid cpu based on restrictions from the parent. + */ +static int +_cpuset_create(struct cpuset *set, struct cpuset *parent, cpuset_t *mask, + cpusetid_t id) +{ + int error; + + error = 0; + CPU_COPY(mask, &set->cs_mask); + LIST_INIT(&set->cs_children); + refcount_init(&set->cs_ref, 1); + set->cs_flags = 0; + mtx_lock_spin(&cpuset_lock); + CPU_AND(mask, &parent->cs_mask); + if (!CPU_EMPTY(mask)) { + set->cs_id = id; + set->cs_parent = cpuset_ref(parent); + LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings); + if (set->cs_id != CPUSET_INVALID) + LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); + } else + error = EDEADLK; + mtx_unlock_spin(&cpuset_lock); + + return (error); +} + +/* + * Create a new non-anonymous set with the requested parent and mask. May + * return failures if the mask is invalid or a new number can not be + * allocated. 
+ */ +static int +cpuset_create(struct cpuset **setp, struct cpuset *parent, cpuset_t *mask) +{ + struct cpuset *set; + cpusetid_t id; + int error; + + id = alloc_unr(cpuset_unr); + if (id == -1) + return (ENFILE); + *setp = set = uma_zalloc(cpuset_zone, M_WAITOK); + error = _cpuset_create(set, parent, mask, id); + if (error == 0) + return (0); + free_unr(cpuset_unr, id); + uma_zfree(cpuset_zone, set); + + return (error); +} + +/* + * Recursively check for errors that would occur from applying mask to + * the tree of sets starting at 'set'. Checks for sets that would become + * empty as well as RDONLY flags. + */ +static int +cpuset_testupdate(struct cpuset *set, cpuset_t *mask) +{ + struct cpuset *nset; + cpuset_t newmask; + int error; + + mtx_assert(&cpuset_lock, MA_OWNED); + if (set->cs_flags & CPU_SET_RDONLY) + return (EPERM); + error = 0; + CPU_COPY(&set->cs_mask, &newmask); + CPU_AND(&newmask, mask); + if (CPU_EMPTY(&newmask)) + return (EDEADLK); + LIST_FOREACH(nset, &set->cs_children, cs_siblings) + if ((error = cpuset_testupdate(nset, &newmask)) != 0) + break; + return (error); +} + +/* + * Applies the mask 'mask' without checking for empty sets or permissions. + */ +static void +cpuset_update(struct cpuset *set, cpuset_t *mask) +{ + struct cpuset *nset; + + mtx_assert(&cpuset_lock, MA_OWNED); + CPU_AND(&set->cs_mask, mask); + LIST_FOREACH(nset, &set->cs_children, cs_siblings) + cpuset_update(nset, &set->cs_mask); + + return; +} + +/* + * Modify the set 'set' to use a copy of the mask provided. Apply this new + * mask to restrict all children in the tree. Checks for validity before + * applying the changes. + */ +static int +cpuset_modify(struct cpuset *set, cpuset_t *mask) +{ + int error; + + error = suser(curthread); + if (error) + return (error); + mtx_lock_spin(&cpuset_lock); + error = cpuset_testupdate(set, mask); + if (error) + goto out; + cpuset_update(set, mask); + CPU_COPY(mask, &set->cs_mask); +out: + mtx_unlock_spin(&cpuset_lock); + + return (error); +} + +/* + * Walks up the tree from 'set' to find the root. Returns the root + * referenced. + */ +static struct cpuset * +cpuset_root(struct cpuset *set) +{ + + mtx_lock_spin(&cpuset_lock); + for (; set->cs_parent != NULL; set = set->cs_parent) + if (set->cs_flags & CPU_SET_ROOT) + break; + cpuset_ref(set); + mtx_unlock_spin(&cpuset_lock); + + return (set); +} + +/* + * Find the first non-anonymous set starting from 'set'. Returns this set + * referenced. May return the passed in set with an extra ref if it is + * not anonymous. + */ +static struct cpuset * +cpuset_base(struct cpuset *set) +{ + + mtx_lock_spin(&cpuset_lock); + if (set->cs_id == CPUSET_INVALID) + set = set->cs_parent; + cpuset_ref(set); + mtx_unlock_spin(&cpuset_lock); + + return (set); +} + +/* + * Resolve the 'which' parameter of several cpuset apis. + * + * For WHICH_PID and WHICH_TID return a locked proc and valid proc/tid. Also + * checks for permission via p_cansched(). + * + * For WHICH_SET returns a valid set with a new reference. + * + * -1 may be supplied for any argument to mean the current proc/thread or + * the base set of the current thread. May fail with ESRCH/EPERM. 
+ */ +static int +cpuset_which(cpuwhich_t which, id_t id, struct proc **pp, struct thread **tdp, + struct cpuset **setp) +{ + struct cpuset *set; + struct thread *td; + struct proc *p; + int error; + + *pp = p = NULL; + *tdp = td = NULL; + *setp = set = NULL; + switch (which) { + case CPU_WHICH_PID: + if (id == -1) { + PROC_LOCK(curproc); + p = curproc; + break; + } + if ((p = pfind(id)) == NULL) + return (ESRCH); + break; + case CPU_WHICH_TID: + if (id == -1) { + PROC_LOCK(curproc); + p = curproc; + td = curthread; + break; + } + sx_slock(&allproc_lock); + FOREACH_PROC_IN_SYSTEM(p) { + PROC_LOCK(p); + PROC_SLOCK(p); + FOREACH_THREAD_IN_PROC(p, td) + if (td->td_tid == id) + break; + PROC_SUNLOCK(p); + if (td != NULL) + break; + PROC_UNLOCK(p); + } + sx_sunlock(&allproc_lock); + if (td == NULL) + return (ESRCH); + break; + case CPU_WHICH_CPUSET: + if (id == -1) { + thread_lock(curthread); + set = cpuset_base(curthread->td_cpuset); + thread_unlock(curthread); + } else + set = cpuset_lookup(id); + if (set) { + *setp = set; + return (0); + } + return (ESRCH); + default: + return (EINVAL); + } + error = p_cansched(curthread, p); + if (error) { + PROC_UNLOCK(p); + return (error); + } + if (td == NULL) + td = FIRST_THREAD_IN_PROC(p); + *pp = p; + *tdp = td; + return (0); +} + +/* + * Create an anonymous set with the provided mask in the space provided by + * 'fset'. If the passed in set is anonymous we use its parent otherwise + * the new set is a child of 'set'. + */ +static int +cpuset_shadow(struct cpuset *set, struct cpuset *fset, cpuset_t *mask) +{ + struct cpuset *parent; + + if (set->cs_id == CPUSET_INVALID) + parent = set->cs_parent; + else + parent = set; + return (_cpuset_create(fset, parent, mask, CPUSET_INVALID)); +} + +/* + * Handle two cases for replacing the base set or mask of an entire process. + * + * 1) Set is non-null and mask is null. This reparents all anonymous sets + * to the provided set and replaces all non-anonymous td_cpusets with the + * provided set. + * 2) Mask is non-null and set is null. This replaces or creates anonymous + * sets for every thread with the existing base as a parent. + * + * This is overly complicated because we can't allocate while holding a + * spinlock and spinlocks must be held while changing and examining thread + * state. + */ +static int +cpuset_setproc(pid_t pid, struct cpuset *set, cpuset_t *mask) +{ + struct setlist freelist; + struct setlist droplist; + struct cpuset *nset; + struct thread *td; + struct proc *p; + int threads; + int nfree; + int error; + /* + * The algorithm requires two passes due to locking considerations. + * + * 1) Lookup the process and acquire the locks in the required order. + * 2) If enough cpusets have not been allocated release the locks and + * allocate them. Loop. + */ + LIST_INIT(&freelist); + LIST_INIT(&droplist); + nfree = 0; + for (;;) { + error = cpuset_which(CPU_WHICH_PID, pid, &p, &td, &nset); + if (error) + goto out; + PROC_SLOCK(p); + if (nfree >= p->p_numthreads) + break; + threads = p->p_numthreads; + PROC_SUNLOCK(p); + PROC_UNLOCK(p); + for (; nfree < threads; nfree++) { + nset = uma_zalloc(cpuset_zone, M_WAITOK); + LIST_INSERT_HEAD(&freelist, nset, cs_link); + } + } + PROC_LOCK_ASSERT(p, MA_OWNED); + PROC_SLOCK_ASSERT(p, MA_OWNED); + /* + * Now that the appropriate locks are held and we have enough cpusets, + * replace each thread's cpuset while using deferred release. 
We + * must do this because the PROC_SLOCK has to be held while traversing + * the thread list and this limits the type of operations allowed. + */ + error = 0; + FOREACH_THREAD_IN_PROC(p, td) { + struct cpuset *tdset; + thread_lock(td); + /* + * If we presently have an anonymous set or are applying a + * mask we must create an anonymous shadow set. That is + * either parented to our existing base or the supplied set. + * + * If we have a base set with no anonymous shadow we simply + * replace it outright. + */ + tdset = td->td_cpuset; + if (tdset->cs_id == CPUSET_INVALID || mask) { + nset = LIST_FIRST(&freelist); + LIST_REMOVE(nset, cs_link); + if (mask) + error = cpuset_shadow(tdset, nset, mask); + else + error = _cpuset_create(nset, set, + &tdset->cs_mask, CPUSET_INVALID); + if (error) { + LIST_INSERT_HEAD(&freelist, nset, cs_link); + thread_unlock(td); + break; + } + } else + nset = cpuset_ref(set); + cpuset_rel_defer(&droplist, tdset); + td->td_cpuset = nset; + sched_affinity(td); + thread_unlock(td); + } + PROC_SUNLOCK(p); + PROC_UNLOCK(p); +out: + while ((nset = LIST_FIRST(&droplist)) != NULL) + cpuset_rel_complete(nset); + while ((nset = LIST_FIRST(&freelist)) != NULL) { + LIST_REMOVE(nset, cs_link); + uma_zfree(cpuset_zone, nset); + } + return (error); +} + +/* + * Apply an anonymous mask to a single thread. + */ +static int +cpuset_setthread(lwpid_t id, cpuset_t *mask) +{ + struct cpuset *nset; + struct cpuset *set; + struct thread *td; + struct proc *p; + int error; + + nset = uma_zalloc(cpuset_zone, M_WAITOK); + error = cpuset_which(CPU_WHICH_TID, id, &p, &td, &nset); + if (error) + goto out; + thread_lock(td); + set = td->td_cpuset; + error = cpuset_shadow(set, nset, mask); + if (error == 0) { + cpuset_rel(td->td_cpuset); + td->td_cpuset = nset; + sched_affinity(td); + nset = NULL; + } + thread_unlock(td); + PROC_UNLOCK(p); +out: + if (nset) + uma_zfree(cpuset_zone, nset); + return (error); +} + +/* + * Creates the cpuset for thread0. We make two sets: + * + * 0 - The root set which should represent all valid processors in the + * system. It is initially created with a mask of all processors + * because we don't know what processors are valid until cpuset_init() + * runs. This set is immutable. + * 1 - The default set which all processes are a member of until changed. + * This allows an administrator to move all threads off of given cpus to + * dedicate them to high priority tasks or save power etc. + */ +struct cpuset * +cpuset_thread0(void) +{ + struct cpuset *set; + int error; + + cpuset_zone = uma_zcreate("cpuset", sizeof(struct cpuset), NULL, NULL, + NULL, NULL, UMA_ALIGN_PTR, 0); + mtx_init(&cpuset_lock, "cpuset", NULL, MTX_SPIN | MTX_RECURSE); + /* + * Create the root system set for the whole machine. Doesn't use + * cpuset_create() due to NULL parent. + */ + set = uma_zalloc(cpuset_zone, M_WAITOK | M_ZERO); + set->cs_mask.__bits[0] = -1; + LIST_INIT(&set->cs_children); + LIST_INSERT_HEAD(&cpuset_ids, set, cs_link); + set->cs_ref = 1; + set->cs_flags = CPU_SET_ROOT; + cpuset_zero = set; + /* + * Now derive a default, modifiable set from that to give out. + */ + set = uma_zalloc(cpuset_zone, M_WAITOK); + error = _cpuset_create(set, cpuset_zero, &cpuset_zero->cs_mask, 1); + KASSERT(error == 0, ("Error creating default set: %d\n", error)); + /* + * Initialize the unit allocator. 0 and 1 are allocated above. + */ + cpuset_unr = new_unrhdr(2, INT_MAX, NULL); + + return (set); +} + +/* + * This is called once the final set of system cpus is known. 
Modifies + * the root set and all children and mark the root readonly. + */ +static void +cpuset_init(void *arg) +{ + cpuset_t mask; + + CPU_ZERO(&mask); +#ifdef SMP + mask.__bits[0] = all_cpus; +#else + mask.__bits[0] = 1; +#endif + if (cpuset_modify(cpuset_zero, &mask)) + panic("Can't set initial cpuset mask.\n"); + cpuset_zero->cs_flags |= CPU_SET_RDONLY; +} +SYSINIT(cpuset, SI_SUB_SMP, SI_ORDER_ANY, cpuset_init, NULL); + +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_args { + cpusetid_t *setid; +}; +#endif +int +cpuset(struct thread *td, struct cpuset_args *uap) +{ + struct cpuset *root; + struct cpuset *set; + int error; + + thread_lock(td); + root = cpuset_root(td->td_cpuset); + thread_unlock(td); + error = cpuset_create(&set, root, &root->cs_mask); + cpuset_rel(root); + if (error) + return (error); + error = cpuset_setproc(-1, set, NULL); + if (error == 0) + error = copyout(&set->cs_id, uap->setid, sizeof(set->cs_id)); + cpuset_rel(set); + return (error); +} + +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_setid_args { + cpuwhich_t which; + id_t id; + cpusetid_t setid; +}; +#endif +int +cpuset_setid(struct thread *td, struct cpuset_setid_args *uap) +{ + struct cpuset *set; + int error; + + /* + * Presently we only support per-process sets. + */ + if (uap->which != CPU_WHICH_PID) + return (EINVAL); + set = cpuset_lookup(uap->setid); + if (set == NULL) + return (ESRCH); + error = cpuset_setproc(uap->id, set, NULL); + cpuset_rel(set); + return (error); +} + +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_getid_args { + cpulevel_t level; + cpuwhich_t which; + id_t id; + cpusetid_t *setid; +#endif +int +cpuset_getid(struct thread *td, struct cpuset_getid_args *uap) +{ + struct cpuset *nset; + struct cpuset *set; + struct thread *ttd; + struct proc *p; + cpusetid_t id; + int error; + + if (uap->level == CPU_LEVEL_WHICH && uap->which != CPU_WHICH_CPUSET) + return (EINVAL); + error = cpuset_which(uap->which, uap->id, &p, &ttd, &set); + if (error) + return (error); + switch (uap->which) { + case CPU_WHICH_TID: + case CPU_WHICH_PID: + thread_lock(ttd); + set = cpuset_base(ttd->td_cpuset); + thread_unlock(ttd); + PROC_UNLOCK(p); + break; + case CPU_WHICH_CPUSET: + break; + } + switch (uap->level) { + case CPU_LEVEL_ROOT: + nset = cpuset_root(set); + cpuset_rel(set); + set = nset; + break; + case CPU_LEVEL_CPUSET: + break; + case CPU_LEVEL_WHICH: + break; + } + id = set->cs_id; + cpuset_rel(set); + if (error == 0) + error = copyout(&id, uap->setid, sizeof(id)); + + return (error); +} + +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_getaffinity_args { + cpulevel_t level; + cpuwhich_t which; + int id; + int cpusetsize; + long *mask; +}; +#endif +int +cpuset_getaffinity(struct thread *td, struct cpuset_getaffinity_args *uap) +{ + struct thread *ttd; + struct cpuset *nset; + struct cpuset *set; + struct proc *p; + cpuset_t *mask; + int error; + int size; + + if (uap->cpusetsize < CPU_SETSIZE || uap->cpusetsize > CPU_MAXSIZE) + return (ERANGE); + size = uap->cpusetsize / NBBY; + mask = malloc(size, M_TEMP, M_WAITOK | M_ZERO); + error = cpuset_which(uap->which, uap->id, &p, &ttd, &set); + if (error) + goto out; + error = 0; + switch (uap->level) { + case CPU_LEVEL_ROOT: + case CPU_LEVEL_CPUSET: + switch (uap->which) { + case CPU_WHICH_TID: + case CPU_WHICH_PID: + thread_lock(ttd); + set = cpuset_ref(ttd->td_cpuset); + thread_unlock(ttd); + break; + case CPU_WHICH_CPUSET: + break; + } + if (uap->level == CPU_LEVEL_ROOT) + nset = cpuset_root(set); + else + nset = cpuset_base(set); + CPU_COPY(&nset->cs_mask, mask); + 
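+		/* Drop the reference acquired above by cpuset_root()/cpuset_base(). */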
cpuset_rel(nset); + break; + case CPU_LEVEL_WHICH: + switch (uap->which) { + case CPU_WHICH_TID: + thread_lock(ttd); + CPU_COPY(&ttd->td_cpuset->cs_mask, mask); + thread_unlock(ttd); + break; + case CPU_WHICH_PID: + PROC_SLOCK(p); + FOREACH_THREAD_IN_PROC(p, ttd) { + thread_lock(ttd); + CPU_OR(mask, &ttd->td_cpuset->cs_mask); + thread_unlock(ttd); + } + PROC_SUNLOCK(p); + break; + case CPU_WHICH_CPUSET: + CPU_COPY(&set->cs_mask, mask); + break; + } + break; + default: + error = EINVAL; + break; + } + if (set) + cpuset_rel(set); + if (p) + PROC_UNLOCK(p); + if (error == 0) + error = copyout(mask, uap->mask, size); +out: + free(mask, M_TEMP); + return (error); +} + +#ifndef _SYS_SYSPROTO_H_ +struct cpuset_setaffinity_args { + cpulevel_t level; + cpuwhich_t which; + int id; + int cpusetsize; + long * mask; +}; +#endif +int +cpuset_setaffinity(struct thread *td, struct cpuset_setaffinity_args *uap) +{ + struct cpuset *nset; + struct cpuset *set; + struct thread *ttd; + struct proc *p; + cpuset_t *mask; + int error; + + if (uap->cpusetsize < CPU_SETSIZE || uap->cpusetsize > CPU_MAXSIZE) + return (ERANGE); + mask = malloc(uap->cpusetsize / NBBY, M_TEMP, M_WAITOK | M_ZERO); + error = copyin(uap->mask, mask, uap->cpusetsize / NBBY); + if (error) + goto out; + switch (uap->level) { + case CPU_LEVEL_ROOT: + case CPU_LEVEL_CPUSET: + error = cpuset_which(uap->which, uap->id, &p, &ttd, &set); + if (error) + break; + switch (uap->which) { + case CPU_WHICH_TID: + case CPU_WHICH_PID: + thread_lock(ttd); + set = cpuset_ref(ttd->td_cpuset); + thread_unlock(ttd); + break; + case CPU_WHICH_CPUSET: + break; + } + if (uap->level == CPU_LEVEL_ROOT) + nset = cpuset_root(set); + else + nset = cpuset_base(set); + error = cpuset_modify(nset, mask); + cpuset_rel(nset); + cpuset_rel(set); + break; + case CPU_LEVEL_WHICH: + switch (uap->which) { + case CPU_WHICH_TID: + error = cpuset_setthread(uap->id, mask); + break; + case CPU_WHICH_PID: + error = cpuset_setproc(uap->id, NULL, mask); + break; + case CPU_WHICH_CPUSET: + error = cpuset_which(CPU_WHICH_CPUSET, uap->id, &p, + &ttd, &set); + if (error == 0) { + error = cpuset_modify(set, mask); + cpuset_rel(set); + } + break; + default: + error = EINVAL; + break; + } + break; + default: + error = EINVAL; + break; + } +out: + free(mask, M_TEMP); + return (error); +} diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 6f23b634127f..8d9f75320a4f 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include @@ -342,7 +343,8 @@ thread_alloc(void) void thread_free(struct thread *td) { - + cpuset_rel(td->td_cpuset); + td->td_cpuset = NULL; cpu_thread_free(td); if (td->td_altkstack != 0) vm_thread_dispose_altkstack(td); @@ -527,6 +529,8 @@ thread_wait(struct proc *p) /* Wait for any remaining threads to exit cpu_throw(). */ while (p->p_exitthreads) sched_relinquish(curthread); + cpuset_rel(td->td_cpuset); + td->td_cpuset = NULL; cpu_thread_clean(td); crfree(td->td_ucred); thread_reap(); /* check for zombie threads etc. 
*/ diff --git a/sys/kern/syscalls.master b/sys/kern/syscalls.master index c22f99c333a0..c31df8d0d314 100644 --- a/sys/kern/syscalls.master +++ b/sys/kern/syscalls.master @@ -850,5 +850,18 @@ 482 AUE_SHMOPEN STD { int shm_open(const char *path, int flags, \ mode_t mode); } 483 AUE_SHMUNLINK STD { int shm_unlink(const char *path); } +484 AUE_NULL STD { int cpuset(cpusetid_t *setid); } +485 AUE_NULL STD { int cpuset_setid(cpuwhich_t which, id_t id, \ + cpusetid_t setid); } +486 AUE_NULL STD { int cpuset_getid(cpulevel_t level, \ + cpuwhich_t which, id_t id, \ + cpusetid_t *setid); } +487 AUE_NULL STD { int cpuset_getaffinity(cpulevel_t level, \ + cpuwhich_t which, id_t id, int cpusetsize, \ + long *mask); } +488 AUE_NULL STD { int cpuset_setaffinity(cpulevel_t level, \ + cpuwhich_t which, id_t id, int cpusetsize, \ + long *mask); } + ; Please copy any additions and changes to the following compatability tables: ; sys/compat/freebsd32/syscalls.master diff --git a/sys/sys/_types.h b/sys/sys/_types.h index 6f1f74a3bc07..bee2ccad0896 100644 --- a/sys/sys/_types.h +++ b/sys/sys/_types.h @@ -61,6 +61,9 @@ typedef struct __timer *__timer_t; /* timer_gettime()... */ typedef struct __mq *__mqd_t; /* mq_open()... */ typedef __uint32_t __uid_t; typedef unsigned int __useconds_t; /* microseconds (unsigned) */ +typedef int __cpuwhich_t; /* which parameter for cpuset. */ +typedef int __cpulevel_t; /* level parameter for cpuset. */ +typedef int __cpusetid_t; /* cpuset identifier. */ /* * Unusual type definitions. diff --git a/sys/sys/cpuset.h b/sys/sys/cpuset.h new file mode 100644 index 000000000000..301f96c1680b --- /dev/null +++ b/sys/sys/cpuset.h @@ -0,0 +1,146 @@ +/*- + * Copyright (c) 2008, Jeffrey Roberson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice unmodified, this list of conditions, and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _SYS_CPUSET_H_ +#define _SYS_CPUSET_H_ + +#ifdef _KERNEL +#define CPU_SETSIZE MAXCPU +#endif + +#define CPU_MAXSIZE 128 + +#ifndef CPU_SETSIZE +#define CPU_SETSIZE CPU_MAXSIZE +#endif + +#define _NCPUBITS (sizeof(long) * NBBY) /* bits per mask */ +#define _NCPUWORDS howmany(CPU_SETSIZE, _NCPUBITS) + +typedef struct _cpuset { + long __bits[howmany(CPU_SETSIZE, _NCPUBITS)]; +} cpuset_t; + +#define __cpuset_mask(n) ((long)1 << ((n) % _NCPUBITS)) +#define CPU_CLR(n, p) ((p)->__bits[(n)/_NCPUBITS] &= ~__cpuset_mask(n)) +#define CPU_COPY(f, t) (void)(*(t) = *(f)) +#define CPU_ISSET(n, p) (((p)->__bits[(n)/_NCPUBITS] & __cpuset_mask(n)) != 0) +#define CPU_SET(n, p) ((p)->__bits[(n)/_NCPUBITS] |= __cpuset_mask(n)) +#define CPU_ZERO(p) do { \ + __size_t __i; \ + for (__i = 0; __i < _NCPUWORDS; __i++) \ + (p)->__bits[__i] = 0; \ +} while (0) + +#define CPU_EMPTY(p) __extension__ ({ \ + __size_t __i; \ + for (__i = 0; __i < _NCPUWORDS; __i++) \ + if ((p)->__bits[__i]) \ + break; \ + __i == _NCPUWORDS; \ +}) + +#define CPU_OR(d, s) do { \ + __size_t __i; \ + for (__i = 0; __i < _NCPUWORDS; __i++) \ + (d)->__bits[__i] |= (s)->__bits[__i]; \ +} while (0) + +#define CPU_AND(d, s) do { \ + __size_t __i; \ + for (__i = 0; __i < _NCPUWORDS; __i++) \ + (d)->__bits[__i] &= (s)->__bits[__i]; \ +} while (0) + +#define CPU_NAND(d, s) do { \ + __size_t __i; \ + for (__i = 0; __i < _NCPUWORDS; __i++) \ + (d)->__bits[__i] &= ~(s)->__bits[__i]; \ +} while (0) + +/* + * Valid cpulevel_t values. + */ +#define CPU_LEVEL_ROOT 1 /* All system cpus. */ +#define CPU_LEVEL_CPUSET 2 /* Available cpus for which. */ +#define CPU_LEVEL_WHICH 3 /* Actual mask/id for which. */ + +/* + * Valid cpuwhich_t values. + */ +#define CPU_WHICH_TID 1 /* Specifies a thread id. */ +#define CPU_WHICH_PID 2 /* Specifies a process id. */ +#define CPU_WHICH_CPUSET 3 /* Specifies a set id. */ + +/* + * Reserved cpuset identifiers. + */ +#define CPUSET_INVALID -1 +#define CPUSET_DEFAULT 0 + +#ifdef _KERNEL +LIST_HEAD(setlist, cpuset); + +/* + * cpusets encapsulate cpu binding information for one or more threads. + * + * a - Accessed with atomics. + * s - Set at creation, never modified. Only a ref required to read. + * c - Locked internally by a cpuset lock. + * + * The bitmask is only modified while holding the cpuset lock. It may be + * read while only a reference is held but the consumer must be prepared + * to deal with inconsistent results. + */ +struct cpuset { + cpuset_t cs_mask; /* bitmask of valid cpus. */ + volatile u_int cs_ref; /* (a) Reference count. */ + int cs_flags; /* (s) Flags from below. */ + cpusetid_t cs_id; /* (s) Id or INVALID. */ + struct cpuset *cs_parent; /* (s) Pointer to our parent. */ + LIST_ENTRY(cpuset) cs_link; /* (c) All identified sets. */ + LIST_ENTRY(cpuset) cs_siblings; /* (c) Sibling set link. */ + struct setlist cs_children; /* (c) List of children. */ +}; + +#define CPU_SET_ROOT 0x0001 /* Set is a root set. */ +#define CPU_SET_RDONLY 0x0002 /* No modification allowed. 
*/ + +struct cpuset *cpuset_thread0(void); +struct cpuset *cpuset_ref(struct cpuset *); +void cpuset_rel(struct cpuset *); +#else +__BEGIN_DECLS +int cpuset(cpusetid_t *); +int cpuset_setid(cpuwhich_t, id_t, cpusetid_t); +int cpuset_getid(cpulevel_t, cpuwhich_t, id_t, cpusetid_t *); +int cpuset_getaffinity(cpulevel_t, cpuwhich_t, id_t, int, cpuset_t *); +int cpuset_setaffinity(cpulevel_t, cpuwhich_t, id_t, int, cpuset_t *); +__END_DECLS +#endif +#endif /* !_SYS_CPUSET_H_ */ diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 03a81423ea94..c147415a1562 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -163,6 +163,7 @@ struct thread; struct trapframe; struct turnstile; struct mqueue_notifier; +struct cpuset; /* * Here we define the two structures used for process information. @@ -208,7 +209,7 @@ struct thread { /* The two queues below should someday be merged. */ TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */ TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */ - + struct cpuset *td_cpuset; /* (t) CPU affinity mask. */ struct seltd *td_sel; /* Select queue/channel. */ struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */ struct turnstile *td_turnstile; /* (k) Associated turnstile. */ diff --git a/sys/sys/types.h b/sys/sys/types.h index 51c02c8b553c..ed0e3a9d6016 100644 --- a/sys/sys/types.h +++ b/sys/sys/types.h @@ -124,6 +124,10 @@ typedef __blksize_t blksize_t; #define _BLKSIZE_T_DECLARED #endif +typedef __cpuwhich_t cpuwhich_t; +typedef __cpulevel_t cpulevel_t; +typedef __cpusetid_t cpusetid_t; + #ifndef _BLKCNT_T_DECLARED typedef __blkcnt_t blkcnt_t; #define _BLKCNT_T_DECLARED
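
A minimal userland sketch of how the new interfaces might be used (illustrative only, not part of this change). It relies on the prototypes declared in sys/sys/cpuset.h above; note that in this revision the cpusetsize argument appears to be a size in bits (CPU_SETSIZE), judging from the ERANGE bounds checks and the division by NBBY in cpuset_getaffinity()/cpuset_setaffinity().

/*
 * Illustrative example, not part of the patch: query the base set id and
 * the current thread's affinity mask, then bind the current thread
 * (id -1 == curthread) to cpu 0.
 */
#include <sys/param.h>
#include <sys/cpuset.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	cpusetid_t setid;
	cpuset_t mask;
	int cpu;

	/* The numbered (base) set this process belongs to; normally 1. */
	if (cpuset_getid(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, &setid) != 0)
		err(1, "cpuset_getid");
	printf("base set id: %d\n", (int)setid);

	/* Cpus the current thread may run on at the moment. */
	CPU_ZERO(&mask);
	if (cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    CPU_SETSIZE, &mask) != 0)
		err(1, "cpuset_getaffinity");
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &mask))
			printf("may run on cpu %d\n", cpu);

	/* Restrict the current thread to cpu 0 only. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    CPU_SETSIZE, &mask) != 0)
		err(1, "cpuset_setaffinity");
	return (0);
}

Binding with CPU_LEVEL_WHICH/CPU_WHICH_TID creates an anonymous shadow set for that thread only, so other threads in the process are unaffected; to reprovision every thread in a group at once, an administrator would instead modify the numbered set via CPU_WHICH_CPUSET, as described in the kern_cpuset.c comments above.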