/* arch/mips/kernel/mips-mt-fpaff.c */
/*1* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels2* Copyright (C) 2005 Mips Technologies, Inc3*/4#include <linux/cpu.h>5#include <linux/cpuset.h>6#include <linux/cpumask.h>7#include <linux/delay.h>8#include <linux/kernel.h>9#include <linux/init.h>10#include <linux/sched.h>11#include <linux/security.h>12#include <linux/types.h>13#include <asm/uaccess.h>1415/*16* CPU mask used to set process affinity for MT VPEs/TCs with FPUs17*/18cpumask_t mt_fpu_cpumask;1920static int fpaff_threshold = -1;21unsigned long mt_fpemul_threshold;2223/*24* Replacement functions for the sys_sched_setaffinity() and25* sys_sched_getaffinity() system calls, so that we can integrate26* FPU affinity with the user's requested processor affinity.27* This code is 98% identical with the sys_sched_setaffinity()28* and sys_sched_getaffinity() system calls, and should be29* updated when kernel/sched.c changes.30*/3132/*33* find_process_by_pid - find a process with a matching PID value.34* used in sys_sched_set/getaffinity() in kernel/sched.c, so35* cloned here.36*/37static inline struct task_struct *find_process_by_pid(pid_t pid)38{39return pid ? 
find_task_by_vpid(pid) : current;40}4142/*43* check the target process has a UID that matches the current process's44*/45static bool check_same_owner(struct task_struct *p)46{47const struct cred *cred = current_cred(), *pcred;48bool match;4950rcu_read_lock();51pcred = __task_cred(p);52match = (cred->euid == pcred->euid ||53cred->euid == pcred->uid);54rcu_read_unlock();55return match;56}5758/*59* mipsmt_sys_sched_setaffinity - set the cpu affinity of a process60*/61asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,62unsigned long __user *user_mask_ptr)63{64cpumask_var_t cpus_allowed, new_mask, effective_mask;65struct thread_info *ti;66struct task_struct *p;67int retval;6869if (len < sizeof(new_mask))70return -EINVAL;7172if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))73return -EFAULT;7475get_online_cpus();76rcu_read_lock();7778p = find_process_by_pid(pid);79if (!p) {80rcu_read_unlock();81put_online_cpus();82return -ESRCH;83}8485/* Prevent p going away */86get_task_struct(p);87rcu_read_unlock();8889if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {90retval = -ENOMEM;91goto out_put_task;92}93if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {94retval = -ENOMEM;95goto out_free_cpus_allowed;96}97if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {98retval = -ENOMEM;99goto out_free_new_mask;100}101retval = -EPERM;102if (!check_same_owner(p) && !capable(CAP_SYS_NICE))103goto out_unlock;104105retval = security_task_setscheduler(p);106if (retval)107goto out_unlock;108109/* Record new user-specified CPU set for future reference */110cpumask_copy(&p->thread.user_cpus_allowed, new_mask);111112again:113/* Compute new global allowed CPU set if necessary */114ti = task_thread_info(p);115if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&116cpus_intersects(*new_mask, mt_fpu_cpumask)) {117cpus_and(*effective_mask, *new_mask, mt_fpu_cpumask);118retval = set_cpus_allowed_ptr(p, effective_mask);119} else {120cpumask_copy(effective_mask, 
new_mask);121clear_ti_thread_flag(ti, TIF_FPUBOUND);122retval = set_cpus_allowed_ptr(p, new_mask);123}124125if (!retval) {126cpuset_cpus_allowed(p, cpus_allowed);127if (!cpumask_subset(effective_mask, cpus_allowed)) {128/*129* We must have raced with a concurrent cpuset130* update. Just reset the cpus_allowed to the131* cpuset's cpus_allowed132*/133cpumask_copy(new_mask, cpus_allowed);134goto again;135}136}137out_unlock:138free_cpumask_var(effective_mask);139out_free_new_mask:140free_cpumask_var(new_mask);141out_free_cpus_allowed:142free_cpumask_var(cpus_allowed);143out_put_task:144put_task_struct(p);145put_online_cpus();146return retval;147}148149/*150* mipsmt_sys_sched_getaffinity - get the cpu affinity of a process151*/152asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,153unsigned long __user *user_mask_ptr)154{155unsigned int real_len;156cpumask_t mask;157int retval;158struct task_struct *p;159160real_len = sizeof(mask);161if (len < real_len)162return -EINVAL;163164get_online_cpus();165read_lock(&tasklist_lock);166167retval = -ESRCH;168p = find_process_by_pid(pid);169if (!p)170goto out_unlock;171retval = security_task_getscheduler(p);172if (retval)173goto out_unlock;174175cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);176177out_unlock:178read_unlock(&tasklist_lock);179put_online_cpus();180if (retval)181return retval;182if (copy_to_user(user_mask_ptr, &mask, real_len))183return -EFAULT;184return real_len;185}186187188static int __init fpaff_thresh(char *str)189{190get_option(&str, &fpaff_threshold);191return 1;192}193__setup("fpaff=", fpaff_thresh);194195/*196* FPU Use Factor empirically derived from experiments on 34K197*/198#define FPUSEFACTOR 2000199200static __init int mt_fp_affinity_init(void)201{202if (fpaff_threshold >= 0) {203mt_fpemul_threshold = fpaff_threshold;204} else {205mt_fpemul_threshold =206(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;207}208printk(KERN_DEBUG "FPU Affinity set after %ld 
emulations\n",209mt_fpemul_threshold);210211return 0;212}213arch_initcall(mt_fp_affinity_init);214215216