/* Source path: arch/x86/kernel/cpu/resctrl/rdtgroup.c */
// SPDX-License-Identifier: GPL-2.0-only1/*2* User interface for Resource Allocation in Resource Director Technology(RDT)3*4* Copyright (C) 2016 Intel Corporation5*6* Author: Fenghua Yu <[email protected]>7*8* More information about RDT be found in the Intel (R) x86 Architecture9* Software Developer Manual.10*/1112#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt1314#include <linux/cpu.h>15#include <linux/debugfs.h>16#include <linux/fs.h>17#include <linux/fs_parser.h>18#include <linux/sysfs.h>19#include <linux/kernfs.h>20#include <linux/resctrl.h>21#include <linux/seq_buf.h>22#include <linux/seq_file.h>23#include <linux/sched/signal.h>24#include <linux/sched/task.h>25#include <linux/slab.h>26#include <linux/task_work.h>27#include <linux/user_namespace.h>2829#include <uapi/linux/magic.h>3031#include <asm/msr.h>32#include "internal.h"3334DEFINE_STATIC_KEY_FALSE(rdt_enable_key);3536DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);3738DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);3940/*41* This is safe against resctrl_arch_sched_in() called from __switch_to()42* because __switch_to() is executed with interrupts disabled. A local call43* from update_closid_rmid() is protected against __switch_to() because44* preemption is disabled.45*/46void resctrl_arch_sync_cpu_closid_rmid(void *info)47{48struct resctrl_cpu_defaults *r = info;4950if (r) {51this_cpu_write(pqr_state.default_closid, r->closid);52this_cpu_write(pqr_state.default_rmid, r->rmid);53}5455/*56* We cannot unconditionally write the MSR because the current57* executing task might have its own closid selected. 
Just reuse58* the context switch code.59*/60resctrl_arch_sched_in(current);61}6263#define INVALID_CONFIG_INDEX UINT_MAX6465/**66* mon_event_config_index_get - get the hardware index for the67* configurable event68* @evtid: event id.69*70* Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID71* 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID72* INVALID_CONFIG_INDEX for invalid evtid73*/74static inline unsigned int mon_event_config_index_get(u32 evtid)75{76switch (evtid) {77case QOS_L3_MBM_TOTAL_EVENT_ID:78return 0;79case QOS_L3_MBM_LOCAL_EVENT_ID:80return 1;81default:82/* Should never reach here */83return INVALID_CONFIG_INDEX;84}85}8687void resctrl_arch_mon_event_config_read(void *_config_info)88{89struct resctrl_mon_config_info *config_info = _config_info;90unsigned int index;91u64 msrval;9293index = mon_event_config_index_get(config_info->evtid);94if (index == INVALID_CONFIG_INDEX) {95pr_warn_once("Invalid event id %d\n", config_info->evtid);96return;97}98rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval);99100/* Report only the valid event configuration bits */101config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;102}103104void resctrl_arch_mon_event_config_write(void *_config_info)105{106struct resctrl_mon_config_info *config_info = _config_info;107unsigned int index;108109index = mon_event_config_index_get(config_info->evtid);110if (index == INVALID_CONFIG_INDEX) {111pr_warn_once("Invalid event id %d\n", config_info->evtid);112return;113}114wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config);115}116117static void l3_qos_cfg_update(void *arg)118{119bool *enable = arg;120121wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);122}123124static void l2_qos_cfg_update(void *arg)125{126bool *enable = arg;127128wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? 
L2_QOS_CDP_ENABLE : 0ULL);129}130131static int set_cache_qos_cfg(int level, bool enable)132{133void (*update)(void *arg);134struct rdt_ctrl_domain *d;135struct rdt_resource *r_l;136cpumask_var_t cpu_mask;137int cpu;138139/* Walking r->domains, ensure it can't race with cpuhp */140lockdep_assert_cpus_held();141142if (level == RDT_RESOURCE_L3)143update = l3_qos_cfg_update;144else if (level == RDT_RESOURCE_L2)145update = l2_qos_cfg_update;146else147return -EINVAL;148149if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))150return -ENOMEM;151152r_l = &rdt_resources_all[level].r_resctrl;153list_for_each_entry(d, &r_l->ctrl_domains, hdr.list) {154if (r_l->cache.arch_has_per_cpu_cfg)155/* Pick all the CPUs in the domain instance */156for_each_cpu(cpu, &d->hdr.cpu_mask)157cpumask_set_cpu(cpu, cpu_mask);158else159/* Pick one CPU from each domain instance to update MSR */160cpumask_set_cpu(cpumask_any(&d->hdr.cpu_mask), cpu_mask);161}162163/* Update QOS_CFG MSR on all the CPUs in cpu_mask */164on_each_cpu_mask(cpu_mask, update, &enable, 1);165166free_cpumask_var(cpu_mask);167168return 0;169}170171/* Restore the qos cfg state when a domain comes online */172void rdt_domain_reconfigure_cdp(struct rdt_resource *r)173{174struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);175176if (!r->cdp_capable)177return;178179if (r->rid == RDT_RESOURCE_L2)180l2_qos_cfg_update(&hw_res->cdp_enabled);181182if (r->rid == RDT_RESOURCE_L3)183l3_qos_cfg_update(&hw_res->cdp_enabled);184}185186static int cdp_enable(int level)187{188struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;189int ret;190191if (!r_l->alloc_capable)192return -EINVAL;193194ret = set_cache_qos_cfg(level, true);195if (!ret)196rdt_resources_all[level].cdp_enabled = true;197198return ret;199}200201static void cdp_disable(int level)202{203struct rdt_hw_resource *r_hw = &rdt_resources_all[level];204205if (r_hw->cdp_enabled) {206set_cache_qos_cfg(level, false);207r_hw->cdp_enabled = false;208}209}210211int 
resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)212{213struct rdt_hw_resource *hw_res = &rdt_resources_all[l];214215if (!hw_res->r_resctrl.cdp_capable)216return -EINVAL;217218if (enable)219return cdp_enable(l);220221cdp_disable(l);222223return 0;224}225226bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)227{228return rdt_resources_all[l].cdp_enabled;229}230231void resctrl_arch_reset_all_ctrls(struct rdt_resource *r)232{233struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);234struct rdt_hw_ctrl_domain *hw_dom;235struct msr_param msr_param;236struct rdt_ctrl_domain *d;237int i;238239/* Walking r->domains, ensure it can't race with cpuhp */240lockdep_assert_cpus_held();241242msr_param.res = r;243msr_param.low = 0;244msr_param.high = hw_res->num_closid;245246/*247* Disable resource control for this resource by setting all248* CBMs in all ctrl_domains to the maximum mask value. Pick one CPU249* from each domain to update the MSRs below.250*/251list_for_each_entry(d, &r->ctrl_domains, hdr.list) {252hw_dom = resctrl_to_arch_ctrl_dom(d);253254for (i = 0; i < hw_res->num_closid; i++)255hw_dom->ctrl_val[i] = resctrl_get_default_ctrl(r);256msr_param.dom = d;257smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);258}259260return;261}262263264