GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/cpu/resctrl/rdtgroup.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <[email protected]>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/resctrl.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/msr.h>
#include "internal.h"

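/*
 * Static keys gating the resctrl work on the context-switch path:
 * nothing is done in __switch_to() unless rdt_enable_key is set, with
 * the alloc and mon keys tracking whether allocation and monitoring
 * are in use.
 */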
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);

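/*
 * SMP function call handler: update this CPU's default CLOSID/RMID from
 * @info and re-apply them to the hardware. A NULL @info leaves the
 * defaults unchanged and only refreshes the MSR state.
 */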
/*
 * This is safe against resctrl_arch_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
void resctrl_arch_sync_cpu_closid_rmid(void *info)
{
	struct resctrl_cpu_defaults *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the currently
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_arch_sched_in(current);
}

#define INVALID_CONFIG_INDEX	UINT_MAX

/**
 * mon_event_config_index_get - get the hardware index for the
 *				configurable event
 * @evtid: event id.
 *
 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
 *	   1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
 *	   INVALID_CONFIG_INDEX for invalid evtid
 */
static inline unsigned int mon_event_config_index_get(u32 evtid)
{
	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return 0;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return 1;
	default:
		/* Should never reach here */
		return INVALID_CONFIG_INDEX;
	}
}

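/*
 * SMP function call handler: read this CPU's event configuration MSR
 * for the event in @_config_info and return the configuration through
 * @_config_info->mon_config.
 */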
void resctrl_arch_mon_event_config_read(void *_config_info)
{
	struct resctrl_mon_config_info *config_info = _config_info;
	unsigned int index;
	u64 msrval;

	index = mon_event_config_index_get(config_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", config_info->evtid);
		return;
	}
	rdmsrq(MSR_IA32_EVT_CFG_BASE + index, msrval);

	/* Report only the valid event configuration bits */
	config_info->mon_config = msrval & MAX_EVT_CONFIG_BITS;
}

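/*
 * SMP function call handler: program this CPU's event configuration MSR
 * for the event in @_config_info with @_config_info->mon_config.
 */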
void resctrl_arch_mon_event_config_write(void *_config_info)
{
	struct resctrl_mon_config_info *config_info = _config_info;
	unsigned int index;

	index = mon_event_config_index_get(config_info->evtid);
	if (index == INVALID_CONFIG_INDEX) {
		pr_warn_once("Invalid event id %d\n", config_info->evtid);
		return;
	}
	wrmsrq(MSR_IA32_EVT_CFG_BASE + index, config_info->mon_config);
}

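/*
 * Toggle Code/Data Prioritization (CDP) in the QOS_CFG MSR of the
 * corresponding cache level on the CPU this runs on.
 */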
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrq(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrq(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

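/*
 * Enable or disable CDP across all domains of the @level cache
 * resource. Depending on the hardware, the QOS_CFG MSR is per-CPU or
 * per-domain, so build the set of CPUs that must be updated and write
 * the MSR on each of them via IPI.
 */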
static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_ctrl_domain *d;
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	int cpu;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->ctrl_domains, hdr.list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->hdr.cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->hdr.cpu_mask), cpu_mask);
	}

	/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, update, &enable, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

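/* Enable CDP for @level, recording the new state only on success. */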
static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

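/* Disable CDP for @level if it is currently enabled. */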
static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

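/*
 * Arch hook used by the resctrl filesystem to switch CDP on or off for
 * the cache resource at level @l.
 */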
int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

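/* Report whether CDP is currently enabled for the resource at level @l. */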
bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l)
{
	return rdt_resources_all[l].cdp_enabled;
}

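/*
 * Reset all control values of @r to the default (no restriction) and
 * propagate them to the hardware in every control domain.
 */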
void resctrl_arch_reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_ctrl_domain *hw_dom;
	struct msr_param msr_param;
	struct rdt_ctrl_domain *d;
	int i;

	/* Walking r->domains, ensure it can't race with cpuhp */
	lockdep_assert_cpus_held();

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all ctrl_domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		hw_dom = resctrl_to_arch_ctrl_dom(d);

		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = resctrl_get_default_ctrl(r);
		msr_param.dom = d;
		smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
	}
}