// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <[email protected]>.
 * (C) 2004 Zou Nan hai <[email protected]>.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

/*
 * Per-policy frequency transition statistics.
 *
 * time_in_state, freq_table and trans_table live in one combined
 * allocation whose base is time_in_state (see
 * cpufreq_stats_create_table()).
 */
struct cpufreq_stats {
	unsigned int total_trans;	/* total number of recorded transitions */
	unsigned long long last_time;	/* local_clock() stamp of last update */
	unsigned int max_state;		/* slots allocated per table */
	unsigned int state_num;		/* valid-unique frequencies in freq_table */
	unsigned int last_index;	/* index of the current frequency */
	u64 *time_in_state;		/* nanoseconds accumulated per state */
	unsigned int *freq_table;	/* frequency for each state index */
	unsigned int *trans_table;	/* max_state x max_state counters */

	/* Deferred reset */
	unsigned int reset_pending;	/* non-zero while a reset is queued */
	unsigned long long reset_time;	/* local_clock() time of the request */
};
static void cpufreq_stats_update(struct cpufreq_stats *stats,
31
unsigned long long time)
32
{
33
unsigned long long cur_time = local_clock();
34
35
stats->time_in_state[stats->last_index] += cur_time - time;
36
stats->last_time = cur_time;
37
}
38
39
/*
 * Perform the deferred reset requested via store_reset(): zero all
 * tables and restart accounting from the moment the reset was requested
 * (stats->reset_time), so time elapsed between the request and this
 * call is not lost. Called from cpufreq_stats_record_transition() when
 * reset_pending is set.
 */
static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
	unsigned int count = stats->max_state;

	memset(stats->time_in_state, 0, count * sizeof(u64));
	memset(stats->trans_table, 0, count * count * sizeof(int));
	stats->last_time = local_clock();
	stats->total_trans = 0;

	/* Adjust for the time elapsed since reset was requested */
	WRITE_ONCE(stats->reset_pending, 0);
	/*
	 * Prevent the reset_time read from being reordered before the
	 * reset_pending accesses in cpufreq_stats_record_transition().
	 */
	smp_rmb();
	cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
59
{
60
struct cpufreq_stats *stats = policy->stats;
61
62
if (READ_ONCE(stats->reset_pending))
63
return sprintf(buf, "%d\n", 0);
64
else
65
return sprintf(buf, "%u\n", stats->total_trans);
66
}
67
cpufreq_freq_attr_ro(total_trans);
68
69
/*
 * sysfs: print one "<frequency> <time>" line per state, with time in
 * clock_t units. While a deferred reset is pending, every state reads
 * as zero except the current one, which shows the time elapsed since
 * the reset was requested.
 */
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	unsigned long long time;
	ssize_t len = 0;
	int i;

	for (i = 0; i < stats->state_num; i++) {
		if (pending) {
			if (i == stats->last_index) {
				/*
				 * Prevent the reset_time read from occurring
				 * before the reset_pending read above.
				 */
				smp_rmb();
				time = local_clock() - READ_ONCE(stats->reset_time);
			} else {
				time = 0;
			}
		} else {
			time = stats->time_in_state[i];
			/* Include time accrued in the current state so far */
			if (i == stats->last_index)
				time += local_clock() - stats->last_time;
		}

		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
			       nsec_to_clock_t(time));
	}
	return len;
}
cpufreq_freq_attr_ro(time_in_state);
/* We don't care what is written to the attribute */
/*
 * sysfs "reset" store: request a deferred reset of the statistics.
 * The actual zeroing happens in cpufreq_stats_record_transition() on
 * the next transition; until then readers report zeroed values based
 * on reset_time.
 */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
			   size_t count)
{
	struct cpufreq_stats *stats = policy->stats;

	/*
	 * Defer resetting of stats to cpufreq_stats_record_transition() to
	 * avoid races.
	 */
	WRITE_ONCE(stats->reset_time, local_clock());
	/*
	 * The memory barrier below is to prevent the readers of reset_time from
	 * seeing a stale or partially updated value.
	 */
	smp_wmb();
	WRITE_ONCE(stats->reset_pending, 1);

	return count;
}
cpufreq_freq_attr_wo(reset);
/*
 * sysfs: print the transition count matrix. Rows are "from"
 * frequencies, columns "to". All counts read as zero while a deferred
 * reset is pending. Output is bounded by PAGE_SIZE; if the table
 * cannot fit, -EFBIG is returned (and a warning printed once).
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	ssize_t len = 0;
	int i, j, count;

	len += sysfs_emit_at(buf, len, "   From  :    To\n");
	len += sysfs_emit_at(buf, len, "         : ");
	/* Header row: one column per known frequency */
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "%9u ", stats->freq_table[i]);
	}
	if (len >= PAGE_SIZE - 1)
		return PAGE_SIZE - 1;

	len += sysfs_emit_at(buf, len, "\n");

	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE - 1)
			break;

		len += sysfs_emit_at(buf, len, "%9u: ", stats->freq_table[i]);

		for (j = 0; j < stats->state_num; j++) {
			if (len >= PAGE_SIZE - 1)
				break;

			if (pending)
				count = 0;
			else
				count = stats->trans_table[i * stats->max_state + j];

			len += sysfs_emit_at(buf, len, "%9u ", count);
		}
		if (len >= PAGE_SIZE - 1)
			break;
		len += sysfs_emit_at(buf, len, "\n");
	}

	if (len >= PAGE_SIZE - 1) {
		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
		return -EFBIG;
	}
	return len;
}
cpufreq_freq_attr_ro(trans_table);
/* Attributes exposed under the policy's "stats" sysfs group */
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
	&reset.attr,
	&trans_table.attr,
	NULL
};
static const struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
186
{
187
int index;
188
for (index = 0; index < stats->max_state; index++)
189
if (stats->freq_table[index] == freq)
190
return index;
191
return -1;
192
}
193
194
void cpufreq_stats_free_table(struct cpufreq_policy *policy)
195
{
196
struct cpufreq_stats *stats = policy->stats;
197
198
/* Already freed */
199
if (!stats)
200
return;
201
202
pr_debug("%s: Free stats table\n", __func__);
203
204
sysfs_remove_group(&policy->kobj, &stats_attr_group);
205
kfree(stats->time_in_state);
206
kfree(stats);
207
policy->stats = NULL;
208
}
209
210
/*
 * Allocate and populate the statistics tables for @policy and register
 * the "stats" sysfs group. A no-op when the policy's frequency table is
 * empty or the stats already exist; on any failure the policy is left
 * with policy->stats == NULL.
 */
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
	unsigned int i = 0, count;
	struct cpufreq_stats *stats;
	unsigned int alloc_size;
	struct cpufreq_frequency_table *pos;

	count = cpufreq_table_count_valid_entries(policy);
	if (!count)
		return;

	/* stats already initialized */
	if (policy->stats)
		return;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return;

	/* One freq_table int and one time_in_state u64 per state ... */
	alloc_size = count * sizeof(int) + count * sizeof(u64);

	/* ... plus the count x count transition matrix */
	alloc_size += count * count * sizeof(int);

	/* Allocate memory for time_in_state/freq_table/trans_table in one go */
	stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stats->time_in_state)
		goto free_stat;

	/* freq_table starts right after the count u64 time_in_state slots */
	stats->freq_table = (unsigned int *)(stats->time_in_state + count);

	/* trans_table follows the count freq_table entries */
	stats->trans_table = stats->freq_table + count;

	stats->max_state = count;

	/* Find valid-unique entries */
	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		/*
		 * Sorted tables carry no duplicates, so the linear duplicate
		 * search is only needed for unsorted ones.
		 */
		if (policy->freq_table_sorted != CPUFREQ_TABLE_UNSORTED ||
		    freq_table_get_index(stats, pos->frequency) == -1)
			stats->freq_table[i++] = pos->frequency;

	stats->state_num = i;
	stats->last_time = local_clock();
	stats->last_index = freq_table_get_index(stats, policy->cur);

	policy->stats = stats;
	if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
		return;

	/* We failed, release resources */
	policy->stats = NULL;
	kfree(stats->time_in_state);
free_stat:
	kfree(stats);
}
/*
 * cpufreq core hook: account a completed frequency transition to
 * @new_freq. Applies any pending deferred reset first, then charges the
 * time spent at the outgoing frequency and bumps the transition
 * counters. A no-op when stats are absent, either frequency is unknown,
 * or the frequency did not actually change.
 */
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq)
{
	struct cpufreq_stats *stats = policy->stats;
	int old_index, new_index;

	if (unlikely(!stats))
		return;

	/* Apply a reset queued via the "reset" attribute before accounting */
	if (unlikely(READ_ONCE(stats->reset_pending)))
		cpufreq_stats_reset_table(stats);

	old_index = stats->last_index;
	new_index = freq_table_get_index(stats, new_freq);

	/* We can't do stats->time_in_state[-1]= .. */
	if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
		return;

	cpufreq_stats_update(stats, stats->last_time);

	stats->last_index = new_index;
	stats->trans_table[old_index * stats->max_state + new_index]++;
	stats->total_trans++;
}