GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/oprofile/op_model_power4.c
/*
 * Copyright (C) 2004 Anton Blanchard <[email protected]>, IBM
 * Added mmcra[slot] support:
 * Copyright (C) 2006-2007 Will Schmidt <[email protected]>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/oprofile.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/rtas.h>
#include <asm/oprofile_impl.h>
#include <asm/reg.h>

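/* dbg() compiles away to nothing; redefine it (e.g. to printk) to trace PMU setup. */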
#define dbg(args...)

static unsigned long reset_value[OP_MAX_COUNTER];

static int oprofile_running;
static int use_slot_nums;

/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
static u64 mmcra_val;

static int power4_reg_setup(struct op_counter_config *ctr,
			       struct op_system_config *sys,
			       int num_ctrs)
{
	int i;

	/*
	 * The performance counter event settings are given in the mmcr0,
	 * mmcr1 and mmcra values passed from the user in the
	 * op_system_config structure (sys variable).
	 */
	mmcr0_val = sys->mmcr0;
	mmcr1_val = sys->mmcr1;
	mmcra_val = sys->mmcra;

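	/*
	 * The classic PMCs are 32-bit up-counters that raise an exception
	 * once bit 31 is set, so priming a counter with 0x80000000 - N
	 * makes it overflow after N events.
	 */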
	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
		reset_value[i] = 0x80000000UL - ctr[i].count;

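	/*
	 * KERNEL_DISABLE and PROBLEM_DISABLE are the MMCR0 freeze bits
	 * for supervisor and problem (user) state; clearing a bit lets
	 * the counters run in that state.
	 */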
	/* setup user and kernel profiling */
	if (sys->enable_kernel)
		mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
	else
		mmcr0_val |= MMCR0_KERNEL_DISABLE;

	if (sys->enable_user)
		mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
	else
		mmcr0_val |= MMCR0_PROBLEM_DISABLE;

	if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
	    __is_processor(PV_970) || __is_processor(PV_970FX) ||
	    __is_processor(PV_970MP) || __is_processor(PV_970GX) ||
	    __is_processor(PV_POWER5) || __is_processor(PV_POWER5p))
		use_slot_nums = 1;

	return 0;
}

extern void ppc_enable_pmcs(void);

/*
 * Older CPUs require the MMCRA sample bit to be always set, but newer
 * CPUs only want it set for some groups. Eventually we will remove all
 * knowledge of this bit in the kernel; oprofile userspace should be
 * setting it when required.
 *
 * In order to keep current installations working we force the bit for
 * those older CPUs. Once everyone has updated their oprofile userspace we
 * can remove this hack.
 */
static inline int mmcra_must_set_sample(void)
{
	if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p) ||
	    __is_processor(PV_970) || __is_processor(PV_970FX) ||
	    __is_processor(PV_970MP) || __is_processor(PV_970GX))
		return 1;

	return 0;
}

static int power4_cpu_setup(struct op_counter_config *ctr)
{
	unsigned int mmcr0 = mmcr0_val;
	unsigned long mmcra = mmcra_val;

	ppc_enable_pmcs();

	/* set the freeze bit */
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

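	/*
	 * FCM1 freezes the counters while MSR[PMM] is set, PMXE enables
	 * the performance monitor exception, FCECE freezes everything
	 * once an enabled condition occurs, and PMC1CE/PMCjCE arm the
	 * overflow condition for PMC1 and the remaining PMCs.
	 */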
	mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
	mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
	mtspr(SPRN_MMCR0, mmcr0);

	mtspr(SPRN_MMCR1, mmcr1_val);

	if (mmcra_must_set_sample())
		mmcra |= MMCRA_SAMPLE_ENABLE;
	mtspr(SPRN_MMCRA, mmcra);

	dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR0));
	dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCR1));
	dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
	    mfspr(SPRN_MMCRA));

	return 0;
}

static int power4_start(struct op_counter_config *ctr)
{
	int i;
	unsigned int mmcr0;

	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		if (ctr[i].enabled) {
			classic_ctr_write(i, reset_value[i]);
		} else {
			classic_ctr_write(i, 0);
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 1;

	dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
	return 0;
}

static void power4_stop(void)
{
	unsigned int mmcr0;

	/* freeze counters */
	mmcr0 = mfspr(SPRN_MMCR0);
	mmcr0 |= MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);

	oprofile_running = 0;

	dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

	mb();
}

/* Fake functions used by get_pc() below to bucket unattributable samples */
static void __used hypervisor_bucket(void)
{
}

static void __used rtas_bucket(void)
{
}

static void __used kernel_unknown_bucket(void)
{
}
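
/*
 * On 64-bit PowerPC (ELFv1 ABI) a function symbol names a descriptor
 * whose first word is the code entry point, so dereferencing one of the
 * bucket functions above yields a text address to report; that is the
 * "function descriptor madness" noted below.
 */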

/*
 * On GQ and newer the MMCRA stores the HV and PR bits at the time
 * the SIAR was sampled. We use that to work out if the SIAR was sampled in
 * the hypervisor, our exception vectors or RTAS.
 * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
 * to more accurately identify the address of the sampled instruction. The
 * mmcra[slot] bits represent the slot number of a sampled instruction
 * within an instruction group. The slot will contain a value between 1
 * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
 */
static unsigned long get_pc(struct pt_regs *regs)
{
	unsigned long pc = mfspr(SPRN_SIAR);
	unsigned long mmcra;
	unsigned long slot;

	/* Can't do much about it */
	if (!cur_cpu_spec->oprofile_mmcra_sihv)
		return pc;

	mmcra = mfspr(SPRN_MMCRA);

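	/*
	 * SIAR points at the first instruction of the sampled dispatch
	 * group; instructions are 4 bytes, so advance by the slot offset
	 * to reach the instruction that was actually sampled.
	 */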
	if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
		if (slot > 1)
			pc += 4 * (slot - 1);
	}

	/* Were we in the hypervisor? */
	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
		/* function descriptor madness */
		return *((unsigned long *)hypervisor_bucket);

	/* We were in userspace, nothing to do */
	if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
		return pc;

#ifdef CONFIG_PPC_RTAS
	/* Were we in RTAS? */
	if (pc >= rtas.base && pc < (rtas.base + rtas.size))
		/* function descriptor madness */
		return *((unsigned long *)rtas_bucket);
#endif

	/* Were we in our exception vectors or SLB real mode miss handler? */
	if (pc < 0x1000000UL)
		return (unsigned long)__va(pc);

	/* Not sure where we were */
	if (!is_kernel_addr(pc))
		/* function descriptor madness */
		return *((unsigned long *)kernel_unknown_bucket);

	return pc;
}
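
/*
 * MMCRA[SIPR] latches MSR[PR] (problem state) at the time of the sample;
 * when the CPU lacks that facility, fall back to classifying the sampled
 * address itself.
 */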

static int get_kernel(unsigned long pc, unsigned long mmcra)
{
	int is_kernel;

	if (!cur_cpu_spec->oprofile_mmcra_sihv) {
		is_kernel = is_kernel_addr(pc);
	} else {
		is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
	}

	return is_kernel;
}

static bool pmc_overflow(unsigned long val)
{
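	/* A sign-bit test: bit 31 set means the 32-bit PMC has overflowed. */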
	if ((int)val < 0)
		return true;

	/*
	 * Events on POWER7 can roll back if a speculative event doesn't
	 * eventually complete. Unfortunately in some rare cases they will
	 * raise a performance monitor exception. We need to catch this to
	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
	 * cycles from overflow.
	 *
	 * We only do this if the first pass fails to find any overflowing
	 * PMCs because a user might set a period of less than 256 and we
	 * don't want to mistakenly reset them.
	 */
	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
		return true;

	return false;
}

static void power4_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;
	unsigned int mmcr0;
	unsigned long mmcra;

	mmcra = mfspr(SPRN_MMCRA);

	pc = get_pc(regs);
	is_kernel = get_kernel(pc, mmcra);

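	/*
	 * MMCR0_FCM1 (set in power4_cpu_setup) freezes the counters
	 * whenever MSR[PMM] is set, so the counter rewrites below happen
	 * with counting frozen.
	 */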
	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);

	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
		val = classic_ctr_read(i);
		if (pmc_overflow(val)) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				classic_ctr_write(i, reset_value[i]);
			} else {
				classic_ctr_write(i, 0);
			}
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/* reset the perfmon trigger */
	mmcr0 |= MMCR0_PMXE;

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/* Clear the appropriate bits in the MMCRA */
	mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
	mtspr(SPRN_MMCRA, mmcra);

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);
}

struct op_powerpc_model op_model_power4 = {
	.reg_setup = power4_reg_setup,
	.cpu_setup = power4_cpu_setup,
	.start = power4_start,
	.stop = power4_stop,
	.handle_interrupt = power4_handle_interrupt,
};