GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/arm/kernel/perf_event_xscale.c
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <[email protected]>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 *	- xscale1pmu: 2 event counters and a cycle counter
 *	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 1,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};

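/*
 * Map the generic perf hardware and cache events onto the XScale event
 * numbers defined above.  Combinations the hardware cannot count are
 * marked HW_OP_UNSUPPORTED / CACHE_OP_UNSUPPORTED so that the common
 * ARM perf code can reject them.
 */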
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

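/*
 * Illustrative example (not part of the original file): because
 * .raw_event_mask further below is 0xFF, a raw XScale event number can
 * also be requested directly from userspace instead of going through
 * the generic maps above, e.g. data-cache misses
 * (XSCALE_PERFCTR_DCACHE_MISS, 0x0B) via the perf_event_open() syscall:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_RAW,
 *		.size	= sizeof(attr),
 *		.config	= 0x0B,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */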
#define XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define	XSCALE_CCNT_RESET	0x004
#define	XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

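/*
 * The xscale1 PMU packs everything into the single CP14 PMNC register:
 * the overflow flags, the per-counter interrupt enables and the event
 * selection fields for counters 0 and 1 all live alongside the control
 * bits defined above.
 */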
#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)

static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* The upper 4 bits and bits 7 and 11 are write-as-0. */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

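/*
 * xscale1 event enable/disable: program the event selection field and
 * interrupt enable bit for the given counter by read-modify-writing
 * the shared PMNC register under pmu_lock.
 */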
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

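/*
 * Counter allocation: cycle-count events always get the dedicated
 * cycle counter; everything else takes whichever of counters 1 and 0
 * is still free.
 */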
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
			struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}

static void
xscale1pmu_start(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

static const struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.name		= "xscale1",
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.cache_map	= &xscale_perf_cache_map,
	.event_map	= &xscale_perf_map,
	.raw_event_mask	= 0xFF,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};

static const struct arm_pmu *__init xscale1pmu_init(void)
{
	return &xscale1pmu;
}

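/*
 * The xscale2 PMU splits the state that xscale1 keeps in PMNC across
 * several CP14 registers: a control register, an overflow flag status
 * register, an interrupt enable register and an event select register,
 * plus four event counters alongside the cycle counter.
 */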
#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
				    enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx, 1);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

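/*
 * xscale2 event enable/disable: update the event select and interrupt
 * enable registers for the given counter, then write both back under
 * pmu_lock.
 */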
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

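/*
 * Reuse the xscale1 allocator for the cycle counter and counters 0/1,
 * then fall back to counters 3 and 2, which only exist on xscale2.
 */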
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
			struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}

static void
xscale2pmu_start(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static const struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.name		= "xscale2",
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.cache_map	= &xscale_perf_cache_map,
	.event_map	= &xscale_perf_map,
	.raw_event_mask	= 0xFF,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};

static const struct arm_pmu *__init xscale2pmu_init(void)
{
	return &xscale2pmu;
}
#else
static const struct arm_pmu *__init xscale1pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init xscale2pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_XSCALE */