GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm/include/cpu.h
/*-
 * Copyright 2014 Svatopluk Kraus <[email protected]>
 * Copyright 2014 Michal Meloun <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* $NetBSD: cpu.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */

#ifndef MACHINE_CPU_H
#define MACHINE_CPU_H

#include <machine/armreg.h>
#include <machine/frame.h>

void cpu_halt(void);

#ifdef _KERNEL
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

/*
 * Some kernel modules (dtrace all, for example) are compiled
 * unconditionally with -DSMP. Although it looks like a bug,
 * handle this case here and in the #elif condition of the ARM_SMP_UP macro.
 */


#if !defined(SMP) && defined(SMP_ON_UP)
#error SMP option must be defined for SMP_ON_UP option
#endif

#define CPU_ASID_KERNEL 0

#if defined(SMP_ON_UP)
#define ARM_SMP_UP(smp_code, up_code) \
do { \
        if (cpuinfo.mp_ext != 0) { \
                smp_code; \
        } else { \
                up_code; \
        } \
} while (0)
#elif defined(SMP)
#define ARM_SMP_UP(smp_code, up_code) \
do { \
        smp_code; \
} while (0)
#else
#define ARM_SMP_UP(smp_code, up_code) \
do { \
        up_code; \
} while (0)
#endif
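
/*
 * Illustrative use of ARM_SMP_UP (a sketch only; it mirrors how
 * tlb_flush_all() and icache_inv_all() below use the macro): run the
 * inner-shareable broadcast maintenance operation when the MP extensions
 * are present, otherwise fall back to the local one.
 *
 *	dsb();
 *	ARM_SMP_UP(
 *	    _CP15_TLBIALLIS(),	// broadcast variant, SMP path
 *	    _CP15_TLBIALL()	// local variant, UP path
 *	);
 *	dsb();
 */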

void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define PMU_OVSR_C 0x80000000 /* Cycle Counter */
extern uint32_t ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define sev() __asm __volatile("sev" : : : "memory")
#define wfe() __asm __volatile("wfe" : : : "memory")

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...) \
static __inline uint32_t \
fname(void) \
{ \
        uint32_t reg; \
        __asm __volatile("mrc\t" _FX(aname): "=r" (reg)); \
        return(reg); \
}

#define _R64F0(fname, aname) \
static __inline uint64_t \
fname(void) \
{ \
        uint64_t reg; \
        __asm __volatile("mrrc\t" _FX(aname): "=r" (reg)); \
        return(reg); \
}

#define _WF0(fname, aname...) \
static __inline void \
fname(void) \
{ \
        __asm __volatile("mcr\t" _FX(aname)); \
}

#define _WF1(fname, aname...) \
static __inline void \
fname(uint32_t reg) \
{ \
        __asm __volatile("mcr\t" _FX(aname):: "r" (reg)); \
}

#define _W64F1(fname, aname...) \
static __inline void \
fname(uint64_t reg) \
{ \
        __asm __volatile("mcrr\t" _FX(aname):: "r" (reg)); \
}
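
/*
 * For reference, a sketch of what these generators produce: after one step
 * of expansion, _RF0(cp15_midr_get, CP15_MIDR(%0)) becomes roughly
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\t" _FX(CP15_MIDR(%0)): "=r" (reg));
 *		return(reg);
 *	}
 *
 * with _FX stringifying the CP15_MIDR() operand list from <machine/sysreg.h>.
 */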

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL) /* Invalidate entire unified TLB */
#if defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS) /* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0)) /* Invalidate unified TLB by ASID */
#if defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0)) /* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0)) /* Invalidate unified TLB by MVA, all ASID */
#if defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0)) /* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0)) /* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL) /* Branch predictor invalidate all */
#if defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS) /* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0)) /* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0)) /* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0)) /* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0)) /* Data cache clean by MVA PoC */
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0)) /* Data cache clean by MVA PoU */
_WF1(_CP15_DCCSW, CP15_DCCSW(%0)) /* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0)) /* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0)) /* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU) /* Instruction cache invalidate all PoU */
#if defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS) /* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0)) /* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))

#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1

/*
 * TLB maintenance operations.
 */

/* Local (i.e. not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

        dsb();
        _CP15_TLBIALL();
        dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

        dsb();
        _CP15_TLBIASID(CPU_ASID_KERNEL);
        dsb();
}

/* Flush a single TLB entry (even global). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

        dsb();
        _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
        dsb();
}

/* Flush a range of TLB entries (even global). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
        KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
            size));

        dsb();
        for (; va < eva; va += PAGE_SIZE)
                _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
        dsb();
}

/* Broadcasting operations. */
#if defined(SMP)

static __inline void
tlb_flush_all(void)
{

        dsb();
        ARM_SMP_UP(
            _CP15_TLBIALLIS(),
            _CP15_TLBIALL()
        );
        dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

        dsb();
        ARM_SMP_UP(
            _CP15_TLBIASIDIS(CPU_ASID_KERNEL),
            _CP15_TLBIASID(CPU_ASID_KERNEL)
        );
        dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

        dsb();
        ARM_SMP_UP(
            _CP15_TLBIMVAAIS(va),
            _CP15_TLBIMVA(va | CPU_ASID_KERNEL)
        );
        dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
        KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
            size));

        dsb();
        ARM_SMP_UP(
            {
                for (; va < eva; va += PAGE_SIZE)
                        _CP15_TLBIMVAAIS(va);
            },
            {
                for (; va < eva; va += PAGE_SIZE)
                        _CP15_TLBIMVA(va | CPU_ASID_KERNEL);
            }
        );
        dsb();
}
#else /* !SMP */

#define tlb_flush_all() tlb_flush_all_local()
#define tlb_flush_all_ng() tlb_flush_all_ng_local()
#define tlb_flush(va) tlb_flush_local(va)
#define tlb_flush_range(va, size) tlb_flush_range_local(va, size)

#endif /* !SMP */
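
/*
 * Illustrative only (a sketch, not taken from this file): callers such as
 * pmap code are expected to update the page table entry first and then flush
 * the affected, page-aligned address, e.g.
 *
 *	update_kernel_pte(ptep, npte);	// hypothetical PTE store helper
 *	tlb_flush(va);
 *
 * tlb_flush_range() covers multi-page ranges; the *_local() variants skip the
 * inner-shareable broadcast and affect only the calling CPU.
 */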

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;

        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAU(va);
        }
        dsb();
        ARM_SMP_UP(
            _CP15_ICIALLUIS(),
            _CP15_ICIALLU()
        );
        dsb();
        isb();
}
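
/*
 * Illustrative only: icache_sync() is the primitive for making freshly
 * written instructions visible to the instruction stream, for instance after
 * copying code such as a trampoline into memory:
 *
 *	memcpy(dst, code, len);			// dst, code, len are hypothetical
 *	icache_sync((vm_offset_t)dst, len);
 */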

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

        ARM_SMP_UP(
            _CP15_ICIALLUIS(),
            _CP15_ICIALLU()
        );
        dsb();
        isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

        ARM_SMP_UP(
            _CP15_BPIALLIS(),
            _CP15_BPIALL()
        );
        dsb();
        isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAU(va);
        }
        dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction. Within the given range, if there was no dirty
 * cacheline in any cache before, no stale cacheline should remain in them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        /* invalidate L2 first */
        cpu_l2cache_inv_range(pa, size);

        /* then L1 */
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating. This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new data
 * into memory. The concern in this case is that dirty lines are not evicted
 * to main memory, overwriting the DMA data. For that reason, the L1 is done
 * first to ensure that an evicted L1 line doesn't flow to L2 after the L2
 * has been cleaned.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        /* invalidate L1 first */
        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();

        /* then L2 */
        cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction. Within the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t eva = va + size;

        dsb();
        va &= ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAC(va);
        }
        dsb();

        cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
        vm_offset_t va;
        vm_offset_t eva = sva + size;

        dsb();
        /* write back L1 first */
        va = sva & ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCCMVAC(va);
        }
        dsb();

        /* then write back and invalidate L2 */
        cpu_l2cache_wbinv_range(pa, size);

        /* then invalidate L1 */
        va = sva & ~cpuinfo.dcache_line_mask;
        for ( ; va < eva; va += cpuinfo.dcache_line_size) {
                _CP15_DCIMVAC(va);
        }
        dsb();
}
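
/*
 * Illustrative DMA buffer handling built on the primitives above (a sketch
 * only; drivers normally get this via bus_dmamap_sync() rather than calling
 * these directly):
 *
 *	dcache_wb_poc(va, pa, size);		// before a device reads the buffer
 *	dcache_inv_poc_dma(va, pa, size);	// before a device writes the buffer
 *	... DMA transfer completes ...
 *	dcache_inv_poc(va, pa, size);		// before the CPU reads the DMA'd data
 */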

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
        dsb();
        _CP15_TTB_SET(reg);
        dsb();
        _CP15_BPIALL();
        dsb();
        isb();
        tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

        cp15_ats1cpr_set(addr);
        isb();
        return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

        cp15_ats1cpw_set(addr);
        isb();
        return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

        cp15_ats1cur_set(addr);
        isb();
        return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

        cp15_ats1cuw_set(addr);
        isb();
        return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
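
/*
 * Illustrative usage (a sketch; intr_disable()/intr_restore() from
 * <machine/cpufunc.h> are assumed): run the check with interrupts disabled
 * so the PAR read pairs with the translation just performed:
 *
 *	register_t s;
 *	int error;
 *
 *	s = intr_disable();
 *	error = cp15_ats1cur_check(uva);	// uva: hypothetical user VA
 *	intr_restore(s);
 */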

static __inline uint64_t
get_cyclecount(void)
{
#if defined(DEV_PMU)
        if (pmu_attched) {
                u_int cpu;
                uint64_t h, h2;
                uint32_t l, r;

                cpu = PCPU_GET(cpuid);
                h = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
                l = cp15_pmccntr_get();
                /* In case interrupts are disabled we need to check for overflow. */
                r = cp15_pmovsr_get();
                if (r & PMU_OVSR_C) {
                        atomic_add_32(&ccnt_hi[cpu], 1);
                        /* Clear the event. */
                        cp15_pmovsr_set(PMU_OVSR_C);
                }
                /* Make sure there was no wrap-around while we read the lo half. */
                h2 = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
                if (h != h2)
                        l = cp15_pmccntr_get();
                return (h2 << 32 | l);
        } else
#endif
                return cp15_pmccntr_get();
}
#endif

#define TRAPF_USERMODE(frame) ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE)

#define TRAPF_PC(tfp) ((tfp)->tf_pc)

#define cpu_getstack(td) ((td)->td_frame->tf_usr_sp)
#define cpu_setstack(td, sp) ((td)->td_frame->tf_usr_sp = (sp))
#define cpu_spinwait() /* nothing */
#define cpu_lock_delay() DELAY(1)

#define ARM_NVEC 7
#define ARM_VEC_ALL 0xffffffff

extern vm_offset_t vector_page;

/*
 * Params passed into initarm. If you change the size of this you will
 * need to update locore.S to allocate more memory on the stack before
 * it calls initarm.
 */
struct arm_boot_params {
        register_t abp_size;       /* Size of this structure */
        register_t abp_r0;         /* r0 from the boot loader */
        register_t abp_r1;         /* r1 from the boot loader */
        register_t abp_r2;         /* r2 from the boot loader */
        register_t abp_r3;         /* r3 from the boot loader */
        vm_offset_t abp_physaddr;  /* The kernel physical address */
        vm_offset_t abp_pagetable; /* The early page table */
};

void arm_vector_init(vm_offset_t, int);
void fork_trampoline(void);
void identify_arm_cpu(void);
void *initarm(struct arm_boot_params *);

extern char btext[];
extern char etext[];
int badaddr_read(void *, size_t, void *);
#endif /* !MACHINE_CPU_H */