/*-
 * Copyright 2014 Svatopluk Kraus <[email protected]>
 * Copyright 2014 Michal Meloun <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/* $NetBSD: cpu.h,v 1.2 2001/02/23 21:23:52 reinoud Exp $ */

#ifndef MACHINE_CPU_H
#define MACHINE_CPU_H

#include <machine/armreg.h>
#include <machine/frame.h>

void cpu_halt(void);

#ifdef _KERNEL
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/sysreg.h>

/*
 * Some kernel modules (dtraceall, for example) are compiled unconditionally
 * with -DSMP.  Although it looks like a bug, handle this case here and in
 * the #elif condition of the ARM_SMP_UP() macro.
 */

#if !defined(SMP) && defined(SMP_ON_UP)
#error SMP option must be defined for SMP_ON_UP option
#endif

#define CPU_ASID_KERNEL 0

#if defined(SMP_ON_UP)
#define ARM_SMP_UP(smp_code, up_code)		\
do {						\
	if (cpuinfo.mp_ext != 0) {		\
		smp_code;			\
	} else {				\
		up_code;			\
	}					\
} while (0)
#elif defined(SMP)
#define ARM_SMP_UP(smp_code, up_code)		\
do {						\
	smp_code;				\
} while (0)
#else
#define ARM_SMP_UP(smp_code, up_code)		\
do {						\
	up_code;				\
} while (0)
#endif
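
/*
 * For illustration: ARM_SMP_UP(smp_code, up_code) picks between the two code
 * blocks at run time when the kernel is built with SMP_ON_UP (based on
 * cpuinfo.mp_ext) and at compile time otherwise.  The maintenance helpers
 * below use it like this (see tlb_flush_all()):
 *
 *	ARM_SMP_UP(
 *	    _CP15_TLBIALLIS(),	-- broadcast invalidate, inner shareable
 *	    _CP15_TLBIALL()	-- local invalidate only
 *	);
 */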

void dcache_wbinv_poc_all(void); /* !!! NOT SMP coherent function !!! */
vm_offset_t dcache_wb_pou_checked(vm_offset_t, vm_size_t);
vm_offset_t icache_inv_pou_checked(vm_offset_t, vm_size_t);

#ifdef DEV_PMU
#include <sys/pcpu.h>
#define	PMU_OVSR_C	0x80000000	/* Cycle Counter */
extern uint32_t	ccnt_hi[MAXCPU];
extern int pmu_attched;
#endif /* DEV_PMU */

#define sev() __asm __volatile("sev" : : : "memory")
#define wfe() __asm __volatile("wfe" : : : "memory")
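
/*
 * Editorial sketch (not part of the original header): wfe() parks the CPU
 * until an event is signalled and sev() wakes such waiters, e.g. a spin-wait
 * of the form
 *
 *	while (atomic_load_acq_int(&flag) == 0)
 *		wfe();
 *
 * where "flag" is a hypothetical shared variable that the releasing side
 * sets before calling sev().  WFE may also return spuriously, hence the loop.
 */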

/*
 * Macros to generate CP15 (system control processor) read/write functions.
 */
#define _FX(s...) #s

#define _RF0(fname, aname...)					\
static __inline uint32_t					\
fname(void)							\
{								\
	uint32_t reg;						\
	__asm __volatile("mrc\t" _FX(aname): "=r" (reg));	\
	return(reg);						\
}

#define _R64F0(fname, aname)					\
static __inline uint64_t					\
fname(void)							\
{								\
	uint64_t reg;						\
	__asm __volatile("mrrc\t" _FX(aname): "=r" (reg));	\
	return(reg);						\
}

#define _WF0(fname, aname...)					\
static __inline void						\
fname(void)							\
{								\
	__asm __volatile("mcr\t" _FX(aname));			\
}

#define _WF1(fname, aname...)					\
static __inline void						\
fname(uint32_t reg)						\
{								\
	__asm __volatile("mcr\t" _FX(aname):: "r" (reg));	\
}

#define _W64F1(fname, aname...)					\
static __inline void						\
fname(uint64_t reg)						\
{								\
	__asm __volatile("mcrr\t" _FX(aname):: "r" (reg));	\
}
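
/*
 * For illustration: an accessor generated below, e.g.
 * _RF0(cp15_midr_get, CP15_MIDR(%0)), becomes an inline reader along the
 * lines of
 *
 *	static __inline uint32_t
 *	cp15_midr_get(void)
 *	{
 *		uint32_t reg;
 *		__asm __volatile("mrc\tp15, 0, %0, c0, c0, 0" : "=r" (reg));
 *		return (reg);
 *	}
 *
 * where the exact operand string is whatever the CP15_MIDR() macro from the
 * machine headers included above expands (and _FX() stringizes) to.
 */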

/*
 * Raw CP15 maintenance operations
 * !!! not for external use !!!
 */

/* TLB */

_WF0(_CP15_TLBIALL, CP15_TLBIALL)		/* Invalidate entire unified TLB */
#if defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS)		/* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0))		/* Invalidate unified TLB by ASID */
#if defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0))	/* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0))		/* Invalidate unified TLB by MVA, all ASID */
#if defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0))	/* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0))		/* Invalidate unified TLB by MVA */

_WF1(_CP15_TTB_SET, CP15_TTBR0(%0))

/* Cache and Branch predictor */

_WF0(_CP15_BPIALL, CP15_BPIALL)			/* Branch predictor invalidate all */
#if defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS)		/* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0))		/* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0))		/* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0))		/* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0))		/* Data cache clean by MVA PoC */
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0))		/* Data cache clean by MVA PoU */
_WF1(_CP15_DCCSW, CP15_DCCSW(%0))		/* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0))		/* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0))		/* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU)		/* Instruction cache invalidate all PoU */
#if defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS)		/* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0))		/* Instruction cache invalidate */

/*
 * Publicly accessible functions
 */

/* CP14 Debug Registers */
_RF0(cp14_dbgdidr_get, CP14_DBGDIDR(%0))
_RF0(cp14_dbgprsr_get, CP14_DBGPRSR(%0))
_RF0(cp14_dbgoslsr_get, CP14_DBGOSLSR(%0))
_RF0(cp14_dbgosdlr_get, CP14_DBGOSDLR(%0))
_RF0(cp14_dbgdscrint_get, CP14_DBGDSCRint(%0))

_WF1(cp14_dbgdscr_v6_set, CP14_DBGDSCRext_V6(%0))
_WF1(cp14_dbgdscr_v7_set, CP14_DBGDSCRext_V7(%0))
_WF1(cp14_dbgvcr_set, CP14_DBGVCR(%0))
_WF1(cp14_dbgoslar_set, CP14_DBGOSLAR(%0))

/* Various control registers */

_RF0(cp15_cpacr_get, CP15_CPACR(%0))
_WF1(cp15_cpacr_set, CP15_CPACR(%0))
_RF0(cp15_dfsr_get, CP15_DFSR(%0))
_RF0(cp15_ifsr_get, CP15_IFSR(%0))
_WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
_WF1(cp15_ats1cpw_set, CP15_ATS1CPW(%0))
_WF1(cp15_ats1cur_set, CP15_ATS1CUR(%0))
_WF1(cp15_ats1cuw_set, CP15_ATS1CUW(%0))
_RF0(cp15_par_get, CP15_PAR(%0))
_RF0(cp15_sctlr_get, CP15_SCTLR(%0))

/* CPU id registers */
_RF0(cp15_midr_get, CP15_MIDR(%0))
_RF0(cp15_ctr_get, CP15_CTR(%0))
_RF0(cp15_tcmtr_get, CP15_TCMTR(%0))
_RF0(cp15_tlbtr_get, CP15_TLBTR(%0))
_RF0(cp15_mpidr_get, CP15_MPIDR(%0))
_RF0(cp15_revidr_get, CP15_REVIDR(%0))
_RF0(cp15_ccsidr_get, CP15_CCSIDR(%0))
_RF0(cp15_clidr_get, CP15_CLIDR(%0))
_RF0(cp15_aidr_get, CP15_AIDR(%0))
_WF1(cp15_csselr_set, CP15_CSSELR(%0))
_RF0(cp15_id_pfr0_get, CP15_ID_PFR0(%0))
_RF0(cp15_id_pfr1_get, CP15_ID_PFR1(%0))
_RF0(cp15_id_dfr0_get, CP15_ID_DFR0(%0))
_RF0(cp15_id_afr0_get, CP15_ID_AFR0(%0))
_RF0(cp15_id_mmfr0_get, CP15_ID_MMFR0(%0))
_RF0(cp15_id_mmfr1_get, CP15_ID_MMFR1(%0))
_RF0(cp15_id_mmfr2_get, CP15_ID_MMFR2(%0))
_RF0(cp15_id_mmfr3_get, CP15_ID_MMFR3(%0))
_RF0(cp15_id_isar0_get, CP15_ID_ISAR0(%0))
_RF0(cp15_id_isar1_get, CP15_ID_ISAR1(%0))
_RF0(cp15_id_isar2_get, CP15_ID_ISAR2(%0))
_RF0(cp15_id_isar3_get, CP15_ID_ISAR3(%0))
_RF0(cp15_id_isar4_get, CP15_ID_ISAR4(%0))
_RF0(cp15_id_isar5_get, CP15_ID_ISAR5(%0))
_RF0(cp15_cbar_get, CP15_CBAR(%0))

/* Performance Monitor registers */

_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_set, CP15_PMCNTENSET(%0))
_WF1(cp15_pmcnten_clr, CP15_PMCNTENCLR(%0))
_RF0(cp15_pmovsr_get, CP15_PMOVSR(%0))
_WF1(cp15_pmovsr_set, CP15_PMOVSR(%0))
_WF1(cp15_pmswinc_set, CP15_PMSWINC(%0))
_RF0(cp15_pmselr_get, CP15_PMSELR(%0))
_WF1(cp15_pmselr_set, CP15_PMSELR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
_RF0(cp15_pmxevtyper_get, CP15_PMXEVTYPER(%0))
_WF1(cp15_pmxevtyper_set, CP15_PMXEVTYPER(%0))
_RF0(cp15_pmxevcntr_get, CP15_PMXEVCNTRR(%0))
_WF1(cp15_pmxevcntr_set, CP15_PMXEVCNTRR(%0))
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))

_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
_RF0(cp15_tpidruro_get, CP15_TPIDRURO(%0))
_WF1(cp15_tpidruro_set, CP15_TPIDRURO(%0))
_RF0(cp15_tpidrpwr_get, CP15_TPIDRPRW(%0))
_WF1(cp15_tpidrpwr_set, CP15_TPIDRPRW(%0))

/* Generic Timer registers - only use when you know the hardware is available */
_RF0(cp15_cntfrq_get, CP15_CNTFRQ(%0))
_WF1(cp15_cntfrq_set, CP15_CNTFRQ(%0))
_RF0(cp15_cntkctl_get, CP15_CNTKCTL(%0))
_WF1(cp15_cntkctl_set, CP15_CNTKCTL(%0))
_RF0(cp15_cntp_tval_get, CP15_CNTP_TVAL(%0))
_WF1(cp15_cntp_tval_set, CP15_CNTP_TVAL(%0))
_RF0(cp15_cntp_ctl_get, CP15_CNTP_CTL(%0))
_WF1(cp15_cntp_ctl_set, CP15_CNTP_CTL(%0))
_RF0(cp15_cntv_tval_get, CP15_CNTV_TVAL(%0))
_WF1(cp15_cntv_tval_set, CP15_CNTV_TVAL(%0))
_RF0(cp15_cntv_ctl_get, CP15_CNTV_CTL(%0))
_WF1(cp15_cntv_ctl_set, CP15_CNTV_CTL(%0))
_RF0(cp15_cnthctl_get, CP15_CNTHCTL(%0))
_WF1(cp15_cnthctl_set, CP15_CNTHCTL(%0))
_RF0(cp15_cnthp_tval_get, CP15_CNTHP_TVAL(%0))
_WF1(cp15_cnthp_tval_set, CP15_CNTHP_TVAL(%0))
_RF0(cp15_cnthp_ctl_get, CP15_CNTHP_CTL(%0))
_WF1(cp15_cnthp_ctl_set, CP15_CNTHP_CTL(%0))

_R64F0(cp15_cntpct_get, CP15_CNTPCT(%Q0, %R0))
_R64F0(cp15_cntvct_get, CP15_CNTVCT(%Q0, %R0))
_R64F0(cp15_cntp_cval_get, CP15_CNTP_CVAL(%Q0, %R0))
_W64F1(cp15_cntp_cval_set, CP15_CNTP_CVAL(%Q0, %R0))
_R64F0(cp15_cntv_cval_get, CP15_CNTV_CVAL(%Q0, %R0))
_W64F1(cp15_cntv_cval_set, CP15_CNTV_CVAL(%Q0, %R0))
_R64F0(cp15_cntvoff_get, CP15_CNTVOFF(%Q0, %R0))
_W64F1(cp15_cntvoff_set, CP15_CNTVOFF(%Q0, %R0))
_R64F0(cp15_cnthp_cval_get, CP15_CNTHP_CVAL(%Q0, %R0))
_W64F1(cp15_cnthp_cval_set, CP15_CNTHP_CVAL(%Q0, %R0))
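
/*
 * Illustrative use (editorial note), only on hardware where the ARM Generic
 * Timer is known to be present, as the comment above warns:
 *
 *	uint32_t freq = cp15_cntfrq_get();	-- counter frequency, in Hz
 *	uint64_t now = cp15_cntpct_get();	-- current physical count
 */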

#undef _FX
#undef _RF0
#undef _WF0
#undef _WF1

/*
 * TLB maintenance operations.
 */

/* Local (i.e., not broadcasting) operations. */

/* Flush all TLB entries (even global). */
static __inline void
tlb_flush_all_local(void)
{

	dsb();
	_CP15_TLBIALL();
	dsb();
}

/* Flush all non-global TLB entries. */
static __inline void
tlb_flush_all_ng_local(void)
{

	dsb();
	_CP15_TLBIASID(CPU_ASID_KERNEL);
	dsb();
}

/* Flush a single TLB entry (even a global one). */
static __inline void
tlb_flush_local(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Flush a range of TLB entries (even global ones). */
static __inline void
tlb_flush_range_local(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	for (; va < eva; va += PAGE_SIZE)
		_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	dsb();
}

/* Broadcasting operations. */
#if defined(SMP)

static __inline void
tlb_flush_all(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIALLIS(),
	    _CP15_TLBIALL()
	);
	dsb();
}

static __inline void
tlb_flush_all_ng(void)
{

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIASIDIS(CPU_ASID_KERNEL),
	    _CP15_TLBIASID(CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush(vm_offset_t va)
{

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));

	dsb();
	ARM_SMP_UP(
	    _CP15_TLBIMVAAIS(va),
	    _CP15_TLBIMVA(va | CPU_ASID_KERNEL)
	);
	dsb();
}

static __inline void
tlb_flush_range(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	KASSERT((va & PAGE_MASK) == 0, ("%s: va %#x not aligned", __func__, va));
	KASSERT((size & PAGE_MASK) == 0, ("%s: size %#x not aligned", __func__,
	    size));

	dsb();
	ARM_SMP_UP(
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVAAIS(va);
	    },
	    {
		for (; va < eva; va += PAGE_SIZE)
			_CP15_TLBIMVA(va | CPU_ASID_KERNEL);
	    }
	);
	dsb();
}
#else /* !SMP */

#define tlb_flush_all()			tlb_flush_all_local()
#define tlb_flush_all_ng()		tlb_flush_all_ng_local()
#define tlb_flush(va)			tlb_flush_local(va)
#define tlb_flush_range(va, size)	tlb_flush_range_local(va, size)

#endif /* !SMP */

/*
 * Cache maintenance operations.
 */

/* Sync I and D caches to PoU */
static __inline void
icache_sync(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;

	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAU(va);
	}
	dsb();
	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate I cache */
static __inline void
icache_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_ICIALLUIS(),
	    _CP15_ICIALLU()
	);
	dsb();
	isb();
}

/* Invalidate branch predictor buffer */
static __inline void
bpb_inv_all(void)
{

	ARM_SMP_UP(
	    _CP15_BPIALLIS(),
	    _CP15_BPIALL()
	);
	dsb();
	isb();
}

/* Write back D-cache to PoU */
static __inline void
dcache_wb_pou(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAU(va);
	}
	dsb();
}

/*
 * Invalidate D-cache to PoC
 *
 * Caches are invalidated from outermost to innermost as fresh cachelines
 * flow in this direction.  In the given range, if no cache held a dirty
 * cacheline beforehand, no stale cacheline should remain in any of them
 * after this operation finishes.
 */
static __inline void
dcache_inv_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	/* invalidate L2 first */
	cpu_l2cache_inv_range(pa, size);

	/* then L1 */
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/*
 * Discard D-cache lines to PoC, prior to overwrite by a DMA engine.
 *
 * Normal invalidation does L2 then L1 to ensure that stale data from L2
 * doesn't flow into L1 while invalidating.  This routine is intended to be
 * used only when invalidating a buffer before a DMA operation loads new data
 * into memory.  The concern here is that dirty lines must not be evicted to
 * main memory later, where they would overwrite the DMA data.  For that
 * reason, L1 is done first, so that a line evicted from L1 cannot flow into
 * L2 after L2 has already been invalidated.
 */
static __inline void
dcache_inv_poc_dma(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	/* invalidate L1 first */
	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();

	/* then L2 */
	cpu_l2cache_inv_range(pa, size);
}

/*
 * Write back D-cache to PoC
 *
 * Caches are written back from innermost to outermost as dirty cachelines
 * flow in this direction.  In the given range, no dirty cacheline should
 * remain in any cache after this operation finishes.
 */
static __inline void
dcache_wb_poc(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t eva = va + size;

	dsb();
	va &= ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	cpu_l2cache_wb_range(pa, size);
}

/* Write back and invalidate D-cache to PoC */
static __inline void
dcache_wbinv_poc(vm_offset_t sva, vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	vm_offset_t eva = sva + size;

	dsb();
	/* write back L1 first */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCCMVAC(va);
	}
	dsb();

	/* then write back and invalidate L2 */
	cpu_l2cache_wbinv_range(pa, size);

	/* then invalidate L1 */
	va = sva & ~cpuinfo.dcache_line_mask;
	for ( ; va < eva; va += cpuinfo.dcache_line_size) {
		_CP15_DCIMVAC(va);
	}
	dsb();
}

/* Set TTB0 register */
static __inline void
cp15_ttbr_set(uint32_t reg)
{
	dsb();
	_CP15_TTB_SET(reg);
	dsb();
	_CP15_BPIALL();
	dsb();
	isb();
	tlb_flush_all_ng_local();
}

/*
 * Functions for address checking:
 *
 *  cp15_ats1cpr_check() ... check stage 1 privileged (PL1) read access
 *  cp15_ats1cpw_check() ... check stage 1 privileged (PL1) write access
 *  cp15_ats1cur_check() ... check stage 1 unprivileged (PL0) read access
 *  cp15_ats1cuw_check() ... check stage 1 unprivileged (PL0) write access
 *
 * They must be called while interrupts are disabled to get a consistent
 * result.
 */
static __inline int
cp15_ats1cpr_check(vm_offset_t addr)
{

	cp15_ats1cpr_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cpw_check(vm_offset_t addr)
{

	cp15_ats1cpw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cur_check(vm_offset_t addr)
{

	cp15_ats1cur_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}

static __inline int
cp15_ats1cuw_check(vm_offset_t addr)
{

	cp15_ats1cuw_set(addr);
	isb();
	return (cp15_par_get() & 0x01 ? EFAULT : 0);
}
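
/*
 * Illustrative (hypothetical) use, following the note above about calling
 * these with interrupts disabled; intr_disable()/intr_restore() from the
 * machine headers are assumed here, and "va" is a placeholder address:
 *
 *	register_t s;
 *	int error;
 *
 *	s = intr_disable();
 *	error = cp15_ats1cpr_check(va);		-- may the kernel read va?
 *	intr_restore(s);
 *	if (error == EFAULT)
 *		-- va is not readable at PL1
 */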

static __inline uint64_t
get_cyclecount(void)
{
#if defined(DEV_PMU)
	if (pmu_attched) {
		u_int cpu;
		uint64_t h, h2;
		uint32_t l, r;

		cpu = PCPU_GET(cpuid);
		h = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
		l = cp15_pmccntr_get();
		/* In case interrupts are disabled, we need to check for overflow. */
		r = cp15_pmovsr_get();
		if (r & PMU_OVSR_C) {
			atomic_add_32(&ccnt_hi[cpu], 1);
			/* Clear the event. */
			cp15_pmovsr_set(PMU_OVSR_C);
		}
		/* Make sure there was no wrap-around while we read the lo half. */
		h2 = (uint64_t)atomic_load_acq_32(&ccnt_hi[cpu]);
		if (h != h2)
			l = cp15_pmccntr_get();
		return (h2 << 32 | l);
	} else
#endif
		return cp15_pmccntr_get();
}
#endif

#define TRAPF_USERMODE(frame)	((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE)

#define TRAPF_PC(tfp)		((tfp)->tf_pc)

#define cpu_getstack(td)	((td)->td_frame->tf_usr_sp)
#define cpu_setstack(td, sp)	((td)->td_frame->tf_usr_sp = (sp))
#define cpu_spinwait()		/* nothing */
#define cpu_lock_delay()	DELAY(1)

#define ARM_NVEC		7
#define ARM_VEC_ALL		0xffffffff

extern vm_offset_t vector_page;

/*
 * Params passed into initarm.  If you change the size of this structure, you
 * will need to update locore.S to allocate more memory on the stack before
 * it calls initarm.
 */
struct arm_boot_params {
	register_t	abp_size;	/* Size of this structure */
	register_t	abp_r0;		/* r0 from the boot loader */
	register_t	abp_r1;		/* r1 from the boot loader */
	register_t	abp_r2;		/* r2 from the boot loader */
	register_t	abp_r3;		/* r3 from the boot loader */
	vm_offset_t	abp_physaddr;	/* The kernel physical address */
	vm_offset_t	abp_pagetable;	/* The early page table */
};

void	arm_vector_init(vm_offset_t, int);
void	fork_trampoline(void);
void	identify_arm_cpu(void);
void	*initarm(struct arm_boot_params *);

extern char btext[];
extern char etext[];
int	badaddr_read(void *, size_t, void *);
#endif /* !MACHINE_CPU_H */