GitHub Repository: torvalds/linux
Path: blob/master/arch/arm/include/asm/assembler.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
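
/*
 * Usage sketch (illustrative, not part of the header): extract byte 1
 * (in memory order) of the word in r0 on either endianness:
 *
 *	mov	r1, r0, get_byte_1	@ lsr #8 on LE, lsr #16 on BE
 *	and	r1, r1, #255
 */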

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif
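
/*
 * Sketch of a typical use (illustrative): byte-swap a value read from
 * little-endian hardware only when the kernel runs big-endian:
 *
 *	ldr	r0, [r1]
 *	ARM_BE8(rev	r0, r0)		@ emitted only if CONFIG_CPU_ENDIAN_BE8
 */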

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory. Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...)	code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK	0xfff

/* the frame pointer used for stack unwinding */
ARM(	fpreg	.req	r11	)
THUMB(	fpreg	.req	r7	)

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

#if __LINUX_ARM_ARCH__ < 7
	.macro	dsb, args
	mcr	p15, 0, r0, c7, c10, 4
	.endm

	.macro	isb, args
	mcr	p15, 0, r0, c7, c5, 4
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs. Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register. We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
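
/*
 * Sketch of the intended pairing (illustrative; r4 is an arbitrary
 * scratch register chosen here, not mandated by the macros):
 *
 *	save_and_disable_irqs r4	@ old state -> r4, IRQs masked
 *	...				@ critical section
 *	restore_irqs r4			@ put the saved state back
 */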

/*
 * Assembly version of "adr rd, BSYM(sym)". This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler. Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
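
/*
 * Usage sketch (illustrative): take the address of a local label in a
 * mode-aware way, so the Thumb bit is set on Thumb-2 kernels:
 *
 *	badr	lr, 1f			@ return address, Thumb bit included
 *	b	__do_thing		@ hypothetical routine returning via lr
 * 1:
 */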

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
	/* thread_info is the first member of struct task_struct */
	get_current \rd
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
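
/*
 * Usage sketch (illustrative): wrap a user-space access so that a fault
 * branches to a 9001 fixup label, which the caller must provide:
 *
 *	USER(	ldrt	r0, [r1]	)	@ may fault; jumps to 9001 then
 *	...
 * 9001:	mov	r0, #0			@ caller-supplied fixup path
 */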

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.align	2						;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.pushsection ".alt.smp.init", "a"			;\
	.align	2						;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)				;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...)	instr
#define ALT_UP_B(label)		b label
#endif
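
/*
 * Typical pairing (illustrative): emit a barrier on SMP kernels and let
 * boot-time patching turn it into a nop when running on a UP system:
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */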

/*
 * this_cpu_offset - load the per-CPU offset of this CPU into
 *		     register 'rd'
 */
	.macro	this_cpu_offset, rd:req
#ifdef CONFIG_SMP
	ALT_SMP(mrc p15, 0, \rd, c13, c0, 4)
#ifdef CONFIG_CPU_V6
	ALT_UP_B(.L1_\@)
.L0_\@:
	.subsection 1
.L1_\@:	ldr_va	\rd, __per_cpu_offset
	b	.L0_\@
	.previous
#endif
#else
	mov	\rd, #0
#endif
	.endm

/*
 * set_current - store the task pointer of this CPU's current task
 */
	.macro	set_current, rn:req, tmp:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mcr	p15, 0, \rn, c13, c0, 3		@ set TPIDRURO register
#ifdef CONFIG_CPU_V6
	ALT_UP_B(.L0_\@)
	.subsection 1
.L0_\@:	str_va	\rn, __current, \tmp
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	str_va	\rn, __current, \tmp
#endif
	.endm

/*
 * get_current - load the task pointer of this CPU's current task
 */
	.macro	get_current, rd:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
9998:	mrc	p15, 0, \rd, c13, c0, 3		@ get TPIDRURO register
#ifdef CONFIG_CPU_V6
	ALT_UP_B(.L0_\@)
	.subsection 1
.L0_\@:	ldr_va	\rd, __current
	b	.L1_\@
	.previous
.L1_\@:
#endif
#else
	ldr_va	\rd, __current
#endif
	.endm

/*
 * reload_current - reload the task pointer of this CPU's current task
 *		    into the TLS register
 */
	.macro	reload_current, t1:req, t2:req
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
#ifdef CONFIG_CPU_V6
	ALT_SMP(nop)
	ALT_UP_B(.L0_\@)
#endif
	ldr_this_cpu \t1, __entry_task, \t1, \t2
	mcr	p15, 0, \t1, c13, c0, 3		@ store in TPIDRURO
.L0_\@:
#endif
	.endm

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

/*
 * Raw SMP data memory barrier
 */
	.macro	__smp_dmb mode
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	dmb	ish
	.else
	W(dmb)	ish
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5		@ dmb
#else
	.error "Incompatible SMP platform"
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
	.macro	safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
	/*
	 * workaround for possibly broken pre-v6 hardware
	 * (akita, Sharp Zaurus C-1000, PXA270-based)
	 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
	.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
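
/*
 * Usage sketch (illustrative): declare a NUL-terminated string object;
 * the name "err_msg" is an arbitrary example, not defined elsewhere:
 *
 *	string	err_msg, "unexpected state"
 */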

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr
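
/*
 * Usage sketch (illustrative): function return via the generated macros;
 * "reteq" is one of the conditional variants produced by the .irp above:
 *
 *	ret	lr			@ bx lr on v6+, mov pc, lr before
 *	reteq	lr			@ conditional return
 */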

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro	__adldst_l, op, reg, sym, tmp, c
	.if	__LINUX_ARM_ARCH__ < 7
	ldr\c	\tmp, .La\@
	.subsection 1
	.align	2
.La\@:	.long	\sym - .Lpc\@
	.previous
	.else
	.ifnb	\c
 THUMB(	ittt	\c			)
	.endif
	movw\c	\tmp, #:lower16:\sym - .Lpc\@
	movt\c	\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set	.Lpc\@, . + 8		// PC bias
	.ifc	\op, add
	add\c	\reg, \tmp, pc
	.else
	\op\c	\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c	\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set	.Lpc\@, . + (. - .Lb\@)

	.ifnc	\op, add
	\op\c	\reg, [\tmp]
	.endif
#endif
	.endm

/*
 * mov_l - move a constant value or [relocated] address into a register
 */
	.macro	mov_l, dst:req, imm:req, cond
	.if	__LINUX_ARM_ARCH__ < 7
	ldr\cond \dst, =\imm
	.else
	movw\cond \dst, #:lower16:\imm
	movt\cond \dst, #:upper16:\imm
	.endif
	.endm
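
/*
 * Usage sketch (illustrative): load a 32-bit constant or address with a
 * movw/movt pair on v7+, falling back to a literal load on older cores:
 *
 *	mov_l	r0, 0x12345678		@ arbitrary constant
 *	mov_l	r1, some_sym		@ "some_sym" is a placeholder symbol
 */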

/*
 * adr_l - adr pseudo-op with unlimited range
 *
 * @dst: destination register
 * @sym: name of the symbol
 * @cond: conditional opcode suffix
 */
	.macro	adr_l, dst:req, sym:req, cond
	__adldst_l add, \dst, \sym, \dst, \cond
	.endm

/*
 * ldr_l - ldr <literal> pseudo-op with unlimited range
 *
 * @dst: destination register
 * @sym: name of the symbol
 * @cond: conditional opcode suffix
 */
	.macro	ldr_l, dst:req, sym:req, cond
	__adldst_l ldr, \dst, \sym, \dst, \cond
	.endm

/*
 * str_l - str <literal> pseudo-op with unlimited range
 *
 * @src: source register
 * @sym: name of the symbol
 * @tmp: mandatory scratch register
 * @cond: conditional opcode suffix
 */
	.macro	str_l, src:req, sym:req, tmp:req, cond
	__adldst_l str, \src, \sym, \tmp, \cond
	.endm
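
/*
 * Usage sketch (illustrative; "some_var" stands in for a real symbol):
 *
 *	adr_l	r0, some_var		@ r0 = &some_var, any distance
 *	ldr_l	r1, some_var		@ r1 = some_var
 *	str_l	r1, some_var, r2	@ some_var = r1, r2 clobbered
 */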

	.macro	__ldst_va, op, reg, tmp, sym, cond, offset
#if __LINUX_ARM_ARCH__ >= 7 || \
    !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
    (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	mov_l	\tmp, \sym, \cond
#else
	/*
	 * Avoid a literal load, by emitting a sequence of ADD/LDR instructions
	 * with the appropriate relocations. The combined sequence has a range
	 * of -/+ 256 MiB, which should be sufficient for the core kernel and
	 * for modules loaded into the module region.
	 */
	.globl	\sym
	.reloc	.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc	.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc	.L2_\@, R_ARM_LDR_PC_G2, \sym
.L0_\@:	sub\cond \tmp, pc, #8 - \offset
.L1_\@:	sub\cond \tmp, \tmp, #4 - \offset
.L2_\@:
#endif
	\op\cond \reg, [\tmp, #\offset]
	.endm

/*
 * ldr_va - load a 32-bit word from the virtual address of \sym
 */
	.macro	ldr_va, rd:req, sym:req, cond, tmp, offset=0
	.ifnb	\tmp
	__ldst_va ldr, \rd, \tmp, \sym, \cond, \offset
	.else
	__ldst_va ldr, \rd, \rd, \sym, \cond, \offset
	.endif
	.endm

/*
 * str_va - store a 32-bit word to the virtual address of \sym
 */
	.macro	str_va, rn:req, sym:req, tmp:req, cond
	__ldst_va str, \rn, \tmp, \sym, \cond, 0
	.endm

/*
 * ldr_this_cpu_armv6 - Load a 32-bit word from the per-CPU variable 'sym',
 *			without using a temp register. Supported in ARM mode
 *			only.
 */
	.macro	ldr_this_cpu_armv6, rd:req, sym:req
	this_cpu_offset \rd
	.globl	\sym
	.reloc	.L0_\@, R_ARM_ALU_PC_G0_NC, \sym
	.reloc	.L1_\@, R_ARM_ALU_PC_G1_NC, \sym
	.reloc	.L2_\@, R_ARM_LDR_PC_G2, \sym
	add	\rd, \rd, pc
.L0_\@:	sub	\rd, \rd, #4
.L1_\@:	sub	\rd, \rd, #0
.L2_\@:	ldr	\rd, [\rd, #4]
	.endm

/*
 * ldr_this_cpu - Load a 32-bit word from the per-CPU variable 'sym'
 *		  into register 'rd', which may be the stack pointer,
 *		  using 't1' and 't2' as general temp registers. These
 *		  are permitted to overlap with 'rd' if != sp
 */
	.macro	ldr_this_cpu, rd:req, sym:req, t1:req, t2:req
#ifndef CONFIG_SMP
	ldr_va	\rd, \sym, tmp=\t1
#elif __LINUX_ARM_ARCH__ >= 7 || \
      !defined(CONFIG_ARM_HAS_GROUP_RELOCS) || \
      (defined(MODULE) && defined(CONFIG_ARM_MODULE_PLTS))
	this_cpu_offset \t1
	mov_l	\t2, \sym
	ldr	\rd, [\t1, \t2]
#else
	ldr_this_cpu_armv6 \rd, \sym
#endif
	.endm

/*
 * rev_l - byte-swap a 32-bit value
 *
 * @val: source/destination register
 * @tmp: scratch register
 */
	.macro	rev_l, val:req, tmp:req
	.if	__LINUX_ARM_ARCH__ < 6
	eor	\tmp, \val, \val, ror #16
	bic	\tmp, \tmp, #0x00ff0000
	mov	\val, \val, ror #8
	eor	\val, \val, \tmp, lsr #8
	.else
	rev	\val, \val
	.endif
	.endm
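
/*
 * Worked sketch of the pre-v6 sequence above, for a word with bytes
 * A:B:C:D (A most significant):
 *
 *	tmp = val ^ ror(val, 16)	-> (A^C):(B^D):(A^C):(B^D)
 *	tmp &= ~0x00ff0000		-> (A^C):0:(A^C):(B^D)
 *	val = ror(val, 8)		-> D:A:B:C
 *	val ^= tmp >> 8			-> D:C:B:A, i.e. the bytes reversed
 */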

	.if	__LINUX_ARM_ARCH__ < 6
	.set	.Lrev_l_uses_tmp, 1
	.else
	.set	.Lrev_l_uses_tmp, 0
	.endif

/*
 * bl_r - branch and link to register
 *
 * @dst: target to branch to
 * @c: conditional opcode suffix
 */
	.macro	bl_r, dst:req, c
	.if	__LINUX_ARM_ARCH__ < 6
	mov\c	lr, pc
	mov\c	pc, \dst
	.else
	blx\c	\dst
	.endif
	.endm
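
/*
 * Usage sketch (illustrative): indirect call through a function pointer
 * already loaded into r4:
 *
 *	bl_r	r4			@ blx r4 on v6+, mov lr,pc pair before
 */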

#endif /* __ASM_ASSEMBLER_H__ */