GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/blackfin/kernel/vmlinux.lds.S

/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <asm-generic/vmlinux.lds.h>
#include <asm/mem_map.h>
#include <asm/page.h>
#include <asm/thread_info.h>

OUTPUT_FORMAT("elf32-bfin")
ENTRY(__start)
_jiffies = _jiffies_64;
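
/*
 * Blackfin is little-endian, so the 32-bit jiffies counter can alias
 * the low word of the 64-bit jiffies_64 directly: both symbols then
 * name the same address, the usual arrangement on 32-bit LE ports.
 */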

SECTIONS
{
#ifdef CONFIG_RAMKERNEL
	. = CONFIG_BOOT_LOAD;
#else
	. = CONFIG_ROM_BASE;
#endif
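
	/*
	 * A RAM kernel links at the address the boot loader places it at
	 * (CONFIG_BOOT_LOAD); a ROM/XIP kernel links at the flash base
	 * (CONFIG_ROM_BASE) and relocates its writable sections to RAM
	 * via the AT() load addresses further down.
	 */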

	/* Neither the text, ro_data, nor bss sections need to be aligned,
	 * so pack them back to back.
	 */
	.text :
	{
		__text = .;
		_text = .;
		__stext = .;
		TEXT_TEXT
#ifndef CONFIG_SCHEDULE_L1
		SCHED_TEXT
#endif
		LOCK_TEXT
		IRQENTRY_TEXT
		KPROBES_TEXT
#ifdef CONFIG_ROMKERNEL
		__sinittext = .;
		INIT_TEXT
		__einittext = .;
		EXIT_TEXT
#endif
		*(.text.*)
		*(.fixup)
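
		/*
		 * Parts with no L1 instruction SRAM (L1_CODE_LENGTH == 0)
		 * have their .l1.text input sections folded into plain
		 * .text here instead of the .text_l1 overlay at the end
		 * of this script.
		 */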
#if !L1_CODE_LENGTH
		*(.l1.text)
#endif
		__etext = .;
	}

	EXCEPTION_TABLE(4)
	NOTES

	/* Just in case the first read-only access is 32 bits */
	RO_DATA(4)
	__rodata_end = .;
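
	/*
	 * For a ROM kernel, the read-only sections above stay in flash
	 * but everything writable runs from RAM: the location counter
	 * jumps to CONFIG_BOOT_LOAD for the run-time (VMA) addresses,
	 * while AT() keeps the load (LMA) addresses packed in ROM.
	 */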
#ifdef CONFIG_ROMKERNEL
	. = CONFIG_BOOT_LOAD;
	.bss : AT(__rodata_end)
#else
	.bss :
#endif
	{
		. = ALIGN(4);
		___bss_start = .;
		*(.bss .bss.*)
		*(COMMON)
#if !L1_DATA_A_LENGTH
		*(.l1.bss)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.bss.B)
#endif
		. = ALIGN(4);
		___bss_stop = .;
	}

#if defined(CONFIG_ROMKERNEL)
	.data : AT(LOADADDR(.bss) + SIZEOF(.bss))
#else
	.data :
#endif
	{
		__sdata = .;
		/* This gets done first, so the glob doesn't suck it in */
		CACHELINE_ALIGNED_DATA(32)

#if !L1_DATA_A_LENGTH
		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)
		*(.l1.data)
#endif
#if !L1_DATA_B_LENGTH
		*(.l1.data.B)
#endif
#if !L2_LENGTH
		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)
		*(.l2.data)
#endif

		DATA_DATA
		CONSTRUCTORS

		INIT_TASK_DATA(THREAD_SIZE)

		__edata = .;
	}
	__data_lma = LOADADDR(.data);
	__data_len = SIZEOF(.data);
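
	/*
	 * __data_lma/__data_len record where .data is stored versus where
	 * it runs; for a ROM kernel the early boot code presumably uses
	 * them to copy .data into RAM, roughly (a sketch only, the real
	 * consumer lives in the Blackfin boot code, not here):
	 *
	 *	extern char __sdata[], __data_lma[], __data_len[];
	 *	memcpy(__sdata, __data_lma, (unsigned long)__data_len);
	 */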

	/* The init section should be last, so when we free it, it goes into
	 * the general memory pool, and (hopefully) will decrease fragmentation
	 * a tiny bit. The init section has a _requirement_ that it be
	 * PAGE_SIZE aligned
	 */
	. = ALIGN(PAGE_SIZE);
	___init_begin = .;

#ifdef CONFIG_RAMKERNEL
	INIT_TEXT_SECTION(PAGE_SIZE)

	/* We have to discard exit text and such at runtime, not link time, to
	 * handle embedded cross-section references (alt instructions, bug
	 * table, eh_frame, etc...). We need all of our .text up front and
	 * .data after it for PCREL call issues.
	 */
	.exit.text :
	{
		EXIT_TEXT
	}

	. = ALIGN(16);
	INIT_DATA_SECTION(16)
	PERCPU_SECTION(32)

	.exit.data :
	{
		EXIT_DATA
	}

	.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
#else
	.init.data : AT(__data_lma + __data_len)
	{
		__sinitdata = .;
		INIT_DATA
		INIT_SETUP(16)
		INIT_CALLS
		CON_INITCALL
		SECURITY_INITCALL
		INIT_RAM_FS

		___per_cpu_load = .;
		PERCPU_INPUT(32)

		EXIT_DATA
		__einitdata = .;
	}
	__init_data_lma = LOADADDR(.init.data);
	__init_data_len = SIZEOF(.init.data);
	__init_data_end = .;

	.text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
#endif
	{
		. = ALIGN(4);
		__stext_l1 = .;
		*(.l1.text.head)
		*(.l1.text)
#ifdef CONFIG_SCHEDULE_L1
		SCHED_TEXT
#endif
		. = ALIGN(4);
		__etext_l1 = .;
	}
	__text_l1_lma = LOADADDR(.text_l1);
	__text_l1_len = SIZEOF(.text_l1);
	ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
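
	/*
	 * Like .text_l1 above, the remaining on-chip sections are
	 * overlays: each runs at an L1/L2 SRAM address (its VMA) but is
	 * stored right after the preceding section in external memory
	 * (its AT() load address). The __*_lma/__*_len pairs let the
	 * startup code copy each image into place, and the ASSERTs
	 * guarantee the contents fit the corresponding SRAM bank.
	 */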

	.data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
	{
		. = ALIGN(4);
		__sdata_l1 = .;
		*(.l1.data)
		__edata_l1 = .;

		. = ALIGN(32);
		*(.data_l1.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l1 = .;
		*(.l1.bss)
		. = ALIGN(4);
		__ebss_l1 = .;
	}
	__data_l1_lma = LOADADDR(.data_l1);
	__data_l1_len = SIZEOF(.data_l1);
	ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")

	.data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
	{
		. = ALIGN(4);
		__sdata_b_l1 = .;
		*(.l1.data.B)
		__edata_b_l1 = .;

		. = ALIGN(4);
		__sbss_b_l1 = .;
		*(.l1.bss.B)
		. = ALIGN(4);
		__ebss_b_l1 = .;
	}
	__data_b_l1_lma = LOADADDR(.data_b_l1);
	__data_b_l1_len = SIZEOF(.data_b_l1);
	ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")

	.text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
	{
		. = ALIGN(4);
		__stext_l2 = .;
		*(.l2.text)
		. = ALIGN(4);
		__etext_l2 = .;

		. = ALIGN(4);
		__sdata_l2 = .;
		*(.l2.data)
		__edata_l2 = .;

		. = ALIGN(32);
		*(.data_l2.cacheline_aligned)

		. = ALIGN(4);
		__sbss_l2 = .;
		*(.l2.bss)
		. = ALIGN(4);
		__ebss_l2 = .;
	}
	__l2_lma = LOADADDR(.text_data_l2);
	__l2_len = SIZEOF(.text_data_l2);
	ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
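
	/*
	 * For a RAM kernel the L1/L2 load images above sit inside the
	 * init region, so move the location counter back past them in
	 * external memory; once they have been copied into on-chip SRAM,
	 * the external copies are presumably freed with the rest of init.
	 */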

	/* Force trailing alignment of our init section so that when we
	 * free our init memory, we don't leave behind a partial page.
	 */
#ifdef CONFIG_RAMKERNEL
	. = __l2_lma + __l2_len;
#else
	. = __init_data_end;
#endif
	. = ALIGN(PAGE_SIZE);
	___init_end = .;

	__end = .;

	STABS_DEBUG

	DWARF_DEBUG

	DISCARDS
}