GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/kernel/head_32.S
/*
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Enhanced CPU detection and feature setting code by Mike Jagdis
 * and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
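
/*
 * Worked example (assuming the default VMSPLIT_3G layout, where
 * __PAGE_OFFSET = 0xC0000000): pa(0xC1000000) = 0x01000000.  Until
 * paging is enabled, this code runs at its physical load address, so
 * every reference to a kernel symbol must go through this macro.
 */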

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non-PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end) and smaller than
 * max_low_pfn, otherwise some page table entries will be wasted.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/* Number of possible pages in the lowmem region */
LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
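
/*
 * Worked example (assuming the default VMSPLIT_3G layout, __PAGE_OFFSET =
 * 0xC0000000, PAGE_SHIFT = 12): LOWMEM_PAGES = 0x40000000 >> 12 = 0x40000
 * pages (1 GiB of lowmem).  Without PAE, PAGE_TABLE_SIZE(0x40000) =
 * 0x40000 / 1024 = 256 page-table pages, so MAPPING_BEYOND_END = 1 MiB.
 */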

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(stack_start),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

	/*
	 * Set segments to known values.
	 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	leal -__PAGE_OFFSET(%ecx),%esp

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded
 * beyond the kernel image and might not even be addressable by the early
 * boot page tables (the kexec-on-panic case).  Hence copy out the
 * parameters before initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
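
/*
 * Worked value (assuming the default VMSPLIT_3G layout, __PAGE_OFFSET =
 * 0xC0000000): -__PAGE_OFFSET wraps to 0x40000000 as a 32-bit value, so
 * KPMDS = (0x40000000 >> 30) & 3 = 1, i.e. a single PMD covers the
 * kernel's 1 GiB half of the address space.
 */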

	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b
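	/*
	 * Note on the loop above: each 64-bit PTE is written as two 32-bit
	 * stores.  The first stosl writes the low dword (frame address |
	 * PTE_IDENT_ATTR); xchgl then swaps in %ebx, which is kept at zero,
	 * so the second stosl writes the zero high dword, and the second
	 * xchgl restores %eax before it is advanced to the next 4 KiB frame.
	 */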

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);
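
/*
 * Worked value (assuming __PAGE_OFFSET = 0xC0000000): each PDE maps 4 MiB
 * (22 bits) and is 4 bytes wide, so the kernel's first PDE lives at byte
 * offset (0xC0000000 >> 22) * 4 = 0xC0000000 >> 20 = 0xC00 within the
 * page directory.
 */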

	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

	__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp default_entry
#endif /* CONFIG_PARAVIRT */

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If CPU hotplug is not supported then this code can go in an init
 * section which will be freed later.
 */

__CPUINIT

#ifdef CONFIG_SMP
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(stack_start),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */
default_entry:

/*
 * New page tables may be in 4Mbyte page mode and may
 * be using the global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all!
 * So we do not try to touch it unless we really have
 * some bits in it to set.  This won't work if the BSP
 * implements cr4 but this AP does not -- very unlikely
 * but be warned!  The same applies to the pse feature
 * if not equally supported. --macro
 *
 * NOTE! We have to correct for the fact that we're
 * not yet offset PAGE_OFFSET..
 */
#define cr4_bits pa(mmu_cr4_features)
	movl cr4_bits,%edx
	andl %edx,%edx
	jz 6f
	movl %cr4,%eax		# Turn on paging options (PSE,PAE,..)
	orl %edx,%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz 6f

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja 6f

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc 6f

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr
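	/*
	 * Note: on 32-bit, rdmsr/wrmsr move the 64-bit EFER value through
	 * %edx:%eax, so the btsl above sets the NX-enable bit in the low
	 * half before the value is written back.
	 */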

6:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl %cr0,%eax
	orl  $X86_CR0_PG,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * Initialize eflags.  Some BIOSes leave bits like NT set.  This would
 * confuse the debugger if this code is traced.
 * XXX - best to initialize before switching to protected mode.
 */
	pushl $0
	popfl

#ifdef CONFIG_SMP
	cmpb $0, ready
	jnz checkCPUtype
#endif /* CONFIG_SMP */

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	call setup_idt

checkCPUtype:

	movl $-1,X86_CPUID		# -1 for no CPUID initially

/* check if it is 486 or 386. */
/*
 * XXX - this does a lot of unnecessary setup.  Alignment checks don't
 * apply at our cpl of 0 and the stack ought to be aligned already, and
 * we don't need to preserve eflags.
 */

	movb $3,X86		# at least 386
	pushfl			# push EFLAGS
	popl %eax		# get EFLAGS
	movl %eax,%ecx		# save original EFLAGS
	xorl $0x240000,%eax	# flip AC and ID bits in EFLAGS
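				# (0x240000 = AC, bit 18 = 0x40000,
				#  OR'd with ID, bit 21 = 0x200000)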
	pushl %eax		# copy to EFLAGS
	popfl			# set EFLAGS
	pushfl			# get new EFLAGS
	popl %eax		# put it in eax
	xorl %ecx,%eax		# change in flags
	pushl %ecx		# restore original EFLAGS
	popfl
	testl $0x40000,%eax	# check if AC bit changed
	je is386

	movb $4,X86		# at least 486
	testl $0x200000,%eax	# check if ID bit changed
	je is486

	/* get vendor info */
	xorl %eax,%eax		# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask stepping (mask revision)
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY

is486:	movl $0x50022,%ecx	# set AM, WP, NE and MP
	jmp 2f
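				# (0x50022 = AM, bit 18 | WP, bit 16
				#  | NE, bit 5 | MP, bit 1)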

is386:	movl $2,%ecx		# set MP
2:	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	call check_x87
	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * The linker can't handle this by relocation.  Manually set
	 * base address in stack canary segment descriptor.
	 */
	cmpb $0,ready
	jne 1f
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
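	/*
	 * The three stores above scatter the 32-bit base address across the
	 * descriptor's split base fields: bits 15:0 at byte offset 2, bits
	 * 23:16 at byte offset 4, and bits 31:24 at byte offset 7.
	 */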
1:
#endif
	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	cld			# gcc2 wants the direction flag cleared at all times
	pushl $0		# fake return address for unwinder
	movb $1, ready
	jmp *(initial_code)

/*
 * We depend on ET to be correct.  This checks for 287/387.
 */
check_x87:
	movb $0,X86_HARD_MATH
	clts
	fninit
	fstsw %ax
	cmpb $0,%al
	je 1f
	movl %cr0,%eax		/* no coprocessor: have to set bits */
	xorl $4,%eax		/* set EM */
	movl %eax,%cr0
	ret
	ALIGN
1:	movb $1,X86_HARD_MATH
	.byte 0xDB,0xE4		/* fsetpm for 287, ignored by 387 */
	ret

/*
 * setup_idt
 *
 * sets up an idt with 256 entries pointing to
 * ignore_int, interrupt gates. It doesn't actually load
 * idt - that can be done only after paging has been enabled
 * and the kernel moved to PAGE_OFFSET. Interrupts
 * are enabled elsewhere, when we can be relatively
 * sure everything is ok.
 *
 * Warning: %esi is live across this function.
 */
setup_idt:
	lea ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
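	/*
	 * %eax now holds the gate's low dword (selector in the high word,
	 * handler bits 15:0 in the low word); %edx holds the high dword
	 * (handler bits 31:16 plus 0x8E00: present, DPL 0, 32-bit
	 * interrupt gate).
	 */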

	lea idt_table,%edi
	mov $256,%ecx
rp_sidt:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	dec %ecx
	jne rp_sidt

.macro	set_early_handler handler,trapno
	lea \handler,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
	lea idt_table,%edi
	movl %eax,8*\trapno(%edi)
	movl %edx,8*\trapno+4(%edi)
.endm

	set_early_handler handler=early_divide_err,trapno=0
	set_early_handler handler=early_illegal_opcode,trapno=6
	set_early_handler handler=early_protection_fault,trapno=13
	set_early_handler handler=early_page_fault,trapno=14

	ret

early_divide_err:
	xor %edx,%edx
	pushl $0	/* fake errcode */
	jmp early_fault

early_illegal_opcode:
	movl $6,%edx
	pushl $0	/* fake errcode */
	jmp early_fault

early_protection_fault:
	movl $13,%edx
	jmp early_fault

early_page_fault:
	movl $14,%edx
	jmp early_fault

early_fault:
	cld
#ifdef CONFIG_PRINTK
	pusha
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	movl %cr2,%eax
	pushl %eax
	pushl %edx		/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack
hlt_loop:
	hlt
	jmp hlt_loop

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret

#include "verify_cpu.S"

	__REFDATA
.align 4
ENTRY(initial_code)
	.long i386_start_kernel

/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0
ENTRY(swapper_pg_dir)
	.fill 1024,4,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE

early_recursion_flag:
	.long 0

ready:	.byte 0

int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* pusha regs: */
	.ascii " EDI %p ESI %p EBP %p ESP %p\n"
	.ascii " EBX %p EDX %p ECX %p EAX %p\n"
/* fault frame: */
	.ascii " err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii "       %p %p %p %p %p %p %p %p\n"
	.asciz "       %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are a strange 48-bit object
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size, and 32-bit linear address value:
 */

.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
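	/*
	 * Decoding 0x00cf9a000000ffff: base 0, limit 0xfffff with the
	 * granularity bit set (4 KiB units, i.e. the full 4 GiB); access
	 * byte 0x9a = present, DPL 0, readable code segment.  0x92 is the
	 * matching writable data segment.
	 */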