Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/compat/x86bios/x86bios.c
39586 views
1
/*-
2
* Copyright (c) 2009 Alex Keda <[email protected]>
3
* Copyright (c) 2009-2010 Jung-uk Kim <[email protected]>
4
* All rights reserved.
5
*
6
* Redistribution and use in source and binary forms, with or without
7
* modification, are permitted provided that the following conditions
8
* are met:
9
* 1. Redistributions of source code must retain the above copyright
10
* notice, this list of conditions and the following disclaimer.
11
* 2. Redistributions in binary form must reproduce the above copyright
12
* notice, this list of conditions and the following disclaimer in the
13
* documentation and/or other materials provided with the distribution.
14
*
15
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25
* SUCH DAMAGE.
26
*/
27
28
#include <sys/cdefs.h>
29
#include "opt_x86bios.h"
30
31
#include <sys/param.h>
32
#include <sys/bus.h>
33
#include <sys/kernel.h>
34
#include <sys/lock.h>
35
#include <sys/malloc.h>
36
#include <sys/module.h>
37
#include <sys/mutex.h>
38
#include <sys/sysctl.h>
39
40
#include <contrib/x86emu/x86emu.h>
41
#include <contrib/x86emu/x86emu_regs.h>
42
#include <compat/x86bios/x86bios.h>
43
44
#include <dev/pci/pcireg.h>
45
#include <dev/pci/pcivar.h>
46
47
#include <vm/vm.h>
48
#include <vm/pmap.h>
49
50
#ifdef __amd64__
51
#define X86BIOS_NATIVE_ARCH
52
#endif
53
#ifdef __i386__
54
#define X86BIOS_NATIVE_VM86
55
#endif
56
57
/* Size of the real-mode address space handled by this module: 1M. */
#define X86BIOS_MEM_SIZE 0x00100000 /* 1M */

/*
 * Print header h (stringified), value n, and the register set most BIOS
 * services use for arguments/results (AX/BX/CX/DX/ES/DI).
 */
#define X86BIOS_TRACE(h, n, r) do { \
	printf(__STRING(h) \
	" (ax=0x%04x bx=0x%04x cx=0x%04x dx=0x%04x es=0x%04x di=0x%04x)\n",\
	    (n), (r)->R_AX, (r)->R_BX, (r)->R_CX, (r)->R_DX, \
	    (r)->R_ES, (r)->R_DI); \
} while (0)
65
66
/* Serializes BIOS calls and the backing memory-map bookkeeping. */
static struct mtx x86bios_lock;

/* debug.x86bios.* sysctl tree: run-time tracing knobs. */
static SYSCTL_NODE(_debug, OID_AUTO, x86bios, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "x86bios debugging");
/* Non-zero: log register state around far-function calls. */
static int x86bios_trace_call;
SYSCTL_INT(_debug_x86bios, OID_AUTO, call, CTLFLAG_RWTUN, &x86bios_trace_call, 0,
    "Trace far function calls");
/* Non-zero: log register state around software-interrupt calls. */
static int x86bios_trace_int;
SYSCTL_INT(_debug_x86bios, OID_AUTO, int, CTLFLAG_RWTUN, &x86bios_trace_int, 0,
    "Trace software interrupt handlers");
76
77
#ifdef X86BIOS_NATIVE_VM86
78
79
#include <machine/vm86.h>
80
#include <machine/vmparam.h>
81
#include <machine/pc/bios.h>
82
83
struct vm86context x86bios_vmc;
84
85
static void
86
x86bios_emu2vmf(struct x86emu_regs *regs, struct vm86frame *vmf)
87
{
88
89
vmf->vmf_ds = regs->R_DS;
90
vmf->vmf_es = regs->R_ES;
91
vmf->vmf_ax = regs->R_AX;
92
vmf->vmf_bx = regs->R_BX;
93
vmf->vmf_cx = regs->R_CX;
94
vmf->vmf_dx = regs->R_DX;
95
vmf->vmf_bp = regs->R_BP;
96
vmf->vmf_si = regs->R_SI;
97
vmf->vmf_di = regs->R_DI;
98
}
99
100
static void
101
x86bios_vmf2emu(struct vm86frame *vmf, struct x86emu_regs *regs)
102
{
103
104
regs->R_DS = vmf->vmf_ds;
105
regs->R_ES = vmf->vmf_es;
106
regs->R_FLG = vmf->vmf_flags;
107
regs->R_AX = vmf->vmf_ax;
108
regs->R_BX = vmf->vmf_bx;
109
regs->R_CX = vmf->vmf_cx;
110
regs->R_DX = vmf->vmf_dx;
111
regs->R_BP = vmf->vmf_bp;
112
regs->R_SI = vmf->vmf_si;
113
regs->R_DI = vmf->vmf_di;
114
}
115
116
/*
 * Allocate wired, physically contiguous, page-aligned memory below 1M
 * and register each page with the vm86 context so real-mode code can
 * address it.  On success *offset receives the physical address and the
 * kernel virtual address is returned; NULL on bad arguments or failure.
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;
	u_int i;

	if (offset == NULL || size == 0)
		return (NULL);
	/* Constrain the allocation to [0, X86BIOS_MEM_SIZE) physical. */
	vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
	    PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		/* Enter every page of the allocation into the vm86 map. */
		for (i = 0; i < atop(round_page(size)); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    (vm_offset_t)vaddr + ptoa(i));
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}
137
138
/*
 * Free memory obtained from x86bios_alloc() and remove its pages from
 * the vm86 context.  Silently ignores pointers that cannot have come
 * from x86bios_alloc() (not page-aligned, not below 1M, or not found
 * in the vm86 page map).
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;
	int i, nfree;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr >= X86BIOS_MEM_SIZE || (paddr & PAGE_MASK) != 0)
		return;
	mtx_lock(&x86bios_lock);
	/* Locate the first vm86 pmap slot backed by this allocation. */
	for (i = 0; i < x86bios_vmc.npages; i++)
		if (x86bios_vmc.pmap[i].kva == (vm_offset_t)addr)
			break;
	if (i >= x86bios_vmc.npages) {
		mtx_unlock(&x86bios_lock);
		return;
	}
	nfree = atop(round_page(size));
	/* Clear the freed slots; kva == 0 marks a slot as unused. */
	bzero(x86bios_vmc.pmap + i, sizeof(*x86bios_vmc.pmap) * nfree);
	/*
	 * If the freed range was at the tail of the pmap array, shrink
	 * npages past it and past any previously-freed (zeroed) slots
	 * immediately preceding it.
	 */
	if (i + nfree == x86bios_vmc.npages) {
		x86bios_vmc.npages -= nfree;
		while (--i >= 0 && x86bios_vmc.pmap[i].kva == 0)
			x86bios_vmc.npages--;
	}
	mtx_unlock(&x86bios_lock);
	free(addr, M_DEVBUF);
}
167
168
/*
 * Prepare a register set for a BIOS call: all registers start as zero.
 * (Unlike the emulated backend, no SS:SP is set up here; the vm86 code
 * provides the stack.)
 */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
}
174
175
/*
 * Call a real-mode far function at seg:off through vm86.  Registers are
 * passed in and results returned through *regs; the x86bios lock is
 * held across the vm86 call.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{
	struct vm86frame vmf;

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	bzero(&vmf, sizeof(vmf));
	x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
	vmf.vmf_cs = seg;
	vmf.vmf_ip = off;
	mtx_lock(&x86bios_lock);
	/* intno -1: far call at vmf_cs:vmf_ip rather than an INT —
	 * NOTE(review): presumed vm86_datacall() convention; confirm. */
	vm86_datacall(-1, &vmf, &x86bios_vmc);
	mtx_unlock(&x86bios_lock);
	x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);

	if (x86bios_trace_call)
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
}
195
196
/*
 * Read the real-mode interrupt vector for intno straight from the BIOS
 * IVT at physical address 0 (4 bytes per entry, segment:offset form).
 */
uint32_t
x86bios_get_intr(int intno)
{

	return (readl(BIOS_PADDRTOVADDR(intno * 4)));
}
202
203
/*
 * Store a segment:offset vector for intno into the BIOS IVT at physical
 * address 0 (4 bytes per entry).
 */
void
x86bios_set_intr(int intno, uint32_t saddr)
{

	writel(BIOS_PADDRTOVADDR(intno * 4), saddr);
}
209
210
void
211
x86bios_intr(struct x86regs *regs, int intno)
212
{
213
struct vm86frame vmf;
214
215
if (x86bios_trace_int)
216
X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);
217
218
bzero(&vmf, sizeof(vmf));
219
x86bios_emu2vmf((struct x86emu_regs *)regs, &vmf);
220
mtx_lock(&x86bios_lock);
221
vm86_datacall(intno, &vmf, &x86bios_vmc);
222
mtx_unlock(&x86bios_lock);
223
x86bios_vmf2emu(&vmf, (struct x86emu_regs *)regs);
224
225
if (x86bios_trace_int)
226
X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
227
}
228
229
void *
230
x86bios_offset(uint32_t offset)
231
{
232
vm_offset_t addr;
233
234
addr = vm86_getaddr(&x86bios_vmc, X86BIOS_PHYSTOSEG(offset),
235
X86BIOS_PHYSTOOFF(offset));
236
if (addr == 0)
237
addr = BIOS_PADDRTOVADDR(offset);
238
239
return ((void *)addr);
240
}
241
242
static int
243
x86bios_init(void)
244
{
245
246
mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);
247
bzero(&x86bios_vmc, sizeof(x86bios_vmc));
248
249
return (0);
250
}
251
252
/*
 * Module teardown for the vm86 backend: only the lock needs destroying.
 * Always succeeds.
 */
static int
x86bios_uninit(void)
{

	mtx_destroy(&x86bios_lock);

	return (0);
}
260
261
#else
262
263
#include <machine/iodev.h>
264
265
/* Page granularity of the emulator's address-translation map. */
#define X86BIOS_PAGE_SIZE 0x00001000 /* 4K */

/* Real-mode IVT plus the BIOS data area that follows it. */
#define X86BIOS_IVT_SIZE 0x00000500 /* 1K + 256 (BDA) */

/* Physical layout of the emulated real-mode address space. */
#define X86BIOS_IVT_BASE 0x00000000
#define X86BIOS_RAM_BASE 0x00001000
#define X86BIOS_ROM_BASE 0x000a0000

/* ROM window runs from the (possibly lowered) ROM base up to 1M. */
#define X86BIOS_ROM_SIZE (X86BIOS_MEM_SIZE - x86bios_rom_phys)
#define X86BIOS_SEG_SIZE X86BIOS_PAGE_SIZE

#define X86BIOS_PAGES (X86BIOS_MEM_SIZE / X86BIOS_PAGE_SIZE)

/*
 * x86emu does not expose SS/SP by name; they are kept in pad fields of
 * struct x86emu_regs, accessed through these aliases.
 */
#define X86BIOS_R_SS _pad2
#define X86BIOS_R_SP _pad3.I16_reg.x_reg

/* Single shared emulator instance, protected by x86bios_lock. */
static struct x86emu x86bios_emu;

/* Backing storage for the emulated IVT/BDA, ROM window, and stack. */
static void *x86bios_ivt;
static void *x86bios_rom;
static void *x86bios_seg;

/* Per-4K-page physical-to-KVA translation table (X86BIOS_PAGES long). */
static vm_offset_t *x86bios_map;

static vm_paddr_t x86bios_rom_phys;
static vm_paddr_t x86bios_seg_phys;

/* Details of the last emulator memory fault, for diagnostics. */
static int x86bios_fault;
static uint32_t x86bios_fault_addr;
static uint16_t x86bios_fault_cs;
static uint16_t x86bios_fault_ip;
296
297
/*
 * Record the faulting guest address and CS:IP, then abort the current
 * emulation with x86emu_halt_sys().  The saved state is reported by the
 * tracing code in x86bios_call()/x86bios_intr().
 */
static void
x86bios_set_fault(struct x86emu *emu, uint32_t addr)
{

	x86bios_fault = 1;
	x86bios_fault_addr = addr;
	x86bios_fault_cs = emu->x86.R_CS;
	x86bios_fault_ip = emu->x86.R_IP;
	x86emu_halt_sys(emu);
}
307
308
static void *
309
x86bios_get_pages(uint32_t offset, size_t size)
310
{
311
vm_offset_t addr;
312
313
if (offset + size > X86BIOS_MEM_SIZE + X86BIOS_IVT_SIZE)
314
return (NULL);
315
316
if (offset >= X86BIOS_MEM_SIZE)
317
offset -= X86BIOS_MEM_SIZE;
318
addr = x86bios_map[offset / X86BIOS_PAGE_SIZE];
319
if (addr != 0)
320
addr += offset % X86BIOS_PAGE_SIZE;
321
322
return ((void *)addr);
323
}
324
325
static void
326
x86bios_set_pages(vm_offset_t va, vm_paddr_t pa, size_t size)
327
{
328
int i, j;
329
330
for (i = pa / X86BIOS_PAGE_SIZE, j = 0;
331
j < howmany(size, X86BIOS_PAGE_SIZE); i++, j++)
332
x86bios_map[i] = va + j * X86BIOS_PAGE_SIZE;
333
}
334
335
/*
 * Emulator callback: read one byte of guest memory.
 * NOTE(review): the dereference below relies on x86bios_set_fault()
 * not returning (it calls x86emu_halt_sys()); otherwise a NULL va
 * would be dereferenced — confirm against x86emu.
 */
static uint8_t
x86bios_emu_rdb(struct x86emu *emu, uint32_t addr)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	return (*va);
}
346
347
/*
 * Emulator callback: read a 16-bit little-endian word of guest memory.
 * NOTE(review): assumes x86bios_set_fault() aborts emulation and does
 * not return — confirm against x86emu.
 */
static uint16_t
x86bios_emu_rdw(struct x86emu *emu, uint32_t addr)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	/* Byte-wise decode for misaligned reads on strict-alignment CPUs. */
	if ((addr & 1) != 0)
		return (le16dec(va));
	else
#endif
	return (le16toh(*va));
}
363
364
/*
 * Emulator callback: read a 32-bit little-endian word of guest memory.
 * NOTE(review): assumes x86bios_set_fault() aborts emulation and does
 * not return — confirm against x86emu.
 */
static uint32_t
x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	/* Byte-wise decode for misaligned reads on strict-alignment CPUs. */
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	return (le32toh(*va));
}
380
381
/*
 * Emulator callback: write one byte of guest memory.
 * NOTE(review): assumes x86bios_set_fault() aborts emulation and does
 * not return — confirm against x86emu.
 */
static void
x86bios_emu_wrb(struct x86emu *emu, uint32_t addr, uint8_t val)
{
	uint8_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

	*va = val;
}
392
393
/*
 * Emulator callback: write a 16-bit little-endian word of guest memory.
 * NOTE(review): assumes x86bios_set_fault() aborts emulation and does
 * not return — confirm against x86emu.
 */
static void
x86bios_emu_wrw(struct x86emu *emu, uint32_t addr, uint16_t val)
{
	uint16_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	/* Byte-wise encode for misaligned writes on strict-alignment CPUs. */
	if ((addr & 1) != 0)
		le16enc(va, val);
	else
#endif
	*va = htole16(val);
}
409
410
/*
 * Emulator callback: write a 32-bit little-endian word of guest memory.
 * NOTE(review): assumes x86bios_set_fault() aborts emulation and does
 * not return — confirm against x86emu.
 */
static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	/* Byte-wise encode for misaligned writes on strict-alignment CPUs. */
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
	*va = htole32(val);
}
426
427
static uint8_t
428
x86bios_emu_inb(struct x86emu *emu, uint16_t port)
429
{
430
431
#ifndef X86BIOS_NATIVE_ARCH
432
if (port == 0xb2) /* APM scratch register */
433
return (0);
434
if (port >= 0x80 && port < 0x88) /* POST status register */
435
return (0);
436
#endif
437
438
return (iodev_read_1(port));
439
}
440
441
/*
 * Emulator callback: 16-bit port input.  On non-x86 hosts, POST status
 * ports are faked and odd-aligned word reads are split into bytes.
 */
static uint16_t
x86bios_emu_inw(struct x86emu *emu, uint16_t port)
{
	uint16_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	/* Split a misaligned word access into two byte accesses. */
	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_1(port + 1) << 8;
	} else
#endif
	val = iodev_read_2(port);

	return (val);
}
459
460
/*
 * Emulator callback: 32-bit port input.  On non-x86 hosts, POST status
 * ports are faked and misaligned accesses are split into naturally
 * aligned byte/word accesses.
 */
static uint32_t
x86bios_emu_inl(struct x86emu *emu, uint16_t port)
{
	uint32_t val;

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return (0);

	/* Odd port: byte, word, byte.  Even-but-not-dword: two words. */
	if ((port & 1) != 0) {
		val = iodev_read_1(port);
		val |= iodev_read_2(port + 1) << 8;
		val |= iodev_read_1(port + 3) << 24;
	} else if ((port & 2) != 0) {
		val = iodev_read_2(port);
		val |= iodev_read_2(port + 2) << 16;
	} else
#endif
	val = iodev_read_4(port);

	return (val);
}
482
483
static void
484
x86bios_emu_outb(struct x86emu *emu, uint16_t port, uint8_t val)
485
{
486
487
#ifndef X86BIOS_NATIVE_ARCH
488
if (port == 0xb2) /* APM scratch register */
489
return;
490
if (port >= 0x80 && port < 0x88) /* POST status register */
491
return;
492
#endif
493
494
iodev_write_1(port, val);
495
}
496
497
/*
 * Emulator callback: 16-bit port output.  On non-x86 hosts, POST status
 * ports are dropped and odd-aligned word writes are split into bytes.
 */
static void
x86bios_emu_outw(struct x86emu *emu, uint16_t port, uint16_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	/* Split a misaligned word access into two byte accesses. */
	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_1(port + 1, val >> 8);
	} else
#endif
	iodev_write_2(port, val);
}
512
513
/*
 * Emulator callback: 32-bit port output.  On non-x86 hosts, POST status
 * ports are dropped and misaligned accesses are split into naturally
 * aligned byte/word accesses.
 */
static void
x86bios_emu_outl(struct x86emu *emu, uint16_t port, uint32_t val)
{

#ifndef X86BIOS_NATIVE_ARCH
	if (port >= 0x80 && port < 0x88) /* POST status register */
		return;

	/* Odd port: byte, word, byte.  Even-but-not-dword: two words. */
	if ((port & 1) != 0) {
		iodev_write_1(port, val);
		iodev_write_2(port + 1, val >> 8);
		iodev_write_1(port + 3, val >> 24);
	} else if ((port & 2) != 0) {
		iodev_write_2(port, val);
		iodev_write_2(port + 2, val >> 16);
	} else
#endif
	iodev_write_4(port, val);
}
532
533
/*
 * Allocate wired, physically contiguous memory inside the emulator's
 * RAM window (between X86BIOS_RAM_BASE and the ROM base) and enter it
 * into the translation map used by x86bios_get_pages().  On success
 * *offset receives the physical address; returns the kernel virtual
 * address, or NULL on bad arguments or failure.
 */
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, X86BIOS_RAM_BASE,
	    x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		x86bios_set_pages((vm_offset_t)vaddr, *offset, size);
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}
551
552
/*
 * Free memory obtained from x86bios_alloc() and clear its entries in
 * the translation map.  Silently ignores pointers that cannot have
 * come from x86bios_alloc() (outside the RAM window or not aligned to
 * an emulator page).
 */
void
x86bios_free(void *addr, size_t size)
{
	vm_paddr_t paddr;

	if (addr == NULL || size == 0)
		return;
	paddr = vtophys(addr);
	if (paddr < X86BIOS_RAM_BASE || paddr >= x86bios_rom_phys ||
	    paddr % X86BIOS_PAGE_SIZE != 0)
		return;
	mtx_lock(&x86bios_lock);
	bzero(x86bios_map + paddr / X86BIOS_PAGE_SIZE,
	    sizeof(*x86bios_map) * howmany(size, X86BIOS_PAGE_SIZE));
	mtx_unlock(&x86bios_lock);
	free(addr, M_DEVBUF);
}
569
570
/*
 * Prepare a register set for a BIOS call: clear everything and point
 * SS:SP at the private stack segment, with SP two bytes below the end
 * of the 4K segment.
 */
void
x86bios_init_regs(struct x86regs *regs)
{

	bzero(regs, sizeof(*regs));
	regs->X86BIOS_R_SS = X86BIOS_PHYSTOSEG(x86bios_seg_phys);
	regs->X86BIOS_R_SP = X86BIOS_PAGE_SIZE - 2;
}
578
579
/*
 * Call a far function at seg:off under the x86emu software emulator.
 * The caller's registers are loaded into the shared emulator instance
 * (serialized by x86bios_lock) and the results copied back.  Any
 * memory fault during emulation is reported when tracing is enabled.
 */
void
x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off)
{

	if (x86bios_trace_call)
		X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs);

	mtx_lock(&x86bios_lock);
	memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	/* NOTE(review): spinlock section presumably keeps the emulation
	 * from being interrupted/preempted while poking hardware —
	 * confirm intent. */
	spinlock_enter();
	x86emu_exec_call(&x86bios_emu, seg, off);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_call) {
		X86BIOS_TRACE(Exiting 0x%06x, (seg << 4) + off, regs);
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}
603
604
uint32_t
605
x86bios_get_intr(int intno)
606
{
607
608
return (le32toh(*((uint32_t *)x86bios_ivt + intno)));
609
}
610
611
void
612
x86bios_set_intr(int intno, uint32_t saddr)
613
{
614
615
*((uint32_t *)x86bios_ivt + intno) = htole32(saddr);
616
}
617
618
/*
 * Emulate a software interrupt handler under x86emu.  Vectors outside
 * the 256-entry IVT are silently ignored.  The caller's registers are
 * loaded into the shared emulator instance (serialized by
 * x86bios_lock) and the results copied back; any memory fault during
 * emulation is reported when tracing is enabled.
 */
void
x86bios_intr(struct x86regs *regs, int intno)
{

	if (intno < 0 || intno > 255)
		return;

	if (x86bios_trace_int)
		X86BIOS_TRACE(Calling INT 0x%02x, intno, regs);

	mtx_lock(&x86bios_lock);
	memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs));
	x86bios_fault = 0;
	/* NOTE(review): spinlock section presumably keeps the emulation
	 * from being interrupted/preempted while poking hardware —
	 * confirm intent. */
	spinlock_enter();
	x86emu_exec_intr(&x86bios_emu, intno);
	spinlock_exit();
	memcpy(regs, &x86bios_emu.x86, sizeof(*regs));
	mtx_unlock(&x86bios_lock);

	if (x86bios_trace_int) {
		X86BIOS_TRACE(Exiting INT 0x%02x, intno, regs);
		if (x86bios_fault)
			printf("Page fault at 0x%06x from 0x%04x:0x%04x.\n",
			    x86bios_fault_addr, x86bios_fault_cs,
			    x86bios_fault_ip);
	}
}
645
646
/*
 * Translate a physical (real-mode) address to a kernel virtual address
 * via the per-page map; NULL if the containing page is unmapped.
 */
void *
x86bios_offset(uint32_t offset)
{

	return (x86bios_get_pages(offset, 1));
}
652
653
/*
 * Undo x86bios_map_mem(): release the translation map, the IVT
 * mapping/copy, the ROM mapping, and the stack segment.  Safe to call
 * after a partial x86bios_map_mem() failure; each resource is checked
 * before release.
 */
static __inline void
x86bios_unmap_mem(void)
{

	if (x86bios_map != NULL) {
		free(x86bios_map, M_DEVBUF);
		x86bios_map = NULL;
	}
	if (x86bios_ivt != NULL) {
#ifdef X86BIOS_NATIVE_ARCH
		/* NOTE(review): x86bios_ivt is not reset to NULL in this
		 * branch, unlike the others — verify that is intended. */
		pmap_unmapbios(x86bios_ivt, X86BIOS_IVT_SIZE);
#else
		free(x86bios_ivt, M_DEVBUF);
		x86bios_ivt = NULL;
#endif
	}
	if (x86bios_rom != NULL)
		pmap_unmapdev(x86bios_rom, X86BIOS_ROM_SIZE);
	if (x86bios_seg != NULL) {
		free(x86bios_seg, M_DEVBUF);
		x86bios_seg = NULL;
	}
}
676
677
/*
 * Map or allocate everything the emulator needs: the real-mode
 * IVT/BDA, the (EBDA+)ROM window up to 1M, and a private 4K stack
 * segment; record each region in the translation map.  Returns 0 on
 * success, 1 on failure (with all partial work undone).
 */
static __inline int
x86bios_map_mem(void)
{

	x86bios_map = malloc(sizeof(*x86bios_map) * X86BIOS_PAGES, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (x86bios_map == NULL)
		goto fail;

#ifdef X86BIOS_NATIVE_ARCH
	/* On amd64, map the machine's real IVT and BIOS data area. */
	x86bios_ivt = pmap_mapbios(X86BIOS_IVT_BASE, X86BIOS_IVT_SIZE);

	/* Probe EBDA via BDA. */
	x86bios_rom_phys = *(uint16_t *)((caddr_t)x86bios_ivt + 0x40e);
	x86bios_rom_phys = x86bios_rom_phys << 4;
	/*
	 * Accept the EBDA segment only when it lies within 128K below
	 * the ROM base; otherwise fall back to the plain ROM base via
	 * the dangling else below.
	 */
	if (x86bios_rom_phys != 0 && x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    X86BIOS_ROM_BASE - x86bios_rom_phys <= 128 * 1024)
		x86bios_rom_phys =
		    rounddown(x86bios_rom_phys, X86BIOS_PAGE_SIZE);
	else
#else
	/* Other architectures start with an empty, writable IVT/BDA. */
	x86bios_ivt = malloc(X86BIOS_IVT_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (x86bios_ivt == NULL)
		goto fail;
#endif

	x86bios_rom_phys = X86BIOS_ROM_BASE;
	x86bios_rom = pmap_mapdev(x86bios_rom_phys, X86BIOS_ROM_SIZE);
	if (x86bios_rom == NULL)
		goto fail;
#ifdef X86BIOS_NATIVE_ARCH
	/* Change attribute for EBDA. */
	if (x86bios_rom_phys < X86BIOS_ROM_BASE &&
	    pmap_change_attr((vm_offset_t)x86bios_rom,
	    X86BIOS_ROM_BASE - x86bios_rom_phys, PAT_WRITE_BACK) != 0)
		goto fail;
#endif

	/* Private real-mode stack segment in the RAM window. */
	x86bios_seg = contigmalloc(X86BIOS_SEG_SIZE, M_DEVBUF, M_NOWAIT,
	    X86BIOS_RAM_BASE, x86bios_rom_phys, X86BIOS_PAGE_SIZE, 0);
	if (x86bios_seg == NULL)
		goto fail;
	x86bios_seg_phys = vtophys(x86bios_seg);

	/* Publish all three regions in the translation map. */
	x86bios_set_pages((vm_offset_t)x86bios_ivt, X86BIOS_IVT_BASE,
	    X86BIOS_IVT_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_rom, x86bios_rom_phys,
	    X86BIOS_ROM_SIZE);
	x86bios_set_pages((vm_offset_t)x86bios_seg, x86bios_seg_phys,
	    X86BIOS_SEG_SIZE);

	if (bootverbose) {
		printf("x86bios: IVT 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_IVT_BASE,
		    (vm_paddr_t)X86BIOS_IVT_SIZE + X86BIOS_IVT_BASE - 1,
		    x86bios_ivt);
		printf("x86bios: SSEG 0x%06jx-0x%06jx at %p\n",
		    x86bios_seg_phys,
		    (vm_paddr_t)X86BIOS_SEG_SIZE + x86bios_seg_phys - 1,
		    x86bios_seg);
		if (x86bios_rom_phys < X86BIOS_ROM_BASE)
			printf("x86bios: EBDA 0x%06jx-0x%06jx at %p\n",
			    x86bios_rom_phys, (vm_paddr_t)X86BIOS_ROM_BASE - 1,
			    x86bios_rom);
		printf("x86bios: ROM 0x%06jx-0x%06jx at %p\n",
		    (vm_paddr_t)X86BIOS_ROM_BASE,
		    (vm_paddr_t)X86BIOS_MEM_SIZE - X86BIOS_SEG_SIZE - 1,
		    (caddr_t)x86bios_rom + X86BIOS_ROM_BASE - x86bios_rom_phys);
	}

	return (0);

fail:
	x86bios_unmap_mem();

	return (1);
}
754
755
/*
 * Module initialization for the emulated backend: map the BIOS memory
 * regions and wire the x86emu memory and port-I/O callbacks.  Returns
 * ENOMEM when the memory setup fails.
 */
static int
x86bios_init(void)
{

	mtx_init(&x86bios_lock, "x86bios lock", NULL, MTX_DEF);

	if (x86bios_map_mem() != 0)
		return (ENOMEM);

	bzero(&x86bios_emu, sizeof(x86bios_emu));

	/* Guest memory accessors. */
	x86bios_emu.emu_rdb = x86bios_emu_rdb;
	x86bios_emu.emu_rdw = x86bios_emu_rdw;
	x86bios_emu.emu_rdl = x86bios_emu_rdl;
	x86bios_emu.emu_wrb = x86bios_emu_wrb;
	x86bios_emu.emu_wrw = x86bios_emu_wrw;
	x86bios_emu.emu_wrl = x86bios_emu_wrl;

	/* Port-I/O accessors. */
	x86bios_emu.emu_inb = x86bios_emu_inb;
	x86bios_emu.emu_inw = x86bios_emu_inw;
	x86bios_emu.emu_inl = x86bios_emu_inl;
	x86bios_emu.emu_outb = x86bios_emu_outb;
	x86bios_emu.emu_outw = x86bios_emu_outw;
	x86bios_emu.emu_outl = x86bios_emu_outl;

	return (0);
}
782
783
/*
 * Module teardown for the emulated backend: release all mapped memory
 * and destroy the lock.  Always succeeds.
 */
static int
x86bios_uninit(void)
{

	x86bios_unmap_mem();
	mtx_destroy(&x86bios_lock);

	return (0);
}
792
793
#endif
794
795
void *
796
x86bios_get_orm(uint32_t offset)
797
{
798
uint8_t *p;
799
800
/* Does the shadow ROM contain BIOS POST code for x86? */
801
p = x86bios_offset(offset);
802
if (p == NULL || p[0] != 0x55 || p[1] != 0xaa ||
803
(p[3] != 0xe9 && p[3] != 0xeb))
804
return (NULL);
805
806
return (p);
807
}
808
809
/*
 * Check whether the option ROM at offset belongs to PCI device dev.
 * Returns 1 when the ROM's PCI data structure matches the device's
 * vendor/device IDs and full class code (class/subclass/progif),
 * 0 otherwise.
 */
int
x86bios_match_device(uint32_t offset, device_t dev)
{
	uint8_t *p;
	uint16_t device, vendor;
	uint8_t class, progif, subclass;

	/* Does the shadow ROM contain BIOS POST code for x86? */
	p = x86bios_get_orm(offset);
	if (p == NULL)
		return (0);

	/* Does it contain PCI data structure? */
	p += le16toh(*(uint16_t *)(p + 0x18));	/* 0x18: PCIR pointer */
	if (bcmp(p, "PCIR", 4) != 0 ||
	    le16toh(*(uint16_t *)(p + 0x0a)) < 0x18 || *(p + 0x14) != 0)
		return (0);

	/* Does it match the vendor, device, and classcode? */
	vendor = le16toh(*(uint16_t *)(p + 0x04));
	device = le16toh(*(uint16_t *)(p + 0x06));
	progif = *(p + 0x0d);
	subclass = *(p + 0x0e);
	class = *(p + 0x0f);
	if (vendor != pci_get_vendor(dev) || device != pci_get_device(dev) ||
	    class != pci_get_class(dev) || subclass != pci_get_subclass(dev) ||
	    progif != pci_get_progif(dev))
		return (0);

	return (1);
}
840
841
static int
842
x86bios_modevent(module_t mod __unused, int type, void *data __unused)
843
{
844
845
switch (type) {
846
case MOD_LOAD:
847
return (x86bios_init());
848
case MOD_UNLOAD:
849
return (x86bios_uninit());
850
default:
851
return (ENOTSUP);
852
}
853
}
854
855
/* Module glue: register x86bios at SI_SUB_CPU with no private data. */
static moduledata_t x86bios_mod = {
	"x86bios",
	x86bios_modevent,
	NULL,
};

DECLARE_MODULE(x86bios, x86bios_mod, SI_SUB_CPU, SI_ORDER_ANY);
MODULE_VERSION(x86bios, 1);
863
864