Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
freebsd
GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/amd64/include/cpufunc.h
39536 views
1
/*-
2
* SPDX-License-Identifier: BSD-3-Clause
3
*
4
* Copyright (c) 2003 Peter Wemm.
5
* Copyright (c) 1993 The Regents of the University of California.
6
* All rights reserved.
7
*
8
* Redistribution and use in source and binary forms, with or without
9
* modification, are permitted provided that the following conditions
10
* are met:
11
* 1. Redistributions of source code must retain the above copyright
12
* notice, this list of conditions and the following disclaimer.
13
* 2. Redistributions in binary form must reproduce the above copyright
14
* notice, this list of conditions and the following disclaimer in the
15
* documentation and/or other materials provided with the distribution.
16
* 3. Neither the name of the University nor the names of its contributors
17
* may be used to endorse or promote products derived from this software
18
* without specific prior written permission.
19
*
20
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30
* SUCH DAMAGE.
31
*/
32
33
/*
 * Functions to provide access to special i386 instructions.
 * This is included in sys/systm.h, and that file should be
 * used in preference to this.
 */
38
39
#ifdef __i386__
40
#include <i386/cpufunc.h>
41
#else /* !__i386__ */
42
43
#ifndef _MACHINE_CPUFUNC_H_
44
#define _MACHINE_CPUFUNC_H_
45
46
/* Opaque descriptor-table pointer type; defined in <machine/segments.h>. */
struct region_descriptor;

/*
 * Memory-mapped I/O accessors.  Each reads or writes a fixed-width
 * value through a volatile pointer so the compiler cannot cache,
 * combine, or elide the access.
 */
#define readb(va) (*(volatile uint8_t *) (va))
#define readw(va) (*(volatile uint16_t *) (va))
#define readl(va) (*(volatile uint32_t *) (va))
#define readq(va) (*(volatile uint64_t *) (va))

#define writeb(va, d) (*(volatile uint8_t *) (va) = (d))
#define writew(va, d) (*(volatile uint16_t *) (va) = (d))
#define writel(va, d) (*(volatile uint32_t *) (va) = (d))
#define writeq(va, d) (*(volatile uint64_t *) (va) = (d))
57
58
/* Trap into the debugger via the INT3 breakpoint instruction. */
static __inline void
breakpoint(void)
{
	__asm __volatile("int $3");
}

/*
 * Bit-scan-forward: index of the least significant set bit of mask.
 * Result is undefined for mask == 0 (__builtin_ctz semantics).
 */
#define bsfl(mask) __builtin_ctz(mask)

/* 64-bit variant of bsfl(). */
#define bsfq(mask) __builtin_ctzl(mask)

/* Flush the cache line containing addr from the whole cache hierarchy. */
static __inline void
clflush(u_long addr)
{

	__asm __volatile("clflush %0" : : "m" (*(char *)addr));
}

/* Like clflush(), but with weaker ordering (CLFLUSHOPT). */
static __inline void
clflushopt(u_long addr)
{

	__asm __volatile("clflushopt %0" : : "m" (*(char *)addr));
}

/* Write back the cache line containing addr without invalidating it. */
static __inline void
clwb(u_long addr)
{

	__asm __volatile("clwb %0" : : "m" (*(char *)addr));
}

/* Clear the task-switched (TS) flag in CR0. */
static __inline void
clts(void)
{

	__asm __volatile("clts");
}

/*
 * Disable maskable interrupts (CLI).  The "memory" clobber makes this
 * a compiler barrier as well, so memory accesses are not moved across it.
 */
static __inline void
disable_intr(void)
{
	__asm __volatile("cli" : : : "memory");
}

/*
 * Execute CPUID for leaf ax and store %eax/%ebx/%ecx/%edx into
 * p[0]..p[3].
 */
static __inline void
do_cpuid(u_int ax, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax));
}

/* Like do_cpuid(), but also supplies the sub-leaf number in %ecx. */
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
	__asm __volatile("cpuid"
	    : "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
	    : "0" (ax), "c" (cx));
}

/* Enable maskable interrupts (STI). */
static __inline void
enable_intr(void)
{
	__asm __volatile("sti");
}

/* Halt the CPU until the next interrupt (HLT). */
static __inline void
halt(void)
{
	__asm __volatile("hlt");
}
129
130
/* Read one byte from I/O port. */
static __inline u_char
inb(u_int port)
{
	u_char data;

	__asm __volatile("inb %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read a 32-bit word from I/O port. */
static __inline u_int
inl(u_int port)
{
	u_int data;

	__asm __volatile("inl %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Read count bytes from I/O port into the buffer at addr (REP INSB). */
static __inline void
insb(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insb"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

/* Read count 16-bit words from I/O port into addr (REP INSW). */
static __inline void
insw(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insw"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

/* Read count 32-bit words from I/O port into addr (REP INSL). */
static __inline void
insl(u_int port, void *addr, size_t count)
{
	__asm __volatile("rep; insl"
	    : "+D" (addr), "+c" (count)
	    : "d" (port)
	    : "memory");
}

/* Invalidate all caches WITHOUT writing dirty lines back (INVD). */
static __inline void
invd(void)
{
	__asm __volatile("invd");
}

/* Read a 16-bit word from I/O port. */
static __inline u_short
inw(u_int port)
{
	u_short data;

	__asm __volatile("inw %w1, %0" : "=a" (data) : "Nd" (port));
	return (data);
}

/* Write one byte to I/O port. */
static __inline void
outb(u_int port, u_char data)
{
	__asm __volatile("outb %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write a 32-bit word to I/O port. */
static __inline void
outl(u_int port, u_int data)
{
	__asm __volatile("outl %0, %w1" : : "a" (data), "Nd" (port));
}

/* Write count bytes from addr to I/O port (REP OUTSB). */
static __inline void
outsb(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsb"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

/* Write count 16-bit words from addr to I/O port (REP OUTSW). */
static __inline void
outsw(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsw"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

/* Write count 32-bit words from addr to I/O port (REP OUTSL). */
static __inline void
outsl(u_int port, const void *addr, size_t count)
{
	__asm __volatile("rep; outsl"
	    : "+S" (addr), "+c" (count)
	    : "d" (port));
}

/* Write a 16-bit word to I/O port. */
static __inline void
outw(u_int port, u_short data)
{
	__asm __volatile("outw %0, %w1" : : "a" (data), "Nd" (port));
}
231
232
/*
 * Population count: number of set bits in mask (POPCNT instruction).
 * NOTE(review): requires POPCNT CPU support; callers are expected to
 * have checked the capability — confirm against call sites.
 */
static __inline u_long
popcntq(u_long mask)
{
	u_long result;

	__asm __volatile("popcntq %1,%0" : "=r" (result) : "rm" (mask));
	return (result);
}
240
241
/* Load fence (LFENCE); the "memory" clobber also makes it a compiler barrier. */
static __inline void
lfence(void)
{

	__asm __volatile("lfence" : : : "memory");
}

/* Full memory fence (MFENCE) plus compiler barrier. */
static __inline void
mfence(void)
{

	__asm __volatile("mfence" : : : "memory");
}

/* Store fence (SFENCE) plus compiler barrier. */
static __inline void
sfence(void)
{

	__asm __volatile("sfence" : : : "memory");
}

/* Spin-wait hint (PAUSE) for use inside busy-wait loops. */
static __inline void
ia32_pause(void)
{
	__asm __volatile("pause");
}
267
268
/* Read the RFLAGS register by pushing it and popping into a GPR. */
static __inline u_long
read_rflags(void)
{
	u_long rf;

	__asm __volatile("pushfq; popq %0" : "=r" (rf));
	return (rf);
}
276
277
/* Read the 64-bit model-specific register msr (privileged). */
static __inline uint64_t
rdmsr(u_int msr)
{
	uint32_t low, high;

	__asm __volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
	return (low | ((uint64_t)high << 32));
}

/* Read only the low 32 bits of MSR msr; the high half (%rdx) is discarded. */
static __inline uint32_t
rdmsr32(u_int msr)
{
	uint32_t low;

	__asm __volatile("rdmsr" : "=a" (low) : "c" (msr) : "rdx");
	return (low);
}

/* Read performance-monitoring counter pmc (RDPMC). */
static __inline uint64_t
rdpmc(u_int pmc)
{
	uint32_t low, high;

	__asm __volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (pmc));
	return (low | ((uint64_t)high << 32));
}
303
304
/*
 * Read the 64-bit time-stamp counter.  RDTSC places the low half in
 * %eax and the high half in %edx; recombine them into one uint64_t.
 * RDTSC itself imposes no ordering — use rdtsc_ordered_lfence()/
 * rdtsc_ordered_mfence() when a fenced read is required.
 */
static __inline uint64_t
rdtsc(void)
{
	uint32_t lo, hi;

	__asm __volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return (((uint64_t)hi << 32) | lo);
}
312
313
/* TSC read preceded by LFENCE so earlier loads complete first. */
static __inline uint64_t
rdtsc_ordered_lfence(void)
{
	lfence();
	return (rdtsc());
}

/* TSC read preceded by a full MFENCE. */
static __inline uint64_t
rdtsc_ordered_mfence(void)
{
	mfence();
	return (rdtsc());
}

/* RDTSCP: TSC read; the processor-ID in %ecx is discarded here. */
static __inline uint64_t
rdtscp(void)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high) : : "ecx");
	return (low | ((uint64_t)high << 32));
}

/* RDTSCP variant that also returns the IA32_TSC_AUX value in *aux. */
static __inline uint64_t
rdtscp_aux(uint32_t *aux)
{
	uint32_t low, high;

	__asm __volatile("rdtscp" : "=a" (low), "=d" (high), "=c" (*aux));
	return (low | ((uint64_t)high << 32));
}

/* Low 32 bits of the TSC; the high half (%edx) is discarded. */
static __inline uint32_t
rdtsc32(void)
{
	uint32_t rv;

	__asm __volatile("rdtsc" : "=a" (rv) : : "edx");
	return (rv);
}

/* Low 32 bits via RDTSCP; %ecx and %edx are discarded. */
static __inline uint32_t
rdtscp32(void)
{
	uint32_t rv;

	__asm __volatile("rdtscp" : "=a" (rv) : : "ecx", "edx");
	return (rv);
}
362
363
/* Write back and invalidate all caches (WBINVD, privileged). */
static __inline void
wbinvd(void)
{
	__asm __volatile("wbinvd");
}

/* Load rf into the RFLAGS register. */
static __inline void
write_rflags(u_long rf)
{
	__asm __volatile("pushq %0; popfq" : : "r" (rf));
}

/* Write the 64-bit value newval to MSR msr (privileged). */
static __inline void
wrmsr(u_int msr, uint64_t newval)
{
	uint32_t low, high;

	low = newval;		/* WRMSR takes the value split across */
	high = newval >> 32;	/* %eax (low) and %edx (high). */
	__asm __volatile("wrmsr" : : "a" (low), "d" (high), "c" (msr));
}
384
385
/* Load data into control register CR0. */
static __inline void
load_cr0(u_long data)
{

	__asm __volatile("movq %0,%%cr0" : : "r" (data));
}

/* Read control register CR0. */
static __inline u_long
rcr0(void)
{
	u_long data;

	__asm __volatile("movq %%cr0,%0" : "=r" (data));
	return (data);
}

/* Read CR2 (the faulting linear address after a page fault). */
static __inline u_long
rcr2(void)
{
	u_long data;

	__asm __volatile("movq %%cr2,%0" : "=r" (data));
	return (data);
}

/*
 * Load CR3 (page-table base).  The "memory" clobber keeps the compiler
 * from moving memory accesses across the address-space switch.
 */
static __inline void
load_cr3(u_long data)
{

	__asm __volatile("movq %0,%%cr3" : : "r" (data) : "memory");
}

/* Read CR3. */
static __inline u_long
rcr3(void)
{
	u_long data;

	__asm __volatile("movq %%cr3,%0" : "=r" (data));
	return (data);
}

/* Load CR4 (feature-control register). */
static __inline void
load_cr4(u_long data)
{
	__asm __volatile("movq %0,%%cr4" : : "r" (data));
}

/* Read CR4. */
static __inline u_long
rcr4(void)
{
	u_long data;

	__asm __volatile("movq %%cr4,%0" : "=r" (data));
	return (data);
}

/* Read extended control register reg (XGETBV). */
static __inline u_long
rxcr(u_int reg)
{
	u_int low, high;

	__asm __volatile("xgetbv" : "=a" (low), "=d" (high) : "c" (reg));
	return (low | ((uint64_t)high << 32));
}

/* Write val to extended control register reg (XSETBV, privileged). */
static __inline void
load_xcr(u_int reg, u_long val)
{
	u_int low, high;

	low = val;
	high = val >> 32;
	__asm __volatile("xsetbv" : : "c" (reg), "a" (low), "d" (high));
}
459
460
/*
 * Global TLB flush (except for those for pages marked PG_G).
 * Reloading CR3 with its current value flushes all non-global entries.
 */
static __inline void
invltlb(void)
{

	load_cr3(rcr3());
}

#ifndef CR4_PGE
#define CR4_PGE 0x00000080 /* Page global enable */
#endif

/*
 * Perform the guaranteed invalidation of all TLB entries.  This
 * includes the global entries, and entries in all PCIDs, not only the
 * current context.  The function works both on non-PCID CPUs and CPUs
 * with the PCID turned off or on.  See IA-32 SDM Vol. 3a 4.10.4.1
 * Operations that Invalidate TLBs and Paging-Structure Caches.
 * Toggling CR4.PGE off and back on flushes everything.
 */
static __inline void
invltlb_glob(void)
{
	uint64_t cr4;

	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);
	/*
	 * Although preemption at this point could be detrimental to
	 * performance, it would not lead to an error.  PG_G is simply
	 * ignored if CR4.PGE is clear.  Moreover, in case this block
	 * is re-entered, the load_cr4() either above or below will
	 * modify CR4.PGE flushing the TLB.
	 */
	load_cr4(cr4 | CR4_PGE);
}

/*
 * TLB flush for an individual page (even if it has PG_G).
 * Only works on 486+ CPUs (i386 does not have PG_G).
 */
static __inline void
invlpg(u_long addr)
{

	__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
}
508
509
/* INVPCID invalidation types (Intel SDM Vol. 2, INVPCID). */
#define INVPCID_ADDR 0		/* individual address, one PCID */
#define INVPCID_CTX 1		/* single PCID, non-global entries */
#define INVPCID_CTXGLOB 2	/* all contexts, including globals */
#define INVPCID_ALLCTX 3	/* all contexts, except globals */

/* In-memory descriptor operand for INVPCID. */
struct invpcid_descr {
	uint64_t pcid:12 __packed;	/* target PCID */
	uint64_t pad:52 __packed;	/* must be zero */
	uint64_t addr;			/* linear address for INVPCID_ADDR */
} __packed;

/* Execute INVPCID with descriptor d and invalidation type. */
static __inline void
invpcid(struct invpcid_descr *d, int type)
{

	__asm __volatile("invpcid (%0),%1"
	    : : "r" (d), "r" ((u_long)type) : "memory");
}

/* AMD INVLPGB flag bits (select what the broadcast flush matches on). */
#define INVLPGB_VA 0x0001
#define INVLPGB_PCID 0x0002
#define INVLPGB_ASID 0x0004
#define INVLPGB_GLOB 0x0008
#define INVLPGB_FIN 0x0010
#define INVLPGB_NEST 0x0020

/* Pack an ASID/PCID pair into the %edx operand of INVLPGB. */
#define INVLPGB_DESCR(asid, pcid) (((pcid) << 16) | (asid))

#define INVLPGB_2M_CNT (1u << 31)	/* count field counts 2M pages */

/* Broadcast TLB invalidation to all CPUs (AMD INVLPGB). */
static __inline void
invlpgb(uint64_t rax, uint32_t edx, uint32_t ecx)
{
	__asm __volatile("invlpgb" : : "a" (rax), "d" (edx), "c" (ecx));
}

/* Wait for outstanding INVLPGB broadcasts to complete (AMD TLBSYNC). */
static __inline void
tlbsync(void)
{
	__asm __volatile("tlbsync");
}
550
551
/* Read the %fs segment selector. */
static __inline u_short
rfs(void)
{
	u_short sel;
	__asm __volatile("movw %%fs,%0" : "=rm" (sel));
	return (sel);
}

/* Read the %gs segment selector. */
static __inline u_short
rgs(void)
{
	u_short sel;
	__asm __volatile("movw %%gs,%0" : "=rm" (sel));
	return (sel);
}

/* Read the %ss segment selector. */
static __inline u_short
rss(void)
{
	u_short sel;
	__asm __volatile("movw %%ss,%0" : "=rm" (sel));
	return (sel);
}

/* Read the %cs segment selector. */
static __inline u_short
rcs(void)
{
	u_short sel;

	__asm __volatile("movw %%cs,%0" : "=rm" (sel));
	return (sel);
}

/* Load sel into the %ds segment register. */
static __inline void
load_ds(u_short sel)
{
	__asm __volatile("movw %0,%%ds" : : "rm" (sel));
}

/* Load sel into the %es segment register. */
static __inline void
load_es(u_short sel)
{
	__asm __volatile("movw %0,%%es" : : "rm" (sel));
}

/* Arm address monitoring on addr for a following MWAIT (MONITOR). */
static __inline void
cpu_monitor(const void *addr, u_long extensions, u_int hints)
{

	__asm __volatile("monitor"
	    : : "a" (addr), "c" (extensions), "d" (hints));
}

/* Wait for a write to the monitored range or an interrupt (MWAIT). */
static __inline void
cpu_mwait(u_long extensions, u_int hints)
{

	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
}
610
611
/* Read the protection-key rights register (RDPKRU; %ecx must be 0). */
static __inline uint32_t
rdpkru(void)
{
	uint32_t res;

	__asm __volatile("rdpkru" : "=a" (res) : "c" (0) : "edx");
	return (res);
}

/* Write mask to the protection-key rights register (WRPKRU). */
static __inline void
wrpkru(uint32_t mask)
{

	__asm __volatile("wrpkru" : : "a" (mask), "c" (0), "d" (0));
}
626
627
#ifdef _KERNEL
/* This is defined in <machine/specialreg.h> but is too painful to get to */
#ifndef MSR_FSBASE
#define MSR_FSBASE 0xc0000100
#endif
/*
 * Load the %fs selector in the kernel.  Writing %fs clobbers the
 * hidden FSBASE, so read the MSR first and restore it afterwards.
 */
static __inline void
load_fs(u_short sel)
{
	/* Preserve the fsbase value across the selector load */
	__asm __volatile("rdmsr; movw %0,%%fs; wrmsr"
	    : : "rm" (sel), "c" (MSR_FSBASE) : "eax", "edx");
}

#ifndef MSR_GSBASE
#define MSR_GSBASE 0xc0000101
#endif
/* Kernel %gs selector load; preserves GSBASE with interrupts disabled. */
static __inline void
load_gs(u_short sel)
{
	/*
	 * Preserve the gsbase value across the selector load.
	 * Note that we have to disable interrupts because the gsbase
	 * being trashed happens to be the kernel gsbase at the time.
	 */
	__asm __volatile("pushfq; cli; rdmsr; movw %0,%%gs; wrmsr; popfq"
	    : : "rm" (sel), "c" (MSR_GSBASE) : "eax", "edx");
}
#else
/* Usable by userland */
static __inline void
load_fs(u_short sel)
{
	__asm __volatile("movw %0,%%fs" : : "rm" (sel));
}

static __inline void
load_gs(u_short sel)
{
	__asm __volatile("movw %0,%%gs" : : "rm" (sel));
}
#endif

/* Read the FS base address (RDFSBASE; requires CR4.FSGSBASE). */
static __inline uint64_t
rdfsbase(void)
{
	uint64_t x;

	__asm __volatile("rdfsbase %0" : "=r" (x));
	return (x);
}

/* Write the FS base address (WRFSBASE). */
static __inline void
wrfsbase(uint64_t x)
{

	__asm __volatile("wrfsbase %0" : : "r" (x));
}

/* Read the GS base address (RDGSBASE). */
static __inline uint64_t
rdgsbase(void)
{
	uint64_t x;

	__asm __volatile("rdgsbase %0" : "=r" (x));
	return (x);
}

/* Write the GS base address (WRGSBASE). */
static __inline void
wrgsbase(uint64_t x)
{

	__asm __volatile("wrgsbase %0" : : "r" (x));
}
700
701
/* Load the GDT register from *addr (LGDT), with no bookkeeping. */
static __inline void
bare_lgdt(struct region_descriptor *addr)
{
	__asm __volatile("lgdt (%0)" : : "r" (addr));
}

/* Store the GDT register into *addr (SGDT). */
static __inline void
sgdt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sgdt %0" : "=m" (*loc) : : "memory");
}

/* Load the IDT register from *addr (LIDT). */
static __inline void
lidt(struct region_descriptor *addr)
{
	__asm __volatile("lidt (%0)" : : "r" (addr));
}

/* Store the IDT register into *addr (SIDT). */
static __inline void
sidt(struct region_descriptor *addr)
{
	char *loc;

	loc = (char *)addr;
	__asm __volatile("sidt %0" : "=m" (*loc) : : "memory");
}

/* Load the LDT selector (LLDT). */
static __inline void
lldt(u_short sel)
{
	__asm __volatile("lldt %0" : : "r" (sel));
}

/* Read the LDT selector (SLDT). */
static __inline u_short
sldt(void)
{
	u_short sel;

	__asm __volatile("sldt %0" : "=r" (sel));
	return (sel);
}

/* Load the task register selector (LTR). */
static __inline void
ltr(u_short sel)
{
	__asm __volatile("ltr %0" : : "r" (sel));
}

/* Read the task register selector (STR). */
static __inline uint32_t
read_tr(void)
{
	u_short sel;

	__asm __volatile("str %0" : "=r" (sel));
	return (sel);
}
760
761
/*
 * Debug register accessors.  DR0-DR3 hold breakpoint addresses, DR6 is
 * the debug status register, DR7 the debug control register.  All are
 * privileged MOV-to/from-DR operations.
 */
static __inline uint64_t
rdr0(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr0,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr0(uint64_t dr0)
{
	__asm __volatile("movq %0,%%dr0" : : "r" (dr0));
}

static __inline uint64_t
rdr1(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr1,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr1(uint64_t dr1)
{
	__asm __volatile("movq %0,%%dr1" : : "r" (dr1));
}

static __inline uint64_t
rdr2(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr2,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr2(uint64_t dr2)
{
	__asm __volatile("movq %0,%%dr2" : : "r" (dr2));
}

static __inline uint64_t
rdr3(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr3,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr3(uint64_t dr3)
{
	__asm __volatile("movq %0,%%dr3" : : "r" (dr3));
}

static __inline uint64_t
rdr6(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr6,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr6(uint64_t dr6)
{
	__asm __volatile("movq %0,%%dr6" : : "r" (dr6));
}

static __inline uint64_t
rdr7(void)
{
	uint64_t data;
	__asm __volatile("movq %%dr7,%0" : "=r" (data));
	return (data);
}

static __inline void
load_dr7(uint64_t dr7)
{
	__asm __volatile("movq %0,%%dr7" : : "r" (dr7));
}
844
845
/*
 * Disable interrupts and return the previous RFLAGS, suitable for a
 * later intr_restore().
 */
static __inline register_t
intr_disable(void)
{
	register_t rflags;

	rflags = read_rflags();
	disable_intr();
	return (rflags);
}

/* Restore the interrupt state saved by intr_disable(). */
static __inline void
intr_restore(register_t rflags)
{
	write_rflags(rflags);
}

/* Set RFLAGS.AC to permit supervisor access to user pages (SMAP, STAC). */
static __inline void
stac(void)
{

	__asm __volatile("stac" : : : "cc");
}

/* Clear RFLAGS.AC, re-enabling SMAP protection (CLAC). */
static __inline void
clac(void)
{

	__asm __volatile("clac" : : : "cc");
}
874
875
/* SGX ENCLS leaf function numbers (loaded into %eax). */
enum {
	SGX_ECREATE = 0x0,
	SGX_EADD = 0x1,
	SGX_EINIT = 0x2,
	SGX_EREMOVE = 0x3,
	SGX_EDGBRD = 0x4,
	SGX_EDGBWR = 0x5,
	SGX_EEXTEND = 0x6,
	SGX_ELDU = 0x8,
	SGX_EBLOCK = 0x9,
	SGX_EPA = 0xA,
	SGX_EWB = 0xB,
	SGX_ETRACK = 0xC,
};

/* SGX enclave page types. */
enum {
	SGX_PT_SECS = 0x00,
	SGX_PT_TCS = 0x01,
	SGX_PT_REG = 0x02,
	SGX_PT_VA = 0x03,
	SGX_PT_TRIM = 0x04,
};

/* Execute the ENCLS leaf eax with operands rbx/rcx/rdx; defined elsewhere. */
int sgx_encls(uint32_t eax, uint64_t rbx, uint64_t rcx, uint64_t rdx);

/* Create an SGX enclave control structure (ECREATE). */
static __inline int
sgx_ecreate(void *pginfo, void *secs)
{

	return (sgx_encls(SGX_ECREATE, (uint64_t)pginfo,
	    (uint64_t)secs, 0));
}

/* Add a page to an enclave (EADD). */
static __inline int
sgx_eadd(void *pginfo, void *epc)
{

	return (sgx_encls(SGX_EADD, (uint64_t)pginfo,
	    (uint64_t)epc, 0));
}

/* Initialize an enclave for execution (EINIT). */
static __inline int
sgx_einit(void *sigstruct, void *secs, void *einittoken)
{

	return (sgx_encls(SGX_EINIT, (uint64_t)sigstruct,
	    (uint64_t)secs, (uint64_t)einittoken));
}

/* Extend the enclave measurement over a page chunk (EEXTEND). */
static __inline int
sgx_eextend(void *secs, void *epc)
{

	return (sgx_encls(SGX_EEXTEND, (uint64_t)secs,
	    (uint64_t)epc, 0));
}

/* Add a version-array page to the EPC (EPA). */
static __inline int
sgx_epa(void *epc)
{

	return (sgx_encls(SGX_EPA, SGX_PT_VA, (uint64_t)epc, 0));
}

/* Load an evicted page back into the EPC (ELDU). */
static __inline int
sgx_eldu(uint64_t rbx, uint64_t rcx,
    uint64_t rdx)
{

	return (sgx_encls(SGX_ELDU, rbx, rcx, rdx));
}

/* Remove a page from the EPC (EREMOVE). */
static __inline int
sgx_eremove(void *epc)
{

	return (sgx_encls(SGX_EREMOVE, 0, (uint64_t)epc, 0));
}
953
954
/*
 * Restore extended processor state from save_area (XRSTORS, privileged).
 * state_bitmap selects the state components, split across %eax/%edx.
 */
static __inline void
xrstors(uint8_t *save_area, uint64_t state_bitmap)
{
	uint32_t low, hi;

	low = state_bitmap;
	hi = state_bitmap >> 32;
	__asm __volatile("xrstors %0" : : "m"(*save_area), "a"(low),
	    "d"(hi));
}

/*
 * Save extended processor state into save_area (XSAVES, privileged).
 * state_bitmap selects the state components, split across %eax/%edx.
 */
static __inline void
xsaves(uint8_t *save_area, uint64_t state_bitmap)
{
	uint32_t low, hi;

	low = state_bitmap;
	hi = state_bitmap >> 32;
	__asm __volatile("xsaves %0" : "=m"(*save_area) : "a"(low),
	    "d"(hi)
	    : "memory");
}

/* Reset the debug registers to a clean state; defined elsewhere. */
void reset_dbregs(void);

#ifdef _KERNEL
/* MSR accessors that trap and report #GP as an error instead of panicking. */
int rdmsr_safe(u_int msr, uint64_t *val);
int wrmsr_safe(u_int msr, uint64_t newval);
#endif
983
984
#endif /* !_MACHINE_CPUFUNC_H_ */
985
986
#endif /* __i386__ */
987
988