Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/jdk17u
Path: blob/master/src/hotspot/os/windows/os_windows.cpp
64441 views
1
/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
24
25
// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26
#define _WIN32_WINNT 0x0600
27
28
// no precompiled headers
29
#include "jvm.h"
30
#include "classfile/vmSymbols.hpp"
31
#include "code/codeCache.hpp"
32
#include "code/icBuffer.hpp"
33
#include "code/nativeInst.hpp"
34
#include "code/vtableStubs.hpp"
35
#include "compiler/compileBroker.hpp"
36
#include "compiler/disassembler.hpp"
37
#include "interpreter/interpreter.hpp"
38
#include "jvmtifiles/jvmti.h"
39
#include "logging/log.hpp"
40
#include "logging/logStream.hpp"
41
#include "memory/allocation.inline.hpp"
42
#include "oops/oop.inline.hpp"
43
#include "os_share_windows.hpp"
44
#include "os_windows.inline.hpp"
45
#include "prims/jniFastGetField.hpp"
46
#include "prims/jvm_misc.hpp"
47
#include "runtime/arguments.hpp"
48
#include "runtime/atomic.hpp"
49
#include "runtime/globals.hpp"
50
#include "runtime/globals_extension.hpp"
51
#include "runtime/interfaceSupport.inline.hpp"
52
#include "runtime/java.hpp"
53
#include "runtime/javaCalls.hpp"
54
#include "runtime/mutexLocker.hpp"
55
#include "runtime/objectMonitor.hpp"
56
#include "runtime/orderAccess.hpp"
57
#include "runtime/osThread.hpp"
58
#include "runtime/perfMemory.hpp"
59
#include "runtime/safefetch.inline.hpp"
60
#include "runtime/safepointMechanism.hpp"
61
#include "runtime/semaphore.inline.hpp"
62
#include "runtime/sharedRuntime.hpp"
63
#include "runtime/statSampler.hpp"
64
#include "runtime/thread.inline.hpp"
65
#include "runtime/threadCritical.hpp"
66
#include "runtime/timer.hpp"
67
#include "runtime/vm_version.hpp"
68
#include "services/attachListener.hpp"
69
#include "services/memTracker.hpp"
70
#include "services/runtimeService.hpp"
71
#include "utilities/align.hpp"
72
#include "utilities/decoder.hpp"
73
#include "utilities/defaultStream.hpp"
74
#include "utilities/events.hpp"
75
#include "utilities/macros.hpp"
76
#include "utilities/vmError.hpp"
77
#include "symbolengine.hpp"
78
#include "windbghelp.hpp"
79
80
#ifdef _DEBUG
81
#include <crtdbg.h>
82
#endif
83
84
#include <windows.h>
85
#include <sys/types.h>
86
#include <sys/stat.h>
87
#include <sys/timeb.h>
88
#include <objidl.h>
89
#include <shlobj.h>
90
91
#include <malloc.h>
92
#include <signal.h>
93
#include <direct.h>
94
#include <errno.h>
95
#include <fcntl.h>
96
#include <io.h>
97
#include <process.h> // For _beginthreadex(), _endthreadex()
98
#include <imagehlp.h> // For os::dll_address_to_function_name
99
// for enumerating dll libraries
100
#include <vdmdbg.h>
101
#include <psapi.h>
102
#include <mmsystem.h>
103
#include <winsock2.h>
104
105
// for timer info max values which include all bits
106
#define ALL_64_BITS CONST64(-1)
107
108
// For DLL loading/load error detection
109
// Values of PE COFF
110
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
111
#define IMAGE_FILE_SIGNATURE_LENGTH 4
112
113
static HANDLE main_process;
114
static HANDLE main_thread;
115
static int main_thread_id;
116
117
static FILETIME process_creation_time;
118
static FILETIME process_exit_time;
119
static FILETIME process_user_time;
120
static FILETIME process_kernel_time;
121
122
#if defined(_M_ARM64)
123
#define __CPU__ aarch64
124
#elif defined(_M_AMD64)
125
#define __CPU__ amd64
126
#else
127
#define __CPU__ i486
128
#endif
129
130
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
131
PVOID topLevelVectoredExceptionHandler = NULL;
132
LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
133
#endif
134
135
// save DLL module handle, used by GetModuleFileName
136
137
HINSTANCE vm_lib_handle;
138
139
// DLL entry point for the VM library. On attach it records the module
// handle (later used with GetModuleFileName) and initializes the dbghelp /
// symbol-engine wrappers; on detach it undoes the timer-resolution request
// and unregisters the vectored exception handler if one was installed.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      // Request 1ms multimedia timer resolution for the whole process.
      timeBeginPeriod(1L);
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      // Matches the timeBeginPeriod(1L) issued on attach.
      timeEndPeriod(1L);
    }
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
    if (topLevelVectoredExceptionHandler != NULL) {
      RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
      topLevelVectoredExceptionHandler = NULL;
    }
#endif
    break;
  default:
    break;
  }
  return true;
}
165
166
// Convert a FILETIME (a 64-bit count of 100ns ticks split across two
// DWORDs) into seconds, expressed as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double ticks_per_second = 10000000.0;
  const double dword_range = (double) ((unsigned int) ~0);
  return (time->dwLowDateTime / ticks_per_second) +
         time->dwHighDateTime * (dword_range / ticks_per_second);
}
173
174
// Implementation of os
175
176
#define RANGE_FORMAT "[" PTR_FORMAT "-" PTR_FORMAT ")"
177
#define RANGE_FORMAT_ARGS(p, len) p2i(p), p2i((address)p + len)
178
179
// A number of wrappers for more frequently used system calls, to add standard logging.
180
181
// RAII guard that snapshots GetLastError() at construction and restores it
// at destruction, so logging calls made in between cannot clobber the error
// code the caller wants to report. The captured value is readable via 'v'.
struct PreserveLastError {
  const DWORD v;
  PreserveLastError() : v(::GetLastError()) {}
  ~PreserveLastError() { ::SetLastError(v); }
};
186
187
// Logging wrapper for VirtualAlloc
188
static LPVOID virtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
189
LPVOID result = ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
190
if (result != NULL) {
191
log_trace(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) returned " PTR_FORMAT "%s.",
192
p2i(lpAddress), dwSize, flAllocationType, flProtect, p2i(result),
193
((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
194
} else {
195
PreserveLastError ple;
196
log_info(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) failed (%u).",
197
p2i(lpAddress), dwSize, flAllocationType, flProtect, ple.v);
198
}
199
return result;
200
}
201
202
// Logging wrapper for VirtualFree
203
static BOOL virtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
204
BOOL result = ::VirtualFree(lpAddress, dwSize, dwFreeType);
205
if (result != FALSE) {
206
log_trace(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) succeeded",
207
p2i(lpAddress), dwSize, dwFreeType);
208
} else {
209
PreserveLastError ple;
210
log_info(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) failed (%u).",
211
p2i(lpAddress), dwSize, dwFreeType, ple.v);
212
}
213
return result;
214
}
215
216
// Logging wrapper for VirtualAllocExNuma
217
static LPVOID virtualAllocExNuma(HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType,
218
DWORD flProtect, DWORD nndPreferred) {
219
LPVOID result = ::VirtualAllocExNuma(hProcess, lpAddress, dwSize, flAllocationType, flProtect, nndPreferred);
220
if (result != NULL) {
221
log_trace(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) returned " PTR_FORMAT "%s.",
222
p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, p2i(result),
223
((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
224
} else {
225
PreserveLastError ple;
226
log_info(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) failed (%u).",
227
p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, ple.v);
228
}
229
return result;
230
}
231
232
// Logging wrapper for MapViewOfFileEx
233
static LPVOID mapViewOfFileEx(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh,
234
DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap, LPVOID lpBaseAddress) {
235
LPVOID result = ::MapViewOfFileEx(hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh,
236
dwFileOffsetLow, dwNumberOfBytesToMap, lpBaseAddress);
237
if (result != NULL) {
238
log_trace(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") returned " PTR_FORMAT "%s.",
239
p2i(lpBaseAddress), dwNumberOfBytesToMap, p2i(result),
240
((lpBaseAddress != NULL && result != lpBaseAddress) ? " <different base!>" : ""));
241
} else {
242
PreserveLastError ple;
243
log_info(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") failed (%u).",
244
p2i(lpBaseAddress), dwNumberOfBytesToMap, ple.v);
245
}
246
return result;
247
}
248
249
// Logging wrapper for UnmapViewOfFile
250
static BOOL unmapViewOfFile(LPCVOID lpBaseAddress) {
251
BOOL result = ::UnmapViewOfFile(lpBaseAddress);
252
if (result != FALSE) {
253
log_trace(os)("UnmapViewOfFile(" PTR_FORMAT ") succeeded", p2i(lpBaseAddress));
254
} else {
255
PreserveLastError ple;
256
log_info(os)("UnmapViewOfFile(" PTR_FORMAT ") failed (%u).", p2i(lpBaseAddress), ple.v);
257
}
258
return result;
259
}
260
261
bool os::unsetenv(const char* name) {
262
assert(name != NULL, "Null pointer");
263
return (SetEnvironmentVariable(name, NULL) == TRUE);
264
}
265
266
// Returns the CRT's environment block (NULL-terminated array of "k=v" strings).
char** os::get_environ() { return _environ; }
267
268
// No setuid programs under Windows, so the VM never runs with elevated
// privileges relative to its invoker; always false on this platform.
bool os::have_special_privileges() {
  return false;
}
272
273
274
// This method is a periodic task to check for misbehaving JNI applications
275
// under CheckJNI, we can add any periodic checks here.
276
// For Windows at the moment does nothing
277
void os::run_periodic_checks() {
278
return;
279
}
280
281
// previous UnhandledExceptionFilter, if there is one
282
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
283
284
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
285
286
// Computes and publishes the path-related system properties: java.home,
// the dll directory, the boot class path, java.library.path and the
// (legacy) extensions directories. Called once during VM startup.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    const char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    // Test/override hook: lets a harness substitute a fake JAVA_HOME.
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0'; // strncpy may leave the buffer unterminated
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      // Strip three trailing components in place:
      *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0'; // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0'; // get rid of \bin
        }
      }
    }

    // Arguments::set_java_home copies its argument, so the temporary
    // C-heap buffer can be freed immediately afterwards.
    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Sized for 5 MAX_PATH components + separators + PATH + slack, so the
    // strcat chain below cannot overflow.
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    // 1. Directory containing the launcher executable.
    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';
    strcat(library_path, tmp);

    // 2. System-wide Java Extensions directory (<windir>\Sun\Java\bin).
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);

    // 3. System directory.
    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 4. Windows directory.
    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    // 5. PATH environment variable, if set.
    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    // 6. Current directory, last.
    strcat(library_path, ";.");

    Arguments::set_library_path(library_path);
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
        path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}
403
404
// Traps into an attached debugger (raises a breakpoint exception).
void os::breakpoint() {
  DebugBreak();
}
407
408
// Invoked from the BREAKPOINT Macro
409
// Invoked from the BREAKPOINT Macro
// C-linkage shim so the macro can reference a plain symbol.
extern "C" void breakpoint() {
  os::breakpoint();
}
412
413
// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
414
// So far, this method is only used by Native Memory Tracking, which is
415
// only supported on Windows XP or later.
416
//
417
int os::get_native_stack(address* stack, int frames, int toSkip) {
418
int captured = RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
419
for (int index = captured; index < frames; index ++) {
420
stack[index] = NULL;
421
}
422
return captured;
423
}
424
425
// os::current_stack_base()
//
// Returns the base of the stack, which is the stack's
// starting address. This function must be called
// while running on the stack of the thread being queried.
address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // Querying the address of a stack local ('&minfo') yields the memory
  // region of this thread's own stack; its AllocationBase is the stack's
  // lowest address.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom+stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;
    }
  }
  // Base is the highest address: bottom plus the summed region sizes.
  return stack_bottom + stack_size;
}
452
453
// Returns the size of the current thread's stack: distance from the stack's
// base (highest address) down to the stack allocation's AllocationBase,
// found by querying the region containing a stack local. Must run on the
// stack being measured.
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}
460
461
// Scans [start, start+size) for the first contiguous run of committed
// memory. On success returns true and sets 'committed_start'/'committed_size'
// to that run (trimmed to the range); returns false if nothing in the range
// is committed.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  MEMORY_BASIC_INFORMATION minfo;
  committed_start = NULL;
  committed_size = 0;
  address top = start + size;
  const address start_addr = start;
  // Walk the range region by region, as described by VirtualQuery.
  while (start < top) {
    VirtualQuery(start, &minfo, sizeof(minfo));
    if ((minfo.State & MEM_COMMIT) == 0) { // not committed
      if (committed_start != NULL) {
        // End of the first committed run - stop scanning.
        break;
      }
    } else { // committed
      if (committed_start == NULL) {
        committed_start = start;
      }
      // 'start' may lie inside the region; only count from 'start' onward.
      size_t offset = start - (address)minfo.BaseAddress;
      committed_size += minfo.RegionSize - offset;
    }
    start = (address)minfo.BaseAddress + minfo.RegionSize;
  }

  if (committed_start == NULL) {
    assert(committed_size == 0, "Sanity");
    return false;
  } else {
    assert(committed_start >= start_addr && committed_start < top, "Out of range");
    // current region may go beyond the limit, trim to the limit
    committed_size = MIN2(committed_size, size_t(top - committed_start));
    return true;
  }
}
493
494
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
495
const struct tm* time_struct_ptr = localtime(clock);
496
if (time_struct_ptr != NULL) {
497
*res = *time_struct_ptr;
498
return res;
499
}
500
return NULL;
501
}
502
503
struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
504
const struct tm* time_struct_ptr = gmtime(clock);
505
if (time_struct_ptr != NULL) {
506
*res = *time_struct_ptr;
507
return res;
508
}
509
return NULL;
510
}
511
512
JNIEXPORT
513
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
514
515
// Thread start routine for all newly created threads
// Runs on the new OS thread: records stack bounds, publishes the Thread*
// via TLS, runs the thread body (under an exception handler so crashes in
// non-Java threads still produce an error dump), then exits the thread.
static unsigned __stdcall thread_native_entry(Thread* thread) {

  thread->record_stack_base_and_size();
  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115; // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115; // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

#ifdef USE_VECTORED_EXCEPTION_HANDLING
  // Any exception is caught by the Vectored Exception Handler, so VM can
  // generate error dump when an exception occurred in non-Java thread
  // (e.g. VM thread).
  thread->call_run();
#else
  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->call_run();
  } __except(topLevelExceptionFilter(
                 (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
#endif

  // Note: at this point the thread object may already have deleted itself.
  // Do not dereference it from here on out.

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}
572
573
// Allocates and initializes an OSThread for an already-existing Win32
// thread (used when attaching, and for the primordial thread). Returns
// NULL on allocation failure; the OSThread ends in state INITIALIZED.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}
606
607
608
// Wires up a JavaThread for the current, externally created OS thread
// (JNI AttachCurrentThread path). Duplicates a real handle for the thread
// and builds its OSThread. Returns false on allocation failure.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // GetCurrentThread() returns a pseudo-handle; duplicate it into a real
  // handle usable from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
                       os::current_thread_id());

  return true;
}
633
634
// Associates the given JavaThread with the primordial (main) OS thread,
// creating the shared _starting_thread OSThread on first use. Returns
// false on allocation failure.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    // main_thread / main_thread_id were captured during VM startup.
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}
651
652
// Helper function to trace _beginthreadex attributes,
// similar to os::Posix::describe_pthread_attr()
// Formats the stack size and creation flags into 'buf' (up to 'buflen'
// bytes) and returns 'buf' for direct use in a log statement.
static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
                                               size_t stacksize, unsigned initflag) {
  stringStream ss(buf, buflen);
  if (stacksize == 0) {
    ss.print("stacksize: default, ");
  } else {
    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
  }
  ss.print("flags: ");
  // X-macro: append the name of each flag present in 'initflag'.
  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
  #define ALL(X) \
    X(CREATE_SUSPENDED) \
    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
  ALL(PRINT_FLAG)
  #undef ALL
  #undef PRINT_FLAG
  return buf;
}
672
673
// Allocate and initialize a new OSThread
// Creates a new native thread for 'thread' via _beginthreadex. The thread
// is created suspended (state INITIALIZED) and started later, higher up in
// the call chain. Returns false and fully rolls back on failure.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  // We don't call set_interrupted(false) as it will trip the assert in there
  // as we are not operating on the current thread. We don't need to call it
  // because the initial state is already correct.

  thread->set_osthread(osthread);

  // Pick a default stack size per thread type when the caller did not
  // request one explicitly.
  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::asynclog_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
  HANDLE thread_handle;
  // Retry a few times on transient EAGAIN failures.
  int limit = 3;
  do {
    thread_handle =
      (HANDLE)_beginthreadex(NULL,
                             (unsigned)stack_size,
                             (unsigned (__stdcall *)(void*)) thread_native_entry,
                             thread,
                             initflag,
                             &thread_id);
  } while (thread_handle == NULL && errno == EAGAIN && limit-- > 0);

  ResourceMark rm;
  char buf[64];
  if (thread_handle != NULL) {
    log_info(os, thread)("Thread \"%s\" started (tid: %u, attributes: %s)",
                         thread->name(), thread_id,
                         describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
  } else {
    log_warning(os, thread)("Failed to start thread \"%s\" - _beginthreadex failed (%s) for attributes: %s.",
                            thread->name(), os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
    // Log some OS information which might explain why creating the thread failed.
    log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
    LogStream st(Log(os, thread)::info());
    os::print_memory_info(&st);
  }

  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  Atomic::inc(&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Thread state now is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
795
796
797
// Free Win32 resources related to the OSThread
// Closes the Win32 thread handle and deletes the OSThread object. Must be
// called on the thread that owns 'osthread'.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  delete osthread;
}
809
810
static jlong first_filetime;
811
static jlong initial_performance_count;
812
static jlong performance_frequency;
813
814
815
// Reassembles a Windows LARGE_INTEGER into a jlong.
jlong as_long(LARGE_INTEGER x) {
  // LARGE_INTEGER is a union whose QuadPart member is the full signed
  // 64-bit value; reading it directly is the documented idiom and avoids
  // the manual high/low word splicing via set_high()/set_low().
  return (jlong)x.QuadPart;
}
821
822
823
// Returns performance-counter ticks elapsed since VM startup
// (i.e. since initialize_performance_counter() sampled the baseline).
jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  QueryPerformanceCounter(&count);
  return as_long(count) - initial_performance_count;
}
828
829
830
// Returns the performance-counter frequency in ticks per second,
// cached at startup by initialize_performance_counter().
jlong os::elapsed_frequency() {
  return performance_frequency;
}
833
834
835
// Returns currently available physical memory, in bytes.
julong os::available_memory() {
  return win32::available_memory();
}
838
839
// Returns the amount of physical memory currently available, in bytes.
julong os::win32::available_memory() {
  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);  // must be set before the call, per the API contract
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}
848
849
// Returns total physical memory, in bytes.
julong os::physical_memory() {
  return win32::physical_memory();
}
852
853
// Reports the amount of virtual address space currently available to this
// process in '*limit'. Always returns true on Windows (a limit exists).
bool os::has_allocatable_memory_limit(size_t* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);  // must be set before the call, per the API contract
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (size_t)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((size_t)1400*M, (size_t)ms.ullAvailVirtual);
  return true;
#endif
}
866
867
// Returns the number of processors the VM may use: the user override
// (-XX:ActiveProcessorCount) if set, else the popcount of the process
// affinity mask, falling back to the raw processor count when the mask
// cannot represent all processors or the query fails.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  // The affinity mask only has sizeof(UINT_PTR)*8 bits, so it cannot
  // describe systems with more processors than that.
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    // Kernighan's trick: each iteration clears the lowest set bit.
    int bitcount = 0;
    while (lpProcessAffinityMask != 0) {
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}
892
893
// Returns the id of the processor the calling thread is currently running on.
uint os::processor_id() {
  return (uint)GetCurrentProcessorNumber();
}
896
897
// For dynamic lookup of SetThreadDescription API
898
typedef HRESULT (WINAPI *SetThreadDescriptionFnPtr)(HANDLE, PCWSTR);
899
typedef HRESULT (WINAPI *GetThreadDescriptionFnPtr)(HANDLE, PWSTR*);
900
static SetThreadDescriptionFnPtr _SetThreadDescription = NULL;
901
DEBUG_ONLY(static GetThreadDescriptionFnPtr _GetThreadDescription = NULL;)
902
903
// forward decl.
904
static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path);
905
906
// Sets the OS-level name of the current thread. Prefers the
// SetThreadDescription API (looked up dynamically; Windows 10 / Server
// 2016+); otherwise falls back to the old MSVC debugger protocol of
// raising a special exception, which only works under a debugger.
void os::set_native_thread_name(const char *name) {

  // From Windows 10 and Windows 2016 server, we have a direct API
  // for setting the thread name/description:
  // https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-setthreaddescription

  if (_SetThreadDescription != NULL) {
    // SetThreadDescription takes a PCWSTR but we have conversion routines that produce
    // LPWSTR. The only difference is that PCWSTR is a pointer to const WCHAR.
    LPWSTR unicode_name;
    errno_t err = convert_to_unicode(name, &unicode_name);
    if (err == ERROR_SUCCESS) {
      HANDLE current = GetCurrentThread();
      HRESULT hr = _SetThreadDescription(current, unicode_name);
      if (FAILED(hr)) {
        log_debug(os, thread)("set_native_thread_name: SetThreadDescription failed - falling back to debugger method");
        FREE_C_HEAP_ARRAY(WCHAR, unicode_name);
        // fall through to the debugger-exception method below
      } else {
        log_trace(os, thread)("set_native_thread_name: SetThreadDescription succeeded - new name: %s", name);

#ifdef ASSERT
        // For verification purposes in a debug build we read the thread name back and check it.
        PWSTR thread_name;
        HRESULT hr2 = _GetThreadDescription(current, &thread_name);
        if (FAILED(hr2)) {
          log_debug(os, thread)("set_native_thread_name: GetThreadDescription failed!");
        } else {
          int res = CompareStringW(LOCALE_USER_DEFAULT,
                                   0, // no special comparison rules
                                   unicode_name,
                                   -1, // null-terminated
                                   thread_name,
                                   -1 // null-terminated
                                   );
          assert(res == CSTR_EQUAL,
                 "Name strings were not the same - set: %ls, but read: %ls", unicode_name, thread_name);
          LocalFree(thread_name); // GetThreadDescription allocates with LocalAlloc
        }
#endif
        FREE_C_HEAP_ARRAY(WCHAR, unicode_name);
        return;
      }
    } else {
      log_debug(os, thread)("set_native_thread_name: convert_to_unicode failed - falling back to debugger method");
    }
  }

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    log_debug(os, thread)("set_native_thread_name: no debugger present so unable to set thread name");
    return;
  }

  // Layout mandated by the MSVC "SetThreadName" debugger convention.
  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType; // must be 0x1000
    LPCSTR szName; // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags; // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;
  info.dwFlags = 0;

  // Raise the magic exception; the debugger consumes it, and the empty
  // handler keeps it from propagating when it is not consumed.
  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}
982
983
// Binds the current thread to the given processor. Not supported on
// Windows; always reports failure.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
987
988
// Captures the QueryPerformanceCounter frequency and the counter value at
// VM startup, used as the base for elapsed-time calculations.
// NOTE(review): return values are unchecked; on supported Windows versions
// these calls do not fail.
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  QueryPerformanceFrequency(&count);
  performance_frequency = as_long(count);
  QueryPerformanceCounter(&count);
  initial_performance_count = as_long(count);
}
995
996
997
double os::elapsedTime() {
998
return (double) elapsed_counter() / (double) elapsed_frequency();
999
}
1000
1001
1002
// Windows format:
1003
// The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
1004
// Java format:
1005
// Java standards require the number of milliseconds since 1/1/1970
1006
1007
// Constant offset - calculated using offset()
// Number of 100ns intervals between the Windows epoch (1601-01-01) and
// the Java epoch (1970-01-01).
static jlong _offset = 116444736000000000;
// Fake time counter for reproducible results when debugging
static jlong fake_time = 0;
1011
1012
#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

// Debug variant: derives the 1601->1970 epoch offset from the OS and
// asserts it matches the hard-coded _offset constant. Result is cached.
// NOTE(review): the lazy init is not synchronized, but racing threads
// compute and store identical values, so the race is benign.
jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Release variant: just return the precomputed constant.
jlong offset() {
  return _offset;
}
#endif
1042
1043
// Convert a Windows FILETIME (100ns units since 1601-01-01) into Java
// milliseconds since 1970-01-01.
jlong windows_to_java_time(FILETIME wt) {
  const jlong hundred_ns = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime) - offset();
  return hundred_ns / 10000; // 10,000 x 100ns per millisecond
}
1047
1048
// Returns time ticks in (10th of micro seconds)
1049
jlong windows_to_time_ticks(FILETIME wt) {
1050
jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
1051
return (a - offset());
1052
}
1053
1054
// Inverse of windows_to_java_time: convert Java milliseconds since 1970
// into a Windows FILETIME (100ns units since 1601).
FILETIME java_to_windows_time(jlong l) {
  const jlong hundred_ns = (l * 10000) + offset();
  FILETIME ft;
  ft.dwHighDateTime = high(hundred_ns);
  ft.dwLowDateTime  = low(hundred_ns);
  return ft;
}
1061
1062
// Per-thread CPU time (elapsedVTime) is always available on Windows.
bool os::supports_vtime() { return true; }
1063
1064
// CPU time (user + kernel) consumed by the current thread, in seconds.
// Falls back to wall-clock elapsed time if GetThreadTimes fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}
1076
1077
// Current wall-clock time as milliseconds since the Java epoch.
jlong os::javaTimeMillis() {
  FILETIME now;
  GetSystemTimeAsFileTime(&now);
  return windows_to_java_time(now);
}
1082
1083
// Current wall-clock UTC time split into whole seconds and the
// nanoseconds remainder (100ns granularity on Windows).
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME now;
  GetSystemTimeAsFileTime(&now);
  const jlong ticks = windows_to_time_ticks(now); // 100ns units since epoch
  const jlong whole_secs = ticks / 10000000;      // 10^7 ticks per second
  const jlong frac_ticks = ticks - (whole_secs * 10000000);
  seconds = whole_secs;
  nanos = frac_ticks * 100;                       // 100ns per tick
}
1091
1092
// Monotonic time in nanoseconds based on QueryPerformanceCounter.
// NOTE(review): the counter is scaled through double arithmetic, so a
// small amount of precision is lost for very large counter values.
jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
}
1100
1101
// Fills in JVMTI timer capabilities for javaTimeNanos(): the maximum
// representable value depends on whether scaling the counter to
// nanoseconds multiplies (freq < 1GHz), divides (freq > 1GHz), or is the
// identity (freq == 1GHz).
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  jlong freq = performance_frequency;
  if (freq < NANOSECS_PER_SEC) {
    // the performance counter is 64 bits and we will
    // be multiplying it -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  } else if (freq > NANOSECS_PER_SEC) {
    // use the max value the counter can reach to
    // determine the max value which could be returned
    julong max_counter = (julong)ALL_64_BITS;
    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
  } else {
    // the performance counter is 64 bits and we will
    // be using it directly -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  }

  // using a counter, so no skipping
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;

  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
1124
1125
char* os::local_time_string(char *buf, size_t buflen) {
1126
SYSTEMTIME st;
1127
GetLocalTime(&st);
1128
jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
1129
st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
1130
return buf;
1131
}
1132
1133
// Reports process real/user/system time in seconds. Returns false if
// GetProcessTimes fails, in which case the out-parameters are untouched.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    // kernel/user times are durations in 100ns units; 10 * MICROUNITS
    // (10^7) converts to seconds.
    *process_user_time =
      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
    *process_system_time =
      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
    return true;
  } else {
    return false;
  }
}
1157
1158
// Orderly VM shutdown: release PerfMemory resources, flush logging, and
// run any user-registered abort hook. Does not terminate the process.
void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1171
1172
1173
// Handle for the minidump file; created in check_dump_limit(), written
// and closed in os::abort().
static HANDLE dumpFile = NULL;
1174
1175
// Check if dump file can be created.
// On success, `buffer` receives the minidump path and the file handle is
// stashed in `dumpFile`; on failure, `buffer` receives an explanatory
// message. The outcome is recorded via VMError::record_coredump_status.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Create the file eagerly (intentional assignment inside the
    // condition) so a crash handler does not have to allocate handles.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}
1208
1209
// Aborts the VM. Runs shutdown(), then (if requested and possible)
// writes a minidump to the pre-opened dumpFile before exiting the
// process with status 1. Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
                             MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // Include the faulting exception context in the dump when available.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
1249
1250
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}
1254
1255
// Platform-specific shared-library file extension.
const char* os::dll_file_extension() { return ".dll"; }
1256
1257
// Unloads a shared library previously loaded with dll_load(), logging the
// outcome either way. Failure to unload is logged but not fatal.
void os::dll_unload(void *lib) {
  char name[MAX_PATH];
  // Capture the module path first, for logging, before the handle becomes invalid.
  if (::GetModuleFileName((HMODULE)lib, name, sizeof(name)) == 0) {
    snprintf(name, MAX_PATH, "<not available>");
  }
  if (::FreeLibrary((HMODULE)lib)) {
    Events::log_dll_message(NULL, "Unloaded dll \"%s\" [" INTPTR_FORMAT "]", name, p2i(lib));
    log_info(os)("Unloaded dll \"%s\" [" INTPTR_FORMAT "]", name, p2i(lib));
  } else {
    const DWORD errcode = ::GetLastError();
    Events::log_dll_message(NULL, "Attempt to unload dll \"%s\" [" INTPTR_FORMAT "] failed (error code %d)", name, p2i(lib), errcode);
    log_info(os)("Attempt to unload dll \"%s\" [" INTPTR_FORMAT "] failed (error code %d)", name, p2i(lib), errcode);
  }
}
1271
1272
// Resolves an exported symbol in a loaded library; returns NULL if the
// symbol is not found.
void* os::dll_lookup(void *lib, const char *name) {
  return (void*)::GetProcAddress((HMODULE)lib, name);
}
1275
1276
// Directory routines copied from src/win32/native/java/io/dirent_md.c
1277
// * dirent_md.c 1.15 00/02/02
1278
//
1279
// The declarations for DIR and struct dirent are in jvm_win32.h.
1280
1281
// Caller must have already run dirname through JVM_NativePath, which removes
1282
// duplicate slashes and converts all instances of '/' into '\\'.
1283
1284
// Opens a directory stream over `dirname` (already normalized by
// JVM_NativePath). Returns NULL and sets errno (ENOMEM/ENOENT/ENOTDIR/
// EACCES) on failure. Caller must release with os::closedir().
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking"); // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr; // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile(). We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the "\\*.*" suffix appended below plus the NUL.
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) { // INVALID_FILE_ATTRIBUTES: path does not exist
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    // ERROR_FILE_NOT_FOUND just means the directory is empty; any other
    // error is a real failure.
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
1349
1350
// Returns the next entry of the directory stream, or NULL when exhausted
// or on error (errno = EBADF). The returned dirent is stored inside the
// DIR structure and is overwritten by the next call.
struct dirent * os::readdir(DIR *dirp) {
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return NULL;
  }

  // Hand out the entry fetched by the previous Find{First,Next}File call,
  // then pre-fetch the next one.
  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return NULL;
    }
    // End of directory: close the handle so the next call returns NULL.
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1369
1370
// Closes a directory stream opened with os::opendir() and frees its
// memory. Returns 0 on success, -1 with errno = EBADF if the underlying
// find handle could not be closed (dirp is still freed in the 0 path only).
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking"); // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}
1383
1384
// This must be hard coded because it's the system's temporary
1385
// directory not the java application's temp directory, ala java.io.tmpdir.
1386
const char* os::get_temp_directory() {
1387
static char path_buf[MAX_PATH];
1388
if (GetTempPath(MAX_PATH, path_buf) > 0) {
1389
return path_buf;
1390
} else {
1391
path_buf[0] = '\0';
1392
return path_buf;
1393
}
1394
}
1395
1396
// Needs to be in os specific directory because windows requires another
1397
// header file <direct.h>
1398
const char* os::get_current_directory(char *buf, size_t buflen) {
1399
int n = static_cast<int>(buflen);
1400
if (buflen > INT_MAX) n = INT_MAX;
1401
return _getcwd(buf, n);
1402
}
1403
1404
//-----------------------------------------------------------
1405
// Helper functions for fatal error handler
1406
#ifdef _WIN64
1407
// Helper routine which returns true if address in
1408
// within the NTDLL address space.
1409
//
1410
static bool _addr_in_ntdll(address addr) {
1411
HMODULE hmod;
1412
MODULEINFO minfo;
1413
1414
hmod = GetModuleHandle("NTDLL.DLL");
1415
if (hmod == NULL) return false;
1416
if (!GetModuleInformation(GetCurrentProcess(), hmod,
1417
&minfo, sizeof(MODULEINFO))) {
1418
return false;
1419
}
1420
1421
if ((addr >= minfo.lpBaseOfDll) &&
1422
(addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
1423
return true;
1424
} else {
1425
return false;
1426
}
1427
}
1428
#endif
1429
1430
// In/out parameter bundle for _locate_module_by_addr.
struct _modinfo {
  address addr;      // in: address to look up
  char* full_path;   // point to a char buffer (out, may be NULL)
  int buflen;        // size of the buffer
  address base_addr; // out: base of the module containing addr
};
1436
1437
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
1438
address top_address, void * param) {
1439
struct _modinfo *pmod = (struct _modinfo *)param;
1440
if (!pmod) return -1;
1441
1442
if (base_addr <= pmod->addr &&
1443
top_address > pmod->addr) {
1444
// if a buffer is provided, copy path name to the buffer
1445
if (pmod->full_path) {
1446
jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
1447
}
1448
pmod->base_addr = base_addr;
1449
return 1;
1450
}
1451
return 0;
1452
}
1453
1454
// Finds the full path of the loaded module containing `addr`, writing it
// into `buf`; optionally reports the offset of addr from the module base.
// On failure buf is set to "" and *offset (if given) to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  // return the full path to the DLL file, sometimes it returns path
  // to the corresponding PDB file (debug info); sometimes it only
  // returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}
1478
1479
bool os::dll_address_to_function_name(address addr, char *buf,
1480
int buflen, int *offset,
1481
bool demangle) {
1482
// buf is not optional, but offset is optional
1483
assert(buf != NULL, "sanity check");
1484
1485
if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
1486
return true;
1487
}
1488
if (offset != NULL) *offset = -1;
1489
buf[0] = '\0';
1490
return false;
1491
}
1492
1493
// save the start and end address of jvm.dll into param[0] and param[1]
1494
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
1495
address top_address, void * param) {
1496
if (!param) return -1;
1497
1498
if (base_addr <= (address)_locate_jvm_dll &&
1499
top_address > (address)_locate_jvm_dll) {
1500
((address*)param)[0] = base_addr;
1501
((address*)param)[1] = top_address;
1502
return 1;
1503
}
1504
return 0;
1505
}
1506
1507
// Lazily filled by os::address_is_in_vm() via _locate_jvm_dll.
address vm_lib_location[2]; // start and end address of jvm.dll
1508
1509
// check if addr is inside jvm.dll
// NOTE(review): lazy init of vm_lib_location is unsynchronized; racing
// threads would store identical values, so the race is benign.
bool os::address_is_in_vm(address addr) {
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}
1520
1521
// print module info; param is outputStream*
1522
static int _print_module(const char* fname, address base_address,
1523
address top_address, void* param) {
1524
if (!param) return -1;
1525
1526
outputStream* st = (outputStream*)param;
1527
1528
st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
1529
return 0;
1530
}
1531
1532
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure returns NULL with a human-readable reason in ebuf.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  log_info(os)("attempting shared library load of %s", name);

  void * result = LoadLibrary(name);
  if (result != NULL) {
    Events::log_dll_message(NULL, "Loaded shared library %s", name);
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    log_info(os)("shared library load of %s was successful", name);
    return result;
  }
  DWORD errcode = GetLastError();
  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  Events::log_dll_message(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
  log_info(os)("shared library load of %s failed, error code %lu", name, errcode);

  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  // Maps PE machine codes to printable architecture names.
  typedef struct {
    uint16_t arch_code;
    char* arch_name;
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386, (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64, (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_ARM64, (char*)"ARM 64"}
  };
#if (defined _M_ARM64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_ARM64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86 or _M_ARM64
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}
1653
1654
// Prints the address range and path of every loaded module to st.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}
1658
1659
// Enumerates the modules loaded in this process and invokes `callback`
// for each, stopping early if the callback returns non-zero. Returns the
// last callback result, or 0 on enumeration failure.
// NOTE(review): `filename` is function-static, so concurrent callers
// share the buffer; modules beyond MAX_NUM_MODULES are silently skipped.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}
1702
1703
// Writes the DNS host name of this machine into buf; returns false on
// failure (including buffer too small).
bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}
1707
1708
void os::get_summary_os_info(char* buf, size_t buflen) {
1709
stringStream sst(buf, buflen);
1710
os::win32::print_windows_version(&sst);
1711
// chop off newline character
1712
char* nl = strchr(buf, '\n');
1713
if (nl != NULL) *nl = '\0';
1714
}
1715
1716
// C99-conforming vsnprintf for all supported MSVC versions: always
// NUL-terminates (when len > 0) and returns the number of characters
// that would have been written, or a negative value on encoding error.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprint is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions. However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf. The output is going to be truncated in
  // that case, except in the unusual case of empty output. More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated. Add the trailing NUL specified by C99.
    if ((result < 0) || ((size_t)result >= len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}
1750
1751
// Returns the last-modified time of `filename`.
// NOTE(review): the stat() result is only checked via assert, so in a
// release build a failed stat silently yields garbage st_mtime.
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtime;
}
1757
1758
int os::compare_file_modified_times(const char* file1, const char* file2) {
1759
time_t t1 = get_mtime(file1);
1760
time_t t2 = get_mtime(file2);
1761
return t1 - t2;
1762
}
1763
1764
// On Windows the brief OS report is the same as the full one.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}
1767
1768
// Prints system uptime (days/hours/minutes) derived from GetTickCount64.
void os::win32::print_uptime_info(outputStream* st) {
  unsigned long long ticks = GetTickCount64(); // milliseconds since boot
  os::print_dhm(st, "OS uptime:", ticks/1000);
}
1772
1773
// Prints OS details (host name in debug builds, Windows version, uptime,
// and virtualization info) to st.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    // Print via "%s": the host name is external data and must never be
    // used as a format string.
    st->print_cr("%s", buffer);
  } else {
    st->print_cr("N/A");
  }
#endif
  st->print_cr("OS:");
  os::win32::print_windows_version(st);

  os::win32::print_uptime_info(st);

  VM_Version::print_platform_virtualization_info(st);
}
1790
1791
// Prints a human-readable Windows product name and build number to st.
// The product name is derived from kernel32.dll's file version resource
// (GetVersionEx lies from Windows 8.1 onwards); GetVersionEx is only used
// to distinguish workstation from server editions.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // "\\" queries the root VS_FIXEDFILEINFO block of the version resource.
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      // Windows 11 reuses product version 10.0; only the build number
      // distinguishes it from Windows 10.
      if (build_number >= 22000) {
        st->print("11");
      } else {
        st->print("10");
      }
    } else {
      // distinguish Windows Server by build number
      // - 2016 GA 10/2016 build: 14393
      // - 2019 GA 11/2018 build: 17763
      // - 2022 GA 08/2021 build: 20348
      if (build_number > 20347) {
        st->print("Server 2022");
      } else if (build_number > 17762) {
        st->print("Server 2019");
      } else {
        st->print("Server 2016");
      }
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if ((si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) ||
      (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM64)) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
1926
1927
// Platform-specific CPU detail printing for error reports; intentionally a
// no-op on Windows (the summary line is produced by get_summary_cpu_info).
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}
1930
1931
void os::get_summary_cpu_info(char* buf, size_t buflen) {
1932
HKEY key;
1933
DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1934
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1935
if (status == ERROR_SUCCESS) {
1936
DWORD size = (DWORD)buflen;
1937
status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1938
if (status != ERROR_SUCCESS) {
1939
strncpy(buf, "## __CPU__", buflen);
1940
}
1941
RegCloseKey(key);
1942
} else {
1943
// Put generic cpu info to return
1944
strncpy(buf, "## __CPU__", buflen);
1945
}
1946
}
1947
1948
// Print system-wide and per-process memory statistics for error reports and
// -XX:+PrintMemoryInfo style output. Best-effort: failures of the Win32
// queries are reported inline rather than aborting.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  int r1 = GlobalMemoryStatusEx(&ms);

  if (r1 != 0) {
    st->print(", system-wide physical " INT64_FORMAT "M ",
              (int64_t) ms.ullTotalPhys >> 20);
    st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);

    st->print("TotalPageFile size " INT64_FORMAT "M ",
              (int64_t) ms.ullTotalPageFile >> 20);
    st->print("(AvailPageFile size " INT64_FORMAT "M)",
              (int64_t) ms.ullAvailPageFile >> 20);

    // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
#if defined(_M_IX86)
    st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
              (int64_t) ms.ullTotalVirtual >> 20);
    st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
#endif
  } else {
    st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
  }

  // extended memory statistics for a process
  PROCESS_MEMORY_COUNTERS_EX pmex;
  ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
  pmex.cb = sizeof(pmex);
  // The EX struct must be passed through the base-struct pointer type.
  int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));

  if (r2 != 0) {
    st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
              (int64_t) pmex.WorkingSetSize >> 20);
    st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);

    st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
              (int64_t) pmex.PrivateUsage >> 20);
    st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
  } else {
    st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
  }

  st->cr();
}
1998
1999
// Whether the given siginfo describes a signal sent by an explicit kill().
// Windows has no kill(2)-style sender information in its exception records,
// so this always answers false.
bool os::signal_sent_by_kill(const void* siginfo) {
  // TODO: Is this possible?
  return false;
}
2003
2004
// Pretty-print the Windows EXCEPTION_RECORD passed as the opaque siginfo
// pointer: symbolic exception name, code, and - for access violations and
// in-page errors - the access kind and faulting address.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  // For AV / in-page errors, ExceptionInformation[0] is the access kind
  // (0=read, 1=write, 8=DEP violation) and [1] is the faulting address.
  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    // Unknown exception kind: dump all raw parameters.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
2036
2037
// Send a signal to a specific thread. Not supported on Windows; always
// reports failure.
bool os::signal_thread(Thread* thread, int sig, const char* reason) {
  // TODO: Can we kill thread?
  return false;
}
2041
2042
// Print installed signal handlers for error reports. Windows uses SEH and
// console control handlers rather than POSIX handlers, so there is nothing
// to print here.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
2045
2046
// Cached result of os::jvm_path(); resolved lazily on first call.
static char saved_jvm_path[MAX_PATH] = {0};

// Find the full path to the current module, jvm.dll
// Result is copied into buf (which must hold at least MAX_PATH bytes) and
// memoized in saved_jvm_path for subsequent calls.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      // Safe: length was checked against buflen above.
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      if (0 != _access(buf, 0)) {
        // "<JAVA_HOME>\jre\bin" does not exist: assume modules image layout.
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Default: ask the OS for the path of the loaded jvm.dll module.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}
2092
2093
2094
// Print the platform JNI symbol-name prefix. 32-bit Windows stdcall symbols
// carry a leading underscore; 64-bit symbols have no prefix.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
2099
2100
2101
// Print the platform JNI symbol-name suffix. 32-bit Windows stdcall symbols
// end in "@<bytes of arguments>"; 64-bit symbols have no suffix.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("@%d", args_size * sizeof(int));
#endif
}
2106
2107
// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c
//
// Copy a human-readable message for the most recent error (Win32 last-error
// first, then C-runtime errno) into buf. Returns the message length, or 0
// when no error is pending.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;
}
2145
2146
int os::get_last_error() {
2147
DWORD error = GetLastError();
2148
if (error == 0) {
2149
error = errno;
2150
}
2151
return (int)error;
2152
}
2153
2154
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Common handler for user-registered signals: record the signal for the
// signal-dispatcher thread and re-arm the handler (the CRT resets it to
// default after each delivery).
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
2166
2167
// Return the address of the shared user-signal handler installed for
// sun.misc.Signal-registered signals.
void* os::user_handler() {
  return (void*) UserHandler;
}
2170
2171
// Install a handler for signal_number and return the previous handler.
// SIGBREAK is intercepted (unless -Xrs/ReduceSignalUsage) and routed through
// consoleHandler instead of the CRT, see the kernel-bug note above.
void* os::signal(int signal_number, void* handler) {
  if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
    // Swap our private SIGBREAK handler; the CRT never sees this one.
    void (*oldHandler)(int) = sigbreakHandler;
    sigbreakHandler = (void (*)(int)) handler;
    return (void*) oldHandler;
  } else {
    return (void*)::signal(signal_number, (void (*)(int))handler);
  }
}
2180
2181
// Deliver signal_number to the current process via the C runtime.
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2184
2185
// The Win32 C runtime library maps all console control events other than ^C
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
// logoff, and shutdown events. We therefore install our own console handler
// that raises SIGTERM for the latter cases.
//
static BOOL WINAPI consoleHandler(DWORD event) {
  switch (event) {
  case CTRL_C_EVENT:
    if (VMError::is_error_reported()) {
      // Ctrl-C is pressed during error reporting, likely because the error
      // handler fails to abort. Let VM die immediately.
      os::die();
    }

    os::signal_raise(SIGINT);
    return TRUE;
    break;
  case CTRL_BREAK_EVENT:
    // Route Ctrl-Break to the handler registered via os::signal(SIGBREAK,..)
    // (e.g. the thread-dump mechanism).
    if (sigbreakHandler != NULL) {
      (*sigbreakHandler)(SIGBREAK);
    }
    return TRUE;
    break;
  case CTRL_LOGOFF_EVENT: {
    // Don't terminate JVM if it is running in a non-interactive session,
    // such as a service process.
    USEROBJECTFLAGS flags;
    HANDLE handle = GetProcessWindowStation();
    if (handle != NULL &&
        GetUserObjectInformation(handle, UOI_FLAGS, &flags,
                                 sizeof(USEROBJECTFLAGS), NULL)) {
      // If it is a non-interactive session, let next handler to deal
      // with it.
      if ((flags.dwFlags & WSF_VISIBLE) == 0) {
        return FALSE;
      }
    }
  }
  // fall through: interactive logoff is treated like close/shutdown.
  case CTRL_CLOSE_EVENT:
  case CTRL_SHUTDOWN_EVENT:
    os::signal_raise(SIGTERM);
    return TRUE;
    break;
  default:
    break;
  }
  return FALSE;
}
2233
2234
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
int os::sigexitnum_pd() {
  return NSIG;
}
2242
2243
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Semaphore the signal-dispatcher thread blocks on; NULL until
// jdk_misc_signal_init() runs (it is skipped under -Xrs/ReduceSignalUsage).
static Semaphore* sig_sem = NULL;
2246
2247
// One-time setup for the sun.misc.Signal machinery: clear the pending-signal
// counters, create the dispatcher semaphore, and install the console control
// handler.
static void jdk_misc_signal_init() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3. For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified. This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case. See bugs 4323062, 4345157, and related bugs.

  // Add a CTRL-C handler
  SetConsoleCtrlHandler(consoleHandler, TRUE);
}
2271
2272
// Record that signal sig arrived and wake the signal-dispatcher thread.
// Safe to call from a signal/console-handler context.
void os::signal_notify(int sig) {
  if (sig_sem != NULL) {
    Atomic::inc(&pending_signals[sig]);
    sig_sem->signal();
  } else {
    // Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
    // initialization isn't called.
    assert(ReduceSignalUsage, "signal semaphore should be created");
  }
}
2282
2283
// Block until some signal has a non-zero pending count, atomically consume
// one occurrence, and return its number. Called by the signal-dispatcher
// thread; the cmpxchg decrement makes concurrent consumers safe.
static int check_pending_signals() {
  while (true) {
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      // Claim one pending delivery of signal i; retry the scan if another
      // thread raced us.
      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
    // Nothing pending: sleep until signal_notify() posts the semaphore.
    sig_sem->wait_with_safepoint_check(JavaThread::current());
  }
  ShouldNotReachHere();
  return 0; // Satisfy compiler
}
2296
2297
// Wait for and return the next pending sun.misc.Signal number.
int os::signal_wait() {
  return check_pending_signals();
}
2300
2301
// Implicit OS exception handling

// Redirect execution to the given handler address: remember the faulting pc
// on the JavaThread (for later stack walking / exception construction), then
// overwrite the context's program counter and resume.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  Thread* thread = Thread::current_or_null();

// Select the per-architecture name of the program-counter field in CONTEXT.
#if defined(_M_ARM64)
  #define PC_NAME Pc
#elif defined(_M_AMD64)
  #define PC_NAME Rip
#elif defined(_M_IX86)
  #define PC_NAME Eip
#else
  #error unknown architecture
#endif

  // Save pc in thread
  if (thread != nullptr && thread->is_Java_thread()) {
    thread->as_Java_thread()->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->PC_NAME);
  }

  // Set pc to handler
  exceptionInfo->ContextRecord->PC_NAME = (DWORD64)handler;

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
2328
2329
2330
// Used for PostMortemDump
2331
extern "C" void safepoints();
2332
extern "C" void find(int x);
2333
extern "C" void events();
2334
2335
// According to Windows API documentation, an illegal instruction sequence should generate
2336
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
2337
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
2338
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2339
2340
#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2341
2342
// From "Execution Protection in the Windows Operating System" draft 0.35
2343
// Once a system header becomes available, the "real" define should be
2344
// included or copied here.
2345
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2346
2347
// Windows Vista/2008 heap corruption check
2348
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2349
2350
// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2351
// C++ compiler contain this error code. Because this is a compiler-generated
2352
// error, the code is not listed in the Win32 API header files.
2353
// The code is actually a cryptic mnemonic device, with the initial "E"
2354
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
2355
// ASCII values of "msc".
2356
2357
#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
2358
2359
// Expand to a {name-string, code} pair for the table below.
#define def_excpt(val) { #val, (val) }

// Lookup table mapping Windows exception codes to their symbolic names,
// used by os::exception_name().
static const struct { const char* name; uint number; } exceptlabels[] = {
  def_excpt(EXCEPTION_ACCESS_VIOLATION),
  def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
  def_excpt(EXCEPTION_BREAKPOINT),
  def_excpt(EXCEPTION_SINGLE_STEP),
  def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
  def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
  def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
  def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
  def_excpt(EXCEPTION_FLT_OVERFLOW),
  def_excpt(EXCEPTION_FLT_STACK_CHECK),
  def_excpt(EXCEPTION_FLT_UNDERFLOW),
  def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_INT_OVERFLOW),
  def_excpt(EXCEPTION_PRIV_INSTRUCTION),
  def_excpt(EXCEPTION_IN_PAGE_ERROR),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
  def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
  def_excpt(EXCEPTION_STACK_OVERFLOW),
  def_excpt(EXCEPTION_INVALID_DISPOSITION),
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt
2390
2391
const char* os::exception_name(int exception_code, char *buf, size_t size) {
2392
uint code = static_cast<uint>(exception_code);
2393
for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2394
if (exceptlabels[i].number == code) {
2395
jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2396
return buf;
2397
}
2398
}
2399
2400
return NULL;
2401
}
2402
2403
//-----------------------------------------------------------------------------
// Recover from the integer-overflow trap raised by min_jint / -1 (or the
// 64-bit equivalent): write the mathematically-correct result into the
// context's registers, skip past the divide instruction, and resume.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#if defined(_M_ARM64)
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Sp;
  assert(pc[0] == 0x83, "not an sdiv opcode"); //Fixme did i get the right opcode?
  assert(ctx->X4 == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Pc = (uint64_t)pc + 4;        // idiv reg, reg, reg is 4 bytes
  ctx->X4 = (uint64_t)min_jint;      // result
  ctx->X5 = (uint64_t)0;             // remainder
  // Continue the execution
#elif defined(_M_AMD64)
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // The instruction may carry a REX prefix; validate opcode and operand form.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;        // idiv reg, reg is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;        // REX idiv reg, reg is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;             // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;        // idiv reg, reg is 2 bytes
  ctx->Eax = (DWORD)min_jint;      // result
  ctx->Edx = (DWORD)0;             // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}
2447
2448
#if defined(_M_AMD64) || defined(_M_IX86)
2449
//-----------------------------------------------------------------------------
2450
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2451
PCONTEXT ctx = exceptionInfo->ContextRecord;
2452
#ifndef _WIN64
2453
// handle exception caused by native method modifying control word
2454
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2455
2456
switch (exception_code) {
2457
case EXCEPTION_FLT_DENORMAL_OPERAND:
2458
case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2459
case EXCEPTION_FLT_INEXACT_RESULT:
2460
case EXCEPTION_FLT_INVALID_OPERATION:
2461
case EXCEPTION_FLT_OVERFLOW:
2462
case EXCEPTION_FLT_STACK_CHECK:
2463
case EXCEPTION_FLT_UNDERFLOW:
2464
jint fp_control_word = (* (jint*) StubRoutines::x86::addr_fpu_cntrl_wrd_std());
2465
if (fp_control_word != ctx->FloatSave.ControlWord) {
2466
// Restore FPCW and mask out FLT exceptions
2467
ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2468
// Mask out pending FLT exceptions
2469
ctx->FloatSave.StatusWord &= 0xffffff00;
2470
return EXCEPTION_CONTINUE_EXECUTION;
2471
}
2472
}
2473
2474
if (prev_uef_handler != NULL) {
2475
// We didn't handle this exception so pass it to the previous
2476
// UnhandledExceptionFilter.
2477
return (prev_uef_handler)(exceptionInfo);
2478
}
2479
#else // !_WIN64
2480
// On Windows, the mxcsr control bits are non-volatile across calls
2481
// See also CR 6192333
2482
//
2483
jint MxCsr = INITIAL_MXCSR;
2484
// we can't use StubRoutines::x86::addr_mxcsr_std()
2485
// because in Win64 mxcsr is not saved there
2486
if (MxCsr != ctx->MxCsr) {
2487
ctx->MxCsr = MxCsr;
2488
return EXCEPTION_CONTINUE_EXECUTION;
2489
}
2490
#endif // !_WIN64
2491
2492
return EXCEPTION_CONTINUE_SEARCH;
2493
}
2494
#endif
2495
2496
// Forward a fatal exception to the VM error reporter (hs_err file etc.).
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOSErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
2503
2504
//-----------------------------------------------------------------------------
// Top-level structured-exception filter for the VM. Classifies the Windows
// exception (stack overflow, access violation, in-page error, divide traps,
// FP traps, ...) and either redirects execution to a VM stub via
// Handle_Exception, resumes execution, or falls through to error reporting /
// the next OS handler. Ordering of the checks below is significant.
JNIEXPORT
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
  DWORD exception_code = exception_record->ExceptionCode;
#if defined(_M_ARM64)
  address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    int exception_subcode = (int) exception_record->ExceptionInformation[0];
    address addr = (address) exception_record->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here. Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM. Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop. This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
      report_error(t, exception_code, addr, exception_record,
                   exceptionInfo->ContextRecord);
#endif
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

#if defined(_M_AMD64) || defined(_M_IX86)
  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }
#endif

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = t->as_Java_thread();
    bool in_java = thread->thread_state() == _thread_in_Java;
    bool in_native = thread->thread_state() == _thread_in_native;
    bool in_vm = thread->thread_state() == _thread_in_vm;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      StackOverflow* overflow_state = thread->stack_overflow_state();
      if (overflow_state->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation. The o/s has unprotected the first yellow
        // zone page for us. Note: must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(!in_vm, "Undersized StackShadowPages");
        overflow_state->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        overflow_state->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
        report_error(t, exception_code, pc, exception_record,
                     exceptionInfo->ContextRecord);
#endif
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      if (in_java) {
        // Either stack overflow or null pointer exception.
        address addr = (address) exception_record->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (SafepointMechanism::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
#ifdef _WIN64
        // If it's a legal stack address map the entire region in
        if (thread->is_in_usable_stack(addr)) {
          addr = (address)((uintptr_t)addr &
                           (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
          os::commit_memory((char *)addr, thread->stack_base() - addr,
                            !ExecMem);
          return EXCEPTION_CONTINUE_EXECUTION;
        }
#endif
        // Null pointer exception.
        if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
          address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
          if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
        }
        report_error(t, exception_code, pc, exception_record,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
      if (slowcase_pc != (address)-1) {
        return Handle_Exception(exceptionInfo, slowcase_pc);
      }
#endif

      // Stack overflow or null pointer exception in native code.
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
      report_error(t, exception_code, pc, exception_record,
                   exceptionInfo->ContextRecord);
#endif
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
      CompiledMethod* nm = NULL;
      if (in_java) {
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
      }

      // Unsafe memory accesses (sun.misc.Unsafe / arraycopy) are allowed to
      // fault on mapped-file pages; continue at the designated recovery pc.
      bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
      if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
          (nm != NULL && nm->has_unsafe_access())) {
        address next_pc = Assembler::locate_next_instruction(pc);
        if (is_unsafe_arraycopy) {
          next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
        }
        return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
      }
    }

#ifdef _M_ARM64
    if (in_java &&
        (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
         exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant");
        }
        return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
      }
    }
#endif

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }

#if defined(_M_AMD64) || defined(_M_IX86)
    if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif
  }

#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exception_record,
                 exceptionInfo->ContextRecord);
  }
#endif
  return EXCEPTION_CONTINUE_SEARCH;
}
2767
2768
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
// First-chance vectored handler. Only dispatches to topLevelExceptionFilter()
// when the faulting pc lies inside the code cache; all other exceptions are
// left to the next handler in the chain.
// Note: the unused local 'exceptionRecord' from the original version has been
// removed.
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
#if defined(_M_ARM64)
  address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif

  // Fast path for code part of the code cache
  if (CodeCache::low_bound() <= pc && pc < CodeCache::high_bound()) {
    return topLevelExceptionFilter(exceptionInfo);
  }

  // If the exception occurred in the codeCache, pass control
  // to our normal exception handler.
  CodeBlob* cb = CodeCache::find_blob(pc);
  if (cb != NULL) {
    return topLevelExceptionFilter(exceptionInfo);
  }

  return EXCEPTION_CONTINUE_SEARCH;
}
#endif
2794
2795
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
// Last-chance unhandled-exception filter: report the error (unless
// InterceptOSException is set or it is a breakpoint) and then chain to the
// previously installed filter, if any.
LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // The original used 'goto exit', which jumped over the initializations of
  // 'pc' and 't' — ill-formed in C++ (MSVC error C2362). Use a plain
  // conditional instead; behavior is unchanged.
  if (!InterceptOSException) {
    DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
#if defined(_M_ARM64)
    address pc = (address)exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
    address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
    address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
    Thread* t = Thread::current_or_null_safe();

    if (exception_code != EXCEPTION_BREAKPOINT) {
      report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
                   exceptionInfo->ContextRecord);
    }
  }
  return previousUnhandledExceptionFilter ? previousUnhandledExceptionFilter(exceptionInfo) : EXCEPTION_CONTINUE_SEARCH;
}
#endif
2816
2817
#ifndef _WIN64
2818
// Special care for fast JNI accessors.
2819
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2820
// the heap gets shrunk before the field access.
2821
// Need to install our own structured exception handler since native code may
2822
// install its own.
2823
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2824
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2825
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2826
address pc = (address) exceptionInfo->ContextRecord->Eip;
2827
address addr = JNI_FastGetField::find_slowcase_pc(pc);
2828
if (addr != (address)-1) {
2829
return Handle_Exception(exceptionInfo, addr);
2830
}
2831
}
2832
return EXCEPTION_CONTINUE_SEARCH;
2833
}
2834
2835
// Wraps a fast JNI Get<Result>Field accessor in an SEH __try/__except block so
// that an access violation raised inside the accessor is redirected to the
// accessor's registered slow path (see fastJNIAccessorExceptionFilter above).
// The trailing 'return 0' is only reached if the filter declines the
// exception. (No '//' comments inside the macro body: they would swallow the
// line-continuation backslashes.)
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \
  Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \
                                                     jobject obj, \
                                                     jfieldID fieldID) { \
    __try { \
      return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \
                                                                 obj, \
                                                                 fieldID); \
    } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \
                                              _exception_info())) { \
    } \
    return 0; \
  }
2848
2849
// Instantiate one SEH-protected wrapper per primitive fast accessor.
DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)
2858
// Map a primitive BasicType to its SEH-wrapped fast accessor entry point.
// The default branch is unreachable for valid inputs; the trailing return
// only silences the compiler after ShouldNotReachHere().
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  switch (type) {
    case T_BOOLEAN: return (address)jni_fast_GetBooleanField_wrapper;
    case T_BYTE: return (address)jni_fast_GetByteField_wrapper;
    case T_CHAR: return (address)jni_fast_GetCharField_wrapper;
    case T_SHORT: return (address)jni_fast_GetShortField_wrapper;
    case T_INT: return (address)jni_fast_GetIntField_wrapper;
    case T_LONG: return (address)jni_fast_GetLongField_wrapper;
    case T_FLOAT: return (address)jni_fast_GetFloatField_wrapper;
    case T_DOUBLE: return (address)jni_fast_GetDoubleField_wrapper;
    default: ShouldNotReachHere();
  }
  return (address)-1;
}
#endif
2873
2874
// Virtual Memory

// Size in bytes of a single virtual memory page (delegates to os::win32).
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity at which VirtualAlloc reservations are made, as reported by
// the OS (delegates to os::win32).
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2880
2881
// Windows large page support is available on Windows 2003. In order to use
2882
// large page memory, the administrator must first assign additional privilege
2883
// to the user:
2884
// + select Control Panel -> Administrative Tools -> Local Security Policy
2885
// + select Local Policies -> User Rights Assignment
2886
// + double click "Lock pages in memory", add users and/or groups
2887
// + reboot
2888
// Note the above steps are needed for administrator as well, as administrators
2889
// by default do not have the privilege to lock pages in memory.
2890
//
2891
// Note about Windows 2003: although the API supports committing large page
2892
// memory on a page-by-page basis and VirtualAlloc() returns success under this
2893
// scenario, I found through experiment it only uses large page if the entire
2894
// memory region is reserved and committed in a single VirtualAlloc() call.
2895
// This makes Windows large page support more or less like Solaris ISM, in
2896
// that the entire heap must be committed upfront. This probably will change
2897
// in the future, if so the code below needs to be revisited.
2898
2899
#ifndef MEM_LARGE_PAGES
2900
#define MEM_LARGE_PAGES 0x20000000
2901
#endif
2902
2903
// Container for NUMA node list info
//
// Holds the NUMA nodes whose processor masks intersect this process'
// affinity mask. A default-constructed holder is empty; the list is
// populated by build(), which must run after the Win32 function pointers
// used below are set up.
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  // Release the node array (FREE_C_HEAP_ARRAY tolerates NULL).
  void free_node_list() {
    FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // (Re)build the used-node list from the current process affinity mask.
  // Returns true only when more than one NUMA node is usable, i.e. when
  // interleaving is actually worthwhile; false on any Win32 API failure.
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    // Drop any previously built list before re-allocating.
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      // Keep only nodes sharing at least one processor with our affinity mask.
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  // Number of usable nodes found by build().
  int get_count() { return _numa_used_node_count; }
  // Map index n (0-based) to a NUMA node id.
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;
2949
2950
// Large page size in bytes, decided in os::large_page_init();
// 0 when large pages are disabled or unavailable.
static size_t _large_page_size = 0;
2951
2952
static bool request_lock_memory_privilege() {
2953
HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2954
os::current_process_id());
2955
2956
bool success = false;
2957
HANDLE hToken = NULL;
2958
LUID luid;
2959
if (hProcess != NULL &&
2960
OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2961
LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2962
2963
TOKEN_PRIVILEGES tp;
2964
tp.PrivilegeCount = 1;
2965
tp.Privileges[0].Luid = luid;
2966
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2967
2968
// AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2969
// privilege. Check GetLastError() too. See MSDN document.
2970
if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2971
(GetLastError() == ERROR_SUCCESS)) {
2972
success = true;
2973
}
2974
}
2975
2976
// Cleanup
2977
if (hProcess != NULL) {
2978
CloseHandle(hProcess);
2979
}
2980
if (hToken != NULL) {
2981
CloseHandle(hToken);
2982
}
2983
2984
return success;
2985
}
2986
2987
static bool numa_interleaving_init() {
2988
bool success = false;
2989
2990
// print a warning if UseNUMAInterleaving flag is specified on command line
2991
bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);
2992
2993
#define WARN(msg) if (warn_on_failure) { warning(msg); }
2994
2995
// NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
2996
size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
2997
NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);
2998
2999
if (!numa_node_list_holder.build()) {
3000
WARN("Process does not cover multiple NUMA nodes.");
3001
WARN("...Ignoring UseNUMAInterleaving flag.");
3002
return false;
3003
}
3004
3005
if (log_is_enabled(Debug, os, cpu)) {
3006
Log(os, cpu) log;
3007
log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
3008
for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
3009
log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i));
3010
}
3011
}
3012
3013
#undef WARN
3014
3015
return true;
3016
}
3017
3018
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
// * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
// * UseNUMAInterleaving requires a separate node for each piece
//
// Returns the start of the allocated range, or NULL on failure (in which
// case any partially committed chunks have been released). When 'flags'
// includes MEM_COMMIT, NMT records the whole range as reserved+committed.
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) virtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  // Release the probe reservation immediately; its address is reused below.
  // Note: between this release and the per-chunk allocs another thread could
  // in principle grab the range, in which case the chunk allocs fail.
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) virtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)virtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
3135
3136
// Decide the large page size to use, or 0 if large pages cannot be used.
// Checks the lock-pages privilege, the processor's minimum large page size,
// a 4MB cap on x86/x64, and an explicit LargePageSizeInBytes override (only
// honored when it is a multiple of the OS minimum).
static size_t large_page_init_decide_size() {
  // print a warning if any large page related flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                         !FLAG_IS_DEFAULT(LargePageSizeInBytes);

#define WARN(msg) if (warn_on_failure) { warning(msg); }

  if (!request_lock_memory_privilege()) {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    return 0;
  }

  size_t size = GetLargePageMinimum();
  if (size == 0) {
    WARN("Large page is not supported by the processor.");
    return 0;
  }

#if defined(IA32) || defined(AMD64)
  if (size > 4*M || LargePageSizeInBytes > 4*M) {
    WARN("JVM cannot use large pages bigger than 4mb.");
    return 0;
  }
#endif

  // Honor an explicit size request only if it is a multiple of the minimum.
  if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % size == 0) {
    size = LargePageSizeInBytes;
  }

#undef WARN

  return size;
}
3169
3170
void os::large_page_init() {
3171
if (!UseLargePages) {
3172
return;
3173
}
3174
3175
_large_page_size = large_page_init_decide_size();
3176
const size_t default_page_size = (size_t) vm_page_size();
3177
if (_large_page_size > default_page_size) {
3178
_page_sizes.add(_large_page_size);
3179
}
3180
3181
UseLargePages = _large_page_size != 0;
3182
}
3183
3184
// Create a temporary file under 'dir' to back the Java heap. The file is
// opened O_TEMPORARY so Windows deletes it when the last handle closes.
// Returns the open file descriptor, or -1 on failure (a warning is printed;
// malloc failure aborts initialization).
int os::create_file_for_heap(const char* dir) {

  // Template for _mktemp: "<dir>/jvmheap.XXXXXX".
  const char name_template[] = "/jvmheap.XXXXXX";

  size_t fullname_len = strlen(dir) + strlen(name_template);
  char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
  if (fullname == NULL) {
    vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
    return -1;
  }
  int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
  assert((size_t)n == fullname_len, "Unexpected number of characters in string");

  os::native_path(fullname);

  // Replace the XXXXXX placeholder with a unique suffix.
  char *path = _mktemp(fullname);
  if (path == NULL) {
    warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
    os::free(fullname);
    return -1;
  }

  int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);

  os::free(fullname);
  if (fd < 0) {
    warning("Problem opening file for heap (%s)", os::strerror(errno));
    return -1;
  }
  return fd;
}
3215
3216
// If 'base' is not NULL, function will return NULL if it cannot get 'base'
//
// Map 'size' bytes of the file behind descriptor 'fd' read/write at 'base'
// (or at an OS-chosen address when base is NULL). Exits the VM during
// initialization if the file mapping object cannot be created.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  HANDLE fh = (HANDLE)_get_osfhandle(fd);
#ifdef _LP64
  // CreateFileMapping takes the size split into high/low 32-bit halves.
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
                                         (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
#else
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
                                         0, (DWORD)size, NULL);
#endif
  if (fileMapping == NULL) {
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
    }
    else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    return NULL;
  }

  // The view stays valid after the mapping handle is closed.
  LPVOID addr = mapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  CloseHandle(fileMapping);

  return (char*)addr;
}
3245
3246
// Replace an existing anonymous mapping at [base, base+size) with a mapping
// backed by file 'fd' at the same address. The release and re-map are not
// atomic; Windows serializes remapping between threads (see
// map_or_reserve_memory_aligned above for the same pattern).
char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");
  assert(base != NULL, "Base address cannot be NULL");

  release_memory(base, size);
  return map_memory_to_file(base, size, fd);
}
3253
3254
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
//
// Reserve (or file-map when file_desc != -1) 'size' bytes aligned to
// 'alignment': over-reserve by 'alignment', compute the aligned sub-address,
// release the whole range, then try to re-take just the aligned part.
// Retries because another thread may steal the range in between.
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;
  static const int max_attempts = 20;

  for (int attempt = 0; attempt < max_attempts && aligned_base == NULL; attempt ++) {
    // Over-reserve so an aligned sub-range of 'size' bytes must exist inside.
    char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc) :
                                         os::reserve_memory(extra_size);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Windows cannot trim a reservation, so release the whole over-sized range.
    bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
                                  os::release_memory(extra_base, extra_size);
    assert(rc, "release failed");
    if (!rc) {
      return NULL;
    }

    // Attempt to map, into the just vacated space, the slightly smaller aligned area.
    // Which may fail, hence the loop.
    aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc) :
                                     os::attempt_reserve_memory_at(aligned_base, size);
  }

  assert(aligned_base != NULL, "Did not manage to re-map after %d attempts?", max_attempts);

  return aligned_base;
}
3294
3295
// Reserve 'size' bytes aligned to 'alignment' (anonymous memory).
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
  // exec can be ignored
  return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
}

// Same as reserve_memory_aligned(), but the range is backed by file 'fd'.
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd) {
  return map_or_reserve_memory_aligned(size, alignment, fd);
}

// Reserve 'bytes' at an OS-chosen address (NULL hint).
char* os::pd_reserve_memory(size_t bytes, bool exec) {
  return pd_attempt_reserve_memory_at(NULL /* addr */, bytes, exec);
}
3307
3308
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
// With NUMA interleaving (and small pages) the range is reserved chunk-wise
// across nodes via allocate_pages_individually(); otherwise a single
// VirtualAlloc reservation is made. Returns NULL on failure.
char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)virtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
3340
3341
// Map 'bytes' of file 'file_desc' at 'requested_addr' (NULL on failure).
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}

// Large page size decided in large_page_init(); 0 when unavailable.
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}
3360
3361
// Reserve and commit 'size' bytes of large pages chunk by chunk (needed for
// NUMA interleaving or when UseLargePagesIndividualAllocation is set).
// Returns NULL after printing an appropriate warning on failure.
static char* reserve_large_pages_individually(size_t size, char* req_addr, bool exec) {
  log_debug(pagesize)("Reserving large pages individually.");

  const DWORD prot = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  char* result = allocate_pages_individually(size, req_addr, flags, prot,
                                             LargePagesIndividualAllocationInjectError);
  if (result != NULL) {
    return result;
  }

  // give an appropriate warning message
  if (UseNUMAInterleaving) {
    warning("NUMA large page allocation failed, UseLargePages flag ignored");
  }
  if (UseLargePagesIndividualAllocation) {
    warning("Individually allocated large pages failed, "
            "use -XX:-UseLargePagesIndividualAllocation to turn off");
  }
  return NULL;
}
3381
3382
// Reserve and commit 'size' bytes of large pages in one VirtualAlloc call —
// the only mode in which Windows actually backs the range with large pages.
static char* reserve_large_pages_single_range(size_t size, char* req_addr, bool exec) {
  log_debug(pagesize)("Reserving large pages in a single large chunk.");

  const DWORD protection = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  return (char *) virtualAlloc(req_addr, size,
                               MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES,
                               protection);
}
3390
3391
// Reserve 'size' bytes of large pages at req_addr (or anywhere if NULL).
// with large pages, there are two cases where we need to use Individual Allocation
// 1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003)
// 2) NUMA Interleaving is enabled, in which case we use a different node for each page
static char* reserve_large_pages(size_t size, char* req_addr, bool exec) {
  const bool individually = UseLargePagesIndividualAllocation || UseNUMAInterleaving;
  return individually ? reserve_large_pages_individually(size, req_addr, exec)
                      : reserve_large_pages_single_range(size, req_addr, exec);
}
3400
3401
// Probe the address space for a spot where a reservation of 'size' bytes can
// be placed with the requested alignment. The returned address is only a
// hint: the temporary reservation backing it is released before returning, so
// another thread may take the range before the caller re-reserves it.
// Returns NULL when no address space is available.
static char* find_aligned_address(size_t size, size_t alignment) {
  // Temporary reserve memory large enough to ensure we can get the requested
  // alignment and still fit the reservation.
  char* addr = (char*) virtualAlloc(NULL, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
  if (addr == NULL) {
    // Fix: the original fed a NULL probe result into align_up()/virtualFree()
    // and returned NULL-aligned garbage; report the failure explicitly.
    return NULL;
  }
  // Align the address to the requested alignment.
  char* aligned_addr = align_up(addr, alignment);
  // Free the temporary reservation.
  virtualFree(addr, 0, MEM_RELEASE);

  return aligned_addr;
}
3412
3413
// Reserve 'size' bytes of large pages at an address aligned to 'alignment'.
static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exec) {
  log_debug(pagesize)("Reserving large pages at an aligned address, alignment=" SIZE_FORMAT "%s",
                      byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

  // Will try to find a suitable address at most 20 times. The reason we need to try
  // multiple times is that between finding the aligned address and trying to commit
  // the large pages another thread might have reserved an overlapping region.
  const int attempts_limit = 20;
  for (int attempt = 0; attempt < attempts_limit; attempt++) {
    // Probe for a candidate address, then attempt the real reservation there.
    char* candidate = find_aligned_address(size, alignment);
    candidate = reserve_large_pages(size, candidate, exec);
    if (candidate != NULL) {
      // Reservation at the aligned address succeeded.
      guarantee(is_aligned(candidate, alignment), "Must be aligned");
      return candidate;
    }
  }

  log_debug(pagesize)("Failed reserving large pages at aligned address");
  return NULL;
}
3437
3438
// Reserve 'bytes' using large pages, optionally at 'addr' and with an
// alignment that may exceed the large page size (e.g. G1 heap regions).
// Returns NULL when 'bytes' is not a whole number of large pages, since
// Windows cannot mix page sizes within one mapping.
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* addr,
                                    bool exec) {
  assert(UseLargePages, "only for large pages");
  assert(page_size == os::large_page_size(), "Currently only support one large page size on Windows");
  assert(is_aligned(addr, alignment), "Must be");
  assert(is_aligned(addr, page_size), "Must be");

  if (!is_aligned(bytes, page_size)) {
    // Fallback to small pages, Windows does not support mixed mappings.
    return NULL;
  }

  // The requested alignment can be larger than the page size, for example with G1
  // the alignment is bound to the heap region size. So this reservation needs to
  // ensure that the requested alignment is met. When there is a requested address
  // this solves it self, since it must be properly aligned already.
  if (addr == NULL && alignment > page_size) {
    return reserve_large_pages_aligned(bytes, alignment, exec);
  }

  // No additional requirements, just reserve the large pages.
  return reserve_large_pages(bytes, addr, exec);
}
3461
3462
// Release a large-page reservation; on Windows this takes the same path as
// releasing ordinary memory.
bool os::pd_release_memory_special(char* base, size_t bytes) {
  assert(base != NULL, "Sanity check");
  return pd_release_memory(base, bytes);
}

// No Windows-specific statistics to print.
void os::print_statistics() {
}
3469
3470
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
3471
int err = os::get_last_error();
3472
char buf[256];
3473
size_t buf_len = os::lasterror(buf, sizeof(buf));
3474
warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
3475
", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
3476
exec, buf_len != 0 ? buf : "<no_error_string>", err);
3477
}
3478
3479
// Commit [addr, addr+bytes). Without NUMA interleaving this is a single
// VirtualAlloc(MEM_COMMIT); with interleaving the range may span several
// reservations, so each underlying region (discovered via VirtualQuery) is
// committed separately. When 'exec' is set, PAGE_EXECUTE_READWRITE is applied
// via VirtualProtect after the commit. Returns false on any failure.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (virtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (virtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
3540
3541
// Commit with an alignment hint; the hint is meaningless on Windows.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}

// Commit or abort the VM with an out-of-memory error carrying 'mesg'.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}

// Same as above with an alignment hint, which is meaningless on Windows.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
3562
3563
bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
3564
if (bytes == 0) {
3565
// Don't bother the OS with noops.
3566
return true;
3567
}
3568
assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3569
assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3570
return (virtualFree(addr, bytes, MEM_DECOMMIT) == TRUE);
3571
}
3572
3573
// Release the reservation(s) covering [addr, addr+bytes).
// Given a range we are to release, we require a mapping to start at the beginning of that range;
// if NUMA or LP we allow the range to contain multiple mappings, which have to cover the range
// completely; otherwise the range must match an OS mapping exactly.
// Returns false (after logging, and asserting in debug builds) when the
// actual mappings don't match these expectations or VirtualFree fails.
bool os::pd_release_memory(char* addr, size_t bytes) {
  address start = (address)addr;
  address end = start + bytes;
  os::win32::mapping_info_t mi;
  const bool multiple_mappings_allowed = UseLargePagesIndividualAllocation || UseNUMAInterleaving;
  address p = start;
  bool first_mapping = true;

  do {
    // Find mapping and check it
    const char* err = NULL;
    if (!os::win32::find_mapping(p, &mi)) {
      err = "no mapping found";
    } else {
      if (first_mapping) {
        // The first mapping must begin exactly at 'start'; its size must
        // either match 'bytes' exactly, or (when chunked reservations are
        // possible) not extend past the requested range.
        if (mi.base != start) {
          err = "base address mismatch";
        }
        if (multiple_mappings_allowed ? (mi.size > bytes) : (mi.size != bytes)) {
          err = "size mismatch";
        }
      } else {
        assert(p == mi.base && mi.size > 0, "Sanity");
        if (mi.base + mi.size > end) {
          err = "mapping overlaps end";
        }
        if (mi.size == 0) {
          err = "zero length mapping?"; // Should never happen; just to prevent endlessly looping in release.
        }
      }
    }
    // Handle mapping error. We assert in debug, unconditionally print a warning in release.
    if (err != NULL) {
      log_warning(os)("bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
#ifdef ASSERT
      os::print_memory_mappings((char*)start, bytes, tty);
      assert(false, "bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
#endif
      return false;
    }
    // Free this range
    if (virtualFree(p, 0, MEM_RELEASE) == FALSE) {
      return false;
    }
    first_mapping = false;
    // Continue with the next mapping, if any, until the whole range is freed.
    p = mi.base + mi.size;
  } while (p < end);

  return true;
}
3626
3627
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
3628
return os::commit_memory(addr, size, !ExecMem);
3629
}
3630
3631
// Undo pd_create_stack_guard_pages() by uncommitting the range.
bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
3634
3635
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3636
uint count = 0;
3637
bool ret = false;
3638
size_t bytes_remaining = bytes;
3639
char * next_protect_addr = addr;
3640
3641
// Use VirtualQuery() to get the chunk size.
3642
while (bytes_remaining) {
3643
MEMORY_BASIC_INFORMATION alloc_info;
3644
if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3645
return false;
3646
}
3647
3648
size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3649
// We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3650
// but we don't distinguish here as both cases are protected by same API.
3651
ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3652
warning("Failed protecting pages individually for chunk #%u", count);
3653
if (!ret) {
3654
return false;
3655
}
3656
3657
bytes_remaining -= bytes_to_protect;
3658
next_protect_addr += bytes_to_protect;
3659
count++;
3660
}
3661
return ret;
3662
}
3663
3664
// Set protections specified
3665
// Set the protection of [addr, addr + bytes) to 'prot'. If the range is not
// yet committed it is committed first (Win32 can only change protection on
// committed memory). Returns true on success; in debug builds a failure is
// logged with the OS error text.
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  // Translate the platform-independent protection into a Win32 page flag.
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW:   p = PAGE_READWRITE; break;
  case MEM_PROT_RWX:  p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  bool ret;
  if (UseNUMAInterleaving) {
    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
    // so we must protect the chunks individually.
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    // Debug aid: report the failing address/size plus the OS error string.
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}
3711
3712
bool os::guard_memory(char* addr, size_t bytes) {
3713
DWORD old_status;
3714
return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3715
}
3716
3717
bool os::unguard_memory(char* addr, size_t bytes) {
3718
DWORD old_status;
3719
return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3720
}
3721
3722
// The following operations have no platform-specific work to do on Windows;
// they are no-op or trivial stubs.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
bool os::numa_topology_changed() { return false; }
// At least one group is always reported, even when no NUMA nodes are known.
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
int os::numa_get_group_id() { return 0; }
3729
// Fill 'ids' with up to 'size' NUMA leaf group ids; returns the number
// of entries written.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    // UMA system: report the single pseudo-group 0.
    ids[0] = 0;
    return 1;
  }
  // Clamp the request to the number of groups actually present.
  const size_t n = MIN2(size, numa_get_groups_num());
  for (int i = 0; i < (int)n; i++) {
    ids[i] = numa_node_list_holder.get_node_list_entry(i);
  }
  return n;
}
3743
3744
// Per-address NUMA group lookup is not implemented on Windows;
// every address is reported as belonging to group 0.
int os::numa_get_group_id_for_address(const void* address) {
  return 0;
}
3747
3748
// Page information queries are unsupported on this platform.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
3751
3752
// Page scanning is unsupported on this platform; report the whole range
// as scanned by returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}
3756
3757
// Return a value that can never be a valid address returned by
// reserve_memory — not even in its subfields (as defined by the CPU
// immediate fields, if the CPU splits constants across instructions).
char* os::non_memory_address_word() {
#ifdef _M_ARM64
  // AArch64 addressing tops out at 48 bits, so all-ones in 48 bits is safe.
  return (char*)((1ull << 48) - 1);
#else
  return (char*)-1;
#endif
}
3768
3769
#define MAX_ERROR_COUNT 100
3770
#define SYS_THREAD_ERROR 0xffffffffUL
3771
3772
// Release a thread created suspended so it begins running.
void os::pd_start_thread(Thread* thread) {
  // ResumeThread returns the previous suspend count:
  //   0  - thread was not suspended
  //   1  - thread is running now
  //   >1 - thread remains suspended
  DWORD prev_suspend_count = ResumeThread(thread->osthread()->thread_handle());
  assert(prev_suspend_count != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3780
3781
3782
// Short sleep, direct OS call.
3783
//
3784
// ms = 0, means allow others (if any) to run.
3785
//
3786
// Uninterruptible millisecond sleep via a direct OS call.
// ms == 0 merely yields the remainder of the time slice.
void os::naked_short_sleep(jlong ms) {
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}
3790
3791
// Windows does not provide sleep functionality with nanosecond resolution, so we
3792
// try to approximate this with spinning combined with yielding if another thread
3793
// is ready to run on the current processor.
3794
// Windows has no nanosecond-resolution sleep, so approximate one by
// spinning, yielding to another ready thread whenever one exists.
void os::naked_short_nanosleep(jlong ns) {
  assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");

  const int64_t t0 = os::javaTimeNanos();
  do {
    // Yield if another thread is ready on this CPU; otherwise burn a
    // few cycles with a pause instruction.
    const BOOL switched = SwitchToThread();
    if (switched == 0) {
      SpinPause();
    }
  } while (os::javaTimeNanos() - t0 < ns);
}
3805
3806
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3807
void os::infinite_sleep() {
3808
while (true) { // sleep forever ...
3809
Sleep(100000); // ... 100 seconds at a time
3810
}
3811
}
3812
3813
typedef BOOL (WINAPI * STTSignature)(void);
3814
3815
void os::naked_yield() {
3816
// Consider passing back the return value from SwitchToThread().
3817
SwitchToThread();
3818
}
3819
3820
// Win32 only gives you access to seven real priorities at a time,
3821
// so we compress Java's ten down to seven. It would be better
3822
// if we dynamically adjusted relative priorities.
3823
3824
// Default mapping from Java thread priorities (index 0..CriticalPriority)
// to Windows thread priorities. May be overwritten from prio_policy1 by
// prio_init() when ThreadPriorityPolicy == 1.
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};
3838
3839
// Alternative (more aggressive) priority mapping, installed over
// os::java_to_os_priority by prio_init() when ThreadPriorityPolicy == 1.
// Note it reaches up to THREAD_PRIORITY_TIME_CRITICAL at the top end.
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3853
3854
static int prio_init() {
3855
// If ThreadPriorityPolicy is 1, switch tables
3856
if (ThreadPriorityPolicy == 1) {
3857
int i;
3858
for (i = 0; i < CriticalPriority + 1; i++) {
3859
os::java_to_os_priority[i] = prio_policy1[i];
3860
}
3861
}
3862
if (UseCriticalJavaThreadPriority) {
3863
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3864
}
3865
return 0;
3866
}
3867
3868
// Set the OS priority of 'thread'; a no-op success when thread
// priorities are disabled.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) {
    return OS_OK;
  }
  const BOOL ok = SetThreadPriority(thread->osthread()->thread_handle(), priority);
  return (ok != 0) ? OS_OK : OS_ERR;
}
3873
3874
// Query the OS priority of 'thread' into *priority_ptr. When thread
// priorities are disabled, report the normal priority unconditionally.
OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  const int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}
3888
3889
// GetCurrentThreadId() returns DWORD; it widens losslessly into intx.
intx os::current_thread_id() { return GetCurrentThreadId(); }
3891
3892
// Pid cached by os::init(); remains 0 until then.
static int _initial_pid = 0;

// Return the cached pid when available, otherwise query the C runtime.
int os::current_process_id() {
  return (_initial_pid ? _initial_pid : _getpid());
}
3897
3898
// Definitions of os::win32 static members; values are filled in by
// os::win32::initialize_system_info() during VM startup.
int os::win32::_vm_page_size = 0;
int os::win32::_vm_allocation_granularity = 0;
int os::win32::_processor_type = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int os::win32::_processor_level = 0;
julong os::win32::_physical_memory = 0;
size_t os::win32::_default_stack_size = 0;

intx os::win32::_os_thread_limit = 0;
volatile intx os::win32::_os_thread_count = 0;

bool os::win32::_is_windows_server = false;

// 6573254 (JDK bug id: race at process exit)
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool os::win32::_has_exit_bug = true;
3915
3916
// Query basic platform facts (page size, allocation granularity, processor
// info, physical memory, OS flavor, default stack size) and cache them in
// the os::win32 statics. Called once from os::init().
void os::win32::initialize_system_info() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  _vm_page_size = si.dwPageSize;
  _vm_allocation_granularity = si.dwAllocationGranularity;
  _processor_type = si.dwProcessorType;
  _processor_level = si.wProcessorLevel;
  set_processor_count(si.dwNumberOfProcessors);

  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);

  // also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
  // dwMemoryLoad (% of memory in use)
  GlobalMemoryStatusEx(&ms);
  _physical_memory = ms.ullTotalPhys;

  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Adjust MaxRAM according to the maximum virtual address space available.
    FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
  }

  OSVERSIONINFOEX oi;
  oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  GetVersionEx((OSVERSIONINFO*)&oi);
  switch (oi.dwPlatformId) {
  case VER_PLATFORM_WIN32_NT:
    {
      // NOTE(review): os_vers is computed but not used here — presumably a
      // leftover from earlier version-specific logic.
      int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
      if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
          oi.wProductType == VER_NT_SERVER) {
        _is_windows_server = true;
      }
    }
    break;
  default: fatal("Unknown platform");
  }

  _default_stack_size = os::current_stack_size();
  assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
  assert((_default_stack_size & (_vm_page_size - 1)) == 0,
         "stack size not a multiple of page size");

  initialize_performance_counter();
}
3961
3962
3963
// Load a system DLL by bare name, trying the system directory and then the
// Windows directory. Path components in 'name' are rejected. On failure,
// an explanatory message is placed in 'ebuf' and NULL is returned.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    // Append "\<name>" to the directory just obtained.
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // Both locations failed; report that to the caller.
  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}
4009
4010
#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
4011
#define EXIT_TIMEOUT 300000 /* 5 minutes */
4012
4013
// InitOnceExecuteOnce callback: initializes the critical section handed
// in through the context parameter. Always reports success.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  CRITICAL_SECTION* cs = (CRITICAL_SECTION*)pcrit_sect;
  InitializeCriticalSection(cs);
  return TRUE;
}
4017
4018
// Terminate the current thread (what == EPT_THREAD) or the whole process
// (EPT_PROCESS via exit(), anything else via _exit()), working around the
// Windows exit race (bug 6573254, see _has_exit_bug): threads exiting via
// _endthreadex() register themselves so a process-exiting thread can wait
// for them; unregistered latecomers are parked forever in SuspendThread().
// Normally does not return; the trailing return only silences the compiler.
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile DWORD process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (Atomic::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        // process_exiting records the id of the winning exiting thread.
        Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            // Thread is still exiting; keep its handle.
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            // One thread finished; close the gap it leaves in the array.
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait for the registered handles in batches of at most
        // MAXIMUM_WAIT_OBJECTS, closing each batch afterwards.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        Atomic::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}
4198
4199
#undef EXIT_TIMEOUT
4200
4201
void os::win32::setmode_streams() {
4202
_setmode(_fileno(stdin), _O_BINARY);
4203
_setmode(_fileno(stdout), _O_BINARY);
4204
_setmode(_fileno(stderr), _O_BINARY);
4205
}
4206
4207
void os::wait_for_keypress_at_exit(void) {
4208
if (PauseAtExit) {
4209
fprintf(stderr, "Press any key to continue...\n");
4210
fgetc(stdin);
4211
}
4212
}
4213
4214
4215
bool os::message_box(const char* title, const char* message) {
4216
int result = MessageBox(NULL, message, title,
4217
MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4218
return result == IDYES;
4219
}
4220
4221
#ifndef PRODUCT
4222
#ifndef _WIN64
4223
// Helpers to check whether NX protection is enabled
4224
// SEH filter used by nx_check_protection(): handle the exception only when
// it is an access violation caused by an execute attempt (NX/DEP fault).
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  PEXCEPTION_RECORD rec = pex->ExceptionRecord;
  const bool exec_violation =
      rec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      rec->NumberParameters > 0 &&
      rec->ExceptionInformation[0] == EXCEPTION_INFO_EXEC_VIOLATION;
  return exec_violation ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
}
4233
4234
// Probe whether NX/DEP is active by executing a one-byte 'ret' placed on
// the stack: with NX enabled the call faults and the filter above catches
// the execute violation.
void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 }; // 0xC3 is the x86 'ret' opcode
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
4244
#endif // _WIN64
4245
#endif // PRODUCT
4246
4247
// This is called _before_ the global arguments have been parsed
4248
// This is called _before_ the global arguments have been parsed:
// caches the pid, gathers system info, sets stream modes, and duplicates
// a real handle for the main thread (GetCurrentThread() is only a pseudo
// handle valid in the calling thread).
void os::init(void) {
  _initial_pid = _getpid();

  win32::initialize_system_info();
  win32::setmode_streams();
  _page_sizes.add(win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}
4269
4270
// To install functions for atexit processing
4271
extern "C" {
  // atexit() requires a plain C function; this wrapper lets the C++
  // perfMemory_exit() be registered from os::init_2().
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4276
4277
static jint initSock();
4278
4279
4280
// this is called _after_ the global arguments have been parsed
4281
// This is called _after_ the global arguments have been parsed: installs
// exception handlers, validates/configures thread stack sizes, registers
// atexit hooks, initializes priorities, NUMA interleaving, sockets, signal
// support and the SetThreadDescription binding.
// Returns JNI_OK, or JNI_ERR if the stack size is too small or socket
// initialization fails.
jint os::init_2(void) {

  // This could be set any time but all platforms
  // have to set it the same so we have to mirror Solaris.
  DEBUG_ONLY(os::set_mutex_init_done();)

  // Setup Windows Exceptions

#if defined(USE_VECTORED_EXCEPTION_HANDLING)
  topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
  previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
#endif

  // for debugging float code generation bugs
#if defined(ASSERT) && !defined(_WIN64)
  static long fp_control_word = 0;
  __asm { fstcw fp_control_word }
  // see Intel PPro Manual, Vol. 2, p 7-16
  const long invalid = 0x01;
  fp_control_word |= invalid;
  __asm { fldcw fp_control_word }
#endif

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add two 4K pages for compiler2 recursion in main thread.
  // Add in 4*BytesPerWord 4K pages to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackOverflow::stack_guard_zone_size() +
                     StackOverflow::stack_shadow_zone_size() +
                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());

  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space. We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  UseNUMA = false; // We don't fully support this yet

  if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
    if (!numa_interleaving_init()) {
      FLAG_SET_ERGO(UseNUMAInterleaving, false);
    } else if (!UseNUMAInterleaving) {
      // When NUMA requested, not-NUMA-aware allocations default to interleaving.
      FLAG_SET_ERGO(UseNUMAInterleaving, true);
    }
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  SymbolEngine::recalc_search_path();

  // Initialize data for jdk.internal.misc.Signal
  if (!ReduceSignalUsage) {
    jdk_misc_signal_init();
  }

  // Lookup SetThreadDescription - the docs state we must use runtime-linking of
  // kernelbase.dll, so that is what we do.
  HINSTANCE _kernelbase = LoadLibrary(TEXT("kernelbase.dll"));
  if (_kernelbase != NULL) {
    _SetThreadDescription =
      reinterpret_cast<SetThreadDescriptionFnPtr>(
        GetProcAddress(_kernelbase, "SetThreadDescription"));
#ifdef ASSERT
    _GetThreadDescription =
      reinterpret_cast<GetThreadDescriptionFnPtr>(
        GetProcAddress(_kernelbase, "GetThreadDescription"));
#endif
  }
  log_info(os, thread)("The SetThreadDescription API is%s available.", _SetThreadDescription == NULL ? " not" : "");


  return JNI_OK;
}
4418
4419
// combine the high and low DWORD into a ULONGLONG
4420
// Combine a high and a low DWORD into a single 64-bit ULONGLONG.
static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
  // Shift the high word into the upper 32 bits, then merge in the low word.
  return (((ULONGLONG)high_word) << (sizeof(high_word) * 8)) | low_word;
}
4426
4427
// Transfers data from a WIN32_FILE_ATTRIBUTE_DATA structure into a struct stat.
// Only st_size, the three time fields, and the dir/regular-file bit of st_mode
// are filled in; everything else is zeroed.
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
  // Start from a zeroed stat so all unmapped fields read as 0.
  ::memset((void*)sbuf, 0, sizeof(struct stat));
  sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
  // FILETIMEs are 64-bit values split into two DWORDs; recombine them.
  // NOTE(review): the stored values are raw FILETIME ticks (100ns units since
  // 1601), not POSIX epoch seconds -- confirm callers only compare them.
  sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
                                    file_data.ftLastWriteTime.dwLowDateTime);
  sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
                                    file_data.ftCreationTime.dwLowDateTime);
  sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
                                    file_data.ftLastAccessTime.dwLowDateTime);
  if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
    sbuf->st_mode |= S_IFDIR;
  } else {
    sbuf->st_mode |= S_IFREG;
  }
}
4443
4444
// Converts a narrow (ANSI code page) path to a freshly allocated wide string.
// On success *unicode_path is a NEW_C_HEAP_ARRAY the caller must free, and
// ERROR_SUCCESS is returned; on conversion failure EINVAL is returned and
// *unicode_path is untouched.
static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
  // Get required buffer size to convert to Unicode
  int unicode_path_len = MultiByteToWideChar(CP_ACP,
                                             MB_ERR_INVALID_CHARS,
                                             char_path, -1,
                                             NULL, 0);
  if (unicode_path_len == 0) {
    return EINVAL;
  }

  *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, unicode_path_len, mtInternal);

  // Second call performs the actual conversion into the sized buffer.
  int result = MultiByteToWideChar(CP_ACP,
                                   MB_ERR_INVALID_CHARS,
                                   char_path, -1,
                                   *unicode_path, unicode_path_len);
  assert(result == unicode_path_len, "length already checked above");

  return ERROR_SUCCESS;
}
4464
4465
// Resolves unicode_path to an absolute path in a freshly allocated buffer.
// On success *full_path is a NEW_C_HEAP_ARRAY the caller must free and
// ERROR_SUCCESS is returned; EINVAL is returned if the path cannot be
// resolved.
static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
  // Get required buffer size to convert to full path. The return
  // value INCLUDES the terminating null character.
  DWORD full_path_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
  if (full_path_len == 0) {
    return EINVAL;
  }

  *full_path = NEW_C_HEAP_ARRAY(WCHAR, full_path_len, mtInternal);

  // When the buffer has sufficient size, the return value EXCLUDES the
  // terminating null character
  DWORD result = GetFullPathNameW(unicode_path, full_path_len, *full_path, NULL);
  assert(result <= full_path_len, "length already checked above");

  return ERROR_SUCCESS;
}
4482
4483
// Chooses the long-path prefix to prepend to 'buf' (a native-form path):
//   - "C:\..."         -> "\\?\"            (drive-absolute path)
//   - "\\?\..."        -> "" and no fullpath resolution (already prefixed)
//   - "\\server\..."   -> "\\?\UNC" overwriting the first backslash
//   - anything else    -> "\\?\"            (resolved to absolute later)
// *prefix points to a string literal (never freed); *prefix_off tells the
// caller how many leading chars of the path the prefix replaces.
static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
  *prefix_off = 0;
  *needs_fullpath = true;

  if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
    *prefix = L"\\\\?\\";
  } else if (buf[0] == '\\' && buf[1] == '\\') {
    if (buf[2] == '?' && buf[3] == '\\') {
      // Already in \\?\ form: use verbatim, skip GetFullPathNameW.
      *prefix = L"";
      *needs_fullpath = false;
    } else {
      *prefix = L"\\\\?\\UNC";
      *prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
    }
  } else {
    *prefix = L"\\\\?\\";
  }
}
4501
4502
// Returns the given path as an absolute wide path in unc format. The returned path is NULL
// on error (with err being set accordingly) and should be freed via os::free() otherwise.
// additional_space is the size of space, in wchar_t, the function will additionally add to
// the allocation of return buffer (such that the size of the returned buffer is at least
// wcslen(buf) + 1 + additional_space).
static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
  if ((path == NULL) || (path[0] == '\0')) {
    err = ENOENT;
    return NULL;
  }

  // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
  size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
  char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
  strncpy(buf, path, buf_len);
  os::native_path(buf);

  // Decide which \\?\-style prefix the final path needs.
  LPWSTR prefix = NULL;
  int prefix_off = 0;
  bool needs_fullpath = true;
  set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);

  LPWSTR unicode_path = NULL;
  err = convert_to_unicode(buf, &unicode_path);
  FREE_C_HEAP_ARRAY(char, buf);
  if (err != ERROR_SUCCESS) {
    return NULL;
  }

  LPWSTR converted_path = NULL;
  if (needs_fullpath) {
    err = get_full_path(unicode_path, &converted_path);
  } else {
    // Path was already \\?\-prefixed; use it verbatim.
    converted_path = unicode_path;
  }

  LPWSTR result = NULL;
  if (converted_path != NULL) {
    size_t prefix_len = wcslen(prefix);
    // prefix_off chars of converted_path are consumed by the prefix.
    size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
    result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
    _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);

    // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
    result_len = wcslen(result);
    if ((result[result_len - 1] == L'\\') &&
        !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
      result[result_len - 1] = L'\0';
    }
  }

  // converted_path aliases unicode_path when no fullpath resolution was done;
  // avoid a double free in that case.
  if (converted_path != unicode_path) {
    FREE_C_HEAP_ARRAY(WCHAR, converted_path);
  }
  FREE_C_HEAP_ARRAY(WCHAR, unicode_path);

  return static_cast<wchar_t*>(result); // LPWSTR and wchar_t* are the same type on Windows.
}
4560
4561
int os::stat(const char *path, struct stat *sbuf) {
4562
errno_t err;
4563
wchar_t* wide_path = wide_abs_unc_path(path, err);
4564
4565
if (wide_path == NULL) {
4566
errno = err;
4567
return -1;
4568
}
4569
4570
WIN32_FILE_ATTRIBUTE_DATA file_data;;
4571
BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4572
os::free(wide_path);
4573
4574
if (!bret) {
4575
errno = ::GetLastError();
4576
return -1;
4577
}
4578
4579
file_attribute_data_to_stat(sbuf, file_data);
4580
return 0;
4581
}
4582
4583
// Opens 'file' requesting no access rights (sufficient for querying file
// information) while allowing concurrent readers. Returns
// INVALID_HANDLE_VALUE on failure; errno carries the path-conversion error
// when the path itself was bad.
static HANDLE create_read_only_file_handle(const char* file) {
  errno_t conv_err;
  wchar_t* wpath = wide_abs_unc_path(file, conv_err);
  if (wpath == NULL) {
    errno = conv_err;
    return INVALID_HANDLE_VALUE;
  }

  HANDLE result = ::CreateFileW(wpath, 0, FILE_SHARE_READ,
                                NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  os::free(wpath);
  return result;
}
4598
4599
// Returns true if file1 and file2 refer to the same underlying file.
// Cheap checks first (both NULL, string equality, native-path equality);
// otherwise opens both files and compares volume serial number + file index,
// which identify a file uniquely on a volume.
bool os::same_files(const char* file1, const char* file2) {

  if (file1 == NULL && file2 == NULL) {
    return true;
  }

  if (file1 == NULL || file2 == NULL) {
    return false;
  }

  if (strcmp(file1, file2) == 0) {
    return true;
  }

  // Normalize both paths to native form before comparing again.
  char* native_file1 = os::strdup_check_oom(file1);
  native_file1 = os::native_path(native_file1);
  char* native_file2 = os::strdup_check_oom(file2);
  native_file2 = os::native_path(native_file2);
  if (strcmp(native_file1, native_file2) == 0) {
    os::free(native_file1);
    os::free(native_file2);
    return true;
  }

  HANDLE handle1 = create_read_only_file_handle(native_file1);
  HANDLE handle2 = create_read_only_file_handle(native_file2);
  bool result = false;

  // if we could open both paths...
  if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
    BY_HANDLE_FILE_INFORMATION fileInfo1;
    BY_HANDLE_FILE_INFORMATION fileInfo2;
    if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
        ::GetFileInformationByHandle(handle2, &fileInfo2)) {
      // the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
      if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
          fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
          fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
        result = true;
      }
    }
  }

  // free the handles
  if (handle1 != INVALID_HANDLE_VALUE) {
    ::CloseHandle(handle1);
  }

  if (handle2 != INVALID_HANDLE_VALUE) {
    ::CloseHandle(handle2);
  }

  os::free(native_file1);
  os::free(native_file2);

  return result;
}
4656
4657
// Convert a FILETIME (two 32-bit halves) into a signed 64-bit tick count.
// The low half goes through julong to avoid sign extension.
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4659
4660
4661
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4662
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4663
// of a thread.
4664
//
4665
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4666
// the fast estimate available on the platform.
4667
4668
// current_thread_cpu_time() is not optimized for Windows yet
// Returns combined user+system CPU time of the current thread, in ns.
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}
4673
4674
// Returns combined user+system CPU time for the given thread, in ns.
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}
4678
4679
// Current-thread variant; user_sys_cpu_time selects user+sys vs user-only.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
4682
4683
// Returns the CPU time consumed by 'thread' in nanoseconds, or -1 if
// GetThreadTimes() fails. FILETIME ticks are 100ns units, hence the * 100.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}
4700
4701
// Describe the characteristics of the current-thread CPU timer for JVMTI:
// full 64-bit range, monotone (GetThreadTimes is absolute), user+system.
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->kind              = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
  info_ptr->max_value         = ALL_64_BITS;           // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;                 // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward  = false;                 // GetThreadTimes returns absolute time
}
4707
4708
// Describe the characteristics of the per-thread CPU timer for JVMTI:
// full 64-bit range, monotone (GetThreadTimes is absolute), user+system.
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->kind              = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
  info_ptr->max_value         = ALL_64_BITS;           // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;                 // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward  = false;                 // GetThreadTimes returns absolute time
}
4714
4715
bool os::is_thread_cpu_time_supported() {
4716
// see os::thread_cpu_time
4717
FILETIME CreationTime;
4718
FILETIME ExitTime;
4719
FILETIME KernelTime;
4720
FILETIME UserTime;
4721
4722
if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4723
&KernelTime, &UserTime) == 0) {
4724
return false;
4725
} else {
4726
return true;
4727
}
4728
}
4729
4730
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
4731
// It does have primitives (PDH API) to get CPU usage and run queue length.
4732
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
4733
// If we wanted to implement loadavg on Windows, we have a few options:
4734
//
4735
// a) Query CPU usage and run queue length and "fake" an answer by
4736
// returning the CPU usage if it's under 100%, and the run queue
4737
// length otherwise. It turns out that querying is pretty slow
4738
// on Windows, on the order of 200 microseconds on a fast machine.
4739
// Note that on the Windows the CPU usage value is the % usage
4740
// since the last time the API was called (and the first call
4741
// returns 100%), so we'd have to deal with that as well.
4742
//
4743
// b) Sample the "fake" answer using a sampling thread and store
4744
// the answer in a global variable. The call to loadavg would
4745
// just return the value of the global, avoiding the slow query.
4746
//
4747
// c) Sample a better answer using exponential decay to smooth the
4748
// value. This is basically the algorithm used by UNIX kernels.
4749
//
4750
// Note that sampling thread starvation could affect both (b) and (c).
4751
// Load average is not implemented on Windows; always fails (see the
// discussion above for implementation options that were considered).
int os::loadavg(double loadavg[], int nelem) {
  return -1;
}
4754
4755
4756
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}
4760
4761
// POSIX-style open() on a wide \\?\-prefixed absolute path (long-path safe).
// O_BINARY and O_NOINHERIT are always added. Returns the fd, or -1 with
// errno set on failure.
int os::open(const char *path, int oflag, int mode) {
  errno_t err;
  wchar_t* wide_path = wide_abs_unc_path(path, err);

  if (wide_path == NULL) {
    errno = err;
    return -1;
  }
  int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
  os::free(wide_path);

  if (fd == -1) {
    // NOTE(review): _wopen already sets CRT errno; this overwrites it with
    // the Win32 last-error code, matching other functions here -- confirm
    // callers expect Windows error codes.
    errno = ::GetLastError();
  }

  return fd;
}
4778
4779
// Wrap an already-open fd in a stdio FILE stream (thin _fdopen wrapper).
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}
4782
4783
// Thin wrapper over the CRT write(); returns the number of bytes written.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  return ::write(fd, buf, nBytes);
}
4786
4787
// Thin wrapper over the CRT close().
int os::close(int fd) {
  return ::close(fd);
}
4790
4791
// Terminate the whole process with the given exit code.
void os::exit(int num) {
  win32::exit_process_or_thread(win32::EPT_PROCESS, num);
}
4794
4795
// Is a (classpath) directory empty?
// Enumerates "<path>\*" and returns true if only "." and ".." are found.
// Returns false (with errno set) if the path is bad or enumeration fails.
bool os::dir_is_empty(const char* path) {
  errno_t err;
  // additional_space = 2 reserves room for the "\*" appended below.
  wchar_t* wide_path = wide_abs_unc_path(path, err, 2);

  if (wide_path == NULL) {
    errno = err;
    return false;
  }

  // Make sure we end with "\\*"
  if (wide_path[wcslen(wide_path) - 1] == L'\\') {
    wcscat(wide_path, L"*");
  } else {
    wcscat(wide_path, L"\\*");
  }

  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wide_path, &fd);
  os::free(wide_path);
  bool is_empty = true;

  if (f != INVALID_HANDLE_VALUE) {
    // NOTE(review): the entry returned by FindFirstFileW itself is never
    // examined; only FindNextFileW results are. For a directory listing the
    // first entry is presumably "." -- confirm before changing.
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  } else {
    errno = ::GetLastError();
  }

  return is_empty;
}
4833
4834
// create binary file, rewriting existing file if required
// With rewrite_existing the file is truncated; otherwise creation fails if
// the file already exists (_O_EXCL). Returns the fd or -1.
int os::create_binary_file(const char* path, bool rewrite_existing) {
  int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
  oflags |= rewrite_existing ? _O_TRUNC : _O_EXCL;
  return ::open(path, oflags, _S_IREAD | _S_IWRITE);
}
4840
4841
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
4845
4846
// move file pointer to the specified offset (absolute, from file start)
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
4850
4851
4852
// 64-bit lseek with caller-supplied whence (SEEK_SET/SEEK_CUR/SEEK_END).
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4855
4856
// Positioned read: read nBytes at the given absolute offset without moving
// the fd's file pointer (uses an OVERLAPPED offset with ReadFile).
// NOTE(review): returns 0 on failure rather than -1 -- callers presumably
// treat 0 and error alike; confirm before relying on the distinction.
ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset into the OVERLAPPED's two 32-bit halves.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  return result ? nread : 0;
}
4871
4872
4873
// This method is a slightly reworked copy of JDK's sysNativePath
4874
// from src/windows/hpi/src/path_md.c
4875
4876
// Convert a pathname to native format. On win32, this involves forcing all
4877
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
4878
// sometimes rejects '/') and removing redundant separators. The input path is
4879
// assumed to have been converted into the character encoding used by the local
4880
// system. Because this might be a double-byte encoding, care is taken to
4881
// treat double-byte lead characters correctly.
4882
//
4883
// This procedure modifies the given path in place, as the result is never
4884
// longer than the original. There is no error return; this operation always
4885
// succeeds.
4886
// Convert 'path' in place to native Windows form: force '\\' separators,
// collapse redundant separators, strip trailing separators/spaces, and
// preserve drive ("C:") and UNC ("\\\\host") prefixes. Never fails; returns
// its argument. (See the detailed commentary above this function.)
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL; // If a drive specifier is found, this will
                      // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier. This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path"). As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator. The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\'; // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname. Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst; // Path does not denote a root directory, so
                     // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) { // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else { // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4977
4978
// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c
// Set the length of the file underlying fd to 'length' by positioning the
// file pointer there and calling SetEndOfFile. Returns 0 on success, -1 on
// failure. NOTE(review): the file pointer is left at 'length' afterwards --
// confirm callers do not depend on the previous position.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // 0xFFFFFFFF can be a valid low word of the new position, so it only
  // signals failure when GetLastError() also reports an error.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}
5001
5002
// Return the fd underlying a stdio FILE stream.
int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}
5005
5006
// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98
// Flush OS buffers for fd to disk. ERROR_ACCESS_DENIED is tolerated because
// FlushFileBuffers fails that way on read-only handles, where there is
// nothing to flush anyway.
int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}
5020
5021
static int nonSeekAvailable(int, long *);
5022
static int stdinAvailable(int, long *);
5023
5024
// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c
// Store in *bytes the number of bytes readable from fd without blocking.
// For character devices and pipes this delegates to stdinAvailable /
// nonSeekAvailable; for regular files it is (end - current) via seeking.
// Returns TRUE on success, FALSE on failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      int ret;
      long lpbytes;
      if (fd == 0) {
        // Standard input gets special console handling.
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: measure end - cur, restoring the original position.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}
5057
5058
// Acquire the CRT's per-FILE lock (pairs with os::funlockfile).
void os::flockfile(FILE* fp) {
  _lock_file(fp);
}
5061
5062
// Release the CRT's per-FILE lock (pairs with os::flockfile).
void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}
5065
5066
// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c
// Report bytes available on a non-seekable fd (pipe) via PeekNamedPipe.
// Returns TRUE/FALSE; at EOF (broken pipe) reports 0 bytes successfully.
static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF. In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}
5094
5095
#define MAX_INPUT_EVENTS 2000
5096
5097
// This code is a copy of JDK's stdinAvailable
5098
// from src/windows/hpi/src/sys_api_md.c
5099
5100
static int stdinAvailable(int fd, long *pbytes) {
5101
HANDLE han;
5102
DWORD numEventsRead = 0; // Number of events read from buffer
5103
DWORD numEvents = 0; // Number of events in buffer
5104
DWORD i = 0; // Loop index
5105
DWORD curLength = 0; // Position marker
5106
DWORD actualLength = 0; // Number of bytes readable
5107
BOOL error = FALSE; // Error holder
5108
INPUT_RECORD *lpBuffer; // Pointer to records of input events
5109
5110
if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
5111
return FALSE;
5112
}
5113
5114
// Construct an array of input records in the console buffer
5115
error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
5116
if (error == 0) {
5117
return nonSeekAvailable(fd, pbytes);
5118
}
5119
5120
// lpBuffer must fit into 64K or else PeekConsoleInput fails
5121
if (numEvents > MAX_INPUT_EVENTS) {
5122
numEvents = MAX_INPUT_EVENTS;
5123
}
5124
5125
lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
5126
if (lpBuffer == NULL) {
5127
return FALSE;
5128
}
5129
5130
error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
5131
if (error == 0) {
5132
os::free(lpBuffer);
5133
return FALSE;
5134
}
5135
5136
// Examine input records for the number of bytes available
5137
for (i=0; i<numEvents; i++) {
5138
if (lpBuffer[i].EventType == KEY_EVENT) {
5139
5140
KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
5141
&(lpBuffer[i].Event);
5142
if (keyRecord->bKeyDown == TRUE) {
5143
CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
5144
curLength++;
5145
if (*keyPressed == '\r') {
5146
actualLength = curLength;
5147
}
5148
}
5149
}
5150
}
5151
5152
if (lpBuffer != NULL) {
5153
os::free(lpBuffer);
5154
}
5155
5156
*pbytes = (long) actualLength;
5157
return TRUE;
5158
}
5159
5160
// Map a block of memory.
// Maps 'bytes' of file_name at file_offset, preferably at 'addr'. Two paths:
//  - allow_exec: file mappings can't be made executable on Windows, so the
//    data is copied into VirtualAlloc'd memory and VirtualProtect'd later.
//  - otherwise: a regular copy-on-write (or read-only) file mapping.
// Returns the base address or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {

  errno_t err;
  wchar_t* wide_path = wide_abs_unc_path(file_name, err);

  if (wide_path == NULL) {
    return NULL;
  }

  HANDLE hFile;
  char* base;

  hFile = CreateFileW(wide_path, GENERIC_READ, FILE_SHARE_READ, NULL,
                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == INVALID_HANDLE_VALUE) {
    log_info(os)("CreateFileW() failed: GetLastError->%ld.", GetLastError());
    os::free(wide_path);
    return NULL;
  }
  os::free(wide_path);

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory. Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) virtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      CloseHandle(hFile);
      return NULL;
    }

    // Record virtual memory allocation
    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /* file_name */);
    if (hMap == NULL) {
      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)mapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The view stays valid after the mapping handle is closed.
    if (CloseHandle(hMap) == 0) {
      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
    return base;
  }

  return base;
}
5265
5266
5267
// Remap a block of memory.
// Intentionally unimplemented on Windows; should never be reached.
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // would have to unmap the memory before we remap it.

  // Because there is a small window between unmapping memory and mapping
  // it in again with different protections, CDS archives are mapped RW
  // on windows, so this function isn't called.
  ShouldNotReachHere();
  return NULL;
}
5280
5281
5282
// Unmap a block of memory.
// Returns true=success, otherwise false.
bool os::pd_unmap_memory(char* addr, size_t bytes) {
  MEMORY_BASIC_INFORMATION mem_info;
  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
    log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
    return false;
  }

  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
  // Instead, executable region was allocated using VirtualAlloc(). See
  // pd_map_memory() above.
  //
  // The following flags should match the 'exec_access' flags used for
  // VirtualProtect() in pd_map_memory().
  if (mem_info.Protect == PAGE_EXECUTE_READ ||
      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
    return pd_release_memory(addr, bytes);
  }

  BOOL result = unmapViewOfFile(addr);
  if (result == 0) {
    return false;
  }
  return true;
}
5309
5310
// Pause the VM at startup: create a marker file (PauseAtStartupFile or
// ./vm.paused.<pid>) and poll until someone deletes it. If the file cannot
// be created, continue immediately with a warning.
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    // Spin until the marker file disappears.
    while (::stat(filename, &buf) == 0) {
      Sleep(100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}
5330
5331
// Static state for ThreadCrashProtection: the thread currently running under
// protection and the active protection object (set/cleared in call()).
Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;
5333
5334
// Records the current thread as the protected thread; only the JFR sampler
// thread is expected to use crash protection.
os::ThreadCrashProtection::ThreadCrashProtection() {
  _protected_thread = Thread::current();
  assert(_protected_thread->is_JfrSampler_thread(), "should be JFRSampler");
}
5338
5339
// See the caveats for this class in os_windows.hpp
// Protects the callback call so that raised OS EXCEPTIONS causes a jump back
// into this method and returns false. If no OS EXCEPTION was raised, returns
// true.
// The callback is supposed to provide the method that should be protected.
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  bool success = true;
  // Structured exception handling (SEH): any OS exception in cb.call()
  // transfers control to the __except block instead of crashing the VM.
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Clear protection state whether or not an exception fired.
  _crash_protection = NULL;
  _protected_thread = NULL;
  return success;
}
5358
5359
5360
class HighResolutionInterval : public CHeapObj<mtThread> {
  // RAII helper: raises the multimedia timer resolution to 1ms for the
  // lifetime of the object when the requested sleep is not a multiple of
  // the default ~10ms tick, and restores it on destruction.
  //
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates. We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;  // nonzero iff this object raised the timer resolution
 public:
  HighResolutionInterval(jlong ms) {
    resolution = ms % 10L;
    if (resolution != 0) {
      // Return value deliberately ignored; failure just means coarser sleeps.
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};
5392
5393
// An Event wraps a win32 "CreateEvent" kernel handle.
5394
//
5395
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
5396
//
5397
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5398
// field, and call CloseHandle() on the win32 event handle. Unpark() would
5399
// need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5400
// In addition, an unpark() operation might fetch the handle field, but the
5401
// event could recycle between the fetch and the SetEvent() operation.
5402
// SetEvent() would either fail because the handle was invalid, or inadvertently work,
5403
// as the win32 handle value had been recycled. In an ideal world calling SetEvent()
5404
// on a stale but recycled handle would be harmless, but in practice this might
5405
// confuse other non-Sun code, so it's not a viable approach.
5406
//
5407
// 2: Once a win32 event handle is associated with an Event, it remains associated
5408
// with the Event. The event handle is never closed. This could be construed
5409
// as handle leakage, but only up to the maximum # of threads that have been extant
5410
// at any one time. This shouldn't be an issue, as windows platforms typically
5411
// permit a process to have hundreds of thousands of open handles.
5412
//
5413
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5414
// and release unused handles.
5415
//
5416
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5417
// It's not clear, however, that we wouldn't be trading one type of leak for another.
5418
//
5419
// 5. Use an RCU-like mechanism (Read-Copy Update).
5420
// Or perhaps something similar to Maged Michael's "Hazard pointers".
5421
//
5422
// We use (2).
5423
//
5424
// TODO-FIXME:
5425
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5426
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5427
// to recover from (or at least detect) the dreaded Windows 841176 bug.
5428
// 3. Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5429
// into a single win32 CreateEvent() handle.
5430
//
5431
// Assumption:
5432
// Only one parker can exist on an event, which is why we allocate
5433
// them per-thread. Multiple unparkers can coexist.
5434
//
5435
// _Event transitions in park()
5436
// -1 => -1 : illegal
5437
// 1 => 0 : pass - return immediately
5438
// 0 => -1 : block; then set _Event to 0 before returning
5439
//
5440
// _Event transitions in unpark()
5441
// 0 => 1 : just return
5442
// 1 => 1 : just return
5443
// -1 => either 0 or 1; must signal target thread
5444
// That is, we can safely transition _Event from -1 to either
5445
// 0 or 1.
5446
//
5447
// _Event serves as a restricted-range semaphore.
5448
// -1 : thread is blocked, i.e. there is a waiter
5449
// 0 : neutral: thread is running or ready,
5450
// could have been signaled after a wait started
5451
// 1 : signaled - thread is running or ready
5452
//
5453
// Another possible encoding of _Event would be with
5454
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5455
//
5456
5457
// Block the calling thread for at most Millis milliseconds or until
// unpark()ed. Returns OS_OK when woken by unpark(), OS_TIMEOUT on timeout.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically decrement _Event; only the 0 -> -1 transition blocks below.
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;  // already signaled: consume and return

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows. See EventWait() for details. This may be superstition. Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos(). Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval. Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time. This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;      // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    // Temporarily raise the timer resolution for short waits (see
    // HighResolutionInterval), unless high resolution is forced globally.
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(prd);
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
    delete phri; // if it is NULL, harmless
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}
5521
5522
// Block the calling thread indefinitely until unpark()ed.
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  while (_Event < 0) {
    // Loop guards against spurious wakeups: only exit once unpark()
    // has raised _Event to >= 0.
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case
  // multiple unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}
5555
5556
// Wake (or pre-signal) the thread associated with this event.
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Only a -1 (parked) previous value requires signaling the kernel event.
  if (Atomic::xchg(&_Event, 1) >= 0) return;

  ::SetEvent(_ParkHandle);
}
5577
5578
5579
// JSR166
5580
// -------------------------------------------------------
5581
5582
// The Windows implementation of Park is very straightforward: Basic
5583
// operations on Win32 Events turn out to have the right semantics to
5584
// use them directly.
5585
5586
// JSR166 park: block until unpark()ed, interrupted, or the deadline passes.
// 'time' is absolute milliseconds when isAbsolute, otherwise relative
// nanoseconds; 0 with !isAbsolute means wait forever.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkHandle != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  // (a zero-timeout wait doubles as a "is the event already set?" probe).
  if (thread->is_interrupted(false) ||
      WaitForSingleObject(_ParkHandle, 0) == WAIT_OBJECT_0) {
    ResetEvent(_ParkHandle);
    return;
  } else {
    // Transition to blocked state so safepoints are not held up, then wait.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

    WaitForSingleObject(_ParkHandle, time);
    ResetEvent(_ParkHandle);
  }
}
5620
5621
// Signal the event: a concurrent or subsequent park() returns immediately.
void Parker::unpark() {
  guarantee(_ParkHandle != NULL, "invariant");
  SetEvent(_ParkHandle);
}
5625
5626
// Platform Monitor implementation

// Wait on the condition variable for up to 'millis' milliseconds
// (0 means wait forever). Returns OS_OK if signaled, OS_TIMEOUT otherwise.
// Must already be locked
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  int ret = OS_TIMEOUT;
  // NOTE(review): millis (jlong) is narrowed to the DWORD parameter of
  // SleepConditionVariableCS; callers are presumed to pass values that
  // fit -- confirm for very long timeouts.
  int status = SleepConditionVariableCS(&_cond, &_mutex,
                                        millis == 0 ? INFINITE : millis);
  if (status != 0) {
    ret = OS_OK;
  }
#ifndef PRODUCT
  else {
    // The only expected failure mode is a timeout.
    DWORD err = GetLastError();
    assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
  }
#endif
  return ret;
}
5645
5646
// Run the specified command in a separate process. Return its exit value,
5647
// or -1 on failure (e.g. can't create a new process).
5648
int os::fork_and_exec(const char* cmd, bool dummy /* ignored */) {
5649
STARTUPINFO si;
5650
PROCESS_INFORMATION pi;
5651
DWORD exit_code;
5652
5653
char * cmd_string;
5654
const char * cmd_prefix = "cmd /C ";
5655
size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
5656
cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
5657
if (cmd_string == NULL) {
5658
return -1;
5659
}
5660
cmd_string[0] = '\0';
5661
strcat(cmd_string, cmd_prefix);
5662
strcat(cmd_string, cmd);
5663
5664
// now replace all '\n' with '&'
5665
char * substring = cmd_string;
5666
while ((substring = strchr(substring, '\n')) != NULL) {
5667
substring[0] = '&';
5668
substring++;
5669
}
5670
memset(&si, 0, sizeof(si));
5671
si.cb = sizeof(si);
5672
memset(&pi, 0, sizeof(pi));
5673
BOOL rslt = CreateProcess(NULL, // executable name - use command line
5674
cmd_string, // command line
5675
NULL, // process security attribute
5676
NULL, // thread security attribute
5677
TRUE, // inherits system handles
5678
0, // no creation flags
5679
NULL, // use parent's environment block
5680
NULL, // use parent's starting directory
5681
&si, // (in) startup information
5682
&pi); // (out) process information
5683
5684
if (rslt) {
5685
// Wait until child process exits.
5686
WaitForSingleObject(pi.hProcess, INFINITE);
5687
5688
GetExitCodeProcess(pi.hProcess, &exit_code);
5689
5690
// Close process and thread handles.
5691
CloseHandle(pi.hProcess);
5692
CloseHandle(pi.hThread);
5693
} else {
5694
exit_code = -1;
5695
}
5696
5697
FREE_C_HEAP_ARRAY(char, cmd_string);
5698
return (int)exit_code;
5699
}
5700
5701
// Print the library (and, when resolvable, the function plus offset)
// containing 'addr' to 'st'. Returns true if the address mapped to a
// known library, false otherwise.
bool os::find(address addr, outputStream* st) {
  int offset = -1;
  bool result = false;
  char buf[256];
  if (os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    st->print(PTR_FORMAT " ", addr);
    if (strlen(buf) < sizeof(buf) - 1) {
      // Print just the file-name portion of the library path.
      char* p = strrchr(buf, '\\');
      if (p) {
        st->print("%s", p + 1);
      } else {
        st->print("%s", buf);
      }
    } else {
      // The library name is probably truncated. Let's omit the library name.
      // See also JDK-8147512.
    }
    if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
      st->print("::%s + 0x%x", buf, offset);
    }
    st->cr();
    result = true;
  }
  return result;
}
5726
5727
// One-time Winsock initialization (version 2.2). Returns JNI_OK on
// success, JNI_ERR (after printing the error) otherwise.
static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) != 0) {
    jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
                ::GetLastError());
    return JNI_ERR;
  }
  return JNI_OK;
}
5737
5738
// Thin wrapper over Winsock gethostbyname().
struct hostent* os::get_host_by_name(char* name) {
  return (struct hostent*)gethostbyname(name);
}
5741
5742
// Thin wrapper over Winsock closesocket() (sockets are not file handles
// on Windows, so plain close() would be wrong).
int os::socket_close(int fd) {
  return ::closesocket(fd);
}
5745
5746
// Thin wrapper over Winsock socket().
int os::socket(int domain, int type, int protocol) {
  return ::socket(domain, type, protocol);
}
5749
5750
// Thin wrapper over Winsock connect().
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  return ::connect(fd, him, len);
}
5753
5754
// Thin wrapper over Winsock recv(); narrows nBytes to the int the
// Winsock API expects.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  return ::recv(fd, buf, (int)nBytes, flags);
}
5757
5758
// Thin wrapper over Winsock send(); narrows nBytes to the int the
// Winsock API expects.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
5761
5762
// Identical to os::send on Windows (no raw/cooked distinction here).
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
  return ::send(fd, buf, (int)nBytes, flags);
}
5765
5766
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
5767
#if defined(IA32)
5768
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
5769
#elif defined(AMD64) || defined(_M_ARM64)
5770
#define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
5771
#endif
5772
5773
// returns true if thread could be suspended,
// false otherwise
static bool do_suspend(HANDLE* h) {
  if (h != NULL) {
    // SuspendThread returns (DWORD)-1 on failure.
    if (SuspendThread(*h) != ~0) {
      return true;
    }
  }
  return false;
}
5783
5784
// resume the thread
// calling resume on an active thread is a no-op
static void do_resume(HANDLE* h) {
  if (h != NULL) {
    // Return value ignored: resuming a non-suspended thread is harmless.
    ResumeThread(*h);
  }
}
5791
5792
// retrieve a suspend/resume context capable handle
// from the tid. Caller validates handle return value.
void get_thread_handle_for_extended_context(HANDLE* h,
                                            OSThread::thread_id_t tid) {
  if (h != NULL) {
    // Request exactly the rights needed: suspend/resume, context capture,
    // and basic queries. OpenThread returns NULL on failure.
    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
  }
}
5800
5801
// Thread sampling implementation
//
// Suspend the target thread, capture its CPU context, hand the context to
// the sampling task (do_task), then resume the thread and close the handle.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
5830
5831
// Append a debug prompt to 'buf', show it in a message box, and if the
// user selects yes, raise a breakpoint so a debugger can attach to the
// dying VM. Always returns false after the optional breakpoint.
bool os::start_debugging(char *buf, int buflen) {
  int len = (int)strlen(buf);
  char *p = &buf[len];

  jio_snprintf(p, buflen-len,
               "\n\n"
               "Do you want to debug the problem?\n\n"
               "To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
               "Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
               "Otherwise, select 'No' to abort...",
               os::current_process_id(), os::current_thread_id());

  bool yes = os::message_box("Unexpected Error", buf);

  if (yes) {
    // os::breakpoint() calls DebugBreak(), which causes a breakpoint
    // exception. If VM is running inside a debugger, the debugger will
    // catch the exception. Otherwise, the breakpoint exception will reach
    // the default windows exception handler, which can spawn a debugger and
    // automatically attach to the dying VM.
    os::breakpoint();
    yes = false;
  }
  return yes;
}
5856
5857
// Returns the module handle of the executable that started the process,
// used as the default handle for in-process symbol lookup.
void* os::get_default_process_handle() {
  return (void*)GetModuleHandle(NULL);
}
5860
5861
// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//   sym_name: Symbol in library we are looking for
//   lib_name: Name of library to look in, NULL for shared libs.
//   is_absolute_path == true if lib_name is absolute path to agent
//                       such as "C:/a/b/L.dll"
//                    == false if only the base name of the library is passed in
//                       such as "L"
// Returns a C-heap-allocated string (caller frees), or NULL on failure.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      // NOTE(review): assumes the stripped file name still carries
      // JNI_LIB_PREFIX and ends with JNI_LIB_SUFFIX -- confirm for agents
      // with unconventional file names.
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Result size: sym_name + '_' + lib_name (+ optional "@XX") + NUL.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    // __stdcall decoration: keep any trailing "@XX" after the lib name.
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
5925
5926
/*
5927
All the defined signal names for Windows.
5928
5929
NOTE that not all of these names are accepted by FindSignal!
5930
5931
For various reasons some of these may be rejected at runtime.
5932
5933
Here are the names currently accepted by a user of sun.misc.Signal with
5934
1.4.1 (ignoring potential interaction with use of chaining, etc):
5935
5936
(LIST TBD)
5937
5938
*/
5939
int os::get_signal_number(const char* name) {
5940
static const struct {
5941
const char* name;
5942
int number;
5943
} siglabels [] =
5944
// derived from version 6.0 VC98/include/signal.h
5945
{"ABRT", SIGABRT, // abnormal termination triggered by abort cl
5946
"FPE", SIGFPE, // floating point exception
5947
"SEGV", SIGSEGV, // segment violation
5948
"INT", SIGINT, // interrupt
5949
"TERM", SIGTERM, // software term signal from kill
5950
"BREAK", SIGBREAK, // Ctrl-Break sequence
5951
"ILL", SIGILL}; // illegal instruction
5952
for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5953
if (strcmp(name, siglabels[i].name) == 0) {
5954
return siglabels[i].number;
5955
}
5956
}
5957
return -1;
5958
}
5959
5960
// Fast current thread access

// Offset (from the TIB segment register base) of the cached Thread*;
// established once by initialize_thread_ptr_offset() below.
int os::win32::_thread_ptr_offset = 0;

// No-op Java-call target; exists only to drive os_exception_wrapper once.
static void call_wrapper_dummy() {}

// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, methodHandle(), NULL, NULL);
}
5972
5973
// MAP_SYNC-style synchronous mapped flushes are not supported on Windows.
bool os::supports_map_sync() {
  return false;
}
5976
5977
#ifdef ASSERT
5978
static void check_meminfo(MEMORY_BASIC_INFORMATION* minfo) {
5979
assert(minfo->State == MEM_FREE || minfo->State == MEM_COMMIT || minfo->State == MEM_RESERVE, "Invalid state");
5980
if (minfo->State != MEM_FREE) {
5981
assert(minfo->AllocationBase != NULL && minfo->BaseAddress >= minfo->AllocationBase, "Invalid pointers");
5982
assert(minfo->RegionSize > 0, "Invalid region size");
5983
}
5984
}
5985
#endif
5986
5987
5988
// VirtualQuery 'addr' into *minfo, returning true on success. *minfo is
// zeroed first so no stale data survives a failed query; in debug builds
// successful results are sanity-checked.
static bool checkedVirtualQuery(address addr, MEMORY_BASIC_INFORMATION* minfo) {
  ZeroMemory(minfo, sizeof(MEMORY_BASIC_INFORMATION));
  if (::VirtualQuery(addr, minfo, sizeof(MEMORY_BASIC_INFORMATION)) == sizeof(MEMORY_BASIC_INFORMATION)) {
    DEBUG_ONLY(check_meminfo(minfo);)
    return true;
  }
  return false;
}
5996
5997
// Given a pointer pointing into an allocation (an area allocated with VirtualAlloc),
5998
// return information about that allocation.
5999
bool os::win32::find_mapping(address addr, mapping_info_t* mi) {
6000
// Query at addr to find allocation base; then, starting at allocation base,
6001
// query all regions, until we either find the next allocation or a free area.
6002
ZeroMemory(mi, sizeof(mapping_info_t));
6003
MEMORY_BASIC_INFORMATION minfo;
6004
address allocation_base = NULL;
6005
address allocation_end = NULL;
6006
bool rc = false;
6007
if (checkedVirtualQuery(addr, &minfo)) {
6008
if (minfo.State != MEM_FREE) {
6009
allocation_base = (address)minfo.AllocationBase;
6010
allocation_end = allocation_base;
6011
// Iterate through all regions in this allocation to find its end. While we are here, also count things.
6012
for (;;) {
6013
bool rc = checkedVirtualQuery(allocation_end, &minfo);
6014
if (rc == false || // VirtualQuery error, end of allocation?
6015
minfo.State == MEM_FREE || // end of allocation, free memory follows
6016
(address)minfo.AllocationBase != allocation_base) // end of allocation, a new one starts
6017
{
6018
break;
6019
}
6020
const size_t region_size = minfo.RegionSize;
6021
mi->regions ++;
6022
if (minfo.State == MEM_COMMIT) {
6023
mi->committed_size += minfo.RegionSize;
6024
}
6025
allocation_end += region_size;
6026
}
6027
if (allocation_base != NULL && allocation_end > allocation_base) {
6028
mi->base = allocation_base;
6029
mi->size = allocation_end - allocation_base;
6030
rc = true;
6031
}
6032
}
6033
}
6034
#ifdef ASSERT
6035
if (rc) {
6036
assert(mi->size > 0 && mi->size >= mi->committed_size, "Sanity");
6037
assert(addr >= mi->base && addr < mi->base + mi->size, "Sanity");
6038
assert(mi->regions > 0, "Sanity");
6039
}
6040
#endif
6041
return rc;
6042
}
6043
6044
// Helper for print_one_mapping: print n words, both as hex and ascii.
// Use Safefetch for all values.
static void print_snippet(const void* p, outputStream* st) {
  static const int num_words = LP64_ONLY(3) NOT_LP64(6);
  static const int num_bytes = num_words * sizeof(int);
  intptr_t v[num_words];
  const int errval = 0xDE210244;
  for (int i = 0; i < num_words; i++) {
    v[i] = SafeFetchN((intptr_t*)p + i, errval);
    // If the fetch returned the error marker, re-probe with the inverted
    // marker: only if both come back as their respective markers is the
    // memory genuinely unreadable, and we print nothing at all.
    if (v[i] == errval &&
        SafeFetchN((intptr_t*)p + i, ~errval) == ~errval) {
      return;
    }
  }
  st->put('[');
  for (int i = 0; i < num_words; i++) {
    st->print(INTPTR_FORMAT " ", v[i]);
  }
  // Re-interpret the fetched words as bytes for the ascii rendering;
  // non-printable bytes show as '.'.
  const char* b = (char*)v;
  st->put('\"');
  for (int i = 0; i < num_bytes; i++) {
    st->put(::isgraph(b[i]) ? b[i] : '.');
  }
  st->put('\"');
  st->put(']');
}
6070
6071
// Helper function for print_memory_mappings:
6072
// Given a MEMORY_BASIC_INFORMATION, containing information about a non-free region:
6073
// print out all regions in that allocation. If any of those regions
6074
// fall outside the given range [start, end), indicate that in the output.
6075
// Return the pointer to the end of the allocation.
6076
static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start, address end, outputStream* st) {
6077
// Print it like this:
6078
//
6079
// Base: <xxxxx>: [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx (region 1)
6080
// [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx (region 2)
6081
assert(minfo->State != MEM_FREE, "Not inside an allocation.");
6082
address allocation_base = (address)minfo->AllocationBase;
6083
#define IS_IN(p) (p >= start && p < end)
6084
bool first_line = true;
6085
bool is_dll = false;
6086
for(;;) {
6087
if (first_line) {
6088
st->print("Base " PTR_FORMAT ": ", p2i(allocation_base));
6089
} else {
6090
st->print_raw(NOT_LP64 (" ")
6091
LP64_ONLY(" "));
6092
}
6093
address region_start = (address)minfo->BaseAddress;
6094
address region_end = region_start + minfo->RegionSize;
6095
assert(region_end > region_start, "Sanity");
6096
if (region_end <= start) {
6097
st->print("<outside range> ");
6098
} else if (region_start >= end) {
6099
st->print("<outside range> ");
6100
} else if (!IS_IN(region_start) || !IS_IN(region_end - 1)) {
6101
st->print("<partly outside range> ");
6102
}
6103
st->print("[" PTR_FORMAT "-" PTR_FORMAT "), state=", p2i(region_start), p2i(region_end));
6104
switch (minfo->State) {
6105
case MEM_COMMIT: st->print_raw("MEM_COMMIT "); break;
6106
case MEM_FREE: st->print_raw("MEM_FREE "); break;
6107
case MEM_RESERVE: st->print_raw("MEM_RESERVE"); break;
6108
default: st->print("%x?", (unsigned)minfo->State);
6109
}
6110
st->print(", prot=%3x, type=", (unsigned)minfo->Protect);
6111
switch (minfo->Type) {
6112
case MEM_IMAGE: st->print_raw("MEM_IMAGE "); break;
6113
case MEM_MAPPED: st->print_raw("MEM_MAPPED "); break;
6114
case MEM_PRIVATE: st->print_raw("MEM_PRIVATE"); break;
6115
default: st->print("%x?", (unsigned)minfo->State);
6116
}
6117
// At the start of every allocation, print some more information about this mapping.
6118
// Notes:
6119
// - this could be beefed up a lot, similar to os::print_location
6120
// - for now we just query the allocation start point. This may be confusing for cases where
6121
// the kernel merges multiple mappings.
6122
if (first_line) {
6123
char buf[MAX_PATH];
6124
if (os::dll_address_to_library_name(allocation_base, buf, sizeof(buf), nullptr)) {
6125
st->print(", %s", buf);
6126
is_dll = true;
6127
}
6128
}
6129
// If memory is accessible, and we do not know anything else about it, print a snippet
6130
if (!is_dll &&
6131
minfo->State == MEM_COMMIT &&
6132
!(minfo->Protect & PAGE_NOACCESS || minfo->Protect & PAGE_GUARD)) {
6133
st->print_raw(", ");
6134
print_snippet(region_start, st);
6135
}
6136
st->cr();
6137
// Next region...
6138
bool rc = checkedVirtualQuery(region_end, minfo);
6139
if (rc == false || // VirtualQuery error, end of allocation?
6140
(minfo->State == MEM_FREE) || // end of allocation, free memory follows
6141
((address)minfo->AllocationBase != allocation_base) || // end of allocation, a new one starts
6142
(region_end > end)) // end of range to print.
6143
{
6144
return region_end;
6145
}
6146
first_line = false;
6147
}
6148
#undef IS_IN
6149
ShouldNotReachHere();
6150
return NULL;
6151
}
6152
6153
void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
6154
MEMORY_BASIC_INFORMATION minfo;
6155
address start = (address)addr;
6156
address end = start + bytes;
6157
address p = start;
6158
if (p == nullptr) { // Lets skip the zero pages.
6159
p += os::vm_allocation_granularity();
6160
}
6161
address p2 = p; // guard against wraparounds
6162
int fuse = 0;
6163
6164
while (p < end && p >= p2) {
6165
p2 = p;
6166
// Probe for the next mapping.
6167
if (checkedVirtualQuery(p, &minfo)) {
6168
if (minfo.State != MEM_FREE) {
6169
// Found one. Print it out.
6170
address p2 = print_one_mapping(&minfo, start, end, st);
6171
assert(p2 > p, "Sanity");
6172
p = p2;
6173
} else {
6174
// Note: for free regions, most of MEMORY_BASIC_INFORMATION is undefined.
6175
// Only region dimensions are not: use those to jump to the end of
6176
// the free range.
6177
address region_start = (address)minfo.BaseAddress;
6178
address region_end = region_start + minfo.RegionSize;
6179
assert(p >= region_start && p < region_end, "Sanity");
6180
p = region_end;
6181
}
6182
} else {
6183
// MSDN doc on VirtualQuery is unclear about what it means if it returns an error.
6184
// In particular, whether querying an address outside any mappings would report
6185
// a MEM_FREE region or just return an error. From experiments, it seems to return
6186
// a MEM_FREE region for unmapped areas in valid address space and an error if we
6187
// are outside valid address space.
6188
// Here, we advance the probe pointer by alloc granularity. But if the range to print
6189
// is large, this may take a long time. Therefore lets stop right away if the address
6190
// is outside of what we know are valid addresses on Windows. Also, add a loop fuse.
6191
static const address end_virt = (address)(LP64_ONLY(0x7ffffffffffULL) NOT_LP64(3*G));
6192
if (p >= end_virt) {
6193
break;
6194
} else {
6195
// Advance probe pointer, but with a fuse to break long loops.
6196
if (fuse++ == 100000) {
6197
break;
6198
}
6199
p += os::vm_allocation_granularity();
6200
}
6201
}
6202
}
6203
}
6204
6205