Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/os/windows/os_windows.cpp
40930 views
1
/*
2
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
// Must be at least Windows Vista or Server 2008 to use InitOnceExecuteOnce
26
#define _WIN32_WINNT 0x0600
27
28
// no precompiled headers
29
#include "jvm.h"
30
#include "classfile/vmSymbols.hpp"
31
#include "code/codeCache.hpp"
32
#include "code/icBuffer.hpp"
33
#include "code/nativeInst.hpp"
34
#include "code/vtableStubs.hpp"
35
#include "compiler/compileBroker.hpp"
36
#include "compiler/disassembler.hpp"
37
#include "interpreter/interpreter.hpp"
38
#include "jvmtifiles/jvmti.h"
39
#include "logging/log.hpp"
40
#include "logging/logStream.hpp"
41
#include "memory/allocation.inline.hpp"
42
#include "oops/oop.inline.hpp"
43
#include "os_share_windows.hpp"
44
#include "os_windows.inline.hpp"
45
#include "prims/jniFastGetField.hpp"
46
#include "prims/jvm_misc.hpp"
47
#include "runtime/arguments.hpp"
48
#include "runtime/atomic.hpp"
49
#include "runtime/globals.hpp"
50
#include "runtime/globals_extension.hpp"
51
#include "runtime/interfaceSupport.inline.hpp"
52
#include "runtime/java.hpp"
53
#include "runtime/javaCalls.hpp"
54
#include "runtime/mutexLocker.hpp"
55
#include "runtime/objectMonitor.hpp"
56
#include "runtime/orderAccess.hpp"
57
#include "runtime/osThread.hpp"
58
#include "runtime/perfMemory.hpp"
59
#include "runtime/safefetch.inline.hpp"
60
#include "runtime/safepointMechanism.hpp"
61
#include "runtime/semaphore.inline.hpp"
62
#include "runtime/sharedRuntime.hpp"
63
#include "runtime/statSampler.hpp"
64
#include "runtime/thread.inline.hpp"
65
#include "runtime/threadCritical.hpp"
66
#include "runtime/timer.hpp"
67
#include "runtime/vm_version.hpp"
68
#include "services/attachListener.hpp"
69
#include "services/memTracker.hpp"
70
#include "services/runtimeService.hpp"
71
#include "utilities/align.hpp"
72
#include "utilities/decoder.hpp"
73
#include "utilities/defaultStream.hpp"
74
#include "utilities/events.hpp"
75
#include "utilities/macros.hpp"
76
#include "utilities/vmError.hpp"
77
#include "symbolengine.hpp"
78
#include "windbghelp.hpp"
79
80
#ifdef _DEBUG
81
#include <crtdbg.h>
82
#endif
83
84
#include <windows.h>
85
#include <sys/types.h>
86
#include <sys/stat.h>
87
#include <sys/timeb.h>
88
#include <objidl.h>
89
#include <shlobj.h>
90
91
#include <malloc.h>
92
#include <signal.h>
93
#include <direct.h>
94
#include <errno.h>
95
#include <fcntl.h>
96
#include <io.h>
97
#include <process.h> // For _beginthreadex(), _endthreadex()
98
#include <imagehlp.h> // For os::dll_address_to_function_name
99
// for enumerating dll libraries
100
#include <vdmdbg.h>
101
#include <psapi.h>
102
#include <mmsystem.h>
103
#include <winsock2.h>
104
105
// for timer info max values which include all bits
106
#define ALL_64_BITS CONST64(-1)
107
108
// For DLL loading/load error detection
109
// Values of PE COFF
110
#define IMAGE_FILE_PTR_TO_SIGNATURE 0x3c
111
#define IMAGE_FILE_SIGNATURE_LENGTH 4
112
113
static HANDLE main_process;
114
static HANDLE main_thread;
115
static int main_thread_id;
116
117
static FILETIME process_creation_time;
118
static FILETIME process_exit_time;
119
static FILETIME process_user_time;
120
static FILETIME process_kernel_time;
121
122
#if defined(_M_ARM64)
123
#define __CPU__ aarch64
124
#elif defined(_M_AMD64)
125
#define __CPU__ amd64
126
#else
127
#define __CPU__ i486
128
#endif
129
130
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
131
PVOID topLevelVectoredExceptionHandler = NULL;
132
LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
133
#endif
134
135
// save DLL module handle, used by GetModuleFileName
136
137
HINSTANCE vm_lib_handle;
138
139
// DLL entry point for the VM library.
// On process attach: remember our own module handle (used later with
// GetModuleFileName) and, if requested, raise the OS timer resolution.
// On process detach: restore the timer resolution and unregister the
// vectored exception handler if one was installed.
BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
  switch (reason) {
  case DLL_PROCESS_ATTACH:
    vm_lib_handle = hinst;
    if (ForceTimeHighResolution) {
      timeBeginPeriod(1L);  // request 1ms timer/scheduler granularity
    }
    WindowsDbgHelp::pre_initialize();
    SymbolEngine::pre_initialize();
    break;
  case DLL_PROCESS_DETACH:
    if (ForceTimeHighResolution) {
      timeEndPeriod(1L);    // undo the matching timeBeginPeriod above
    }
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
    if (topLevelVectoredExceptionHandler != NULL) {
      RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
      topLevelVectoredExceptionHandler = NULL;
    }
#endif
    break;
  default:
    break;
  }
  return true;
}
165
166
// Convert a FILETIME (a 64-bit count of 100ns intervals split into two
// 32-bit halves) into seconds, as a double.
static inline double fileTimeAsDouble(FILETIME* time) {
  const double units_per_second = 10000000.0;            // 100ns units in one second
  const double low_word_span = (double) ((unsigned int) ~0); // scale of the low 32 bits
  return (time->dwLowDateTime / units_per_second) +
         time->dwHighDateTime * (low_word_span / units_per_second);
}
173
174
// Implementation of os
175
176
#define RANGE_FORMAT "[" PTR_FORMAT "-" PTR_FORMAT ")"
177
#define RANGE_FORMAT_ARGS(p, len) p2i(p), p2i((address)p + len)
178
179
// A number of wrappers for more frequently used system calls, to add standard logging.
180
181
// RAII guard: captures GetLastError() at construction and restores it at
// destruction, so that logging calls made in between cannot clobber the
// error code the caller is about to report.
struct PreserveLastError {
  const DWORD v;
  PreserveLastError() : v(::GetLastError()) {}
  ~PreserveLastError() { ::SetLastError(v); }
};
186
187
// Logging wrapper for VirtualAlloc
188
// Logging wrapper for VirtualAlloc.
// Logs success at trace level (flagging the case where the OS returned a
// different base than was requested) and failure at info level, preserving
// the Win32 last-error across the logging call.
static LPVOID virtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) {
  LPVOID result = ::VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
  if (result != NULL) {
    log_trace(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) returned " PTR_FORMAT "%s.",
                  p2i(lpAddress), dwSize, flAllocationType, flProtect, p2i(result),
                  ((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
  } else {
    PreserveLastError ple;  // logging below must not change GetLastError()
    log_info(os)("VirtualAlloc(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x) failed (%u).",
                 p2i(lpAddress), dwSize, flAllocationType, flProtect, ple.v);
  }
  return result;
}
201
202
// Logging wrapper for VirtualFree
203
// Logging wrapper for VirtualFree.
// Success is logged at trace level, failure at info level with the saved
// last-error value.
static BOOL virtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) {
  BOOL result = ::VirtualFree(lpAddress, dwSize, dwFreeType);
  if (result != FALSE) {
    log_trace(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) succeeded",
                  p2i(lpAddress), dwSize, dwFreeType);
  } else {
    PreserveLastError ple;  // keep GetLastError() intact for the caller
    log_info(os)("VirtualFree(" PTR_FORMAT ", " SIZE_FORMAT ", %x) failed (%u).",
                 p2i(lpAddress), dwSize, dwFreeType, ple.v);
  }
  return result;
}
215
216
// Logging wrapper for VirtualAllocExNuma
217
// Logging wrapper for VirtualAllocExNuma (NUMA-node-preferred allocation).
// Note: hProcess is deliberately omitted from the log lines; only the
// request parameters and the result are traced.
static LPVOID virtualAllocExNuma(HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType,
                                 DWORD flProtect, DWORD nndPreferred) {
  LPVOID result = ::VirtualAllocExNuma(hProcess, lpAddress, dwSize, flAllocationType, flProtect, nndPreferred);
  if (result != NULL) {
    log_trace(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) returned " PTR_FORMAT "%s.",
                  p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, p2i(result),
                  ((lpAddress != NULL && result != lpAddress) ? " <different base!>" : ""));
  } else {
    PreserveLastError ple;  // keep GetLastError() intact for the caller
    log_info(os)("VirtualAllocExNuma(" PTR_FORMAT ", " SIZE_FORMAT ", %x, %x, %x) failed (%u).",
                 p2i(lpAddress), dwSize, flAllocationType, flProtect, nndPreferred, ple.v);
  }
  return result;
}
231
232
// Logging wrapper for MapViewOfFileEx
233
// Logging wrapper for MapViewOfFileEx.
// Only the requested base address and mapping size are traced; the file
// handle, access flags and offsets are not logged.
static LPVOID mapViewOfFileEx(HANDLE hFileMappingObject, DWORD dwDesiredAccess, DWORD dwFileOffsetHigh,
                              DWORD dwFileOffsetLow, SIZE_T dwNumberOfBytesToMap, LPVOID lpBaseAddress) {
  LPVOID result = ::MapViewOfFileEx(hFileMappingObject, dwDesiredAccess, dwFileOffsetHigh,
                                    dwFileOffsetLow, dwNumberOfBytesToMap, lpBaseAddress);
  if (result != NULL) {
    log_trace(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") returned " PTR_FORMAT "%s.",
                  p2i(lpBaseAddress), dwNumberOfBytesToMap, p2i(result),
                  ((lpBaseAddress != NULL && result != lpBaseAddress) ? " <different base!>" : ""));
  } else {
    PreserveLastError ple;  // keep GetLastError() intact for the caller
    log_info(os)("MapViewOfFileEx(" PTR_FORMAT ", " SIZE_FORMAT ") failed (%u).",
                 p2i(lpBaseAddress), dwNumberOfBytesToMap, ple.v);
  }
  return result;
}
248
249
// Logging wrapper for UnmapViewOfFile
250
// Logging wrapper for UnmapViewOfFile.
static BOOL unmapViewOfFile(LPCVOID lpBaseAddress) {
  BOOL result = ::UnmapViewOfFile(lpBaseAddress);
  if (result != FALSE) {
    log_trace(os)("UnmapViewOfFile(" PTR_FORMAT ") succeeded", p2i(lpBaseAddress));
  } else {
    PreserveLastError ple;  // keep GetLastError() intact for the caller
    log_info(os)("UnmapViewOfFile(" PTR_FORMAT ") failed (%u).", p2i(lpBaseAddress), ple.v);
  }
  return result;
}
260
261
bool os::unsetenv(const char* name) {
262
assert(name != NULL, "Null pointer");
263
return (SetEnvironmentVariable(name, NULL) == TRUE);
264
}
265
266
// Return the CRT's environment block (the Windows equivalent of 'environ').
char** os::get_environ() { return _environ; }
268
// No setuid programs under Windows, so the VM never runs with elevated
// privileges it needs to guard against.
bool os::have_special_privileges() {
  return false;
}
272
273
274
// This method is a periodic task to check for misbehaving JNI applications
275
// under CheckJNI, we can add any periodic checks here.
276
// For Windows at the moment does nothing
277
void os::run_periodic_checks() {
278
return;
279
}
280
281
// previous UnhandledExceptionFilter, if there is one
282
static LPTOP_LEVEL_EXCEPTION_FILTER prev_uef_handler = NULL;
283
284
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo);
285
286
// Establish OS-dependent system property values: java.home, the dll
// directory, the boot class path, java.library.path and the default
// extensions directories. On 32-bit also installs the FP-exception
// UnhandledExceptionFilter.
void os::init_system_properties_values() {
  // sysclasspath, java_home, dll_dir
  {
    char *home_path;
    char *dll_path;
    char *pslash;
    const char *bin = "\\bin";
    char home_dir[MAX_PATH + 1];
    char *alt_home_dir = ::getenv("_ALT_JAVA_HOME_DIR");

    if (alt_home_dir != NULL) {
      strncpy(home_dir, alt_home_dir, MAX_PATH + 1);
      home_dir[MAX_PATH] = '\0';  // strncpy does not guarantee termination
    } else {
      os::jvm_path(home_dir, sizeof(home_dir));
      // Found the full path to jvm.dll.
      // Now cut the path to <java_home>/jre if we can.
      // NOTE(review): the first strrchr result is dereferenced unchecked —
      // assumes jvm_path() always yields a path containing '\\'; confirm.
      *(strrchr(home_dir, '\\')) = '\0'; // get rid of \jvm.dll
      pslash = strrchr(home_dir, '\\');
      if (pslash != NULL) {
        *pslash = '\0'; // get rid of \{client|server}
        pslash = strrchr(home_dir, '\\');
        if (pslash != NULL) {
          *pslash = '\0'; // get rid of \bin
        }
      }
    }

    home_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + 1, mtInternal);
    strcpy(home_path, home_dir);
    Arguments::set_java_home(home_path);  // makes its own copy
    FREE_C_HEAP_ARRAY(char, home_path);

    dll_path = NEW_C_HEAP_ARRAY(char, strlen(home_dir) + strlen(bin) + 1,
                                mtInternal);
    strcpy(dll_path, home_dir);
    strcat(dll_path, bin);
    Arguments::set_dll_dir(dll_path);  // makes its own copy
    FREE_C_HEAP_ARRAY(char, dll_path);

    if (!set_boot_path('\\', ';')) {
      vm_exit_during_initialization("Failed setting boot class path.", NULL);
    }
  }

  // library_path
#define EXT_DIR "\\lib\\ext"
#define BIN_DIR "\\bin"
#define PACKAGE_DIR "\\Sun\\Java"
  {
    // Win32 library search order (See the documentation for LoadLibrary):
    //
    // 1. The directory from which application is loaded.
    // 2. The system wide Java Extensions directory (Java only)
    // 3. System directory (GetSystemDirectory)
    // 4. Windows directory (GetWindowsDirectory)
    // 5. The PATH environment variable
    // 6. The current directory

    char *library_path;
    char tmp[MAX_PATH];
    char *path_str = ::getenv("PATH");

    // Buffer sized for: exe dir + three system-dir entries + the
    // PACKAGE_DIR/BIN_DIR suffixes + PATH + separators/";." slack (+10).
    library_path = NEW_C_HEAP_ARRAY(char, MAX_PATH * 5 + sizeof(PACKAGE_DIR) +
                                    sizeof(BIN_DIR) + (path_str ? strlen(path_str) : 0) + 10, mtInternal);

    library_path[0] = '\0';

    GetModuleFileName(NULL, tmp, sizeof(tmp));
    *(strrchr(tmp, '\\')) = '\0';  // strip the executable name, keep its directory
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);
    strcat(library_path, PACKAGE_DIR BIN_DIR);  // system-wide Java extensions dir

    GetSystemDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    GetWindowsDirectory(tmp, sizeof(tmp));
    strcat(library_path, ";");
    strcat(library_path, tmp);

    if (path_str) {
      strcat(library_path, ";");
      strcat(library_path, path_str);
    }

    strcat(library_path, ";.");  // finally, the current directory

    Arguments::set_library_path(library_path);  // makes its own copy
    FREE_C_HEAP_ARRAY(char, library_path);
  }

  // Default extensions directory
  {
    char path[MAX_PATH];
    char buf[2 * MAX_PATH + 2 * sizeof(EXT_DIR) + sizeof(PACKAGE_DIR) + 1];
    GetWindowsDirectory(path, MAX_PATH);
    // buf is sized to hold both expanded components plus separators.
    sprintf(buf, "%s%s;%s%s%s", Arguments::get_java_home(), EXT_DIR,
            path, PACKAGE_DIR, EXT_DIR);
    Arguments::set_ext_dirs(buf);
  }
#undef EXT_DIR
#undef BIN_DIR
#undef PACKAGE_DIR

#ifndef _WIN64
  // set our UnhandledExceptionFilter and save any previous one
  prev_uef_handler = SetUnhandledExceptionFilter(Handle_FLT_Exception);
#endif

  // Done
  return;
}
403
404
// Trigger a debugger breakpoint (int 3 / equivalent) at the current point.
void os::breakpoint() {
  DebugBreak();
}
407
408
// Invoked from the BREAKPOINT Macro; C linkage so the macro can reference
// it from any translation unit.
extern "C" void breakpoint() {
  os::breakpoint();
}
412
413
// RtlCaptureStackBackTrace Windows API may not exist prior to Windows XP.
414
// So far, this method is only used by Native Memory Tracking, which is
415
// only supported on Windows XP or later.
416
//
417
// Capture up to 'frames' native return addresses of the current thread,
// skipping 'toSkip' caller frames plus this function's own frame.
// Slots beyond the captured count are NULL-filled. Returns the number of
// frames actually captured.
int os::get_native_stack(address* stack, int frames, int toSkip) {
  const int captured =
      RtlCaptureStackBackTrace(toSkip + 1, frames, (PVOID*)stack, NULL);
  for (int slot = captured; slot < frames; slot++) {
    stack[slot] = NULL;
  }
  return captured;
}
424
425
// os::current_stack_base()
426
//
427
// Returns the base of the stack, which is the stack's
428
// starting address. This function must be called
429
// while running on the stack of the thread being queried.
430
431
// os::current_stack_base()
//
// Returns the base (highest address) of the current thread's stack.
// Must be called on the stack of the thread being queried: it queries the
// region containing a local variable (&minfo) and then walks upward,
// summing all regions that share the same AllocationBase.
address os::current_stack_base() {
  MEMORY_BASIC_INFORMATION minfo;
  address stack_bottom;
  size_t stack_size;

  // &minfo is on this thread's stack, so this query describes the stack
  // region we are currently executing in.
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  stack_bottom = (address)minfo.AllocationBase;
  stack_size = minfo.RegionSize;

  // Add up the sizes of all the regions with the same
  // AllocationBase.
  while (1) {
    VirtualQuery(stack_bottom + stack_size, &minfo, sizeof(minfo));
    if (stack_bottom == (address)minfo.AllocationBase) {
      stack_size += minfo.RegionSize;
    } else {
      break;  // left the stack allocation; stop accumulating
    }
  }
  return stack_bottom + stack_size;  // one past the top = stack base
}
452
453
// Size of the current thread's stack: distance from the stack's
// AllocationBase (queried via a local variable's region) up to the base
// returned by os::current_stack_base(). Must run on the queried stack.
size_t os::current_stack_size() {
  size_t sz;
  MEMORY_BASIC_INFORMATION minfo;
  VirtualQuery(&minfo, &minfo, sizeof(minfo));
  sz = (size_t)os::current_stack_base() - (size_t)minfo.AllocationBase;
  return sz;
}
460
461
// Find the first contiguous committed sub-range inside [start, start+size).
// On success returns true and sets committed_start/committed_size to that
// sub-range (trimmed to the requested limit); returns false if no byte in
// the range is committed. Scanning stops at the first committed run — a
// later committed region after a gap is not reported.
bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
  MEMORY_BASIC_INFORMATION minfo;
  committed_start = NULL;
  committed_size = 0;
  address top = start + size;
  const address start_addr = start;  // remember original start for the sanity check
  while (start < top) {
    VirtualQuery(start, &minfo, sizeof(minfo));
    if ((minfo.State & MEM_COMMIT) == 0) { // not committed
      if (committed_start != NULL) {
        break;  // end of the first committed run — stop scanning
      }
    } else { // committed
      if (committed_start == NULL) {
        committed_start = start;  // run begins here
      }
      // Region may begin before 'start'; only count bytes from 'start' on.
      size_t offset = start - (address)minfo.BaseAddress;
      committed_size += minfo.RegionSize - offset;
    }
    start = (address)minfo.BaseAddress + minfo.RegionSize;  // next region
  }

  if (committed_start == NULL) {
    assert(committed_size == 0, "Sanity");
    return false;
  } else {
    assert(committed_start >= start_addr && committed_start < top, "Out of range");
    // current region may go beyond the limit, trim to the limit
    committed_size = MIN2(committed_size, size_t(top - committed_start));
    return true;
  }
}
493
494
// Convert 'clock' to broken-down local time, copying the CRT's internal
// buffer into caller-supplied 'res'. Returns 'res', or NULL on failure.
// (NOTE(review): relies on the CRT's localtime buffer being usable here —
// presumably per-thread on the MSVC CRT; confirm.)
struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* broken_down = localtime(clock);
  if (broken_down == NULL) {
    return NULL;
  }
  *res = *broken_down;
  return res;
}
502
503
// Convert 'clock' to broken-down UTC time, copying the CRT's internal
// buffer into caller-supplied 'res'. Returns 'res', or NULL on failure.
struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
  const struct tm* broken_down = gmtime(clock);
  if (broken_down == NULL) {
    return NULL;
  }
  *res = *broken_down;
  return res;
}
511
512
JNIEXPORT
513
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
514
515
// Thread start routine for all newly created threads
516
// Thread start routine for all newly created threads.
// Records stack bounds, sets up thread-local state, runs the thread body
// (under an SEH filter unless vectored exception handling is in use), and
// finally exits through exit_process_or_thread().
static unsigned __stdcall thread_native_entry(Thread* thread) {

  thread->record_stack_base_and_size();
  thread->initialize_thread_current();

  OSThread* osthr = thread->osthread();
  assert(osthr->get_state() == RUNNABLE, "invalid os thread state");

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Diagnostic code to investigate JDK-6573254
  int res = 30115; // non-java thread
  if (thread->is_Java_thread()) {
    res = 20115; // java thread
  }

  log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").", os::current_thread_id());

#ifdef USE_VECTORED_EXCEPTION_HANDLING
  // Any exception is caught by the Vectored Exception Handler, so VM can
  // generate error dump when an exception occurred in non-Java thread
  // (e.g. VM thread).
  thread->call_run();
#else
  // Install a win32 structured exception handler around every thread created
  // by VM, so VM can generate error dump when an exception occurred in non-
  // Java thread (e.g. VM thread).
  __try {
    thread->call_run();
  } __except(topLevelExceptionFilter(
                                     (_EXCEPTION_POINTERS*)_exception_info())) {
    // Nothing to do.
  }
#endif

  // Note: at this point the thread object may already have deleted itself.
  // Do not dereference it from here on out.

  log_info(os, thread)("Thread finished (tid: " UINTX_FORMAT ").", os::current_thread_id());

  // One less thread is executing
  // When the VMThread gets here, the main thread may have already exited
  // which frees the CodeHeap containing the Atomic::add code
  if (thread != VMThread::vm_thread() && VMThread::vm_thread() != NULL) {
    Atomic::dec(&os::win32::_os_thread_count);
  }

  // Thread must not return from exit_process_or_thread(), but if it does,
  // let it proceed to exit normally
  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
}
572
573
// Allocate and initialize an OSThread wrapping an already-existing Win32
// thread (used for attached and main threads). Returns NULL on allocation
// failure; on success the OSThread is in state INITIALIZED.
static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
                                  int thread_id) {
  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) return NULL;

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;  // roll back the allocation above
    return NULL;
  }
  osthread->set_interrupt_event(interrupt_event);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  return osthread;
}
606
607
608
// Attach the calling native thread to the VM as 'thread'.
// Duplicates the current thread's pseudo-handle into a real handle, wraps
// it in an OSThread in RUNNABLE state, and installs it on the JavaThread.
// Returns false only if the OSThread could not be created.
bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  HANDLE thread_h;
  // GetCurrentThread() is a pseudo-handle; duplicate it to get a handle
  // that remains valid when used from other threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), GetCurrentProcess(),
                       &thread_h, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  OSThread* osthread = create_os_thread(thread, thread_h,
                                        (int)current_thread_id());
  if (osthread == NULL) {
    return false;
  }

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  log_info(os, thread)("Thread attached (tid: " UINTX_FORMAT ").",
                       os::current_thread_id());

  return true;
}
633
634
// Wrap the primordial (main) thread in an OSThread and install it on
// 'thread'. The OSThread is cached in _starting_thread so a repeated call
// reuses it. Returns false if OSThread creation failed.
bool os::create_main_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif
  if (_starting_thread == NULL) {
    _starting_thread = create_os_thread(thread, main_thread, main_thread_id);
    if (_starting_thread == NULL) {
      return false;
    }
  }

  // The primordial thread is runnable from the start.
  _starting_thread->set_state(RUNNABLE);

  thread->set_osthread(_starting_thread);
  return true;
}
651
652
// Helper function to trace _beginthreadex attributes,
653
// similar to os::Posix::describe_pthread_attr()
654
// Helper function to trace _beginthreadex attributes,
// similar to os::Posix::describe_pthread_attr().
// Formats the requested stack size and creation flags into 'buf' and
// returns it (for use inline in a logging call).
static char* describe_beginthreadex_attributes(char* buf, size_t buflen,
                                               size_t stacksize, unsigned initflag) {
  stringStream ss(buf, buflen);
  if (stacksize == 0) {
    ss.print("stacksize: default, ");  // 0 means: use the PE-header default
  } else {
    ss.print("stacksize: " SIZE_FORMAT "k, ", stacksize / 1024);
  }
  ss.print("flags: ");
  // Print the name of each flag that is set in initflag.
  #define PRINT_FLAG(f) if (initflag & f) ss.print( #f " ");
  #define ALL(X) \
    X(CREATE_SUSPENDED) \
    X(STACK_SIZE_PARAM_IS_A_RESERVATION)
  ALL(PRINT_FLAG)
  #undef ALL
  #undef PRINT_FLAG
  return buf;
}
672
673
// Allocate and initialize a new OSThread
674
// Allocate and initialize a new OSThread, then create the underlying Win32
// thread (suspended) via _beginthreadex. On success the OSThread is left in
// state INITIALIZED and is resumed later, higher up the call chain.
// Returns false (with all partial state cleaned up) on any failure.
bool os::create_thread(Thread* thread, ThreadType thr_type,
                       size_t stack_size) {
  unsigned thread_id;

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // Initialize the JDK library's interrupt event.
  // This should really be done when OSThread is constructed,
  // but there is no way for a constructor to report failure to
  // allocate the event.
  HANDLE interrupt_event = CreateEvent(NULL, true, false, NULL);
  if (interrupt_event == NULL) {
    delete osthread;
    return false;
  }
  osthread->set_interrupt_event(interrupt_event);
  // We don't call set_interrupted(false) as it will trip the assert in there
  // as we are not operating on the current thread. We don't need to call it
  // because the initial state is already correct.

  thread->set_osthread(osthread);

  // Pick a default stack size by thread type when the caller did not
  // specify one.
  if (stack_size == 0) {
    switch (thr_type) {
    case os::java_thread:
      // Java threads use ThreadStackSize which default value can be changed with the flag -Xss
      if (JavaThread::stack_size_at_create() > 0) {
        stack_size = JavaThread::stack_size_at_create();
      }
      break;
    case os::compiler_thread:
      if (CompilerThreadStackSize > 0) {
        stack_size = (size_t)(CompilerThreadStackSize * K);
        break;
      } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
    case os::vm_thread:
    case os::pgc_thread:
    case os::cgc_thread:
    case os::asynclog_thread:
    case os::watcher_thread:
      if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
      break;
    }
  }

  // Create the Win32 thread
  //
  // Contrary to what MSDN document says, "stack_size" in _beginthreadex()
  // does not specify stack size. Instead, it specifies the size of
  // initially committed space. The stack size is determined by
  // PE header in the executable. If the committed "stack_size" is larger
  // than default value in the PE header, the stack is rounded up to the
  // nearest multiple of 1MB. For example if the launcher has default
  // stack size of 320k, specifying any size less than 320k does not
  // affect the actual stack size at all, it only affects the initial
  // commitment. On the other hand, specifying 'stack_size' larger than
  // default value may cause significant increase in memory usage, because
  // not only the stack space will be rounded up to MB, but also the
  // entire space is committed upfront.
  //
  // Finally Windows XP added a new flag 'STACK_SIZE_PARAM_IS_A_RESERVATION'
  // for CreateThread() that can treat 'stack_size' as stack size. However we
  // are not supposed to call CreateThread() directly according to MSDN
  // document because JVM uses C runtime library. The good news is that the
  // flag appears to work with _beginthredex() as well.

  const unsigned initflag = CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION;
  HANDLE thread_handle =
    (HANDLE)_beginthreadex(NULL,
                           (unsigned)stack_size,
                           (unsigned (__stdcall *)(void*)) thread_native_entry,
                           thread,
                           initflag,
                           &thread_id);

  char buf[64];
  if (thread_handle != NULL) {
    log_info(os, thread)("Thread started (tid: %u, attributes: %s)",
                         thread_id, describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
  } else {
    log_warning(os, thread)("Failed to start thread - _beginthreadex failed (%s) for attributes: %s.",
                            os::errno_name(errno), describe_beginthreadex_attributes(buf, sizeof(buf), stack_size, initflag));
    // Log some OS information which might explain why creating the thread failed.
    log_info(os, thread)("Number of threads approx. running in the VM: %d", Threads::number_of_threads());
    LogStream st(Log(os, thread)::info());
    os::print_memory_info(&st);
  }

  if (thread_handle == NULL) {
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  Atomic::inc(&os::win32::_os_thread_count);

  // Store info on the Win32 thread into the OSThread
  osthread->set_thread_handle(thread_handle);
  osthread->set_thread_id(thread_id);

  // Initial thread state is INITIALIZED, not SUSPENDED
  osthread->set_state(INITIALIZED);

  // The thread is returned suspended (in state INITIALIZED), and is started higher up in the call chain
  return true;
}
786
787
788
// Free Win32 resources related to the OSThread
789
// Free Win32 resources related to the OSThread: close its thread handle
// and delete the object. May only be called for the current thread.
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  // We are told to free resources of the argument thread,
  // but we can only really operate on the current thread.
  assert(Thread::current()->osthread() == osthread,
         "os::free_thread but not current thread");

  CloseHandle(osthread->thread_handle());
  delete osthread;
}
800
801
static jlong first_filetime;
802
static jlong initial_performance_count;
803
static jlong performance_frequency;
804
805
806
// Assemble a jlong from the two 32-bit halves of a LARGE_INTEGER.
jlong as_long(LARGE_INTEGER x) {
  jlong value = 0; // initialized to avoid an uninitialized-use warning
  set_high(&value, x.HighPart);
  set_low(&value, x.LowPart);
  return value;
}
812
813
814
// Performance-counter ticks elapsed since VM startup
// (relative to initial_performance_count sampled at initialization).
jlong os::elapsed_counter() {
  LARGE_INTEGER count;
  QueryPerformanceCounter(&count);
  return as_long(count) - initial_performance_count;
}
819
820
821
// Ticks per second of the elapsed counter (cached at initialization).
jlong os::elapsed_frequency() {
  return performance_frequency;
}
824
825
826
// Currently available physical memory, delegated to the win32 layer.
julong os::available_memory() {
  return win32::available_memory();
}
829
830
// Available physical memory in bytes.
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB.
julong os::win32::available_memory() {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);  // required before calling GlobalMemoryStatusEx
  GlobalMemoryStatusEx(&ms);

  return (julong)ms.ullAvailPhys;
}
839
840
// Total physical memory, delegated to the win32 layer.
julong os::physical_memory() {
  return win32::physical_memory();
}
843
844
// Report the amount of virtual address space available for allocation.
// Always returns true on Windows; on 32-bit the value is capped at 1400MB
// to stay clear of the 2GB user address-space wall.
bool os::has_allocatable_memory_limit(size_t* limit) {
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);  // required before calling GlobalMemoryStatusEx
  GlobalMemoryStatusEx(&ms);
#ifdef _LP64
  *limit = (size_t)ms.ullAvailVirtual;
  return true;
#else
  // Limit to 1400m because of the 2gb address space wall
  *limit = MIN2((size_t)1400*M, (size_t)ms.ullAvailVirtual);
  return true;
#endif
}
857
858
// Number of processors the VM may use. Honors an explicit user override
// (-XX:ActiveProcessorCount); otherwise counts the set bits in the process
// affinity mask, falling back to the raw processor count if the mask is
// unavailable or wider than a UINT_PTR can represent.
int os::active_processor_count() {
  // User has overridden the number of active processors
  if (ActiveProcessorCount > 0) {
    log_trace(os)("active_processor_count: "
                  "active processor count set by user : %d",
                  ActiveProcessorCount);
    return ActiveProcessorCount;
  }

  DWORD_PTR lpProcessAffinityMask = 0;
  DWORD_PTR lpSystemAffinityMask = 0;
  int proc_count = processor_count();
  if (proc_count <= sizeof(UINT_PTR) * BitsPerByte &&
      GetProcessAffinityMask(GetCurrentProcess(), &lpProcessAffinityMask, &lpSystemAffinityMask)) {
    // Nof active processors is number of bits in process affinity mask
    int bitcount = 0;
    // Kernighan's bit-count: clear the lowest set bit each iteration.
    while (lpProcessAffinityMask != 0) {
      lpProcessAffinityMask = lpProcessAffinityMask & (lpProcessAffinityMask-1);
      bitcount++;
    }
    return bitcount;
  } else {
    return proc_count;
  }
}
883
884
// The processor number the calling thread is currently running on.
uint os::processor_id() {
  return (uint)GetCurrentProcessorNumber();
}
887
888
// Set the current thread's name as seen by an attached debugger.
void os::set_native_thread_name(const char *name) {

  // See: http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx
  //
  // Note that unfortunately this only works if the process
  // is already attached to a debugger; debugger must observe
  // the exception below to show the correct name.

  // If there is no debugger attached skip raising the exception
  if (!IsDebuggerPresent()) {
    return;
  }

  // Magic exception code recognized by Visual Studio-family debuggers.
  const DWORD MS_VC_EXCEPTION = 0x406D1388;
  struct {
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user addr space)
    DWORD dwThreadID; // thread ID (-1=caller thread)
    DWORD dwFlags;    // reserved for future use, must be zero
  } info;

  info.dwType = 0x1000;
  info.szName = name;
  info.dwThreadID = -1;  // name the calling thread
  info.dwFlags = 0;

  // The debugger consumes the exception; the empty handler swallows it
  // when no debugger actually handled it.
  __try {
    RaiseException (MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(DWORD), (const ULONG_PTR*)&info );
  } __except(EXCEPTION_EXECUTE_HANDLER) {}
}
918
919
// Pin the current thread to 'processor_id'.
// Not yet implemented on Windows; always reports failure.
bool os::bind_to_processor(uint processor_id) {
  // Not yet implemented.
  return false;
}
923
924
// Sample and cache the performance-counter frequency and the counter value
// at VM startup; both are used by os::elapsed_counter()/elapsed_frequency().
void os::win32::initialize_performance_counter() {
  LARGE_INTEGER count;
  QueryPerformanceFrequency(&count);
  performance_frequency = as_long(count);
  QueryPerformanceCounter(&count);
  initial_performance_count = as_long(count);
}
931
932
933
// Seconds elapsed since VM startup, as a double.
double os::elapsedTime() {
  return (double) elapsed_counter() / (double) elapsed_frequency();
}
936
937
938
// Windows format:
939
// The FILETIME structure is a 64-bit value representing the number of 100-nanosecond intervals since January 1, 1601.
940
// Java format:
941
// Java standards require the number of milliseconds since 1/1/1970
942
943
// Constant offset - calculated using offset()
944
static jlong _offset = 116444736000000000;
945
// Fake time counter for reproducible results when debugging
946
static jlong fake_time = 0;
947
948
#ifdef ASSERT
// Just to be safe, recalculate the offset in debug mode:
// derive the FILETIME value of the 1970 Java epoch via the Windows API and
// assert that it matches the hard-coded _offset constant.
static jlong _calculated_offset = 0;
static int _has_calculated_offset = 0;

jlong offset() {
  if (_has_calculated_offset) return _calculated_offset;  // compute once, then cache
  SYSTEMTIME java_origin;
  java_origin.wYear          = 1970;
  java_origin.wMonth         = 1;
  java_origin.wDayOfWeek     = 0; // ignored
  java_origin.wDay           = 1;
  java_origin.wHour          = 0;
  java_origin.wMinute        = 0;
  java_origin.wSecond        = 0;
  java_origin.wMilliseconds  = 0;
  FILETIME jot;
  if (!SystemTimeToFileTime(&java_origin, &jot)) {
    fatal("Error = %d\nWindows error", GetLastError());
  }
  _calculated_offset = jlong_from(jot.dwHighDateTime, jot.dwLowDateTime);
  _has_calculated_offset = 1;
  assert(_calculated_offset == _offset, "Calculated and constant time offsets must be equal");
  return _calculated_offset;
}
#else
// Product builds just use the precomputed constant.
jlong offset() {
  return _offset;
}
#endif
978
979
// Converts a Windows FILETIME (100-ns intervals since 1601) to Java
// milliseconds since the 1970 epoch.
jlong windows_to_java_time(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset()) / 10000;
}

// Returns time ticks in (10th of micro seconds)
jlong windows_to_time_ticks(FILETIME wt) {
  jlong a = jlong_from(wt.dwHighDateTime, wt.dwLowDateTime);
  return (a - offset());
}

// Inverse of windows_to_java_time(): Java milliseconds since 1970 back to
// a Windows FILETIME.
FILETIME java_to_windows_time(jlong l) {
  jlong a = (l * 10000) + offset();
  FILETIME result;
  result.dwHighDateTime = high(a);
  result.dwLowDateTime  = low(a);
  return result;
}
997
998
bool os::supports_vtime() { return true; }

// Per-thread CPU time (kernel + user) in seconds; falls back to elapsed
// wall-clock time if GetThreadTimes fails.
double os::elapsedVTime() {
  FILETIME created;
  FILETIME exited;
  FILETIME kernel;
  FILETIME user;
  if (GetThreadTimes(GetCurrentThread(), &created, &exited, &kernel, &user) != 0) {
    // the resolution of windows_to_java_time() should be sufficient (ms)
    return (double) (windows_to_java_time(kernel) + windows_to_java_time(user)) / MILLIUNITS;
  } else {
    return elapsedTime();
  }
}
1012
1013
// Current wall-clock time in milliseconds since the Java epoch (1970).
jlong os::javaTimeMillis() {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  return windows_to_java_time(wt);
}

// Current wall-clock time split into whole seconds and the nanosecond
// remainder since the Java epoch (resolution limited to 100 ns by FILETIME).
void os::javaTimeSystemUTC(jlong &seconds, jlong &nanos) {
  FILETIME wt;
  GetSystemTimeAsFileTime(&wt);
  jlong ticks = windows_to_time_ticks(wt); // 10th of micros
  jlong secs = jlong(ticks / 10000000); // 10000 * 1000
  seconds = secs;
  nanos = jlong(ticks - (secs*10000000)) * 100;
}

// Monotonic time in nanoseconds, derived from the performance counter.
// NOTE(review): the double intermediate can lose precision for very large
// counter values -- confirm this matches upstream expectations.
jlong os::javaTimeNanos() {
  LARGE_INTEGER current_count;
  QueryPerformanceCounter(&current_count);
  double current = as_long(current_count);
  double freq = performance_frequency;
  jlong time = (jlong)((current/freq) * NANOSECS_PER_SEC);
  return time;
}
1036
1037
// Describes javaTimeNanos() for JVMTI: maximum value before wrap,
// monotonicity guarantees, and timer kind.
void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  jlong freq = performance_frequency;
  if (freq < NANOSECS_PER_SEC) {
    // the performance counter is 64 bits and we will
    // be multiplying it -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  } else if (freq > NANOSECS_PER_SEC) {
    // use the max value the counter can reach to
    // determine the max value which could be returned
    julong max_counter = (julong)ALL_64_BITS;
    info_ptr->max_value = (jlong)(max_counter / (freq / NANOSECS_PER_SEC));
  } else {
    // the performance counter is 64 bits and we will
    // be using it directly -- so no wrap in 64 bits
    info_ptr->max_value = ALL_64_BITS;
  }

  // using a counter, so no skipping
  info_ptr->may_skip_backward = false;
  info_ptr->may_skip_forward = false;

  info_ptr->kind = JVMTI_TIMER_ELAPSED; // elapsed not CPU time
}
1060
1061
// Formats the current local time as "YYYY-MM-DD HH:MM:SS" into buf and
// returns buf.
char* os::local_time_string(char *buf, size_t buflen) {
  SYSTEMTIME st;
  GetLocalTime(&st);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               st.wYear, st.wMonth, st.wDay, st.wHour, st.wMinute, st.wSecond);
  return buf;
}
1068
1069
// Fills in the process real (wall-clock), user CPU, and system CPU times,
// all in seconds. Returns false if GetProcessTimes fails.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  HANDLE h_process = GetCurrentProcess();
  FILETIME create_time, exit_time, kernel_time, user_time;
  BOOL result = GetProcessTimes(h_process,
                                &create_time,
                                &exit_time,
                                &kernel_time,
                                &user_time);
  if (result != 0) {
    FILETIME wt;
    GetSystemTimeAsFileTime(&wt);
    jlong rtc_millis = windows_to_java_time(wt);
    *process_real_time = ((double) rtc_millis) / ((double) MILLIUNITS);
    // kernel/user times are durations in 100-ns FILETIME units; divide by
    // 10 * MICROUNITS (= 1e7) to get seconds.
    *process_user_time =
      (double) jlong_from(user_time.dwHighDateTime, user_time.dwLowDateTime) / (10 * MICROUNITS);
    *process_system_time =
      (double) jlong_from(kernel_time.dwHighDateTime, kernel_time.dwLowDateTime) / (10 * MICROUNITS);
    return true;
  } else {
    return false;
  }
}
1093
1094
// Orderly VM shutdown: clean up PerfMemory, flush log streams, then run
// the user-registered abort hook, if any.
void os::shutdown() {
  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}
1107
1108
1109
// Handle of the minidump file pre-created by check_dump_limit();
// written by os::abort() at crash time.
static HANDLE dumpFile = NULL;

// Check if dump file can be created.
void os::check_dump_limit(char* buffer, size_t buffsz) {
  bool status = true;
  if (!FLAG_IS_DEFAULT(CreateCoredumpOnCrash) && !CreateCoredumpOnCrash) {
    jio_snprintf(buffer, buffsz, "CreateCoredumpOnCrash is disabled from command line");
    status = false;
  }

#ifndef ASSERT
  if (!os::win32::is_windows_server() && FLAG_IS_DEFAULT(CreateCoredumpOnCrash)) {
    jio_snprintf(buffer, buffsz, "Minidumps are not enabled by default on client versions of Windows");
    status = false;
  }
#endif

  if (status) {
    // The dump goes into the current working directory when it can be
    // determined, otherwise into ".".
    const char* cwd = get_current_directory(NULL, 0);
    int pid = current_process_id();
    if (cwd != NULL) {
      jio_snprintf(buffer, buffsz, "%s\\hs_err_pid%u.mdmp", cwd, pid);
    } else {
      jio_snprintf(buffer, buffsz, ".\\hs_err_pid%u.mdmp", pid);
    }

    // Create the file eagerly so a crash-time failure is detected up front.
    if (dumpFile == NULL &&
        (dumpFile = CreateFile(buffer, GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL))
        == INVALID_HANDLE_VALUE) {
      jio_snprintf(buffer, buffsz, "Failed to create minidump file (0x%x).", GetLastError());
      status = false;
    }
  }
  VMError::record_coredump_status(buffer, status);
}
1144
1145
// Terminates the process, optionally writing a minidump first (into the
// file pre-created by check_dump_limit()). Never returns.
void os::abort(bool dump_core, void* siginfo, const void* context) {
  EXCEPTION_POINTERS ep;
  MINIDUMP_EXCEPTION_INFORMATION mei;
  MINIDUMP_EXCEPTION_INFORMATION* pmei;

  HANDLE hProcess = GetCurrentProcess();
  DWORD processId = GetCurrentProcessId();
  MINIDUMP_TYPE dumpType;

  shutdown();
  // No dump requested, or no dump file was pre-created: just exit.
  if (!dump_core || dumpFile == NULL) {
    if (dumpFile != NULL) {
      CloseHandle(dumpFile);
    }
    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
  }

  dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
      MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);

  // Include the faulting exception context in the dump when available.
  if (siginfo != NULL && context != NULL) {
    ep.ContextRecord = (PCONTEXT) context;
    ep.ExceptionRecord = (PEXCEPTION_RECORD) siginfo;

    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &ep;
    pmei = &mei;
  } else {
    pmei = NULL;
  }

  // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
  // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
    jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
  }
  CloseHandle(dumpFile);
  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
}
1185
1186
// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
}

// Shared-library file extension on Windows.
const char* os::dll_file_extension() { return ".dll"; }

// Unloads a library previously loaded via os::dll_load().
void os::dll_unload(void *lib) {
  ::FreeLibrary((HMODULE)lib);
}

// Looks up an exported symbol in a loaded library; NULL if not found.
void* os::dll_lookup(void *lib, const char *name) {
  return (void*)::GetProcAddress((HMODULE)lib, name);
}
1200
1201
// Directory routines copied from src/win32/native/java/io/dirent_md.c
// * dirent_md.c       1.15 00/02/02
//
// The declarations for DIR and struct dirent are in jvm_win32.h.

// Caller must have already run dirname through JVM_NativePath, which removes
// duplicate slashes and converts all instances of '/' into '\\'.

// Opens a directory for iteration with os::readdir(). Returns 0 and sets
// errno on failure; the caller must release the result with os::closedir().
DIR * os::opendir(const char *dirname) {
  assert(dirname != NULL, "just checking");   // hotspot change
  DIR *dirp = (DIR *)malloc(sizeof(DIR), mtInternal);
  DWORD fattr;                                // hotspot change
  char alt_dirname[4] = { 0, 0, 0, 0 };

  if (dirp == 0) {
    errno = ENOMEM;
    return 0;
  }

  // Win32 accepts "\" in its POSIX stat(), but refuses to treat it
  // as a directory in FindFirstFile().  We detect this case here and
  // prepend the current drive name.
  //
  if (dirname[1] == '\0' && dirname[0] == '\\') {
    alt_dirname[0] = _getdrive() + 'A' - 1;
    alt_dirname[1] = ':';
    alt_dirname[2] = '\\';
    alt_dirname[3] = '\0';
    dirname = alt_dirname;
  }

  // +5 leaves room for the longest suffix appended below ("\\*.*" + NUL).
  dirp->path = (char *)malloc(strlen(dirname) + 5, mtInternal);
  if (dirp->path == 0) {
    free(dirp);
    errno = ENOMEM;
    return 0;
  }
  strcpy(dirp->path, dirname);

  // 0xffffffff is INVALID_FILE_ATTRIBUTES, i.e. the call failed.
  fattr = GetFileAttributes(dirp->path);
  if (fattr == 0xffffffff) {
    free(dirp->path);
    free(dirp);
    errno = ENOENT;
    return 0;
  } else if ((fattr & FILE_ATTRIBUTE_DIRECTORY) == 0) {
    free(dirp->path);
    free(dirp);
    errno = ENOTDIR;
    return 0;
  }

  // Append "*.*", or possibly "\\*.*", to path
  if (dirp->path[1] == ':' &&
      (dirp->path[2] == '\0' ||
      (dirp->path[2] == '\\' && dirp->path[3] == '\0'))) {
    // No '\\' needed for cases like "Z:" or "Z:\"
    strcat(dirp->path, "*.*");
  } else {
    strcat(dirp->path, "\\*.*");
  }

  // ERROR_FILE_NOT_FOUND here just means the directory is empty; leave the
  // handle invalid so readdir() returns NULL immediately.
  dirp->handle = FindFirstFile(dirp->path, &dirp->find_data);
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    if (GetLastError() != ERROR_FILE_NOT_FOUND) {
      free(dirp->path);
      free(dirp);
      errno = EACCES;
      return 0;
    }
  }
  return dirp;
}
1274
1275
// Returns the next directory entry, or NULL when the directory is
// exhausted. The returned pointer aliases storage inside dirp and is
// overwritten by the next call.
struct dirent * os::readdir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle == INVALID_HANDLE_VALUE) {
    return NULL;
  }

  strcpy(dirp->dirent.d_name, dirp->find_data.cFileName);

  // Pre-fetch the next entry; ERROR_INVALID_HANDLE means the caller gave
  // us a stale DIR. Any other failure marks end-of-iteration.
  if (!FindNextFile(dirp->handle, &dirp->find_data)) {
    if (GetLastError() == ERROR_INVALID_HANDLE) {
      errno = EBADF;
      return NULL;
    }
    FindClose(dirp->handle);
    dirp->handle = INVALID_HANDLE_VALUE;
  }

  return &dirp->dirent;
}
1294
1295
// Closes a directory opened with os::opendir() and frees its resources.
// Returns 0 on success, or -1 with errno = EBADF if FindClose fails.
int os::closedir(DIR *dirp) {
  assert(dirp != NULL, "just checking");      // hotspot change
  if (dirp->handle != INVALID_HANDLE_VALUE) {
    if (!FindClose(dirp->handle)) {
      errno = EBADF;
      return -1;
    }
    dirp->handle = INVALID_HANDLE_VALUE;
  }
  free(dirp->path);
  free(dirp);
  return 0;
}
1308
1309
// This must be hard coded because it's the system's temporary
// directory not the java application's temp directory, ala java.io.tmpdir.
// NOTE: returns a pointer to a shared static buffer.
const char* os::get_temp_directory() {
  static char path_buf[MAX_PATH];
  if (GetTempPath(MAX_PATH, path_buf) > 0) {
    return path_buf;
  } else {
    // On failure return an empty string rather than NULL.
    path_buf[0] = '\0';
    return path_buf;
  }
}

// Needs to be in os specific directory because windows requires another
// header file <direct.h>
const char* os::get_current_directory(char *buf, size_t buflen) {
  // _getcwd takes an int; clamp oversized buffer lengths.
  int n = static_cast<int>(buflen);
  if (buflen > INT_MAX)  n = INT_MAX;
  return _getcwd(buf, n);
}
1328
1329
//-----------------------------------------------------------
// Helper functions for fatal error handler
#ifdef _WIN64
// Helper routine which returns true if address in
// within the NTDLL address space.
//
static bool _addr_in_ntdll(address addr) {
  HMODULE hmod;
  MODULEINFO minfo;

  hmod = GetModuleHandle("NTDLL.DLL");
  if (hmod == NULL) return false;
  if (!GetModuleInformation(GetCurrentProcess(), hmod,
                            &minfo, sizeof(MODULEINFO))) {
    return false;
  }

  // In range [base, base + SizeOfImage)?
  if ((addr >= minfo.lpBaseOfDll) &&
      (addr < (address)((uintptr_t)minfo.lpBaseOfDll + (uintptr_t)minfo.SizeOfImage))) {
    return true;
  } else {
    return false;
  }
}
#endif
1354
1355
// Parameter/result block for _locate_module_by_addr.
struct _modinfo {
  address addr;      // address to look up (in)
  char* full_path;   // point to a char buffer (out)
  int buflen;        // size of the buffer
  address base_addr; // base address of the containing module (out)
};

// get_loaded_modules_info() callback: returns 1 (stop iteration) when the
// module containing pmod->addr is found, filling in its path and base.
static int _locate_module_by_addr(const char * mod_fname, address base_addr,
                                  address top_address, void * param) {
  struct _modinfo *pmod = (struct _modinfo *)param;
  if (!pmod) return -1;

  if (base_addr <= pmod->addr &&
      top_address > pmod->addr) {
    // if a buffer is provided, copy path name to the buffer
    if (pmod->full_path) {
      jio_snprintf(pmod->full_path, pmod->buflen, "%s", mod_fname);
    }
    pmod->base_addr = base_addr;
    return 1;
  }
  return 0;
}
1378
1379
// Finds the loaded library containing addr. On success copies the library
// path into buf and the offset of addr from the library base into *offset
// (when offset is non-NULL); on failure clears buf and sets *offset to -1.
bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  // NOTE: the reason we don't use SymGetModuleInfo() is it doesn't always
  // return the full path to the DLL file, sometimes it returns path
  // to the corresponding PDB file (debug info); sometimes it only
  // returns partial path, which makes life painful.

  struct _modinfo mi;
  mi.addr      = addr;
  mi.full_path = buf;
  mi.buflen    = buflen;
  if (get_loaded_modules_info(_locate_module_by_addr, (void *)&mi)) {
    // buf already contains path name
    if (offset) *offset = addr - mi.base_addr;
    return true;
  }

  buf[0] = '\0';
  if (offset) *offset = -1;
  return false;
}
1403
1404
// Resolves addr to a function name via the symbol decoder; on failure
// clears buf and sets *offset to -1.
bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset,
                                      bool demangle) {
  // buf is not optional, but offset is optional
  assert(buf != NULL, "sanity check");

  if (Decoder::decode(addr, buf, buflen, offset, demangle)) {
    return true;
  }
  if (offset != NULL)  *offset  = -1;
  buf[0] = '\0';
  return false;
}
1417
1418
// save the start and end address of jvm.dll into param[0] and param[1]
static int _locate_jvm_dll(const char* mod_fname, address base_addr,
                           address top_address, void * param) {
  if (!param) return -1;

  // This function itself lives in jvm.dll, so the module whose range
  // contains its address is jvm.dll.
  if (base_addr <= (address)_locate_jvm_dll &&
      top_address > (address)_locate_jvm_dll) {
    ((address*)param)[0] = base_addr;
    ((address*)param)[1] = top_address;
    return 1;
  }
  return 0;
}

address vm_lib_location[2];    // start and end address of jvm.dll

// check if addr is inside jvm.dll
bool os::address_is_in_vm(address addr) {
  // Lazily locate jvm.dll on first call.
  if (!vm_lib_location[0] || !vm_lib_location[1]) {
    if (!get_loaded_modules_info(_locate_jvm_dll, (void *)vm_lib_location)) {
      assert(false, "Can't find jvm module.");
      return false;
    }
  }

  return (vm_lib_location[0] <= addr) && (addr < vm_lib_location[1]);
}
1445
1446
// print module info; param is outputStream*
static int _print_module(const char* fname, address base_address,
                         address top_address, void* param) {
  if (!param) return -1;

  outputStream* st = (outputStream*)param;

  st->print(PTR_FORMAT " - " PTR_FORMAT " \t%s\n", base_address, top_address, fname);
  return 0;  // keep iterating over all modules
}
1456
1457
// Loads .dll/.so and
// in case of error it checks if .dll/.so was built for the
// same architecture as Hotspot is running on
// On failure returns NULL and writes a diagnostic message into ebuf.
void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
  log_info(os)("attempting shared library load of %s", name);

  void * result = LoadLibrary(name);
  if (result != NULL) {
    Events::log(NULL, "Loaded shared library %s", name);
    // Recalculate pdb search path if a DLL was loaded successfully.
    SymbolEngine::recalc_search_path();
    log_info(os)("shared library load of %s was successful", name);
    return result;
  }
  DWORD errcode = GetLastError();
  // Read system error message into ebuf
  // It may or may not be overwritten below (in the for loop and just above)
  lasterror(ebuf, (size_t) ebuflen);
  ebuf[ebuflen - 1] = '\0';
  Events::log(NULL, "Loading shared library %s failed, error code %lu", name, errcode);
  log_info(os)("shared library load of %s failed, error code %lu", name, errcode);

  if (errcode == ERROR_MOD_NOT_FOUND) {
    strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
    ebuf[ebuflen - 1] = '\0';
    return NULL;
  }

  // Parsing dll below
  // If we can read dll-info and find that dll was built
  // for an architecture other than Hotspot is running in
  // - then print to buffer "DLL was built for a different architecture"
  // else call os::lasterror to obtain system error message
  int fd = ::open(name, O_RDONLY | O_BINARY, 0);
  if (fd < 0) {
    return NULL;
  }

  uint32_t signature_offset;
  uint16_t lib_arch = 0;
  // Walk the PE header: MZ header at 0x3c points at the "PE\0\0" signature,
  // which is immediately followed by the COFF header containing the
  // Machine (architecture) field.
  bool failed_to_get_lib_arch =
    ( // Go to position 3c in the dll
     (os::seek_to_file_offset(fd, IMAGE_FILE_PTR_TO_SIGNATURE) < 0)
     ||
     // Read location of signature
     (sizeof(signature_offset) !=
     (os::read(fd, (void*)&signature_offset, sizeof(signature_offset))))
     ||
     // Go to COFF File Header in dll
     // that is located after "signature" (4 bytes long)
     (os::seek_to_file_offset(fd,
     signature_offset + IMAGE_FILE_SIGNATURE_LENGTH) < 0)
     ||
     // Read field that contains code of architecture
     // that dll was built for
     (sizeof(lib_arch) != (os::read(fd, (void*)&lib_arch, sizeof(lib_arch))))
    );

  ::close(fd);
  if (failed_to_get_lib_arch) {
    // file i/o error - report os::lasterror(...) msg
    return NULL;
  }

  typedef struct {
    uint16_t arch_code;  // IMAGE_FILE_MACHINE_* value from the COFF header
    char* arch_name;     // human-readable name for error messages
  } arch_t;

  static const arch_t arch_array[] = {
    {IMAGE_FILE_MACHINE_I386,      (char*)"IA 32"},
    {IMAGE_FILE_MACHINE_AMD64,     (char*)"AMD 64"},
    {IMAGE_FILE_MACHINE_ARM64,     (char*)"ARM 64"}
  };
#if (defined _M_ARM64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_ARM64;
#elif (defined _M_AMD64)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_AMD64;
#elif (defined _M_IX86)
  static const uint16_t running_arch = IMAGE_FILE_MACHINE_I386;
#else
  #error Method os::dll_load requires that one of following \
         is defined :_M_AMD64 or _M_IX86 or _M_ARM64
#endif


  // Obtain a string for printf operation
  // lib_arch_str shall contain string what platform this .dll was built for
  // running_arch_str shall string contain what platform Hotspot was built for
  char *running_arch_str = NULL, *lib_arch_str = NULL;
  for (unsigned int i = 0; i < ARRAY_SIZE(arch_array); i++) {
    if (lib_arch == arch_array[i].arch_code) {
      lib_arch_str = arch_array[i].arch_name;
    }
    if (running_arch == arch_array[i].arch_code) {
      running_arch_str = arch_array[i].arch_name;
    }
  }

  assert(running_arch_str,
         "Didn't find running architecture code in arch_array");

  // If the architecture is right
  // but some other error took place - report os::lasterror(...) msg
  if (lib_arch == running_arch) {
    return NULL;
  }

  if (lib_arch_str != NULL) {
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load %s-bit .dll on a %s-bit platform",
                lib_arch_str, running_arch_str);
  } else {
    // don't know what architecture this dll was build for
    ::_snprintf(ebuf, ebuflen - 1,
                "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
                lib_arch, running_arch_str);
  }

  return NULL;
}
1578
1579
// Prints the list of loaded modules ("base - top  path") to st.
void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  get_loaded_modules_info(_print_module, (void *)st);
}
1583
1584
// Enumerates the modules loaded in the current process and invokes
// callback(path, base, top, param) for each. Stops early when the callback
// returns non-zero and returns that value; returns 0 otherwise.
int os::get_loaded_modules_info(os::LoadedModulesCallbackFunc callback, void *param) {
  HANDLE   hProcess;

# define MAX_NUM_MODULES 128
  HMODULE     modules[MAX_NUM_MODULES];
  // NOTE(review): shared static buffer; not safe for concurrent callers.
  static char filename[MAX_PATH];
  int         result = 0;

  int pid = os::current_process_id();
  hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ,
                         FALSE, pid);
  if (hProcess == NULL) return 0;

  DWORD size_needed;
  if (!EnumProcessModules(hProcess, modules, sizeof(modules), &size_needed)) {
    CloseHandle(hProcess);
    return 0;
  }

  // number of modules that are currently loaded
  int num_modules = size_needed / sizeof(HMODULE);

  // Cap at MAX_NUM_MODULES: modules beyond the fixed array are ignored.
  for (int i = 0; i < MIN2(num_modules, MAX_NUM_MODULES); i++) {
    // Get Full pathname:
    if (!GetModuleFileNameEx(hProcess, modules[i], filename, sizeof(filename))) {
      filename[0] = '\0';
    }

    MODULEINFO modinfo;
    if (!GetModuleInformation(hProcess, modules[i], &modinfo, sizeof(modinfo))) {
      modinfo.lpBaseOfDll = NULL;
      modinfo.SizeOfImage = 0;
    }

    // Invoke callback function
    result = callback(filename, (address)modinfo.lpBaseOfDll,
                      (address)((u8)modinfo.lpBaseOfDll + (u8)modinfo.SizeOfImage), param);
    if (result) break;
  }

  CloseHandle(hProcess);
  return result;
}
1627
1628
// Writes the DNS host name of this machine into buf; returns false on failure.
bool os::get_host_name(char* buf, size_t buflen) {
  DWORD size = (DWORD)buflen;
  return (GetComputerNameEx(ComputerNameDnsHostname, buf, &size) == TRUE);
}

// One-line OS summary (the Windows version string) without a trailing newline.
void os::get_summary_os_info(char* buf, size_t buflen) {
  stringStream sst(buf, buflen);
  os::win32::print_windows_version(&sst);
  // chop off newline character
  char* nl = strchr(buf, '\n');
  if (nl != NULL) *nl = '\0';
}
1640
1641
// C99-style vsnprintf wrapper: always NUL-terminates (when len > 0) and
// returns the would-be output length, papering over the non-conforming
// behavior of _vsnprintf on pre-VS2015 toolchains.
int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
#if _MSC_VER >= 1900
  // Starting with Visual Studio 2015, vsnprint is C99 compliant.
  int result = ::vsnprintf(buf, len, fmt, args);
  // If an encoding error occurred (result < 0) then it's not clear
  // whether the buffer is NUL terminated, so ensure it is.
  if ((result < 0) && (len > 0)) {
    buf[len - 1] = '\0';
  }
  return result;
#else
  // Before Visual Studio 2015, vsnprintf is not C99 compliant, so use
  // _vsnprintf, whose behavior seems to be *mostly* consistent across
  // versions.  However, when len == 0, avoid _vsnprintf too, and just
  // go straight to _vscprintf.  The output is going to be truncated in
  // that case, except in the unusual case of empty output.  More
  // importantly, the documentation for various versions of Visual Studio
  // are inconsistent about the behavior of _vsnprintf when len == 0,
  // including it possibly being an error.
  int result = -1;
  if (len > 0) {
    result = _vsnprintf(buf, len, fmt, args);
    // If output (including NUL terminator) is truncated, the buffer
    // won't be NUL terminated.  Add the trailing NUL specified by C99.
    if ((result < 0) || ((size_t)result >= len)) {
      buf[len - 1] = '\0';
    }
  }
  if (result < 0) {
    result = _vscprintf(fmt, args);
  }
  return result;
#endif // _MSC_VER dispatch
}
1675
1676
// Returns the last-modification time of filename; asserts (debug builds)
// that the underlying stat() call succeeds.
static inline time_t get_mtime(const char* filename) {
  struct stat st;
  int ret = os::stat(filename, &st);
  assert(ret == 0, "failed to stat() file '%s': %s", filename, os::strerror(errno));
  return st.st_mtime;
}
1682
1683
int os::compare_file_modified_times(const char* file1, const char* file2) {
1684
time_t t1 = get_mtime(file1);
1685
time_t t2 = get_mtime(file2);
1686
return t1 - t2;
1687
}
1688
1689
// Brief OS info is identical to the full report on Windows.
void os::print_os_info_brief(outputStream* st) {
  os::print_os_info(st);
}

// Prints system uptime (days/hours/minutes) derived from GetTickCount64
// (milliseconds since boot, converted to seconds for print_dhm).
void os::win32::print_uptime_info(outputStream* st) {
  unsigned long long ticks = GetTickCount64();
  os::print_dhm(st, "OS uptime:", ticks/1000);
}
1697
1698
// Prints OS details to st: host name (debug builds only), the Windows
// version, system uptime, and virtualization information.
void os::print_os_info(outputStream* st) {
#ifdef ASSERT
  char buffer[1024];
  st->print("HostName: ");
  if (get_host_name(buffer, sizeof(buffer))) {
    st->print_cr(buffer);
  } else {
    st->print_cr("N/A");
  }
#endif
  st->print_cr("OS:");
  os::win32::print_windows_version(st);

  os::win32::print_uptime_info(st);

  VM_Version::print_platform_virtualization_info(st);
}
1715
1716
// Determines and prints the Windows version. GetVersionEx cannot be
// trusted from Windows 8.1 onwards, so the product version embedded in
// kernel32.dll is used as the source of truth; GetVersionEx is consulted
// only to distinguish workstation from server editions.
void os::win32::print_windows_version(outputStream* st) {
  OSVERSIONINFOEX osvi;
  VS_FIXEDFILEINFO *file_info;
  TCHAR kernel32_path[MAX_PATH];
  UINT len, ret;

  // Use the GetVersionEx information to see if we're on a server or
  // workstation edition of Windows. Starting with Windows 8.1 we can't
  // trust the OS version information returned by this API.
  ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
  osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
  if (!GetVersionEx((OSVERSIONINFO *)&osvi)) {
    st->print_cr("Call to GetVersionEx failed");
    return;
  }
  bool is_workstation = (osvi.wProductType == VER_NT_WORKSTATION);

  // Get the full path to \Windows\System32\kernel32.dll and use that for
  // determining what version of Windows we're running on.
  len = MAX_PATH - (UINT)strlen("\\kernel32.dll") - 1;
  ret = GetSystemDirectory(kernel32_path, len);
  if (ret == 0 || ret > len) {
    st->print_cr("Call to GetSystemDirectory failed");
    return;
  }
  strncat(kernel32_path, "\\kernel32.dll", MAX_PATH - ret);

  DWORD version_size = GetFileVersionInfoSize(kernel32_path, NULL);
  if (version_size == 0) {
    st->print_cr("Call to GetFileVersionInfoSize failed");
    return;
  }

  LPTSTR version_info = (LPTSTR)os::malloc(version_size, mtInternal);
  if (version_info == NULL) {
    st->print_cr("Failed to allocate version_info");
    return;
  }

  if (!GetFileVersionInfo(kernel32_path, NULL, version_size, version_info)) {
    os::free(version_info);
    st->print_cr("Call to GetFileVersionInfo failed");
    return;
  }

  // Query the root block for the fixed file info (product version fields).
  if (!VerQueryValue(version_info, TEXT("\\"), (LPVOID*)&file_info, &len)) {
    os::free(version_info);
    st->print_cr("Call to VerQueryValue failed");
    return;
  }

  int major_version = HIWORD(file_info->dwProductVersionMS);
  int minor_version = LOWORD(file_info->dwProductVersionMS);
  int build_number = HIWORD(file_info->dwProductVersionLS);
  int build_minor = LOWORD(file_info->dwProductVersionLS);
  int os_vers = major_version * 1000 + minor_version;
  os::free(version_info);

  st->print(" Windows ");
  switch (os_vers) {

  case 6000:
    if (is_workstation) {
      st->print("Vista");
    } else {
      st->print("Server 2008");
    }
    break;

  case 6001:
    if (is_workstation) {
      st->print("7");
    } else {
      st->print("Server 2008 R2");
    }
    break;

  case 6002:
    if (is_workstation) {
      st->print("8");
    } else {
      st->print("Server 2012");
    }
    break;

  case 6003:
    if (is_workstation) {
      st->print("8.1");
    } else {
      st->print("Server 2012 R2");
    }
    break;

  case 10000:
    if (is_workstation) {
      st->print("10");
    } else {
      // distinguish Windows Server 2016 and 2019 by build number
      // Windows server 2019 GA 10/2018 build number is 17763
      if (build_number > 17762) {
        st->print("Server 2019");
      } else {
        st->print("Server 2016");
      }
    }
    break;

  default:
    // Unrecognized windows, print out its major and minor versions
    st->print("%d.%d", major_version, minor_version);
    break;
  }

  // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
  // find out whether we are running on 64 bit processor or not
  SYSTEM_INFO si;
  ZeroMemory(&si, sizeof(SYSTEM_INFO));
  GetNativeSystemInfo(&si);
  if ((si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) ||
      (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_ARM64)) {
    st->print(" , 64 bit");
  }

  st->print(" Build %d", build_number);
  st->print(" (%d.%d.%d.%d)", major_version, minor_version, build_number, build_minor);
  st->cr();
}
1843
1844
// No Windows-specific extended CPU info to print.
void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
  // Nothing to do for now.
}
1847
1848
void os::get_summary_cpu_info(char* buf, size_t buflen) {
1849
HKEY key;
1850
DWORD status = RegOpenKey(HKEY_LOCAL_MACHINE,
1851
"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", &key);
1852
if (status == ERROR_SUCCESS) {
1853
DWORD size = (DWORD)buflen;
1854
status = RegQueryValueEx(key, "ProcessorNameString", NULL, NULL, (byte*)buf, &size);
1855
if (status != ERROR_SUCCESS) {
1856
strncpy(buf, "## __CPU__", buflen);
1857
}
1858
RegCloseKey(key);
1859
} else {
1860
// Put generic cpu info to return
1861
strncpy(buf, "## __CPU__", buflen);
1862
}
1863
}
1864
1865
// Prints system-wide and per-process memory statistics: page size,
// physical memory, page file, (32-bit only) virtual address space,
// working set, and commit charge.
void os::print_memory_info(outputStream* st) {
  st->print("Memory:");
  st->print(" %dk page", os::vm_page_size()>>10);

  // Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
  // value if total memory is larger than 4GB
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  int r1 = GlobalMemoryStatusEx(&ms);

  if (r1 != 0) {
    st->print(", system-wide physical " INT64_FORMAT "M ",
              (int64_t) ms.ullTotalPhys >> 20);
    st->print("(" INT64_FORMAT "M free)\n", (int64_t) ms.ullAvailPhys >> 20);

    st->print("TotalPageFile size " INT64_FORMAT "M ",
              (int64_t) ms.ullTotalPageFile >> 20);
    st->print("(AvailPageFile size " INT64_FORMAT "M)",
              (int64_t) ms.ullAvailPageFile >> 20);

    // on 32bit Total/AvailVirtual are interesting (show us how close we get to 2-4 GB per process borders)
#if defined(_M_IX86)
    st->print(", user-mode portion of virtual address-space " INT64_FORMAT "M ",
              (int64_t) ms.ullTotalVirtual >> 20);
    st->print("(" INT64_FORMAT "M free)", (int64_t) ms.ullAvailVirtual >> 20);
#endif
  } else {
    st->print(", GlobalMemoryStatusEx did not succeed so we miss some memory values.");
  }

  // extended memory statistics for a process
  PROCESS_MEMORY_COUNTERS_EX pmex;
  ZeroMemory(&pmex, sizeof(PROCESS_MEMORY_COUNTERS_EX));
  pmex.cb = sizeof(pmex);
  int r2 = GetProcessMemoryInfo(GetCurrentProcess(), (PROCESS_MEMORY_COUNTERS*) &pmex, sizeof(pmex));

  if (r2 != 0) {
    st->print("\ncurrent process WorkingSet (physical memory assigned to process): " INT64_FORMAT "M, ",
              (int64_t) pmex.WorkingSetSize >> 20);
    st->print("peak: " INT64_FORMAT "M\n", (int64_t) pmex.PeakWorkingSetSize >> 20);

    st->print("current process commit charge (\"private bytes\"): " INT64_FORMAT "M, ",
              (int64_t) pmex.PrivateUsage >> 20);
    st->print("peak: " INT64_FORMAT "M", (int64_t) pmex.PeakPagefileUsage >> 20);
  } else {
    st->print("\nGetProcessMemoryInfo did not succeed so we miss some memory values.");
  }

  st->cr();
}
1915
1916
// Windows has no kill(); never reports a signal as kill-sent.
bool os::signal_sent_by_kill(const void* siginfo) {
  // TODO: Is this possible?
  return false;
}
1920
1921
// Pretty-print a Windows EXCEPTION_RECORD (passed as opaque siginfo) to st.
void os::print_siginfo(outputStream *st, const void* siginfo) {
  const EXCEPTION_RECORD* const er = (EXCEPTION_RECORD*)siginfo;
  st->print("siginfo:");

  char tmp[64];
  if (os::exception_name(er->ExceptionCode, tmp, sizeof(tmp)) == NULL) {
    // Code not in our exceptlabels table; print a placeholder name.
    strcpy(tmp, "EXCEPTION_??");
  }
  st->print(" %s (0x%x)", tmp, er->ExceptionCode);

  if ((er->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
       er->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) &&
       er->NumberParameters >= 2) {
    // For access violations / in-page errors, ExceptionInformation[0]
    // encodes the access kind and ExceptionInformation[1] the faulting
    // address.
    switch (er->ExceptionInformation[0]) {
    case 0: st->print(", reading address"); break;
    case 1: st->print(", writing address"); break;
    case 8: st->print(", data execution prevention violation at address"); break;
    default: st->print(", ExceptionInformation=" INTPTR_FORMAT,
                       er->ExceptionInformation[0]);
    }
    st->print(" " INTPTR_FORMAT, er->ExceptionInformation[1]);
  } else {
    // Other exception codes: dump whatever parameters are present.
    int num = er->NumberParameters;
    if (num > 0) {
      st->print(", ExceptionInformation=");
      for (int i = 0; i < num; i++) {
        st->print(INTPTR_FORMAT " ", er->ExceptionInformation[i]);
      }
    }
  }
  st->cr();
}
1953
1954
// Deliver a signal to a specific thread. Not implemented on Windows;
// always reports failure.
bool os::signal_thread(Thread* thread, int sig, const char* reason) {
  // TODO: Can we kill thread?
  return false;
}
1958
1959
// POSIX-style signal-handler diagnostics have no Windows equivalent,
// so this is intentionally a no-op.
void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  // do nothing
}
1962
1963
// Cached result of os::jvm_path(), filled in lazily on the first call.
static char saved_jvm_path[MAX_PATH] = {0};
1964
1965
// Find the full path to the current module, jvm.dll, and copy it into buf
// (buflen must be >= MAX_PATH). The result is cached in saved_jvm_path.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAX_PATH) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  buf[0] = '\0';
  if (Arguments::sun_java_launcher_is_altjvm()) {
    // Support for the java launcher's '-XXaltjvm=<path>' option. Check
    // for a JAVA_HOME environment variable and fix up the path so it
    // looks like jvm.dll is installed there (append a fake suffix
    // hotspot/jvm.dll).
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != NULL && java_home_var[0] != 0 &&
        strlen(java_home_var) < (size_t)buflen) {
      strncpy(buf, java_home_var, buflen);

      // determine if this is a legacy image or modules image
      // modules image doesn't have "jre" subdirectory
      size_t len = strlen(buf);
      char* jrebin_p = buf + len;
      jio_snprintf(jrebin_p, buflen-len, "\\jre\\bin\\");
      // If "<JAVA_HOME>\jre\bin" does not exist, fall back to
      // "<JAVA_HOME>\bin" (modules image layout).
      if (0 != _access(buf, 0)) {
        jio_snprintf(jrebin_p, buflen-len, "\\bin\\");
      }
      len = strlen(buf);
      jio_snprintf(buf + len, buflen-len, "hotspot\\jvm.dll");
    }
  }

  if (buf[0] == '\0') {
    // Not an altjvm (or JAVA_HOME unusable): ask the OS for the path of
    // the module jvm.dll was loaded from.
    GetModuleFileName(vm_lib_handle, buf, buflen);
  }
  // Cache for subsequent calls; guarantee NUL termination.
  strncpy(saved_jvm_path, buf, MAX_PATH);
  saved_jvm_path[MAX_PATH - 1] = '\0';
}
2009
2010
2011
// Print the platform name-decoration prefix for a JNI entry point.
// 32-bit Windows prepends an underscore; 64-bit Windows uses none.
void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  st->print("_");
#endif
}
2016
2017
2018
// Print the platform name-decoration suffix for a JNI entry point.
// 32-bit Windows appends "@<bytes of arguments>"; 64-bit uses none.
void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
#ifndef _WIN64
  // Cast the byte count to int: sizeof() yields size_t, which must not be
  // passed to a "%d" conversion (format/argument type mismatch).
  st->print("@%d", args_size * (int) sizeof(int));
#endif
}
2023
2024
// This method is a copy of JDK's sysGetLastErrorString
// from src/windows/hpi/src/system_md.c

// Format the most recent OS or C-runtime error message into buf (at most
// len bytes, NUL-terminated). Returns the number of characters written,
// or 0 if neither GetLastError() nor errno reports an error.
size_t os::lasterror(char* buf, size_t len) {
  DWORD errval;

  if ((errval = GetLastError()) != 0) {
    // DOS error
    size_t n = (size_t)FormatMessage(
                                     FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS,
                                     NULL,
                                     errval,
                                     0,
                                     buf,
                                     (DWORD)len,
                                     NULL);
    if (n > 3) {
      // Drop final '.', CR, LF
      if (buf[n - 1] == '\n') n--;
      if (buf[n - 1] == '\r') n--;
      if (buf[n - 1] == '.') n--;
      buf[n] = '\0';
    }
    return n;
  }

  if (errno != 0) {
    // C runtime error that has no corresponding DOS error code
    const char* s = os::strerror(errno);
    size_t n = strlen(s);
    if (n >= len) n = len - 1;  // truncate to fit, leaving room for the NUL
    strncpy(buf, s, n);
    buf[n] = '\0';
    return n;
  }

  return 0;  // no pending error of either kind
}
2062
2063
int os::get_last_error() {
2064
DWORD error = GetLastError();
2065
if (error == 0) {
2066
error = errno;
2067
}
2068
return (int)error;
2069
}
2070
2071
// sun.misc.Signal
// NOTE that this is a workaround for an apparent kernel bug where if
// a signal handler for SIGBREAK is installed then that signal handler
// takes priority over the console control handler for CTRL_CLOSE_EVENT.
// See bug 4416763.
static void (*sigbreakHandler)(int) = NULL;

// Handler registered for Java-level (sun.misc.Signal) signals: records the
// signal for the dispatcher thread and then re-registers itself.
static void UserHandler(int sig, void *siginfo, void *context) {
  os::signal_notify(sig);
  // We need to reinstate the signal handler each time...
  os::signal(sig, (void*)UserHandler);
}
2083
2084
// Return the VM's Java-signal dispatch handler, type-erased for use
// with os::signal().
void* os::user_handler() {
  return (void*) UserHandler;
}
2087
2088
void* os::signal(int signal_number, void* handler) {
2089
if ((signal_number == SIGBREAK) && (!ReduceSignalUsage)) {
2090
void (*oldHandler)(int) = sigbreakHandler;
2091
sigbreakHandler = (void (*)(int)) handler;
2092
return (void*) oldHandler;
2093
} else {
2094
return (void*)::signal(signal_number, (void (*)(int))handler);
2095
}
2096
}
2097
2098
// Raise the given signal in the current process (delegates to the CRT).
void os::signal_raise(int signal_number) {
  raise(signal_number);
}
2101
2102
// The Win32 C runtime library maps all console control events other than ^C
2103
// into SIGBREAK, which makes it impossible to distinguish ^BREAK from close,
2104
// logoff, and shutdown events. We therefore install our own console handler
2105
// that raises SIGTERM for the latter cases.
2106
//
2107
static BOOL WINAPI consoleHandler(DWORD event) {
2108
switch (event) {
2109
case CTRL_C_EVENT:
2110
if (VMError::is_error_reported()) {
2111
// Ctrl-C is pressed during error reporting, likely because the error
2112
// handler fails to abort. Let VM die immediately.
2113
os::die();
2114
}
2115
2116
os::signal_raise(SIGINT);
2117
return TRUE;
2118
break;
2119
case CTRL_BREAK_EVENT:
2120
if (sigbreakHandler != NULL) {
2121
(*sigbreakHandler)(SIGBREAK);
2122
}
2123
return TRUE;
2124
break;
2125
case CTRL_LOGOFF_EVENT: {
2126
// Don't terminate JVM if it is running in a non-interactive session,
2127
// such as a service process.
2128
USEROBJECTFLAGS flags;
2129
HANDLE handle = GetProcessWindowStation();
2130
if (handle != NULL &&
2131
GetUserObjectInformation(handle, UOI_FLAGS, &flags,
2132
sizeof(USEROBJECTFLAGS), NULL)) {
2133
// If it is a non-interactive session, let next handler to deal
2134
// with it.
2135
if ((flags.dwFlags & WSF_VISIBLE) == 0) {
2136
return FALSE;
2137
}
2138
}
2139
}
2140
case CTRL_CLOSE_EVENT:
2141
case CTRL_SHUTDOWN_EVENT:
2142
os::signal_raise(SIGTERM);
2143
return TRUE;
2144
break;
2145
default:
2146
break;
2147
}
2148
return FALSE;
2149
}
2150
2151
// The following code is moved from os.cpp for making this
// code platform specific, which it is by its very nature.

// Return maximum OS signal used + 1 for internal use only
// Used as exit signal for signal_thread
// (NSIG itself is never a real signal number; pending_signals below is
// sized NSIG+1 so this sentinel gets its own slot.)
int os::sigexitnum_pd() {
  return NSIG;
}
2159
2160
// a counter for each possible signal value, including signal_thread exit signal
static volatile jint pending_signals[NSIG+1] = { 0 };
// Posted by os::signal_notify(); waited on in check_pending_signals().
static Semaphore* sig_sem = NULL;
2163
2164
// One-time setup for sun.misc.Signal support: clears the pending-signal
// counters, creates the wakeup semaphore, and installs the console control
// handler. Not called when -Xrs (ReduceSignalUsage) is set.
static void jdk_misc_signal_init() {
  // Initialize signal structures
  memset((void*)pending_signals, 0, sizeof(pending_signals));

  // Initialize signal semaphore
  sig_sem = new Semaphore();

  // Programs embedding the VM do not want it to attempt to receive
  // events like CTRL_LOGOFF_EVENT, which are used to implement the
  // shutdown hooks mechanism introduced in 1.3.  For example, when
  // the VM is run as part of a Windows NT service (i.e., a servlet
  // engine in a web server), the correct behavior is for any console
  // control handler to return FALSE, not TRUE, because the OS's
  // "final" handler for such events allows the process to continue if
  // it is a service (while terminating it if it is not a service).
  // To make this behavior uniform and the mechanism simpler, we
  // completely disable the VM's usage of these console events if -Xrs
  // (=ReduceSignalUsage) is specified.  This means, for example, that
  // the CTRL-BREAK thread dump mechanism is also disabled in this
  // case.  See bugs 4323062, 4345157, and related bugs.

  // Add a CTRL-C handler
  SetConsoleCtrlHandler(consoleHandler, TRUE);
}
2188
2189
void os::signal_notify(int sig) {
2190
if (sig_sem != NULL) {
2191
Atomic::inc(&pending_signals[sig]);
2192
sig_sem->signal();
2193
} else {
2194
// Signal thread is not created with ReduceSignalUsage and jdk_misc_signal_init
2195
// initialization isn't called.
2196
assert(ReduceSignalUsage, "signal semaphore should be created");
2197
}
2198
}
2199
2200
// Block until at least one signal has been posted via os::signal_notify()
// and return its number, consuming one pending occurrence.
static int check_pending_signals() {
  while (true) {
    // Scan all slots and claim one pending occurrence with a CAS so that
    // concurrent waiters never consume the same notification twice.
    for (int i = 0; i < NSIG + 1; i++) {
      jint n = pending_signals[i];
      if (n > 0 && n == Atomic::cmpxchg(&pending_signals[i], n, n - 1)) {
        return i;
      }
    }
    // Nothing pending: sleep (safepoint-aware) until signal_notify() posts
    // the semaphore, then rescan.
    sig_sem->wait_with_safepoint_check(JavaThread::current());
  }
  ShouldNotReachHere();
  return 0; // Satisfy compiler
}
2213
2214
// Called by the signal dispatcher thread: block until a signal is pending
// and return its number.
int os::signal_wait() {
  return check_pending_signals();
}
2217
2218
// Implicit OS exception handling

// Resume execution at 'handler' by patching the program counter in the
// exception context. The faulting pc is first stashed in the current
// JavaThread so the handler stub can retrieve it.
LONG Handle_Exception(struct _EXCEPTION_POINTERS* exceptionInfo,
                      address handler) {
  Thread* thread = Thread::current_or_null();

// The name of the program-counter field in CONTEXT differs per architecture.
#if defined(_M_ARM64)
  #define PC_NAME Pc
#elif defined(_M_AMD64)
  #define PC_NAME Rip
#elif defined(_M_IX86)
  #define PC_NAME Eip
#else
  #error unknown architecture
#endif

  // Save pc in thread
  if (thread != nullptr && thread->is_Java_thread()) {
    thread->as_Java_thread()->set_saved_exception_pc((address)(DWORD_PTR)exceptionInfo->ContextRecord->PC_NAME);
  }

  // Set pc to handler
  exceptionInfo->ContextRecord->PC_NAME = (DWORD64)handler;

  // Continue the execution
  return EXCEPTION_CONTINUE_EXECUTION;
}
2245
2246
2247
// Used for PostMortemDump
2248
extern "C" void safepoints();
2249
extern "C" void find(int x);
2250
extern "C" void events();
2251
2252
// According to Windows API documentation, an illegal instruction sequence should generate
2253
// the 0xC000001C exception code. However, real world experience shows that occasionnaly
2254
// the execution of an illegal instruction can generate the exception code 0xC000001E. This
2255
// seems to be an undocumented feature of Win NT 4.0 (and probably other Windows systems).
2256
2257
#define EXCEPTION_ILLEGAL_INSTRUCTION_2 0xC000001E
2258
2259
// From "Execution Protection in the Windows Operating System" draft 0.35
2260
// Once a system header becomes available, the "real" define should be
2261
// included or copied here.
2262
#define EXCEPTION_INFO_EXEC_VIOLATION 0x08
2263
2264
// Windows Vista/2008 heap corruption check
2265
#define EXCEPTION_HEAP_CORRUPTION 0xC0000374
2266
2267
// All Visual C++ exceptions thrown from code generated by the Microsoft Visual
2268
// C++ compiler contain this error code. Because this is a compiler-generated
2269
// error, the code is not listed in the Win32 API header files.
2270
// The code is actually a cryptic mnemonic device, with the initial "E"
2271
// standing for "exception" and the final 3 bytes (0x6D7363) representing the
2272
// ASCII values of "msc".
2273
2274
#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363
2275
2276
// Helper macro: expands to a { "NAME", NAME } table entry.
#define def_excpt(val) { #val, (val) }

// Table mapping known exception codes to printable names; searched linearly
// by os::exception_name() below.
static const struct { const char* name; uint number; } exceptlabels[] = {
  def_excpt(EXCEPTION_ACCESS_VIOLATION),
  def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT),
  def_excpt(EXCEPTION_BREAKPOINT),
  def_excpt(EXCEPTION_SINGLE_STEP),
  def_excpt(EXCEPTION_ARRAY_BOUNDS_EXCEEDED),
  def_excpt(EXCEPTION_FLT_DENORMAL_OPERAND),
  def_excpt(EXCEPTION_FLT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_FLT_INEXACT_RESULT),
  def_excpt(EXCEPTION_FLT_INVALID_OPERATION),
  def_excpt(EXCEPTION_FLT_OVERFLOW),
  def_excpt(EXCEPTION_FLT_STACK_CHECK),
  def_excpt(EXCEPTION_FLT_UNDERFLOW),
  def_excpt(EXCEPTION_INT_DIVIDE_BY_ZERO),
  def_excpt(EXCEPTION_INT_OVERFLOW),
  def_excpt(EXCEPTION_PRIV_INSTRUCTION),
  def_excpt(EXCEPTION_IN_PAGE_ERROR),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION),
  def_excpt(EXCEPTION_ILLEGAL_INSTRUCTION_2),
  def_excpt(EXCEPTION_NONCONTINUABLE_EXCEPTION),
  def_excpt(EXCEPTION_STACK_OVERFLOW),
  def_excpt(EXCEPTION_INVALID_DISPOSITION),
  def_excpt(EXCEPTION_GUARD_PAGE),
  def_excpt(EXCEPTION_INVALID_HANDLE),
  def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION),
  def_excpt(EXCEPTION_HEAP_CORRUPTION)
};

#undef def_excpt
2307
2308
const char* os::exception_name(int exception_code, char *buf, size_t size) {
2309
uint code = static_cast<uint>(exception_code);
2310
for (uint i = 0; i < ARRAY_SIZE(exceptlabels); ++i) {
2311
if (exceptlabels[i].number == code) {
2312
jio_snprintf(buf, size, "%s", exceptlabels[i].name);
2313
return buf;
2314
}
2315
}
2316
2317
return NULL;
2318
}
2319
2320
//-----------------------------------------------------------------------------
// Fix up machine state after a hardware trap from integer division overflow
// (-MinValue / -1), then resume execution just past the divide instruction
// with the architecturally-correct result and remainder.
LONG Handle_IDiv_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
  // handle exception caused by idiv; should only happen for -MinInt/-1
  // (division by zero is handled explicitly)
#if defined(_M_ARM64)
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Sp;
  assert(pc[0] == 0x83, "not an sdiv opcode"); //Fixme did i get the right opcode?
  assert(ctx->X4 == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Pc = (uint64_t)pc + 4;        // idiv reg, reg, reg is 4 bytes
  ctx->X4 = (uint64_t)min_jint;      // result
  ctx->X5 = (uint64_t)0;             // remainder
  // Continue the execution
#elif defined(_M_AMD64)
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Rip;
  // Accept either a plain idiv (0xF7) or a REX-prefixed idiv.
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && pc[1] == 0xF7 || pc[0] == 0xF7, "not an idiv opcode");
  assert(pc[0] >= Assembler::REX && pc[0] <= Assembler::REX_WRXB && (pc[2] & ~0x7) == 0xF8 || (pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  if (pc[0] == 0xF7) {
    // set correct result values and continue after idiv instruction
    ctx->Rip = (DWORD64)pc + 2;      // idiv reg, reg is 2 bytes
  } else {
    ctx->Rip = (DWORD64)pc + 3;      // REX idiv reg, reg is 3 bytes
  }
  // Do not set ctx->Rax as it already contains the correct value (either 32 or 64 bit, depending on the operation)
  // this is the case because the exception only happens for -MinValue/-1 and -MinValue is always in rax because of the
  // idiv opcode (0xF7).
  ctx->Rdx = (DWORD)0;               // remainder
  // Continue the execution
#else
  PCONTEXT ctx = exceptionInfo->ContextRecord;
  address pc = (address)ctx->Eip;
  assert(pc[0] == 0xF7, "not an idiv opcode");
  assert((pc[1] & ~0x7) == 0xF8, "cannot handle non-register operands");
  assert(ctx->Eax == min_jint, "unexpected idiv exception");
  // set correct result values and continue after idiv instruction
  ctx->Eip = (DWORD)pc + 2;          // idiv reg, reg is 2 bytes
  ctx->Eax = (DWORD)min_jint;        // result
  ctx->Edx = (DWORD)0;               // remainder
  // Continue the execution
#endif
  return EXCEPTION_CONTINUE_EXECUTION;
}
2364
2365
#if defined(_M_AMD64) || defined(_M_IX86)
2366
//-----------------------------------------------------------------------------
2367
LONG WINAPI Handle_FLT_Exception(struct _EXCEPTION_POINTERS* exceptionInfo) {
2368
PCONTEXT ctx = exceptionInfo->ContextRecord;
2369
#ifndef _WIN64
2370
// handle exception caused by native method modifying control word
2371
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2372
2373
switch (exception_code) {
2374
case EXCEPTION_FLT_DENORMAL_OPERAND:
2375
case EXCEPTION_FLT_DIVIDE_BY_ZERO:
2376
case EXCEPTION_FLT_INEXACT_RESULT:
2377
case EXCEPTION_FLT_INVALID_OPERATION:
2378
case EXCEPTION_FLT_OVERFLOW:
2379
case EXCEPTION_FLT_STACK_CHECK:
2380
case EXCEPTION_FLT_UNDERFLOW:
2381
jint fp_control_word = (* (jint*) StubRoutines::addr_fpu_cntrl_wrd_std());
2382
if (fp_control_word != ctx->FloatSave.ControlWord) {
2383
// Restore FPCW and mask out FLT exceptions
2384
ctx->FloatSave.ControlWord = fp_control_word | 0xffffffc0;
2385
// Mask out pending FLT exceptions
2386
ctx->FloatSave.StatusWord &= 0xffffff00;
2387
return EXCEPTION_CONTINUE_EXECUTION;
2388
}
2389
}
2390
2391
if (prev_uef_handler != NULL) {
2392
// We didn't handle this exception so pass it to the previous
2393
// UnhandledExceptionFilter.
2394
return (prev_uef_handler)(exceptionInfo);
2395
}
2396
#else // !_WIN64
2397
// On Windows, the mxcsr control bits are non-volatile across calls
2398
// See also CR 6192333
2399
//
2400
jint MxCsr = INITIAL_MXCSR;
2401
// we can't use StubRoutines::x86::addr_mxcsr_std()
2402
// because in Win64 mxcsr is not saved there
2403
if (MxCsr != ctx->MxCsr) {
2404
ctx->MxCsr = MxCsr;
2405
return EXCEPTION_CONTINUE_EXECUTION;
2406
}
2407
#endif // !_WIN64
2408
2409
return EXCEPTION_CONTINUE_SEARCH;
2410
}
2411
#endif
2412
2413
// Invoke fatal error reporting for an OS exception at 'addr'.
static inline void report_error(Thread* t, DWORD exception_code,
                                address addr, void* siginfo, void* context) {
  VMError::report_and_die(t, exception_code, addr, siginfo, context);

  // If UseOSErrorReporting, this will return here and save the error file
  // somewhere where we can find it in the minidump.
}
2420
2421
//-----------------------------------------------------------------------------
// Top-level structured exception filter: decides, for each exception raised
// while the VM runs, whether execution can be resumed at a known handler
// stub (Handle_Exception) or must fall through to error reporting.
JNIEXPORT
LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
  if (InterceptOSException) return EXCEPTION_CONTINUE_SEARCH;
  PEXCEPTION_RECORD exception_record = exceptionInfo->ExceptionRecord;
  DWORD exception_code = exception_record->ExceptionCode;
#if defined(_M_ARM64)
  address pc = (address) exceptionInfo->ContextRecord->Pc;
#elif defined(_M_AMD64)
  address pc = (address) exceptionInfo->ContextRecord->Rip;
#else
  address pc = (address) exceptionInfo->ContextRecord->Eip;
#endif
  Thread* t = Thread::current_or_null_safe();

  // Handle SafeFetch32 and SafeFetchN exceptions.
  if (StubRoutines::is_safefetch_fault(pc)) {
    return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
  }

#ifndef _WIN64
  // Execution protection violation - win32 running on AMD64 only
  // Handled first to avoid misdiagnosis as a "normal" access violation;
  // This is safe to do because we have a new/unique ExceptionInformation
  // code for this condition.
  if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
    int exception_subcode = (int) exception_record->ExceptionInformation[0];
    address addr = (address) exception_record->ExceptionInformation[1];

    if (exception_subcode == EXCEPTION_INFO_EXEC_VIOLATION) {
      int page_size = os::vm_page_size();

      // Make sure the pc and the faulting address are sane.
      //
      // If an instruction spans a page boundary, and the page containing
      // the beginning of the instruction is executable but the following
      // page is not, the pc and the faulting address might be slightly
      // different - we still want to unguard the 2nd page in this case.
      //
      // 15 bytes seems to be a (very) safe value for max instruction size.
      bool pc_is_near_addr =
        (pointer_delta((void*) addr, (void*) pc, sizeof(char)) < 15);
      bool instr_spans_page_boundary =
        (align_down((intptr_t) pc ^ (intptr_t) addr,
                    (intptr_t) page_size) > 0);

      if (pc == addr || (pc_is_near_addr && instr_spans_page_boundary)) {
        static volatile address last_addr =
          (address) os::non_memory_address_word();

        // In conservative mode, don't unguard unless the address is in the VM
        if (UnguardOnExecutionViolation > 0 && addr != last_addr &&
            (UnguardOnExecutionViolation > 1 || os::address_is_in_vm(addr))) {

          // Set memory to RWX and retry
          address page_start = align_down(addr, page_size);
          bool res = os::protect_memory((char*) page_start, page_size,
                                        os::MEM_PROT_RWX);

          log_debug(os)("Execution protection violation "
                        "at " INTPTR_FORMAT
                        ", unguarding " INTPTR_FORMAT ": %s", p2i(addr),
                        p2i(page_start), (res ? "success" : os::strerror(errno)));

          // Set last_addr so if we fault again at the same address, we don't
          // end up in an endless loop.
          //
          // There are two potential complications here.  Two threads trapping
          // at the same address at the same time could cause one of the
          // threads to think it already unguarded, and abort the VM.  Likely
          // very rare.
          //
          // The other race involves two threads alternately trapping at
          // different addresses and failing to unguard the page, resulting in
          // an endless loop.  This condition is probably even more unlikely
          // than the first.
          //
          // Although both cases could be avoided by using locks or thread
          // local last_addr, these solutions are unnecessary complication:
          // this handler is a best-effort safety net, not a complete solution.
          // It is disabled by default and should only be used as a workaround
          // in case we missed any no-execute-unsafe VM code.

          last_addr = addr;

          return EXCEPTION_CONTINUE_EXECUTION;
        }
      }

      // Last unguard failed or not unguarding
      tty->print_raw_cr("Execution protection violation");
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
      report_error(t, exception_code, addr, exception_record,
                   exceptionInfo->ContextRecord);
#endif
      return EXCEPTION_CONTINUE_SEARCH;
    }
  }
#endif // _WIN64

#if defined(_M_AMD64) || defined(_M_IX86)
  if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
      VM_Version::is_cpuinfo_segv_addr(pc)) {
    // Verify that OS save/restore AVX registers.
    return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
  }
#endif

  if (t != NULL && t->is_Java_thread()) {
    JavaThread* thread = t->as_Java_thread();
    bool in_java = thread->thread_state() == _thread_in_Java;
    bool in_native = thread->thread_state() == _thread_in_native;
    bool in_vm = thread->thread_state() == _thread_in_vm;

    // Handle potential stack overflows up front.
    if (exception_code == EXCEPTION_STACK_OVERFLOW) {
      StackOverflow* overflow_state = thread->stack_overflow_state();
      if (overflow_state->stack_guards_enabled()) {
        if (in_java) {
          frame fr;
          if (os::win32::get_frame_at_stack_banging_point(thread, exceptionInfo, pc, &fr)) {
            assert(fr.is_java_frame(), "Must be a Java frame");
            SharedRuntime::look_for_reserved_stack_annotated_method(thread, fr);
          }
        }
        // Yellow zone violation.  The o/s has unprotected the first yellow
        // zone page for us.  Note:  must call disable_stack_yellow_zone to
        // update the enabled status, even if the zone contains only one page.
        assert(!in_vm, "Undersized StackShadowPages");
        overflow_state->disable_stack_yellow_reserved_zone();
        // If not in java code, return and hope for the best.
        return in_java
            ? Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW))
            : EXCEPTION_CONTINUE_EXECUTION;
      } else {
        // Fatal red zone violation.
        overflow_state->disable_stack_red_zone();
        tty->print_raw_cr("An unrecoverable stack overflow has occurred.");
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
        report_error(t, exception_code, pc, exception_record,
                     exceptionInfo->ContextRecord);
#endif
        return EXCEPTION_CONTINUE_SEARCH;
      }
    } else if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
      if (in_java) {
        // Either stack overflow or null pointer exception.
        address addr = (address) exception_record->ExceptionInformation[1];
        address stack_end = thread->stack_end();
        if (addr < stack_end && addr >= stack_end - os::vm_page_size()) {
          // Stack overflow.
          assert(!os::uses_stack_guard_pages(),
                 "should be caught by red zone code above.");
          return Handle_Exception(exceptionInfo,
                                  SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW));
        }
        // Check for safepoint polling and implicit null
        // We only expect null pointers in the stubs (vtable)
        // the rest are checked explicitly now.
        CodeBlob* cb = CodeCache::find_blob(pc);
        if (cb != NULL) {
          if (SafepointMechanism::is_poll_address(addr)) {
            address stub = SharedRuntime::get_poll_stub(pc);
            return Handle_Exception(exceptionInfo, stub);
          }
        }
#ifdef _WIN64
        // If it's a legal stack address map the entire region in
        if (thread->is_in_usable_stack(addr)) {
          // Round down to a page boundary before committing up to the base.
          addr = (address)((uintptr_t)addr &
                           (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
          os::commit_memory((char *)addr, thread->stack_base() - addr,
                            !ExecMem);
          return EXCEPTION_CONTINUE_EXECUTION;
        }
#endif
        // Null pointer exception.
        if (MacroAssembler::uses_implicit_null_check((void*)addr)) {
          address stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
          if (stub != NULL) return Handle_Exception(exceptionInfo, stub);
        }
        report_error(t, exception_code, pc, exception_record,
                     exceptionInfo->ContextRecord);
        return EXCEPTION_CONTINUE_SEARCH;
      }

#ifdef _WIN64
      // Special care for fast JNI field accessors.
      // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks
      // in and the heap gets shrunk before the field access.
      address slowcase_pc = JNI_FastGetField::find_slowcase_pc(pc);
      if (slowcase_pc != (address)-1) {
        return Handle_Exception(exceptionInfo, slowcase_pc);
      }
#endif

      // Stack overflow or null pointer exception in native code.
#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
      report_error(t, exception_code, pc, exception_record,
                   exceptionInfo->ContextRecord);
#endif
      return EXCEPTION_CONTINUE_SEARCH;
    } // /EXCEPTION_ACCESS_VIOLATION
    // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

    if (exception_code == EXCEPTION_IN_PAGE_ERROR) {
      CompiledMethod* nm = NULL;
      if (in_java) {
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
      }

      // Faults inside Unsafe accesses / unsafe arraycopies are recoverable:
      // resume at the instruction (or continuation pc) after the access.
      bool is_unsafe_arraycopy = (in_native || in_java) && UnsafeCopyMemory::contains_pc(pc);
      if (((in_vm || in_native || is_unsafe_arraycopy) && thread->doing_unsafe_access()) ||
          (nm != NULL && nm->has_unsafe_access())) {
        address next_pc = Assembler::locate_next_instruction(pc);
        if (is_unsafe_arraycopy) {
          next_pc = UnsafeCopyMemory::page_error_continue_pc(pc);
        }
        return Handle_Exception(exceptionInfo, SharedRuntime::handle_unsafe_access(thread, next_pc));
      }
    }

#ifdef _M_ARM64
    if (in_java &&
        (exception_code == EXCEPTION_ILLEGAL_INSTRUCTION ||
         exception_code == EXCEPTION_ILLEGAL_INSTRUCTION_2)) {
      if (nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant");
        }
        return Handle_Exception(exceptionInfo, SharedRuntime::get_handle_wrong_method_stub());
      }
    }
#endif

    if (in_java) {
      switch (exception_code) {
      case EXCEPTION_INT_DIVIDE_BY_ZERO:
        return Handle_Exception(exceptionInfo, SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO));

      case EXCEPTION_INT_OVERFLOW:
        return Handle_IDiv_Exception(exceptionInfo);

      } // switch
    }

#if defined(_M_AMD64) || defined(_M_IX86)
    if ((in_java || in_native) && exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) {
      LONG result=Handle_FLT_Exception(exceptionInfo);
      if (result==EXCEPTION_CONTINUE_EXECUTION) return result;
    }
#endif
  }

#if !defined(USE_VECTORED_EXCEPTION_HANDLING)
  if (exception_code != EXCEPTION_BREAKPOINT) {
    report_error(t, exception_code, pc, exception_record,
                 exceptionInfo->ContextRecord);
  }
#endif
  return EXCEPTION_CONTINUE_SEARCH;
}
2684
2685
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
2686
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2687
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
2688
#if defined(_M_ARM64)
2689
address pc = (address) exceptionInfo->ContextRecord->Pc;
2690
#elif defined(_M_AMD64)
2691
address pc = (address) exceptionInfo->ContextRecord->Rip;
2692
#else
2693
address pc = (address) exceptionInfo->ContextRecord->Eip;
2694
#endif
2695
2696
// Fast path for code part of the code cache
2697
if (CodeCache::low_bound() <= pc && pc < CodeCache::high_bound()) {
2698
return topLevelExceptionFilter(exceptionInfo);
2699
}
2700
2701
// If the exception occurred in the codeCache, pass control
2702
// to our normal exception handler.
2703
CodeBlob* cb = CodeCache::find_blob(pc);
2704
if (cb != NULL) {
2705
return topLevelExceptionFilter(exceptionInfo);
2706
}
2707
2708
return EXCEPTION_CONTINUE_SEARCH;
2709
}
2710
#endif
2711
2712
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
2713
LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2714
if (InterceptOSException) goto exit;
2715
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2716
#if defined(_M_ARM64)
2717
address pc = (address)exceptionInfo->ContextRecord->Pc;
2718
#elif defined(_M_AMD64)
2719
address pc = (address) exceptionInfo->ContextRecord->Rip;
2720
#else
2721
address pc = (address) exceptionInfo->ContextRecord->Eip;
2722
#endif
2723
Thread* t = Thread::current_or_null_safe();
2724
2725
if (exception_code != EXCEPTION_BREAKPOINT) {
2726
report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
2727
exceptionInfo->ContextRecord);
2728
}
2729
exit:
2730
return previousUnhandledExceptionFilter ? previousUnhandledExceptionFilter(exceptionInfo) : EXCEPTION_CONTINUE_SEARCH;
2731
}
2732
#endif
2733
2734
#ifndef _WIN64
2735
// Special care for fast JNI accessors.
2736
// jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in and
2737
// the heap gets shrunk before the field access.
2738
// Need to install our own structured exception handler since native code may
2739
// install its own.
2740
LONG WINAPI fastJNIAccessorExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
2741
DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
2742
if (exception_code == EXCEPTION_ACCESS_VIOLATION) {
2743
address pc = (address) exceptionInfo->ContextRecord->Eip;
2744
address addr = JNI_FastGetField::find_slowcase_pc(pc);
2745
if (addr != (address)-1) {
2746
return Handle_Exception(exceptionInfo, addr);
2747
}
2748
}
2749
return EXCEPTION_CONTINUE_SEARCH;
2750
}
2751
2752
// Wraps a fast JNI field getter in SEH so that an access violation raised
// inside the accessor stub is routed to fastJNIAccessorExceptionFilter.
// Returns 0 if the filter could not resume the accessor.
#define DEFINE_FAST_GETFIELD(Return, Fieldname, Result) \
Return JNICALL jni_fast_Get##Result##Field_wrapper(JNIEnv *env, \
                                                   jobject obj, \
                                                   jfieldID fieldID) { \
  __try { \
    return (*JNI_FastGetField::jni_fast_Get##Result##Field_fp)(env, \
                                                               obj, \
                                                               fieldID); \
  } __except(fastJNIAccessorExceptionFilter((_EXCEPTION_POINTERS*) \
                                            _exception_info())) { \
  } \
  return 0; \
}

DEFINE_FAST_GETFIELD(jboolean, bool, Boolean)
DEFINE_FAST_GETFIELD(jbyte, byte, Byte)
DEFINE_FAST_GETFIELD(jchar, char, Char)
DEFINE_FAST_GETFIELD(jshort, short, Short)
DEFINE_FAST_GETFIELD(jint, int, Int)
DEFINE_FAST_GETFIELD(jlong, long, Long)
DEFINE_FAST_GETFIELD(jfloat, float, Float)
DEFINE_FAST_GETFIELD(jdouble, double, Double)
// Map a BasicType to the corresponding SEH-wrapped fast JNI accessor stub.
address os::win32::fast_jni_accessor_wrapper(BasicType type) {
  address entry = (address)-1;
  switch (type) {
    case T_BOOLEAN: entry = (address)jni_fast_GetBooleanField_wrapper; break;
    case T_BYTE:    entry = (address)jni_fast_GetByteField_wrapper;    break;
    case T_CHAR:    entry = (address)jni_fast_GetCharField_wrapper;    break;
    case T_SHORT:   entry = (address)jni_fast_GetShortField_wrapper;   break;
    case T_INT:     entry = (address)jni_fast_GetIntField_wrapper;     break;
    case T_LONG:    entry = (address)jni_fast_GetLongField_wrapper;    break;
    case T_FLOAT:   entry = (address)jni_fast_GetFloatField_wrapper;   break;
    case T_DOUBLE:  entry = (address)jni_fast_GetDoubleField_wrapper;  break;
    default:        ShouldNotReachHere();
  }
  return entry;
}
#endif
2791
// Virtual Memory

// Page size of the underlying platform (cached win32 value).
int os::vm_page_size() { return os::win32::vm_page_size(); }
// Granularity at which VirtualAlloc reserves address space.
int os::vm_allocation_granularity() {
  return os::win32::vm_allocation_granularity();
}
2798
// Windows large page support is available on Windows 2003. In order to use
// large page memory, the administrator must first assign additional privilege
// to the user:
//   + select Control Panel -> Administrative Tools -> Local Security Policy
//   + select Local Policies -> User Rights Assignment
//   + double click "Lock pages in memory", add users and/or groups
//   + reboot
// Note the above steps are needed for administrators as well, as administrators
// by default do not have the privilege to lock pages in memory.
//
// Note about Windows 2003: although the API supports committing large page
// memory on a page-by-page basis and VirtualAlloc() returns success under this
// scenario, experiments show it only uses large pages if the entire
// memory region is reserved and committed in a single VirtualAlloc() call.
// This makes Windows large page support more or less like Solaris ISM, in
// that the entire heap must be committed upfront. This may change
// in the future; if so, the code below needs to be revisited.
#ifndef MEM_LARGE_PAGES
#define MEM_LARGE_PAGES 0x20000000  // Fallback definition for SDKs that lack the flag.
#endif
// Container for NUMA node list info
// Collects the set of NUMA nodes whose processor masks intersect this
// process' affinity mask; interleaved allocations round-robin over this list.
class NUMANodeListHolder {
 private:
  int *_numa_used_node_list;  // allocated below
  int _numa_used_node_count;

  void free_node_list() {
    FREE_C_HEAP_ARRAY(int, _numa_used_node_list);
  }

 public:
  NUMANodeListHolder() {
    _numa_used_node_count = 0;
    _numa_used_node_list = NULL;
    // do rest of initialization in build routine (after function pointers are set up)
  }

  ~NUMANodeListHolder() {
    free_node_list();
  }

  // Populate the node list from the process affinity mask.
  // Returns true only when more than one node is usable (interleaving
  // across a single node would be pointless).
  bool build() {
    DWORD_PTR proc_aff_mask;
    DWORD_PTR sys_aff_mask;
    if (!GetProcessAffinityMask(GetCurrentProcess(), &proc_aff_mask, &sys_aff_mask)) return false;
    ULONG highest_node_number;
    if (!GetNumaHighestNodeNumber(&highest_node_number)) return false;
    free_node_list();
    _numa_used_node_list = NEW_C_HEAP_ARRAY(int, highest_node_number + 1, mtInternal);
    for (unsigned int i = 0; i <= highest_node_number; i++) {
      ULONGLONG proc_mask_numa_node;
      if (!GetNumaNodeProcessorMask(i, &proc_mask_numa_node)) return false;
      // Keep only nodes whose processors overlap our affinity mask.
      if ((proc_aff_mask & proc_mask_numa_node)!=0) {
        _numa_used_node_list[_numa_used_node_count++] = i;
      }
    }
    return (_numa_used_node_count > 1);
  }

  int get_count() { return _numa_used_node_count; }
  int get_node_list_entry(int n) {
    // for indexes out of range, returns -1
    return (n < _numa_used_node_count ? _numa_used_node_list[n] : -1);
  }

} numa_node_list_holder;
2867
// Large page size selected by os::large_page_init(); 0 when large pages are unused.
static size_t _large_page_size = 0;
static bool request_lock_memory_privilege() {
2870
HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION, FALSE,
2871
os::current_process_id());
2872
2873
bool success = false;
2874
HANDLE hToken = NULL;
2875
LUID luid;
2876
if (hProcess != NULL &&
2877
OpenProcessToken(hProcess, TOKEN_ADJUST_PRIVILEGES, &hToken) &&
2878
LookupPrivilegeValue(NULL, "SeLockMemoryPrivilege", &luid)) {
2879
2880
TOKEN_PRIVILEGES tp;
2881
tp.PrivilegeCount = 1;
2882
tp.Privileges[0].Luid = luid;
2883
tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
2884
2885
// AdjustTokenPrivileges() may return TRUE even when it couldn't change the
2886
// privilege. Check GetLastError() too. See MSDN document.
2887
if (AdjustTokenPrivileges(hToken, false, &tp, sizeof(tp), NULL, NULL) &&
2888
(GetLastError() == ERROR_SUCCESS)) {
2889
success = true;
2890
}
2891
}
2892
2893
// Cleanup
2894
if (hProcess != NULL) {
2895
CloseHandle(hProcess);
2896
}
2897
if (hToken != NULL) {
2898
CloseHandle(hToken);
2899
}
2900
2901
return success;
2902
}
2903
2904
// Set up state for NUMA-interleaved allocation; returns true when usable.
// Warnings are printed only if the user explicitly set -XX:+UseNUMAInterleaving.
static bool numa_interleaving_init() {
  bool success = false;

  // print a warning if UseNUMAInterleaving flag is specified on command line
  bool warn_on_failure = !FLAG_IS_DEFAULT(UseNUMAInterleaving);

#define WARN(msg) if (warn_on_failure) { warning(msg); }

  // NUMAInterleaveGranularity cannot be less than vm_allocation_granularity (or _large_page_size if using large pages)
  size_t min_interleave_granularity = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  NUMAInterleaveGranularity = align_up(NUMAInterleaveGranularity, min_interleave_granularity);

  if (!numa_node_list_holder.build()) {
    WARN("Process does not cover multiple NUMA nodes.");
    WARN("...Ignoring UseNUMAInterleaving flag.");
    return false;
  }

  // Log the node list for diagnostics.
  if (log_is_enabled(Debug, os, cpu)) {
    Log(os, cpu) log;
    log.debug("NUMA UsedNodeCount=%d, namely ", numa_node_list_holder.get_count());
    for (int i = 0; i < numa_node_list_holder.get_count(); i++) {
      log.debug(" %d ", numa_node_list_holder.get_node_list_entry(i));
    }
  }

#undef WARN

  return true;
}
2935
// this routine is used whenever we need to reserve a contiguous VA range
// but we need to make separate VirtualAlloc calls for each piece of the range
// Reasons for doing this:
//  * UseLargePagesIndividualAllocation was set (normally only needed on WS2003 but possible to be set otherwise)
//  * UseNUMAInterleaving requires a separate node for each piece
// On any chunk failure, everything committed so far is released and NULL is returned.
static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
                                         DWORD prot,
                                         bool should_inject_error = false) {
  char * p_buf;
  // note: at setup time we guaranteed that NUMAInterleaveGranularity was aligned up to a page size
  size_t page_size = UseLargePages ? _large_page_size : os::vm_allocation_granularity();
  size_t chunk_size = UseNUMAInterleaving ? NUMAInterleaveGranularity : page_size;

  // first reserve enough address space in advance since we want to be
  // able to break a single contiguous virtual address range into multiple
  // large page commits but WS2003 does not allow reserving large page space
  // so we just use 4K pages for reserve, this gives us a legal contiguous
  // address space. then we will deallocate that reservation, and re alloc
  // using large pages
  const size_t size_of_reserve = bytes + chunk_size;
  if (bytes > size_of_reserve) {
    // Overflowed.
    return NULL;
  }
  p_buf = (char *) virtualAlloc(addr,
                                size_of_reserve,  // size of Reserve
                                MEM_RESERVE,
                                PAGE_READWRITE);
  // If reservation failed, return NULL
  if (p_buf == NULL) return NULL;
  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
  os::release_memory(p_buf, bytes + chunk_size);

  // we still need to round up to a page boundary (in case we are using large pages)
  // but not to a chunk boundary (in case InterleavingGranularity doesn't align with page size)
  // instead we handle this in the bytes_to_rq computation below
  p_buf = align_up(p_buf, page_size);

  // now go through and allocate one chunk at a time until all bytes are
  // allocated
  size_t bytes_remaining = bytes;
  // An overflow of align_up() would have been caught above
  // in the calculation of size_of_reserve.
  char * next_alloc_addr = p_buf;
  HANDLE hProc = GetCurrentProcess();

#ifdef ASSERT
  // Variable for the failure injection
  int ran_num = os::random();
  size_t fail_after = ran_num % bytes;
#endif

  int count=0;
  while (bytes_remaining) {
    // select bytes_to_rq to get to the next chunk_size boundary

    size_t bytes_to_rq = MIN2(bytes_remaining, chunk_size - ((size_t)next_alloc_addr % chunk_size));
    // Note allocate and commit
    char * p_new;

#ifdef ASSERT
    bool inject_error_now = should_inject_error && (bytes_remaining <= fail_after);
#else
    const bool inject_error_now = false;
#endif

    if (inject_error_now) {
      p_new = NULL;
    } else {
      if (!UseNUMAInterleaving) {
        p_new = (char *) virtualAlloc(next_alloc_addr,
                                      bytes_to_rq,
                                      flags,
                                      prot);
      } else {
        // get the next node to use from the used_node_list
        assert(numa_node_list_holder.get_count() > 0, "Multiple NUMA nodes expected");
        DWORD node = numa_node_list_holder.get_node_list_entry(count % numa_node_list_holder.get_count());
        p_new = (char *)virtualAllocExNuma(hProc, next_alloc_addr, bytes_to_rq, flags, prot, node);
      }
    }

    if (p_new == NULL) {
      // Free any allocated pages
      if (next_alloc_addr > p_buf) {
        // Some memory was committed so release it.
        size_t bytes_to_release = bytes - bytes_remaining;
        // NMT has yet to record any individual blocks, so it
        // need to create a dummy 'reserve' record to match
        // the release.
        MemTracker::record_virtual_memory_reserve((address)p_buf,
                                                  bytes_to_release, CALLER_PC);
        os::release_memory(p_buf, bytes_to_release);
      }
#ifdef ASSERT
      if (should_inject_error) {
        log_develop_debug(pagesize)("Reserving pages individually failed.");
      }
#endif
      return NULL;
    }

    bytes_remaining -= bytes_to_rq;
    next_alloc_addr += bytes_to_rq;
    count++;
  }
  // Although the memory is allocated individually, it is returned as one.
  // NMT records it as one block.
  if ((flags & MEM_COMMIT) != 0) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
  } else {
    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
  }

  // made it this far, success
  return p_buf;
}
3053
// Decide which large page size to use; returns 0 when large pages cannot be
// used (missing privilege, unsupported processor, or an oversized request).
static size_t large_page_init_decide_size() {
  // print a warning if any large page related flag is specified on command line
  const bool warn_on_failure = !FLAG_IS_DEFAULT(UseLargePages) ||
                               !FLAG_IS_DEFAULT(LargePageSizeInBytes);

#define WARN(msg) if (warn_on_failure) { warning(msg); }

  if (!request_lock_memory_privilege()) {
    WARN("JVM cannot use large page memory because it does not have enough privilege to lock pages in memory.");
    return 0;
  }

  size_t decided_size = GetLargePageMinimum();
  if (decided_size == 0) {
    WARN("Large page is not supported by the processor.");
    return 0;
  }

#if defined(IA32) || defined(AMD64)
  if (decided_size > 4*M || LargePageSizeInBytes > 4*M) {
    WARN("JVM cannot use large pages bigger than 4mb.");
    return 0;
  }
#endif

  // Honor an explicit LargePageSizeInBytes when it is a multiple of the
  // minimum size reported by the OS.
  if (LargePageSizeInBytes > 0 && LargePageSizeInBytes % decided_size == 0) {
    decided_size = LargePageSizeInBytes;
  }

#undef WARN

  return decided_size;
}
3087
void os::large_page_init() {
3088
if (!UseLargePages) {
3089
return;
3090
}
3091
3092
_large_page_size = large_page_init_decide_size();
3093
const size_t default_page_size = (size_t) vm_page_size();
3094
if (_large_page_size > default_page_size) {
3095
_page_sizes.add(_large_page_size);
3096
}
3097
3098
UseLargePages = _large_page_size != 0;
3099
}
3100
3101
int os::create_file_for_heap(const char* dir) {
3102
3103
const char name_template[] = "/jvmheap.XXXXXX";
3104
3105
size_t fullname_len = strlen(dir) + strlen(name_template);
3106
char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
3107
if (fullname == NULL) {
3108
vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
3109
return -1;
3110
}
3111
int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
3112
assert((size_t)n == fullname_len, "Unexpected number of characters in string");
3113
3114
os::native_path(fullname);
3115
3116
char *path = _mktemp(fullname);
3117
if (path == NULL) {
3118
warning("_mktemp could not create file name from template %s (%s)", fullname, os::strerror(errno));
3119
os::free(fullname);
3120
return -1;
3121
}
3122
3123
int fd = _open(path, O_RDWR | O_CREAT | O_TEMPORARY | O_EXCL, S_IWRITE | S_IREAD);
3124
3125
os::free(fullname);
3126
if (fd < 0) {
3127
warning("Problem opening file for heap (%s)", os::strerror(errno));
3128
return -1;
3129
}
3130
return fd;
3131
}
3132
3133
// If 'base' is not NULL, function will return NULL if it cannot get 'base'
// Maps 'size' bytes of the file behind descriptor 'fd' read/write, optionally
// at the requested 'base' address. Exits the VM during initialization when
// the file mapping object cannot be created.
char* os::map_memory_to_file(char* base, size_t size, int fd) {
  assert(fd != -1, "File descriptor is not valid");

  HANDLE fh = (HANDLE)_get_osfhandle(fd);
  // Split the 64-bit size into high/low DWORDs on LP64.
#ifdef _LP64
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
                                         (DWORD)(size >> 32), (DWORD)(size & 0xFFFFFFFF), NULL);
#else
  HANDLE fileMapping = CreateFileMapping(fh, NULL, PAGE_READWRITE,
                                         0, (DWORD)size, NULL);
#endif
  if (fileMapping == NULL) {
    if (GetLastError() == ERROR_DISK_FULL) {
      vm_exit_during_initialization(err_msg("Could not allocate sufficient disk space for Java heap"));
    }
    else {
      vm_exit_during_initialization(err_msg("Error in mapping Java heap at the given filesystem directory"));
    }

    return NULL;
  }

  // May return NULL when a specific 'base' was requested but is unavailable.
  LPVOID addr = mapViewOfFileEx(fileMapping, FILE_MAP_WRITE, 0, 0, size, base);

  // NOTE: per Win32 semantics, closing the mapping handle here does not
  // invalidate the mapped view.
  CloseHandle(fileMapping);

  return (char*)addr;
}
3163
char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd) {
3164
assert(fd != -1, "File descriptor is not valid");
3165
assert(base != NULL, "Base address cannot be NULL");
3166
3167
release_memory(base, size);
3168
return map_memory_to_file(base, size, fd);
3169
}
3170
3171
// Multiple threads can race in this code but it's not possible to unmap small sections of
// virtual space to get requested alignment, like posix-like os's.
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc) {
  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
         "Alignment must be a multiple of allocation granularity (page size)");
  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");

  size_t extra_size = size + alignment;
  assert(extra_size >= size, "overflow, size is too large to allow alignment");

  char* aligned_base = NULL;
  static const int max_attempts = 20;

  for (int attempt = 0; attempt < max_attempts && aligned_base == NULL; attempt ++) {
    // Over-reserve by 'alignment' so some aligned address must fall inside.
    char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc) :
                                         os::reserve_memory(extra_size);
    if (extra_base == NULL) {
      return NULL;
    }
    // Do manual alignment
    aligned_base = align_up(extra_base, alignment);

    // Release the oversized reservation before re-reserving the aligned part.
    bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
                                  os::release_memory(extra_base, extra_size);
    assert(rc, "release failed");
    if (!rc) {
      return NULL;
    }

    // Attempt to map, into the just vacated space, the slightly smaller aligned area.
    // Which may fail, hence the loop.
    aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc) :
                                     os::attempt_reserve_memory_at(aligned_base, size);
  }

  assert(aligned_base != NULL, "Did not manage to re-map after %d attempts?", max_attempts);

  return aligned_base;
}
3212
// Reserve 'size' bytes aligned to 'alignment' (anonymous memory).
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
  // exec can be ignored
  return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
}

// File-backed variant of the aligned reservation above.
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd) {
  return map_or_reserve_memory_aligned(size, alignment, fd);
}
3221
// Reserve 'bytes' of address space at an OS-chosen location.
char* os::pd_reserve_memory(size_t bytes, bool exec) {
  return pd_attempt_reserve_memory_at(NULL /* addr */, bytes, exec);
}
3225
// Reserve memory at an arbitrary address, only if that area is
// available (and not reserved for something else).
char* os::pd_attempt_reserve_memory_at(char* addr, size_t bytes, bool exec) {
  assert((size_t)addr % os::vm_allocation_granularity() == 0,
         "reserve alignment");
  assert(bytes % os::vm_page_size() == 0, "reserve page size");
  char* res;
  // note that if UseLargePages is on, all the areas that require interleaving
  // will go thru reserve_memory_special rather than thru here.
  bool use_individual = (UseNUMAInterleaving && !UseLargePages);
  if (!use_individual) {
    res = (char*)virtualAlloc(addr, bytes, MEM_RESERVE, PAGE_READWRITE);
  } else {
    elapsedTimer reserveTimer;
    if (Verbose && PrintMiscellaneous) reserveTimer.start();
    // in numa interleaving, we have to allocate pages individually
    // (well really chunks of NUMAInterleaveGranularity size)
    res = allocate_pages_individually(bytes, addr, MEM_RESERVE, PAGE_READWRITE);
    if (res == NULL) {
      warning("NUMA page allocation failed");
    }
    if (Verbose && PrintMiscellaneous) {
      reserveTimer.stop();
      tty->print_cr("reserve_memory of %Ix bytes took " JLONG_FORMAT " ms (" JLONG_FORMAT " ticks)", bytes,
                    reserveTimer.milliseconds(), reserveTimer.ticks());
    }
  }
  assert(res == NULL || addr == NULL || addr == res,
         "Unexpected address from reserve.");

  return res;
}
3258
// Try to map the heap backing file at the requested address; may return NULL.
char* os::pd_attempt_map_memory_to_file_at(char* requested_addr, size_t bytes, int file_desc) {
  assert(file_desc >= 0, "file_desc is not valid");
  return map_memory_to_file(requested_addr, bytes, file_desc);
}
3263
// The large page size picked by os::large_page_init() (0 when unused).
size_t os::large_page_size() {
  return _large_page_size;
}

bool os::can_commit_large_page_memory() {
  // Windows only uses large page memory when the entire region is reserved
  // and committed in a single VirtualAlloc() call. This may change in the
  // future, but with Windows 2003 it's not possible to commit on demand.
  return false;
}

bool os::can_execute_large_page_memory() {
  return true;
}
3278
// Reserve and commit 'size' bytes of large pages chunk by chunk, as needed for
// UseLargePagesIndividualAllocation or NUMA interleaving. Returns NULL (after
// emitting an explanatory warning) on failure.
static char* reserve_large_pages_individually(size_t size, char* req_addr, bool exec) {
  log_debug(pagesize)("Reserving large pages individually.");

  const DWORD protect = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  const DWORD alloc_flags = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;

  char* result = allocate_pages_individually(size, req_addr, alloc_flags, protect,
                                             LargePagesIndividualAllocationInjectError);
  if (result != NULL) {
    return result;
  }

  // Allocation failed: give an appropriate warning message for each mode.
  if (UseNUMAInterleaving) {
    warning("NUMA large page allocation failed, UseLargePages flag ignored");
  }
  if (UseLargePagesIndividualAllocation) {
    warning("Individually allocated large pages failed, "
            "use -XX:-UseLargePagesIndividualAllocation to turn off");
  }
  return NULL;
}
3299
// Reserve and commit 'size' bytes of large pages with one VirtualAlloc call.
static char* reserve_large_pages_single_range(size_t size, char* req_addr, bool exec) {
  log_debug(pagesize)("Reserving large pages in a single large chunk.");

  const DWORD protect = exec ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  return (char *) virtualAlloc(req_addr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, protect);
}
3308
// Dispatch between individual and single-range large page reservation.
// Individual allocation is required when either:
//  1) the UseLargePagesIndividualAllocation flag is set (set by default on WS2003), or
//  2) NUMA interleaving is enabled, in which case a different node is used for each piece.
static char* reserve_large_pages(size_t size, char* req_addr, bool exec) {
  const bool individually = UseLargePagesIndividualAllocation || UseNUMAInterleaving;
  return individually ? reserve_large_pages_individually(size, req_addr, exec)
                      : reserve_large_pages_single_range(size, req_addr, exec);
}
3318
// Probe for an address at which a 'size'-byte reservation can start with the
// requested alignment. The temporary reservation is released before returning,
// so the result is only a hint — another thread may take the range first.
static char* find_aligned_address(size_t size, size_t alignment) {
  // Over-reserve so that an aligned address is guaranteed to fall inside.
  char* probe = (char*) virtualAlloc(NULL, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
  char* aligned = align_up(probe, alignment);
  // Free the temporary reservation again.
  virtualFree(probe, 0, MEM_RELEASE);
  return aligned;
}
3330
// Reserve large pages at an address aligned to 'alignment'. Retries a bounded
// number of times because, between probing for an aligned address and doing
// the reservation, another thread may reserve an overlapping region.
static char* reserve_large_pages_aligned(size_t size, size_t alignment, bool exec) {
  log_debug(pagesize)("Reserving large pages at an aligned address, alignment=" SIZE_FORMAT "%s",
                      byte_size_in_exact_unit(alignment), exact_unit_for_byte_size(alignment));

  const int attempts_limit = 20;
  for (int i = 0; i < attempts_limit; i++) {
    // Probe for a suitably aligned candidate address ...
    char* candidate = find_aligned_address(size, alignment);

    // ... and try the actual large page reservation there.
    char* reserved = reserve_large_pages(size, candidate, exec);
    if (reserved != NULL) {
      guarantee(is_aligned(reserved, alignment), "Must be aligned");
      return reserved;
    }
  }

  log_debug(pagesize)("Failed reserving large pages at aligned address");
  return NULL;
}
3355
// Reserve 'bytes' of large-page memory; returns NULL to request small-page
// fallback when the size is not large-page aligned (Windows cannot mix
// page sizes within one mapping).
char* os::pd_reserve_memory_special(size_t bytes, size_t alignment, size_t page_size, char* addr,
                                    bool exec) {
  assert(UseLargePages, "only for large pages");
  assert(page_size == os::large_page_size(), "Currently only support one large page size on Windows");
  assert(is_aligned(addr, alignment), "Must be");
  assert(is_aligned(addr, page_size), "Must be");

  if (!is_aligned(bytes, page_size)) {
    // Fallback to small pages, Windows does not support mixed mappings.
    return NULL;
  }

  // The requested alignment can be larger than the page size, for example with G1
  // the alignment is bound to the heap region size. So this reservation needs to
  // ensure that the requested alignment is met. When there is a requested address
  // this solves it self, since it must be properly aligned already.
  if (addr == NULL && alignment > page_size) {
    return reserve_large_pages_aligned(bytes, alignment, exec);
  }

  // No additional requirements, just reserve the large pages.
  return reserve_large_pages(bytes, addr, exec);
}
3379
bool os::pd_release_memory_special(char* base, size_t bytes) {
3380
assert(base != NULL, "Sanity check");
3381
return pd_release_memory(base, bytes);
3382
}
3383
3384
// No OS-specific statistics to print on Windows.
void os::print_statistics() {
}
3387
// Print a diagnostic for a failed commit_memory call, including the last
// OS error text captured via os::lasterror().
static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
  int err = os::get_last_error();
  char buf[256];
  size_t buf_len = os::lasterror(buf, sizeof(buf));
  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
          exec, buf_len != 0 ? buf : "<no_error_string>", err);
}
3396
// Commit [addr, addr+bytes). With NUMA interleaving the range may span
// several VirtualAlloc base allocations that must be committed one by one;
// otherwise a single MEM_COMMIT covers the whole request.
bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
  if (bytes == 0) {
    // Don't bother the OS with noops.
    return true;
  }
  assert((size_t) addr % os::vm_page_size() == 0, "commit on page boundaries");
  assert(bytes % os::vm_page_size() == 0, "commit in page-sized chunks");
  // Don't attempt to print anything if the OS call fails. We're
  // probably low on resources, so the print itself may cause crashes.

  // unless we have NUMAInterleaving enabled, the range of a commit
  // is always within a reserve covered by a single VirtualAlloc
  // in that case we can just do a single commit for the requested size
  if (!UseNUMAInterleaving) {
    if (virtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
      return false;
    }
    if (exec) {
      DWORD oldprot;
      // Windows doc says to use VirtualProtect to get execute permissions
      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
        return false;
      }
    }
    return true;
  } else {

    // when NUMAInterleaving is enabled, the commit might cover a range that
    // came from multiple VirtualAlloc reserves (using allocate_pages_individually).
    // VirtualQuery can help us determine that. The RegionSize that VirtualQuery
    // returns represents the number of bytes that can be committed in one step.
    size_t bytes_remaining = bytes;
    char * next_alloc_addr = addr;
    while (bytes_remaining > 0) {
      MEMORY_BASIC_INFORMATION alloc_info;
      VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
      size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
      if (virtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
                       PAGE_READWRITE) == NULL) {
        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                            exec);)
        return false;
      }
      if (exec) {
        DWORD oldprot;
        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
                            PAGE_EXECUTE_READWRITE, &oldprot)) {
          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
                                              exec);)
          return false;
        }
      }
      bytes_remaining -= bytes_to_rq;
      next_alloc_addr += bytes_to_rq;
    }
  }
  // if we made it this far, return true
  return true;
}
3458
// Commit variant taking an alignment hint.
bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
                          bool exec) {
  // alignment_hint is ignored on this OS
  return pd_commit_memory(addr, size, exec);
}
3464
// Commit memory or abort the VM with 'mesg' on failure.
void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
                                  const char* mesg) {
  assert(mesg != NULL, "mesg must be specified");
  if (!pd_commit_memory(addr, size, exec)) {
    warn_fail_commit_memory(addr, size, exec);
    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "%s", mesg);
  }
}
3473
// Commit-or-exit variant taking an alignment hint.
void os::pd_commit_memory_or_exit(char* addr, size_t size,
                                  size_t alignment_hint, bool exec,
                                  const char* mesg) {
  // alignment_hint is ignored on this OS
  pd_commit_memory_or_exit(addr, size, exec, mesg);
}
3480
bool os::pd_uncommit_memory(char* addr, size_t bytes, bool exec) {
3481
if (bytes == 0) {
3482
// Don't bother the OS with noops.
3483
return true;
3484
}
3485
assert((size_t) addr % os::vm_page_size() == 0, "uncommit on page boundaries");
3486
assert(bytes % os::vm_page_size() == 0, "uncommit in page-sized chunks");
3487
return (virtualFree(addr, bytes, MEM_DECOMMIT) == TRUE);
3488
}
3489
3490
// Release the reservation(s) covering [addr, addr+bytes).
bool os::pd_release_memory(char* addr, size_t bytes) {
  // Given a range we are to release, we require a mapping to start at the beginning of that range;
  // if NUMA or LP we allow the range to contain multiple mappings, which have to cover the range
  // completely; otherwise the range must match an OS mapping exactly.
  address start = (address)addr;
  address end = start + bytes;
  os::win32::mapping_info_t mi;
  const bool multiple_mappings_allowed = UseLargePagesIndividualAllocation || UseNUMAInterleaving;
  address p = start;
  bool first_mapping = true;

  do {
    // Find mapping and check it
    const char* err = NULL;
    if (!os::win32::find_mapping(p, &mi)) {
      err = "no mapping found";
    } else {
      if (first_mapping) {
        if (mi.base != start) {
          err = "base address mismatch";
        }
        if (multiple_mappings_allowed ? (mi.size > bytes) : (mi.size != bytes)) {
          err = "size mismatch";
        }
      } else {
        assert(p == mi.base && mi.size > 0, "Sanity");
        if (mi.base + mi.size > end) {
          err = "mapping overlaps end";
        }
        if (mi.size == 0) {
          err = "zero length mapping?"; // Should never happen; just to prevent endlessly looping in release.
        }
      }
    }
    // Handle mapping error. We assert in debug, unconditionally print a warning in release.
    if (err != NULL) {
      log_warning(os)("bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
#ifdef ASSERT
      os::print_memory_mappings((char*)start, bytes, tty);
      assert(false, "bad release: [" PTR_FORMAT "-" PTR_FORMAT "): %s", p2i(start), p2i(end), err);
#endif
      return false;
    }
    // Free this range
    if (virtualFree(p, 0, MEM_RELEASE) == FALSE) {
      return false;
    }
    first_mapping = false;
    // Advance past the mapping just released.
    p = mi.base + mi.size;
  } while (p < end);

  return true;
}
3544
// Stack guard pages are realized here by committing/uncommitting the range;
// protection changes (if any) are handled elsewhere.
bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  return os::commit_memory(addr, size, !ExecMem);
}

bool os::remove_stack_guard_pages(char* addr, size_t size) {
  return os::uncommit_memory(addr, size);
}
3552
static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
3553
uint count = 0;
3554
bool ret = false;
3555
size_t bytes_remaining = bytes;
3556
char * next_protect_addr = addr;
3557
3558
// Use VirtualQuery() to get the chunk size.
3559
while (bytes_remaining) {
3560
MEMORY_BASIC_INFORMATION alloc_info;
3561
if (VirtualQuery(next_protect_addr, &alloc_info, sizeof(alloc_info)) == 0) {
3562
return false;
3563
}
3564
3565
size_t bytes_to_protect = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
3566
// We used different API at allocate_pages_individually() based on UseNUMAInterleaving,
3567
// but we don't distinguish here as both cases are protected by same API.
3568
ret = VirtualProtect(next_protect_addr, bytes_to_protect, p, old_status) != 0;
3569
warning("Failed protecting pages individually for chunk #%u", count);
3570
if (!ret) {
3571
return false;
3572
}
3573
3574
bytes_remaining -= bytes_to_protect;
3575
next_protect_addr += bytes_to_protect;
3576
count++;
3577
}
3578
return ret;
3579
}
3580
3581
// Set protections specified
// Maps the platform-independent ProtType onto a Win32 page protection and
// applies it; commits the range first when it is not yet committed (Win32 can
// only change protection on committed memory).
bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
                        bool is_committed) {
  unsigned int p = 0;
  switch (prot) {
  case MEM_PROT_NONE: p = PAGE_NOACCESS; break;
  case MEM_PROT_READ: p = PAGE_READONLY; break;
  case MEM_PROT_RW: p = PAGE_READWRITE; break;
  case MEM_PROT_RWX: p = PAGE_EXECUTE_READWRITE; break;
  default:
    ShouldNotReachHere();
  }

  DWORD old_status;

  // Strange enough, but on Win32 one can change protection only for committed
  // memory, not a big deal anyway, as bytes less or equal than 64K
  if (!is_committed) {
    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
                          "cannot commit protection page");
  }
  // One cannot use os::guard_memory() here, as on Win32 guard page
  // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
  //
  // Pages in the region become guard pages. Any attempt to access a guard page
  // causes the system to raise a STATUS_GUARD_PAGE exception and turn off
  // the guard page status. Guard pages thus act as a one-time access alarm.
  bool ret;
  if (UseNUMAInterleaving) {
    // If UseNUMAInterleaving is enabled, the pages may have been allocated a chunk at a time,
    // so we must protect the chunks individually.
    ret = protect_pages_individually(addr, bytes, p, &old_status);
  } else {
    ret = VirtualProtect(addr, bytes, p, &old_status) != 0;
  }
#ifdef ASSERT
  if (!ret) {
    int err = os::get_last_error();
    char buf[256];
    size_t buf_len = os::lasterror(buf, sizeof(buf));
    warning("INFO: os::protect_memory(" PTR_FORMAT ", " SIZE_FORMAT
            ") failed; error='%s' (DOS error/errno=%d)", addr, bytes,
            buf_len != 0 ? buf : "<no_error_string>", err);
  }
#endif
  return ret;
}
3629
bool os::guard_memory(char* addr, size_t bytes) {
3630
DWORD old_status;
3631
return VirtualProtect(addr, bytes, PAGE_READWRITE | PAGE_GUARD, &old_status) != 0;
3632
}
3633
3634
bool os::unguard_memory(char* addr, size_t bytes) {
3635
DWORD old_status;
3636
return VirtualProtect(addr, bytes, PAGE_READWRITE, &old_status) != 0;
3637
}
3638
3639
// Memory realignment and NUMA placement hints are no-ops on Windows.
void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) { }
void os::numa_make_global(char *addr, size_t bytes) { }
void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) { }
// Windows does not notify the VM of NUMA topology changes.
bool os::numa_topology_changed() { return false; }
// At least one group is always reported, even on UMA systems.
size_t os::numa_get_groups_num() { return MAX2(numa_node_list_holder.get_count(), 1); }
// The current thread is always reported as belonging to group 0.
int os::numa_get_group_id() { return 0; }
3646
// Fills 'ids' with up to 'size' leaf NUMA group ids and returns the number
// of entries written. On UMA systems a single pseudo-group 0 is reported.
size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  // UMA machine: provide the single pseudo-group answer.
  if (numa_node_list_holder.get_count() == 0 && size > 0) {
    ids[0] = 0;
    return 1;
  }
  // Clamp the request to the number of groups actually present.
  size_t group_count = MIN2(size, numa_get_groups_num());
  for (int idx = 0; idx < (int)group_count; idx++) {
    ids[idx] = numa_node_list_holder.get_node_list_entry(idx);
  }
  return group_count;
}
3660
3661
// Address-to-NUMA-node lookup is not implemented on Windows; always group 0.
int os::numa_get_group_id_for_address(const void* address) {
  return 0;
}
3664
3665
// Per-page placement information is not available on Windows.
bool os::get_page_info(char *start, page_info* info) {
  return false;
}
3668
3669
// Page scanning is not supported on Windows; report the whole range as
// scanned by returning 'end'.
char *os::scan_pages(char *start, char* end, page_info* page_expected,
                     page_info* page_found) {
  return end;
}
3673
3674
// Returns an address guaranteed never to be handed out by reserve_memory,
// suitable as a poison/sentinel value.
char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
#ifdef _M_ARM64
  // AArch64 has a maximum addressable space of 48-bits
  return (char*)((1ull << 48) - 1);
#else
  return (char*)-1;
#endif
}
3685
3686
#define MAX_ERROR_COUNT 100
#define SYS_THREAD_ERROR 0xffffffffUL

// Releases a thread that was created suspended (see os::create_thread()),
// allowing it to begin execution.
void os::pd_start_thread(Thread* thread) {
  DWORD ret = ResumeThread(thread->osthread()->thread_handle());
  // Returns previous suspend state:
  // 0: Thread was not suspended
  // 1: Thread is running now
  // >1: Thread is still suspended.
  assert(ret != SYS_THREAD_ERROR, "StartThread failed"); // should propagate back
}
3697
3698
3699
// Short sleep, direct OS call.
//
// ms = 0, means allow others (if any) to run.
//
void os::naked_short_sleep(jlong ms) {
  // Uninterruptible: keep well below one second so shutdown is not delayed.
  assert(ms < 1000, "Un-interruptable sleep, short time use only");
  Sleep(ms);
}
3707
3708
// Windows does not provide sleep functionality with nanosecond resolution, so we
3709
// try to approximate this with spinning combined with yielding if another thread
3710
// is ready to run on the current processor.
3711
void os::naked_short_nanosleep(jlong ns) {
3712
assert(ns > -1 && ns < NANOUNITS, "Un-interruptable sleep, short time use only");
3713
3714
int64_t start = os::javaTimeNanos();
3715
do {
3716
if (SwitchToThread() == 0) {
3717
// Nothing else is ready to run on this cpu, spin a little
3718
SpinPause();
3719
}
3720
} while (os::javaTimeNanos() - start < ns);
3721
}
3722
3723
// Sleep forever; naked call to OS-specific sleep; use with CAUTION
3724
void os::infinite_sleep() {
3725
while (true) { // sleep forever ...
3726
Sleep(100000); // ... 100 seconds at a time
3727
}
3728
}
3729
3730
// NOTE(review): this typedef appears unused within the visible code - confirm
// before removing.
typedef BOOL (WINAPI * STTSignature)(void);

// Yield the remainder of the current time slice to another ready thread.
void os::naked_yield() {
  // Consider passing back the return value from SwitchToThread().
  SwitchToThread();
}
3736
3737
// Win32 only gives you access to seven real priorities at a time,
// so we compress Java's ten down to seven. It would be better
// if we dynamically adjusted relative priorities.

// Default mapping from Java thread priorities (index 0..CriticalPriority)
// to Win32 thread priorities. May be replaced by prio_policy1 in prio_init().
int os::java_to_os_priority[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_NORMAL,                       // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_HIGHEST,                      // 10 MaxPriority
  THREAD_PRIORITY_HIGHEST                       // 11 CriticalPriority
};
3755
3756
// Alternative, more aggressive priority mapping selected by
// -XX:ThreadPriorityPolicy=1 (installed by prio_init()).
int prio_policy1[CriticalPriority + 1] = {
  THREAD_PRIORITY_IDLE,                         // 0  Entry should never be used
  THREAD_PRIORITY_LOWEST,                       // 1  MinPriority
  THREAD_PRIORITY_LOWEST,                       // 2
  THREAD_PRIORITY_BELOW_NORMAL,                 // 3
  THREAD_PRIORITY_BELOW_NORMAL,                 // 4
  THREAD_PRIORITY_NORMAL,                       // 5  NormPriority
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 6
  THREAD_PRIORITY_ABOVE_NORMAL,                 // 7
  THREAD_PRIORITY_HIGHEST,                      // 8
  THREAD_PRIORITY_HIGHEST,                      // 9  NearMaxPriority
  THREAD_PRIORITY_TIME_CRITICAL,                // 10 MaxPriority
  THREAD_PRIORITY_TIME_CRITICAL                 // 11 CriticalPriority
};
3770
3771
static int prio_init() {
3772
// If ThreadPriorityPolicy is 1, switch tables
3773
if (ThreadPriorityPolicy == 1) {
3774
int i;
3775
for (i = 0; i < CriticalPriority + 1; i++) {
3776
os::java_to_os_priority[i] = prio_policy1[i];
3777
}
3778
}
3779
if (UseCriticalJavaThreadPriority) {
3780
os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
3781
}
3782
return 0;
3783
}
3784
3785
// Applies 'priority' (already an OS-level value) to the given thread.
// A no-op returning OS_OK when -XX:-UseThreadPriorities.
OSReturn os::set_native_priority(Thread* thread, int priority) {
  if (!UseThreadPriorities) {
    return OS_OK;
  }
  const HANDLE handle = thread->osthread()->thread_handle();
  return (SetThreadPriority(handle, priority) != 0) ? OS_OK : OS_ERR;
}
3790
3791
// Reads the thread's current OS-level priority into *priority_ptr.
// When priorities are disabled, reports the default NormPriority mapping.
OSReturn os::get_native_priority(const Thread* const thread,
                                 int* priority_ptr) {
  if (!UseThreadPriorities) {
    *priority_ptr = java_to_os_priority[NormPriority];
    return OS_OK;
  }
  const int os_prio = GetThreadPriority(thread->osthread()->thread_handle());
  if (os_prio == THREAD_PRIORITY_ERROR_RETURN) {
    assert(false, "GetThreadPriority failed");
    return OS_ERR;
  }
  *priority_ptr = os_prio;
  return OS_OK;
}
3805
3806
// GetCurrentThreadId() returns DWORD; it is widened into intx here.
intx os::current_thread_id() { return GetCurrentThreadId(); }
3808
3809
// Pid cached by os::init(); remains 0 until initialization has run.
static int _initial_pid = 0;

int os::current_process_id() {
  // Fall back to a fresh _getpid() if os::init() has not run yet.
  return (_initial_pid ? _initial_pid : _getpid());
}
3814
3815
// Definitions of the os::win32 statics; populated by initialize_system_info().
int    os::win32::_vm_page_size              = 0;
int    os::win32::_vm_allocation_granularity = 0;
int    os::win32::_processor_type            = 0;
// Processor level is not available on non-NT systems, use vm_version instead
int    os::win32::_processor_level           = 0;
julong os::win32::_physical_memory           = 0;
size_t os::win32::_default_stack_size        = 0;

intx          os::win32::_os_thread_limit    = 0;
volatile intx os::win32::_os_thread_count    = 0;

bool   os::win32::_is_windows_server         = false;

// 6573254
// Currently, the bug is observed across all the supported Windows releases,
// including the latest one (as of this writing - Windows Server 2012 R2)
bool   os::win32::_has_exit_bug              = true;
3832
3833
void os::win32::initialize_system_info() {
3834
SYSTEM_INFO si;
3835
GetSystemInfo(&si);
3836
_vm_page_size = si.dwPageSize;
3837
_vm_allocation_granularity = si.dwAllocationGranularity;
3838
_processor_type = si.dwProcessorType;
3839
_processor_level = si.wProcessorLevel;
3840
set_processor_count(si.dwNumberOfProcessors);
3841
3842
MEMORYSTATUSEX ms;
3843
ms.dwLength = sizeof(ms);
3844
3845
// also returns dwAvailPhys (free physical memory bytes), dwTotalVirtual, dwAvailVirtual,
3846
// dwMemoryLoad (% of memory in use)
3847
GlobalMemoryStatusEx(&ms);
3848
_physical_memory = ms.ullTotalPhys;
3849
3850
if (FLAG_IS_DEFAULT(MaxRAM)) {
3851
// Adjust MaxRAM according to the maximum virtual address space available.
3852
FLAG_SET_DEFAULT(MaxRAM, MIN2(MaxRAM, (uint64_t) ms.ullTotalVirtual));
3853
}
3854
3855
OSVERSIONINFOEX oi;
3856
oi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
3857
GetVersionEx((OSVERSIONINFO*)&oi);
3858
switch (oi.dwPlatformId) {
3859
case VER_PLATFORM_WIN32_NT:
3860
{
3861
int os_vers = oi.dwMajorVersion * 1000 + oi.dwMinorVersion;
3862
if (oi.wProductType == VER_NT_DOMAIN_CONTROLLER ||
3863
oi.wProductType == VER_NT_SERVER) {
3864
_is_windows_server = true;
3865
}
3866
}
3867
break;
3868
default: fatal("Unknown platform");
3869
}
3870
3871
_default_stack_size = os::current_stack_size();
3872
assert(_default_stack_size > (size_t) _vm_page_size, "invalid stack size");
3873
assert((_default_stack_size & (_vm_page_size - 1)) == 0,
3874
"stack size not a multiple of page size");
3875
3876
initialize_performance_counter();
3877
}
3878
3879
3880
// Loads a Windows system DLL by bare name (no path components allowed),
// trying the system directory first and then the Windows directory.
// Returns NULL on failure with an explanatory message in 'ebuf'.
HINSTANCE os::win32::load_Windows_dll(const char* name, char *ebuf,
                                      int ebuflen) {
  char path[MAX_PATH];
  DWORD size;
  DWORD pathLen = (DWORD)sizeof(path);
  HINSTANCE result = NULL;

  // only allow library name without path component
  assert(strchr(name, '\\') == NULL, "path not allowed");
  assert(strchr(name, ':') == NULL, "path not allowed");
  if (strchr(name, '\\') != NULL || strchr(name, ':') != NULL) {
    jio_snprintf(ebuf, ebuflen,
                 "Invalid parameter while calling os::win32::load_windows_dll(): cannot take path: %s", name);
    return NULL;
  }

  // search system directory
  if ((size = GetSystemDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // try Windows directory
  if ((size = GetWindowsDirectory(path, pathLen)) > 0) {
    if (size >= pathLen) {
      return NULL; // truncated
    }
    if (jio_snprintf(path + size, pathLen - size, "\\%s", name) == -1) {
      return NULL; // truncated
    }
    if ((result = (HINSTANCE)os::dll_load(path, ebuf, ebuflen)) != NULL) {
      return result;
    }
  }

  // Not found in either directory.
  jio_snprintf(ebuf, ebuflen,
               "os::win32::load_windows_dll() cannot load %s from system directories.", name);
  return NULL;
}
3926
3927
#define MAXIMUM_THREADS_TO_KEEP (16 * MAXIMUM_WAIT_OBJECTS)
#define EXIT_TIMEOUT 300000 /* 5 minutes */

// InitOnceExecuteOnce() callback: initializes the critical section passed via
// the context parameter. Returning TRUE marks the one-time init as successful.
static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
  InitializeCriticalSection((CRITICAL_SECTION*)pcrit_sect);
  return TRUE;
}
3934
3935
// Terminates either the current thread (EPT_THREAD) or the whole process
// (EPT_PROCESS / EPT_PROCESS_DIE), working around the Windows 'race at exit'
// bug (JDK-6573254) by serializing thread exits against process exit.
// Only returns 'exit_code' formally; control never actually reaches the end.
int os::win32::exit_process_or_thread(Ept what, int exit_code) {
  // Basic approach:
  //  - Each exiting thread registers its intent to exit and then does so.
  //  - A thread trying to terminate the process must wait for all
  //    threads currently exiting to complete their exit.

  if (os::win32::has_exit_bug()) {
    // The array holds handles of the threads that have started exiting by calling
    // _endthreadex().
    // Should be large enough to avoid blocking the exiting thread due to lack of
    // a free slot.
    static HANDLE handles[MAXIMUM_THREADS_TO_KEEP];
    static int handle_count = 0;

    static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
    static CRITICAL_SECTION crit_sect;
    static volatile DWORD process_exiting = 0;
    int i, j;
    DWORD res;
    HANDLE hproc, hthr;

    // We only attempt to register threads until a process exiting
    // thread manages to set the process_exiting flag. Any threads
    // that come through here after the process_exiting flag is set
    // are unregistered and will be caught in the SuspendThread()
    // infinite loop below.
    bool registered = false;

    // The first thread that reached this point, initializes the critical section.
    if (!InitOnceExecuteOnce(&init_once_crit_sect, init_crit_sect_call, &crit_sect, NULL)) {
      warning("crit_sect initialization failed in %s: %d\n", __FILE__, __LINE__);
    } else if (Atomic::load_acquire(&process_exiting) == 0) {
      if (what != EPT_THREAD) {
        // Atomically set process_exiting before the critical section
        // to increase the visibility between racing threads.
        Atomic::cmpxchg(&process_exiting, (DWORD)0, GetCurrentThreadId());
      }
      EnterCriticalSection(&crit_sect);

      if (what == EPT_THREAD && Atomic::load_acquire(&process_exiting) == 0) {
        // Remove from the array those handles of the threads that have completed exiting.
        for (i = 0, j = 0; i < handle_count; ++i) {
          res = WaitForSingleObject(handles[i], 0 /* don't wait */);
          if (res == WAIT_TIMEOUT) {
            handles[j++] = handles[i];
          } else {
            if (res == WAIT_FAILED) {
              warning("WaitForSingleObject failed (%u) in %s: %d\n",
                      GetLastError(), __FILE__, __LINE__);
            }
            // Don't keep the handle, if we failed waiting for it.
            CloseHandle(handles[i]);
          }
        }

        // If there's no free slot in the array of the kept handles, we'll have to
        // wait until at least one thread completes exiting.
        if ((handle_count = j) == MAXIMUM_THREADS_TO_KEEP) {
          // Raise the priority of the oldest exiting thread to increase its chances
          // to complete sooner.
          SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
            i = (res - WAIT_OBJECT_0);
            handle_count = MAXIMUM_THREADS_TO_KEEP - 1;
            // Compact the array over the slot of the completed thread.
            for (; i < handle_count; ++i) {
              handles[i] = handles[i + 1];
            }
          } else {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Don't keep handles, if we failed waiting for them.
            for (i = 0; i < MAXIMUM_THREADS_TO_KEEP; ++i) {
              CloseHandle(handles[i]);
            }
            handle_count = 0;
          }
        }

        // Store a duplicate of the current thread handle in the array of handles.
        hproc = GetCurrentProcess();
        hthr = GetCurrentThread();
        if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                             0, FALSE, DUPLICATE_SAME_ACCESS)) {
          warning("DuplicateHandle failed (%u) in %s: %d\n",
                  GetLastError(), __FILE__, __LINE__);

          // We can't register this thread (no more handles) so this thread
          // may be racing with a thread that is calling exit(). If the thread
          // that is calling exit() has managed to set the process_exiting
          // flag, then this thread will be caught in the SuspendThread()
          // infinite loop below which closes that race. A small timing
          // window remains before the process_exiting flag is set, but it
          // is only exposed when we are out of handles.
        } else {
          ++handle_count;
          registered = true;

          // The current exiting thread has stored its handle in the array, and now
          // should leave the critical section before calling _endthreadex().
        }

      } else if (what != EPT_THREAD && handle_count > 0) {
        jlong start_time, finish_time, timeout_left;
        // Before ending the process, make sure all the threads that had called
        // _endthreadex() completed.

        // Set the priority level of the current thread to the same value as
        // the priority level of exiting threads.
        // This is to ensure it will be given a fair chance to execute if
        // the timeout expires.
        hthr = GetCurrentThread();
        SetThreadPriority(hthr, THREAD_PRIORITY_ABOVE_NORMAL);
        start_time = os::javaTimeNanos();
        finish_time = start_time + ((jlong)EXIT_TIMEOUT * 1000000L);
        // Wait for exiting threads in portions of at most
        // MAXIMUM_WAIT_OBJECTS handles, as required by the Win32 API.
        for (i = 0; ; ) {
          int portion_count = handle_count - i;
          if (portion_count > MAXIMUM_WAIT_OBJECTS) {
            portion_count = MAXIMUM_WAIT_OBJECTS;
          }
          for (j = 0; j < portion_count; ++j) {
            SetThreadPriority(handles[i + j], THREAD_PRIORITY_ABOVE_NORMAL);
          }
          timeout_left = (finish_time - start_time) / 1000000L;
          if (timeout_left < 0) {
            timeout_left = 0;
          }
          res = WaitForMultipleObjects(portion_count, handles + i, TRUE, timeout_left);
          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
                    (res == WAIT_FAILED ? "failed" : "timed out"),
                    GetLastError(), __FILE__, __LINE__);
            // Reset portion_count so we close the remaining
            // handles due to this error.
            portion_count = handle_count - i;
          }
          for (j = 0; j < portion_count; ++j) {
            CloseHandle(handles[i + j]);
          }
          if ((i += portion_count) >= handle_count) {
            break;
          }
          start_time = os::javaTimeNanos();
        }
        handle_count = 0;
      }

      LeaveCriticalSection(&crit_sect);
    }

    if (!registered &&
        Atomic::load_acquire(&process_exiting) != 0 &&
        process_exiting != GetCurrentThreadId()) {
      // Some other thread is about to call exit(), so we don't let
      // the current unregistered thread proceed to exit() or _endthreadex()
      while (true) {
        SuspendThread(GetCurrentThread());
        // Avoid busy-wait loop, if SuspendThread() failed.
        Sleep(EXIT_TIMEOUT);
      }
    }
  }

  // We are here if either
  // - there's no 'race at exit' bug on this OS release;
  // - initialization of the critical section failed (unlikely);
  // - the current thread has registered itself and left the critical section;
  // - the process-exiting thread has raised the flag and left the critical section.
  if (what == EPT_THREAD) {
    _endthreadex((unsigned)exit_code);
  } else if (what == EPT_PROCESS) {
    ::exit(exit_code);
  } else {
    _exit(exit_code);
  }

  // Should not reach here
  return exit_code;
}
4115
4116
#undef EXIT_TIMEOUT
4117
4118
void os::win32::setmode_streams() {
4119
_setmode(_fileno(stdin), _O_BINARY);
4120
_setmode(_fileno(stdout), _O_BINARY);
4121
_setmode(_fileno(stderr), _O_BINARY);
4122
}
4123
4124
void os::wait_for_keypress_at_exit(void) {
4125
if (PauseAtExit) {
4126
fprintf(stderr, "Press any key to continue...\n");
4127
fgetc(stdin);
4128
}
4129
}
4130
4131
4132
bool os::message_box(const char* title, const char* message) {
4133
int result = MessageBox(NULL, message, title,
4134
MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
4135
return result == IDYES;
4136
}
4137
4138
#ifndef PRODUCT
#ifndef _WIN64
// Helpers to check whether NX protection is enabled
// (32-bit, non-product builds only).

// SEH filter: catch only access violations caused by an execute attempt.
int nx_exception_filter(_EXCEPTION_POINTERS *pex) {
  if (pex->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION &&
      pex->ExceptionRecord->NumberParameters > 0 &&
      pex->ExceptionRecord->ExceptionInformation[0] ==
      EXCEPTION_INFO_EXEC_VIOLATION) {
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

void nx_check_protection() {
  // If NX is enabled we'll get an exception calling into code on the stack
  char code[] = { (char)0xC3 };               // ret
  void *code_ptr = (void *)code;
  __try {
    __asm call code_ptr                       // execute the 'ret' on the stack
  } __except(nx_exception_filter((_EXCEPTION_POINTERS*)_exception_info())) {
    tty->print_raw_cr("NX protection detected.");
  }
}
#endif // _WIN64
#endif // PRODUCT
4163
4164
// This is called _before_ the global arguments have been parsed
void os::init(void) {
  _initial_pid = _getpid();

  win32::initialize_system_info();
  win32::setmode_streams();
  _page_sizes.add(win32::vm_page_size());

  // This may be overridden later when argument processing is done.
  FLAG_SET_ERGO(UseLargePagesIndividualAllocation, false);

  // Initialize main_process and main_thread
  main_process = GetCurrentProcess();  // Remember main_process is a pseudo handle
  // Duplicate into a real handle so it stays valid across threads.
  if (!DuplicateHandle(main_process, GetCurrentThread(), main_process,
                       &main_thread, THREAD_ALL_ACCESS, false, 0)) {
    fatal("DuplicateHandle failed\n");
  }
  main_thread_id = (int) GetCurrentThreadId();

  // initialize fast thread access - only used for 32-bit
  win32::initialize_thread_ptr_offset();
}
4186
4187
// To install functions for atexit processing
extern "C" {
  // atexit hook that releases the PerfMemory region at process exit.
  static void perfMemory_exit_helper() {
    perfMemory_exit();
  }
}
4193
4194
static jint initSock();
4195
4196
// this is called _after_ the global arguments have been parsed
// Returns JNI_OK on success, JNI_ERR if the configured stack size is too
// small or socket initialization fails.
jint os::init_2(void) {

  // This could be set any time but all platforms
  // have to set it the same so we have to mirror Solaris.
  DEBUG_ONLY(os::set_mutex_init_done();)

  // Setup Windows Exceptions

#if defined(USE_VECTORED_EXCEPTION_HANDLING)
  topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
  previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
#endif

  // for debugging float code generation bugs
#if defined(ASSERT) && !defined(_WIN64)
  static long fp_control_word = 0;
  __asm { fstcw fp_control_word }
  // see Intel PPro Manual, Vol. 2, p 7-16
  const long invalid = 0x01;
  fp_control_word |= invalid;
  __asm { fldcw fp_control_word }
#endif

  // If stack_commit_size is 0, windows will reserve the default size,
  // but only commit a small portion of it.
  size_t stack_commit_size = align_up(ThreadStackSize*K, os::vm_page_size());
  size_t default_reserve_size = os::win32::default_stack_size();
  size_t actual_reserve_size = stack_commit_size;
  if (stack_commit_size < default_reserve_size) {
    // If stack_commit_size == 0, we want this too
    actual_reserve_size = default_reserve_size;
  }

  // Check minimum allowable stack size for thread creation and to initialize
  // the java system classes, including StackOverflowError - depends on page
  // size. Add two 4K pages for compiler2 recursion in main thread.
  // Add in 4*BytesPerWord 4K pages to account for VM stack during
  // class initialization depending on 32 or 64 bit VM.
  size_t min_stack_allowed =
            (size_t)(StackOverflow::stack_guard_zone_size() +
                     StackOverflow::stack_shadow_zone_size() +
                     (4*BytesPerWord COMPILER2_PRESENT(+2)) * 4 * K);

  min_stack_allowed = align_up(min_stack_allowed, os::vm_page_size());

  if (actual_reserve_size < min_stack_allowed) {
    tty->print_cr("\nThe Java thread stack size specified is too small. "
                  "Specify at least %dk",
                  min_stack_allowed / K);
    return JNI_ERR;
  }

  JavaThread::set_stack_size_at_create(stack_commit_size);

  // Calculate theoretical max. size of Threads to guard gainst artifical
  // out-of-memory situations, where all available address-space has been
  // reserved by thread stacks.
  assert(actual_reserve_size != 0, "Must have a stack");

  // Calculate the thread limit when we should start doing Virtual Memory
  // banging. Currently when the threads will have used all but 200Mb of space.
  //
  // TODO: consider performing a similar calculation for commit size instead
  // as reserve size, since on a 64-bit platform we'll run into that more
  // often than running out of virtual memory space. We can use the
  // lower value of the two calculations as the os_thread_limit.
  size_t max_address_space = ((size_t)1 << (BitsPerWord - 1)) - (200 * K * K);
  win32::_os_thread_limit = (intx)(max_address_space / actual_reserve_size);

  // at exit methods are called in the reverse order of their registration.
  // there is no limit to the number of functions registered. atexit does
  // not set errno.

  if (PerfAllowAtExitRegistration) {
    // only register atexit functions if PerfAllowAtExitRegistration is set.
    // atexit functions can be delayed until process exit time, which
    // can be problematic for embedded VM situations. Embedded VMs should
    // call DestroyJavaVM() to assure that VM resources are released.

    // note: perfMemory_exit_helper atexit function may be removed in
    // the future if the appropriate cleanup code can be added to the
    // VM_Exit VMOperation's doit method.
    if (atexit(perfMemory_exit_helper) != 0) {
      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
    }
  }

#ifndef _WIN64
  // Print something if NX is enabled (win32 on AMD64)
  NOT_PRODUCT(if (PrintMiscellaneous && Verbose) nx_check_protection());
#endif

  // initialize thread priority policy
  prio_init();

  UseNUMA = false; // We don't fully support this yet

  if (UseNUMAInterleaving || (UseNUMA && FLAG_IS_DEFAULT(UseNUMAInterleaving))) {
    if (!numa_interleaving_init()) {
      FLAG_SET_ERGO(UseNUMAInterleaving, false);
    } else if (!UseNUMAInterleaving) {
      // When NUMA requested, not-NUMA-aware allocations default to interleaving.
      FLAG_SET_ERGO(UseNUMAInterleaving, true);
    }
  }

  if (initSock() != JNI_OK) {
    return JNI_ERR;
  }

  SymbolEngine::recalc_search_path();

  // Initialize data for jdk.internal.misc.Signal
  if (!ReduceSignalUsage) {
    jdk_misc_signal_init();
  }

  return JNI_OK;
}
4316
4317
// combine the high and low DWORD into a ULONGLONG
4318
static ULONGLONG make_double_word(DWORD high_word, DWORD low_word) {
4319
ULONGLONG value = high_word;
4320
value <<= sizeof(high_word) * 8;
4321
value |= low_word;
4322
return value;
4323
}
4324
4325
// Transfers data from WIN32_FILE_ATTRIBUTE_DATA structure to struct stat
4326
static void file_attribute_data_to_stat(struct stat* sbuf, WIN32_FILE_ATTRIBUTE_DATA file_data) {
4327
::memset((void*)sbuf, 0, sizeof(struct stat));
4328
sbuf->st_size = (_off_t)make_double_word(file_data.nFileSizeHigh, file_data.nFileSizeLow);
4329
sbuf->st_mtime = make_double_word(file_data.ftLastWriteTime.dwHighDateTime,
4330
file_data.ftLastWriteTime.dwLowDateTime);
4331
sbuf->st_ctime = make_double_word(file_data.ftCreationTime.dwHighDateTime,
4332
file_data.ftCreationTime.dwLowDateTime);
4333
sbuf->st_atime = make_double_word(file_data.ftLastAccessTime.dwHighDateTime,
4334
file_data.ftLastAccessTime.dwLowDateTime);
4335
if ((file_data.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0) {
4336
sbuf->st_mode |= S_IFDIR;
4337
} else {
4338
sbuf->st_mode |= S_IFREG;
4339
}
4340
}
4341
4342
// Converts an ANSI path to a freshly allocated wide-character string.
// On success returns ERROR_SUCCESS and the caller owns *unicode_path;
// returns EINVAL if the input cannot be converted.
static errno_t convert_to_unicode(char const* char_path, LPWSTR* unicode_path) {
  // First call computes the required length in wide chars (incl. the NUL).
  const int wide_len = MultiByteToWideChar(CP_ACP,
                                           MB_ERR_INVALID_CHARS,
                                           char_path, -1,
                                           NULL, 0);
  if (wide_len == 0) {
    return EINVAL;
  }

  *unicode_path = NEW_C_HEAP_ARRAY(WCHAR, wide_len, mtInternal);

  // Second call performs the actual conversion into the new buffer.
  const int converted = MultiByteToWideChar(CP_ACP,
                                            MB_ERR_INVALID_CHARS,
                                            char_path, -1,
                                            *unicode_path, wide_len);
  assert(converted == wide_len, "length already checked above");

  return ERROR_SUCCESS;
}
4362
4363
// Resolves 'unicode_path' into a freshly allocated absolute path.
// On success returns ERROR_SUCCESS and the caller owns *full_path;
// returns EINVAL if the path cannot be resolved.
static errno_t get_full_path(LPCWSTR unicode_path, LPWSTR* full_path) {
  // Probe call: the returned size INCLUDES the terminating null character.
  const DWORD needed_len = GetFullPathNameW(unicode_path, 0, NULL, NULL);
  if (needed_len == 0) {
    return EINVAL;
  }

  *full_path = NEW_C_HEAP_ARRAY(WCHAR, needed_len, mtInternal);

  // With a sufficient buffer the return value EXCLUDES the terminating
  // null character.
  const DWORD copied_len = GetFullPathNameW(unicode_path, needed_len, *full_path, NULL);
  assert(copied_len <= needed_len, "length already checked above");

  return ERROR_SUCCESS;
}
4380
4381
static void set_path_prefix(char* buf, LPWSTR* prefix, int* prefix_off, bool* needs_fullpath) {
4382
*prefix_off = 0;
4383
*needs_fullpath = true;
4384
4385
if (::isalpha(buf[0]) && !::IsDBCSLeadByte(buf[0]) && buf[1] == ':' && buf[2] == '\\') {
4386
*prefix = L"\\\\?\\";
4387
} else if (buf[0] == '\\' && buf[1] == '\\') {
4388
if (buf[2] == '?' && buf[3] == '\\') {
4389
*prefix = L"";
4390
*needs_fullpath = false;
4391
} else {
4392
*prefix = L"\\\\?\\UNC";
4393
*prefix_off = 1; // Overwrite the first char with the prefix, so \\share\path becomes \\?\UNC\share\path
4394
}
4395
} else {
4396
*prefix = L"\\\\?\\";
4397
}
4398
}
4399
4400
// Returns the given path as an absolute wide path in unc format. The returned path is NULL
// on error (with err being set accordingly) and should be freed via os::free() otherwise.
// additional_space is the size of space, in wchar_t, the function will additionally add to
// the allocation of return buffer (such that the size of the returned buffer is at least
// wcslen(buf) + 1 + additional_space).
static wchar_t* wide_abs_unc_path(char const* path, errno_t & err, int additional_space = 0) {
  if ((path == NULL) || (path[0] == '\0')) {
    err = ENOENT;
    return NULL;
  }

  // Need to allocate at least room for 3 characters, since os::native_path transforms C: to C:.
  size_t buf_len = 1 + MAX2((size_t)3, strlen(path));
  char* buf = NEW_C_HEAP_ARRAY(char, buf_len, mtInternal);
  strncpy(buf, path, buf_len);
  os::native_path(buf);

  // Determine which long-path prefix applies and whether the path still
  // needs to be made absolute.
  LPWSTR prefix = NULL;
  int prefix_off = 0;
  bool needs_fullpath = true;
  set_path_prefix(buf, &prefix, &prefix_off, &needs_fullpath);

  LPWSTR unicode_path = NULL;
  err = convert_to_unicode(buf, &unicode_path);
  FREE_C_HEAP_ARRAY(char, buf);
  if (err != ERROR_SUCCESS) {
    return NULL;
  }

  LPWSTR converted_path = NULL;
  if (needs_fullpath) {
    err = get_full_path(unicode_path, &converted_path);
  } else {
    // Path is already in extended-length form; use it as-is.
    converted_path = unicode_path;
  }

  LPWSTR result = NULL;
  if (converted_path != NULL) {
    size_t prefix_len = wcslen(prefix);
    // prefix_off leading chars of the converted path are replaced by the prefix.
    size_t result_len = prefix_len - prefix_off + wcslen(converted_path) + additional_space + 1;
    result = NEW_C_HEAP_ARRAY(WCHAR, result_len, mtInternal);
    _snwprintf(result, result_len, L"%s%s", prefix, &converted_path[prefix_off]);

    // Remove trailing pathsep (not for \\?\<DRIVE>:\, since it would make it relative)
    result_len = wcslen(result);
    if ((result[result_len - 1] == L'\\') &&
        !(::iswalpha(result[4]) && result[5] == L':' && result_len == 7)) {
      result[result_len - 1] = L'\0';
    }
  }

  // Free the intermediate buffer(s); converted_path may alias unicode_path.
  if (converted_path != unicode_path) {
    FREE_C_HEAP_ARRAY(WCHAR, converted_path);
  }
  FREE_C_HEAP_ARRAY(WCHAR, unicode_path);

  return static_cast<wchar_t*>(result); // LPWSTR and wchat_t* are the same type on Windows.
}
4458
4459
int os::stat(const char *path, struct stat *sbuf) {
4460
errno_t err;
4461
wchar_t* wide_path = wide_abs_unc_path(path, err);
4462
4463
if (wide_path == NULL) {
4464
errno = err;
4465
return -1;
4466
}
4467
4468
WIN32_FILE_ATTRIBUTE_DATA file_data;;
4469
BOOL bret = ::GetFileAttributesExW(wide_path, GetFileExInfoStandard, &file_data);
4470
os::free(wide_path);
4471
4472
if (!bret) {
4473
errno = ::GetLastError();
4474
return -1;
4475
}
4476
4477
file_attribute_data_to_stat(sbuf, file_data);
4478
return 0;
4479
}
4480
4481
// Open 'file' for attribute/metadata access (zero access rights requested,
// read sharing allowed). Returns the handle, or INVALID_HANDLE_VALUE on
// failure; errno is set when the path itself could not be converted.
static HANDLE create_read_only_file_handle(const char* file) {
  errno_t conversion_err;
  wchar_t* unc_path = wide_abs_unc_path(file, conversion_err);

  if (unc_path == NULL) {
    errno = conversion_err;
    return INVALID_HANDLE_VALUE;
  }

  HANDLE result = ::CreateFileW(unc_path, 0, FILE_SHARE_READ,
                                NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  os::free(unc_path);

  return result;
}
4496
4497
bool os::same_files(const char* file1, const char* file2) {
4498
4499
if (file1 == NULL && file2 == NULL) {
4500
return true;
4501
}
4502
4503
if (file1 == NULL || file2 == NULL) {
4504
return false;
4505
}
4506
4507
if (strcmp(file1, file2) == 0) {
4508
return true;
4509
}
4510
4511
char* native_file1 = os::strdup_check_oom(file1);
4512
native_file1 = os::native_path(native_file1);
4513
char* native_file2 = os::strdup_check_oom(file2);
4514
native_file2 = os::native_path(native_file2);
4515
if (strcmp(native_file1, native_file2) == 0) {
4516
os::free(native_file1);
4517
os::free(native_file2);
4518
return true;
4519
}
4520
4521
HANDLE handle1 = create_read_only_file_handle(native_file1);
4522
HANDLE handle2 = create_read_only_file_handle(native_file2);
4523
bool result = false;
4524
4525
// if we could open both paths...
4526
if (handle1 != INVALID_HANDLE_VALUE && handle2 != INVALID_HANDLE_VALUE) {
4527
BY_HANDLE_FILE_INFORMATION fileInfo1;
4528
BY_HANDLE_FILE_INFORMATION fileInfo2;
4529
if (::GetFileInformationByHandle(handle1, &fileInfo1) &&
4530
::GetFileInformationByHandle(handle2, &fileInfo2)) {
4531
// the paths are the same if they refer to the same file (fileindex) on the same volume (volume serial number)
4532
if (fileInfo1.dwVolumeSerialNumber == fileInfo2.dwVolumeSerialNumber &&
4533
fileInfo1.nFileIndexHigh == fileInfo2.nFileIndexHigh &&
4534
fileInfo1.nFileIndexLow == fileInfo2.nFileIndexLow) {
4535
result = true;
4536
}
4537
}
4538
}
4539
4540
//free the handles
4541
if (handle1 != INVALID_HANDLE_VALUE) {
4542
::CloseHandle(handle1);
4543
}
4544
4545
if (handle2 != INVALID_HANDLE_VALUE) {
4546
::CloseHandle(handle2);
4547
}
4548
4549
os::free(native_file1);
4550
os::free(native_file2);
4551
4552
return result;
4553
}
4554
4555
// Combine the two 32-bit halves of a FILETIME into one signed 64-bit value
// (a count of 100-nanosecond intervals).
#define FT2INT64(ft) \
  ((jlong)((jlong)(ft).dwHighDateTime << 32 | (julong)(ft).dwLowDateTime))
4557
4558
4559
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4560
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
4561
// of a thread.
4562
//
4563
// current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4564
// the fast estimate available on the platform.
4565
4566
// current_thread_cpu_time() is not optimized for Windows yet
// Returns combined user+system CPU time of the current thread.
jlong os::current_thread_cpu_time() {
  // return user + sys since the cost is the same
  return os::thread_cpu_time(Thread::current(), true /* user+sys */);
}
4571
4572
// Returns combined user+system CPU time of the given thread.
jlong os::thread_cpu_time(Thread* thread) {
  // consistent with what current_thread_cpu_time() returns.
  return os::thread_cpu_time(thread, true /* user+sys */);
}
4576
4577
// Current thread's CPU time; user+system or user-only per the flag.
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
}
4580
4581
// Returns the CPU time consumed by 'thread': user+system when
// user_sys_cpu_time is true, user time only otherwise. Returns -1 if the
// OS query fails.
jlong os::thread_cpu_time(Thread* thread, bool user_sys_cpu_time) {
  // This code is a copy from the classic VM -> hpi::sysThreadCPUTime
  // If this function changes, os::is_thread_cpu_time_supported() should too
  FILETIME CreationTime;
  FILETIME ExitTime;
  FILETIME KernelTime;
  FILETIME UserTime;

  if (GetThreadTimes(thread->osthread()->thread_handle(), &CreationTime,
                     &ExitTime, &KernelTime, &UserTime) == 0) {
    return -1;
  } else if (user_sys_cpu_time) {
    // FILETIME counts 100-ns intervals; * 100 converts to nanoseconds.
    return (FT2INT64(UserTime) + FT2INT64(KernelTime)) * 100;
  } else {
    return FT2INT64(UserTime) * 100;
  }
}
4598
4599
// Describe, for JVMTI, the timer backing current_thread_cpu_time().
void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
4605
4606
// Describe, for JVMTI, the timer backing thread_cpu_time().
void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  info_ptr->max_value = ALL_64_BITS;        // the max value -- all 64 bits
  info_ptr->may_skip_backward = false;      // GetThreadTimes returns absolute time
  info_ptr->may_skip_forward = false;       // GetThreadTimes returns absolute time
  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;   // user+system time is returned
}
4612
4613
bool os::is_thread_cpu_time_supported() {
4614
// see os::thread_cpu_time
4615
FILETIME CreationTime;
4616
FILETIME ExitTime;
4617
FILETIME KernelTime;
4618
FILETIME UserTime;
4619
4620
if (GetThreadTimes(GetCurrentThread(), &CreationTime, &ExitTime,
4621
&KernelTime, &UserTime) == 0) {
4622
return false;
4623
} else {
4624
return true;
4625
}
4626
}
4627
4628
// Windows doesn't provide a loadavg primitive so this is stubbed out for now.
// It does have primitives (PDH API) to get CPU usage and run queue length.
// "\\Processor(_Total)\\% Processor Time", "\\System\\Processor Queue Length"
// If we wanted to implement loadavg on Windows, we have a few options:
//
// a) Query CPU usage and run queue length and "fake" an answer by
//    returning the CPU usage if it's under 100%, and the run queue
//    length otherwise.  It turns out that querying is pretty slow
//    on Windows, on the order of 200 microseconds on a fast machine.
//    Note that on the Windows the CPU usage value is the % usage
//    since the last time the API was called (and the first call
//    returns 100%), so we'd have to deal with that as well.
//
// b) Sample the "fake" answer using a sampling thread and store
//    the answer in a global variable.  The call to loadavg would
//    just return the value of the global, avoiding the slow query.
//
// c) Sample a better answer using exponential decay to smooth the
//    value.  This is basically the algorithm used by UNIX kernels.
//
// Note that sampling thread starvation could affect both (b) and (c).
int os::loadavg(double loadavg[], int nelem) {
  // Not implemented on Windows; always reports failure.
  return -1;
}
4652
4653
4654
// DontYieldALot=false by default: dutifully perform all yields as requested by JVM_Yield()
bool os::dont_yield() {
  return DontYieldALot;
}
4658
4659
// Open 'path' (resolved to an absolute \\?\ wide path, so long paths work)
// in binary, non-inheritable mode. Returns the fd, or -1 with errno set.
int os::open(const char *path, int oflag, int mode) {
  errno_t err;
  wchar_t* wide_path = wide_abs_unc_path(path, err);

  if (wide_path == NULL) {
    errno = err;
    return -1;
  }
  int fd = ::_wopen(wide_path, oflag | O_BINARY | O_NOINHERIT, mode);
  os::free(wide_path);

  if (fd == -1) {
    // NOTE(review): _wopen already sets errno; overwriting it with the Win32
    // GetLastError() code mixes two error-code domains -- confirm callers
    // expect Win32 codes in errno here.
    errno = ::GetLastError();
  }

  return fd;
}
4676
4677
// Wrap an existing fd in a stdio FILE stream with the given mode.
FILE* os::open(int fd, const char* mode) {
  return ::_fdopen(fd, mode);
}
4680
4681
// Thin wrapper over the CRT write(); returns the number of bytes written.
size_t os::write(int fd, const void *buf, unsigned int nBytes) {
  return ::write(fd, buf, nBytes);
}
4684
4685
// Thin wrapper over the CRT close().
int os::close(int fd) {
  return ::close(fd);
}
4688
4689
// Terminate the whole process with the given exit code.
void os::exit(int num) {
  win32::exit_process_or_thread(win32::EPT_PROCESS, num);
}
4692
4693
// Is a (classpath) directory empty?
bool os::dir_is_empty(const char* path) {
  errno_t err;
  // Reserve 2 extra wchar_t's for the "\\*" pattern appended below.
  wchar_t* wide_path = wide_abs_unc_path(path, err, 2);

  if (wide_path == NULL) {
    errno = err;
    return false;
  }

  // Make sure we end with "\\*"
  if (wide_path[wcslen(wide_path) - 1] == L'\\') {
    wcscat(wide_path, L"*");
  } else {
    wcscat(wide_path, L"\\*");
  }

  WIN32_FIND_DATAW fd;
  HANDLE f = ::FindFirstFileW(wide_path, &fd);
  os::free(wide_path);
  bool is_empty = true;

  if (f != INVALID_HANDLE_VALUE) {
    // NOTE(review): the entry produced by FindFirstFileW itself is never
    // examined; the loop starts with FindNextFileW. Presumably the first
    // entry is always "." for a directory -- confirm.
    while (is_empty && ::FindNextFileW(f, &fd)) {
      // An empty directory contains only the current directory file
      // and the previous directory file.
      if ((wcscmp(fd.cFileName, L".") != 0) &&
          (wcscmp(fd.cFileName, L"..") != 0)) {
        is_empty = false;
      }
    }
    FindClose(f);
  } else {
    errno = ::GetLastError();
  }

  return is_empty;
}
4731
4732
// create binary file, rewriting existing file if required
4733
int os::create_binary_file(const char* path, bool rewrite_existing) {
4734
int oflags = _O_CREAT | _O_WRONLY | _O_BINARY;
4735
if (!rewrite_existing) {
4736
oflags |= _O_EXCL;
4737
}
4738
return ::open(path, oflags, _S_IREAD | _S_IWRITE);
4739
}
4740
4741
// return current position of file pointer
jlong os::current_file_offset(int fd) {
  return (jlong)::_lseeki64(fd, (__int64)0L, SEEK_CUR);
}
4745
4746
// move file pointer to the specified offset (absolute, from file start)
jlong os::seek_to_file_offset(int fd, jlong offset) {
  return (jlong)::_lseeki64(fd, (__int64)offset, SEEK_SET);
}
4750
4751
4752
// 64-bit-safe lseek; 'whence' is the usual SEEK_SET/SEEK_CUR/SEEK_END.
jlong os::lseek(int fd, jlong offset, int whence) {
  return (jlong) ::_lseeki64(fd, offset, whence);
}
4755
4756
// Positional read: read up to nBytes at 'offset' via an OVERLAPPED ReadFile,
// without using the fd's current file position.
ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {
  OVERLAPPED ov;
  DWORD nread;
  BOOL result;

  ZeroMemory(&ov, sizeof(ov));
  // Split the 64-bit offset across the OVERLAPPED offset fields.
  ov.Offset = (DWORD)offset;
  ov.OffsetHigh = (DWORD)(offset >> 32);

  HANDLE h = (HANDLE)::_get_osfhandle(fd);

  result = ReadFile(h, (LPVOID)buf, nBytes, &nread, &ov);

  // NOTE(review): any failure (ReadFile at/past EOF included) is reported
  // as 0 bytes read; callers cannot distinguish error from empty read.
  return result ? nread : 0;
}
4771
4772
4773
// This method is a slightly reworked copy of JDK's sysNativePath
// from src/windows/hpi/src/path_md.c

// Convert a pathname to native format.  On win32, this involves forcing all
// separators to be '\\' rather than '/' (both are legal inputs, but Win95
// sometimes rejects '/') and removing redundant separators.  The input path is
// assumed to have been converted into the character encoding used by the local
// system.  Because this might be a double-byte encoding, care is taken to
// treat double-byte lead characters correctly.
//
// This procedure modifies the given path in place, as the result is never
// longer than the original.  There is no error return; this operation always
// succeeds.
char * os::native_path(char *path) {
  char *src = path, *dst = path, *end = path;
  char *colon = NULL;  // If a drive specifier is found, this will
                       // point to the colon following the drive letter

  // Assumption: '/', '\\', ':', and drive letters are never lead bytes
  assert(((!::IsDBCSLeadByte('/')) && (!::IsDBCSLeadByte('\\'))
          && (!::IsDBCSLeadByte(':'))), "Illegal lead byte");

  // Check for leading separators
#define isfilesep(c) ((c) == '/' || (c) == '\\')
  while (isfilesep(*src)) {
    src++;
  }

  if (::isalpha(*src) && !::IsDBCSLeadByte(*src) && src[1] == ':') {
    // Remove leading separators if followed by drive specifier.  This
    // hack is necessary to support file URLs containing drive
    // specifiers (e.g., "file://c:/path").  As a side effect,
    // "/c:/path" can be used as an alternative to "c:/path".
    *dst++ = *src++;
    colon = dst;
    *dst++ = ':';
    src++;
  } else {
    src = path;
    if (isfilesep(src[0]) && isfilesep(src[1])) {
      // UNC pathname: Retain first separator; leave src pointed at
      // second separator so that further separators will be collapsed
      // into the second separator.  The result will be a pathname
      // beginning with "\\\\" followed (most likely) by a host name.
      src = dst = path + 1;
      path[0] = '\\';  // Force first separator to '\\'
    }
  }

  end = dst;

  // Remove redundant separators from remainder of path, forcing all
  // separators to be '\\' rather than '/'. Also, single byte space
  // characters are removed from the end of the path because those
  // are not legal ending characters on this operating system.
  //
  while (*src != '\0') {
    if (isfilesep(*src)) {
      *dst++ = '\\'; src++;
      while (isfilesep(*src)) src++;   // collapse runs of separators
      if (*src == '\0') {
        // Check for trailing separator
        end = dst;
        if (colon == dst - 2) break;  // "z:\\"
        if (dst == path + 1) break;   // "\\"
        if (dst == path + 2 && isfilesep(path[0])) {
          // "\\\\" is not collapsed to "\\" because "\\\\" marks the
          // beginning of a UNC pathname.  Even though it is not, by
          // itself, a valid UNC pathname, we leave it as is in order
          // to be consistent with the path canonicalizer as well
          // as the win32 APIs, which treat this case as an invalid
          // UNC pathname rather than as an alias for the root
          // directory of the current drive.
          break;
        }
        end = --dst;  // Path does not denote a root directory, so
                      // remove trailing separator
        break;
      }
      end = dst;
    } else {
      if (::IsDBCSLeadByte(*src)) {  // Copy a double-byte character
        *dst++ = *src++;
        if (*src) *dst++ = *src++;
        end = dst;
      } else {  // Copy a single-byte character
        char c = *src++;
        *dst++ = c;
        // Space is not a legal ending character
        if (c != ' ') end = dst;
      }
    }
  }

  *end = '\0';

  // For "z:", add "." to work around a bug in the C runtime library
  if (colon == dst - 1) {
    path[2] = '.';
    path[3] = '\0';
  }

  return path;
}
4877
4878
// This code is a copy of JDK's sysSetLength
// from src/windows/hpi/src/sys_api_md.c

// Set the length of 'fd' to 'length' bytes by moving the file pointer there
// and marking end-of-file. Returns 0 on success, -1 on any error.
int os::ftruncate(int fd, jlong length) {
  HANDLE h = (HANDLE)::_get_osfhandle(fd);
  long high = (long)(length >> 32);  // upper 32 bits for SetFilePointer
  DWORD ret;

  if (h == (HANDLE)(-1)) {
    return -1;
  }

  ret = ::SetFilePointer(h, (long)(length), &high, FILE_BEGIN);
  // INVALID_SET_FILE_POINTER (0xFFFFFFFF) is also a valid low dword, so a
  // real failure additionally requires GetLastError() != NO_ERROR.
  if ((ret == 0xFFFFFFFF) && (::GetLastError() != NO_ERROR)) {
    return -1;
  }

  if (::SetEndOfFile(h) == FALSE) {
    return -1;
  }

  return 0;
}
4901
4902
// Return the fd underlying a stdio FILE stream.
int os::get_fileno(FILE* fp) {
  return _fileno(fp);
}
4905
4906
// This code is a copy of JDK's sysSync
// from src/windows/hpi/src/sys_api_md.c
// except for the legacy workaround for a bug in Win 98

// Flush fd's buffers to the device. Returns 0 on success, -1 on failure.
int os::fsync(int fd) {
  HANDLE handle = (HANDLE)::_get_osfhandle(fd);

  // NOTE(review): ERROR_ACCESS_DENIED is tolerated -- presumably because
  // FlushFileBuffers fails with it on read-only handles; confirm.
  if ((!::FlushFileBuffers(handle)) &&
      (GetLastError() != ERROR_ACCESS_DENIED)) {
    // from winerror.h
    return -1;
  }
  return 0;
}
4920
4921
static int nonSeekAvailable(int, long *);
4922
static int stdinAvailable(int, long *);
4923
4924
// This code is a copy of JDK's sysAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store in *bytes the number of bytes readable from fd without blocking.
// Character devices and pipes are handled via stdinAvailable /
// nonSeekAvailable; seekable files via lseek arithmetic. Returns
// TRUE/FALSE for success/failure.
int os::available(int fd, jlong *bytes) {
  jlong cur, end;
  struct _stati64 stbuf64;

  if (::_fstati64(fd, &stbuf64) >= 0) {
    int mode = stbuf64.st_mode;
    if (S_ISCHR(mode) || S_ISFIFO(mode)) {
      // Non-seekable device: fd 0 (stdin) gets the console-aware query.
      int ret;
      long lpbytes;
      if (fd == 0) {
        ret = stdinAvailable(fd, &lpbytes);
      } else {
        ret = nonSeekAvailable(fd, &lpbytes);
      }
      (*bytes) = (jlong)(lpbytes);
      return ret;
    }
    // Seekable file: available = end - current, restoring the position after.
    if ((cur = ::_lseeki64(fd, 0L, SEEK_CUR)) == -1) {
      return FALSE;
    } else if ((end = ::_lseeki64(fd, 0L, SEEK_END)) == -1) {
      return FALSE;
    } else if (::_lseeki64(fd, cur, SEEK_SET) == -1) {
      return FALSE;
    }
    *bytes = end - cur;
    return TRUE;
  } else {
    return FALSE;
  }
}
4957
4958
// Acquire the CRT's per-stream lock (pairs with os::funlockfile).
void os::flockfile(FILE* fp) {
  _lock_file(fp);
}
4961
4962
// Release the CRT's per-stream lock (pairs with os::flockfile).
void os::funlockfile(FILE* fp) {
  _unlock_file(fp);
}
4965
4966
// This code is a copy of JDK's nonSeekAvailable
// from src/windows/hpi/src/sys_api_md.c

// Store in *pbytes the number of bytes buffered in the pipe behind fd.
// Returns TRUE on success, FALSE on failure.
static int nonSeekAvailable(int fd, long *pbytes) {
  // This is used for available on non-seekable devices
  // (like both named and anonymous pipes, such as pipes
  //  connected to an exec'd process).
  // Standard Input is a special case.
  HANDLE han;

  if ((han = (HANDLE) ::_get_osfhandle(fd)) == (HANDLE)(-1)) {
    return FALSE;
  }

  if (! ::PeekNamedPipe(han, NULL, 0, NULL, (LPDWORD)pbytes, NULL)) {
    // PeekNamedPipe fails when at EOF.  In that case we
    // simply make *pbytes = 0 which is consistent with the
    // behavior we get on Solaris when an fd is at EOF.
    // The only alternative is to raise an Exception,
    // which isn't really warranted.
    //
    if (::GetLastError() != ERROR_BROKEN_PIPE) {
      return FALSE;
    }
    *pbytes = 0;
  }
  return TRUE;
}
4994
4995
#define MAX_INPUT_EVENTS 2000
4996
4997
// This code is a copy of JDK's stdinAvailable
4998
// from src/windows/hpi/src/sys_api_md.c
4999
5000
static int stdinAvailable(int fd, long *pbytes) {
5001
HANDLE han;
5002
DWORD numEventsRead = 0; // Number of events read from buffer
5003
DWORD numEvents = 0; // Number of events in buffer
5004
DWORD i = 0; // Loop index
5005
DWORD curLength = 0; // Position marker
5006
DWORD actualLength = 0; // Number of bytes readable
5007
BOOL error = FALSE; // Error holder
5008
INPUT_RECORD *lpBuffer; // Pointer to records of input events
5009
5010
if ((han = ::GetStdHandle(STD_INPUT_HANDLE)) == INVALID_HANDLE_VALUE) {
5011
return FALSE;
5012
}
5013
5014
// Construct an array of input records in the console buffer
5015
error = ::GetNumberOfConsoleInputEvents(han, &numEvents);
5016
if (error == 0) {
5017
return nonSeekAvailable(fd, pbytes);
5018
}
5019
5020
// lpBuffer must fit into 64K or else PeekConsoleInput fails
5021
if (numEvents > MAX_INPUT_EVENTS) {
5022
numEvents = MAX_INPUT_EVENTS;
5023
}
5024
5025
lpBuffer = (INPUT_RECORD *)os::malloc(numEvents * sizeof(INPUT_RECORD), mtInternal);
5026
if (lpBuffer == NULL) {
5027
return FALSE;
5028
}
5029
5030
error = ::PeekConsoleInput(han, lpBuffer, numEvents, &numEventsRead);
5031
if (error == 0) {
5032
os::free(lpBuffer);
5033
return FALSE;
5034
}
5035
5036
// Examine input records for the number of bytes available
5037
for (i=0; i<numEvents; i++) {
5038
if (lpBuffer[i].EventType == KEY_EVENT) {
5039
5040
KEY_EVENT_RECORD *keyRecord = (KEY_EVENT_RECORD *)
5041
&(lpBuffer[i].Event);
5042
if (keyRecord->bKeyDown == TRUE) {
5043
CHAR *keyPressed = (CHAR *) &(keyRecord->uChar);
5044
curLength++;
5045
if (*keyPressed == '\r') {
5046
actualLength = curLength;
5047
}
5048
}
5049
}
5050
}
5051
5052
if (lpBuffer != NULL) {
5053
os::free(lpBuffer);
5054
}
5055
5056
*pbytes = (long) actualLength;
5057
return TRUE;
5058
}
5059
5060
// Map a block of memory.
// Maps 'bytes' of 'file_name' starting at 'file_offset' at address 'addr'.
// Executable requests are satisfied by copying the file into anonymous
// memory instead of a real file mapping (see comment below). Returns the
// mapped base, or NULL on failure.
char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
                        char *addr, size_t bytes, bool read_only,
                        bool allow_exec) {

  errno_t err;
  wchar_t* wide_path = wide_abs_unc_path(file_name, err);

  if (wide_path == NULL) {
    return NULL;
  }

  HANDLE hFile;
  char* base;

  hFile = CreateFileW(wide_path, GENERIC_READ, FILE_SHARE_READ, NULL,
                      OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
  if (hFile == INVALID_HANDLE_VALUE) {
    log_info(os)("CreateFileW() failed: GetLastError->%ld.", GetLastError());
    os::free(wide_path);
    return NULL;
  }
  os::free(wide_path);

  if (allow_exec) {
    // CreateFileMapping/MapViewOfFileEx can't map executable memory
    // unless it comes from a PE image (which the shared archive is not.)
    // Even VirtualProtect refuses to give execute access to mapped memory
    // that was not previously executable.
    //
    // Instead, stick the executable region in anonymous memory.  Yuck.
    // Penalty is that ~4 pages will not be shareable - in the future
    // we might consider DLLizing the shared archive with a proper PE
    // header so that mapping executable + sharing is possible.

    base = (char*) virtualAlloc(addr, bytes, MEM_COMMIT | MEM_RESERVE,
                                PAGE_READWRITE);
    if (base == NULL) {
      CloseHandle(hFile);
      return NULL;
    }

    // Record virtual memory allocation
    MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);

    DWORD bytes_read;
    OVERLAPPED overlapped;
    overlapped.Offset = (DWORD)file_offset;
    overlapped.OffsetHigh = 0;
    overlapped.hEvent = NULL;
    // ReadFile guarantees that if the return value is true, the requested
    // number of bytes were read before returning.
    bool res = ReadFile(hFile, base, (DWORD)bytes, &bytes_read, &overlapped) != 0;
    if (!res) {
      log_info(os)("ReadFile() failed: GetLastError->%ld.", GetLastError());
      release_memory(base, bytes);
      CloseHandle(hFile);
      return NULL;
    }
  } else {
    HANDLE hMap = CreateFileMapping(hFile, NULL, PAGE_WRITECOPY, 0, 0,
                                    NULL /* file_name */);
    if (hMap == NULL) {
      log_info(os)("CreateFileMapping() failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return NULL;
    }

    // Copy-on-write view unless the caller only needs read access.
    DWORD access = read_only ? FILE_MAP_READ : FILE_MAP_COPY;
    base = (char*)mapViewOfFileEx(hMap, access, 0, (DWORD)file_offset,
                                  (DWORD)bytes, addr);
    if (base == NULL) {
      CloseHandle(hMap);
      CloseHandle(hFile);
      return NULL;
    }

    // The mapping object handle is no longer needed once the view exists.
    if (CloseHandle(hMap) == 0) {
      log_info(os)("CloseHandle(hMap) failed: GetLastError->%ld.", GetLastError());
      CloseHandle(hFile);
      return base;
    }
  }

  if (allow_exec) {
    DWORD old_protect;
    DWORD exec_access = read_only ? PAGE_EXECUTE_READ : PAGE_EXECUTE_READWRITE;
    bool res = VirtualProtect(base, bytes, exec_access, &old_protect) != 0;

    if (!res) {
      log_info(os)("VirtualProtect() failed: GetLastError->%ld.", GetLastError());
      // Don't consider this a hard error, on IA32 even if the
      // VirtualProtect fails, we should still be able to execute
      CloseHandle(hFile);
      return base;
    }
  }

  if (CloseHandle(hFile) == 0) {
    log_info(os)("CloseHandle(hFile) failed: GetLastError->%ld.", GetLastError());
    return base;
  }

  return base;
}
5165
5166
5167
// Remap a block of memory.
// Intentionally unimplemented on Windows; guarded by ShouldNotReachHere().
char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                          char *addr, size_t bytes, bool read_only,
                          bool allow_exec) {
  // This OS does not allow existing memory maps to be remapped so we
  // would have to unmap the memory before we remap it.

  // Because there is a small window between unmapping memory and mapping
  // it in again with different protections, CDS archives are mapped RW
  // on windows, so this function isn't called.
  ShouldNotReachHere();
  return NULL;
}
5180
5181
5182
// Unmap a block of memory.
// Returns true=success, otherwise false.

bool os::pd_unmap_memory(char* addr, size_t bytes) {
  MEMORY_BASIC_INFORMATION mem_info;
  if (VirtualQuery(addr, &mem_info, sizeof(mem_info)) == 0) {
    log_info(os)("VirtualQuery() failed: GetLastError->%ld.", GetLastError());
    return false;
  }

  // Executable memory was not mapped using CreateFileMapping/MapViewOfFileEx.
  // Instead, executable region was allocated using VirtualAlloc(). See
  // pd_map_memory() above.
  //
  // The following flags should match the 'exec_access' flags used for
  // VirtualProtect() in pd_map_memory().
  if (mem_info.Protect == PAGE_EXECUTE_READ ||
      mem_info.Protect == PAGE_EXECUTE_READWRITE) {
    return pd_release_memory(addr, bytes);
  }

  BOOL result = unmapViewOfFile(addr);
  if (result == 0) {
    return false;
  }
  return true;
}
5209
5210
// Create a marker file (PauseAtStartupFile, or ./vm.paused.<pid> by default)
// and block in a poll loop until someone deletes it.
void os::pause() {
  char filename[MAX_PATH];
  if (PauseAtStartupFile && PauseAtStartupFile[0]) {
    jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  } else {
    jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  }

  int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  if (fd != -1) {
    struct stat buf;
    ::close(fd);
    // Spin until the marker file disappears.
    while (::stat(filename, &buf) == 0) {
      Sleep(100);
    }
  } else {
    jio_fprintf(stderr,
                "Could not open pause file '%s', continuing immediately.\n", filename);
  }
}
5230
5231
// Static state identifying the single thread currently running under crash
// protection; consumed by os::ThreadCrashProtection::call() below.
Thread* os::ThreadCrashProtection::_protected_thread = NULL;
os::ThreadCrashProtection* os::ThreadCrashProtection::_crash_protection = NULL;

os::ThreadCrashProtection::ThreadCrashProtection() {
  // Only the JFR sampler thread is expected to use crash protection.
  _protected_thread = Thread::current();
  assert(_protected_thread->is_JfrSampler_thread(), "should be JFRSampler");
}
5238
5239
// See the caveats for this class in os_windows.hpp
// Protects the callback call so that raised OS EXCEPTIONS causes a jump back
// into this method and returns false. If no OS EXCEPTION was raised, returns
// true.
// The callback is supposed to provide the method that should be protected.
//
bool os::ThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
  bool success = true;
  // SEH guard: any structured exception raised inside cb.call() transfers
  // control to the __except block below instead of crashing the VM.
  __try {
    _crash_protection = this;
    cb.call();
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    // only for protection, nothing to do
    success = false;
  }
  // Clear the protection state regardless of outcome.
  _crash_protection = NULL;
  _protected_thread = NULL;
  return success;
}
5258
5259
5260
class HighResolutionInterval : public CHeapObj<mtThread> {
  // The default timer resolution seems to be 10 milliseconds.
  // (Where is this written down?)
  // If someone wants to sleep for only a fraction of the default,
  // then we set the timer resolution down to 1 millisecond for
  // the duration of their interval.
  // We carefully set the resolution back, since otherwise we
  // seem to incur an overhead (3%?) that we don't need.
  // CONSIDER: if ms is small, say 3, then we should run with a high resolution time.
  // Buf if ms is large, say 500, or 503, we should avoid the call to timeBeginPeriod().
  // Alternatively, we could compute the relative error (503/500 = .6%) and only use
  // timeBeginPeriod() if the relative error exceeded some threshold.
  // timeBeginPeriod() has been linked to problems with clock drift on win32 systems and
  // to decreased efficiency related to increased timer "tick" rates.  We want to minimize
  // (a) calls to timeBeginPeriod() and timeEndPeriod() and (b) time spent with high
  // resolution timers running.
 private:
  jlong resolution;  // non-zero iff we raised the timer resolution (RAII)
 public:
  HighResolutionInterval(jlong ms) {
    // Only intervals that are not a multiple of 10 ms benefit from the
    // 1 ms resolution.
    resolution = ms % 10L;
    if (resolution != 0) {
      MMRESULT result = timeBeginPeriod(1L);
    }
  }
  ~HighResolutionInterval() {
    // Restore the default resolution; must pair with timeBeginPeriod above.
    if (resolution != 0) {
      MMRESULT result = timeEndPeriod(1L);
    }
    resolution = 0L;
  }
};
5292
5293
// An Event wraps a win32 "CreateEvent" kernel handle.
5294
//
5295
// We have a number of choices regarding "CreateEvent" win32 handle leakage:
5296
//
5297
// 1: When a thread dies return the Event to the EventFreeList, clear the ParkHandle
5298
// field, and call CloseHandle() on the win32 event handle. Unpark() would
5299
// need to be modified to tolerate finding a NULL (invalid) win32 event handle.
5300
// In addition, an unpark() operation might fetch the handle field, but the
5301
// event could recycle between the fetch and the SetEvent() operation.
5302
// SetEvent() would either fail because the handle was invalid, or inadvertently work,
5303
// as the win32 handle value had been recycled. In an ideal world calling SetEvent()
5304
// on a stale but recycled handle would be harmless, but in practice this might
5305
// confuse other non-Sun code, so it's not a viable approach.
5306
//
5307
// 2: Once a win32 event handle is associated with an Event, it remains associated
5308
// with the Event. The event handle is never closed. This could be construed
5309
// as handle leakage, but only up to the maximum # of threads that have been extant
5310
// at any one time. This shouldn't be an issue, as windows platforms typically
5311
// permit a process to have hundreds of thousands of open handles.
5312
//
5313
// 3: Same as (1), but periodically, at stop-the-world time, rundown the EventFreeList
5314
// and release unused handles.
5315
//
5316
// 4: Add a CRITICAL_SECTION to the Event to protect LD+SetEvent from LD;ST(null);CloseHandle.
5317
// It's not clear, however, that we wouldn't be trading one type of leak for another.
5318
//
5319
// 5. Use an RCU-like mechanism (Read-Copy Update).
5320
// Or perhaps something similar to Maged Michael's "Hazard pointers".
5321
//
5322
// We use (2).
5323
//
5324
// TODO-FIXME:
5325
// 1. Reconcile Doug's JSR166 j.u.c park-unpark with the objectmonitor implementation.
5326
// 2. Consider wrapping the WaitForSingleObject(Ex) calls in SEH try/finally blocks
5327
// to recover from (or at least detect) the dreaded Windows 841176 bug.
5328
// 3. Collapse the JSR166 parker event, and the objectmonitor ParkEvent
5329
// into a single win32 CreateEvent() handle.
5330
//
5331
// Assumption:
5332
// Only one parker can exist on an event, which is why we allocate
5333
// them per-thread. Multiple unparkers can coexist.
5334
//
5335
// _Event transitions in park()
5336
// -1 => -1 : illegal
5337
// 1 => 0 : pass - return immediately
5338
// 0 => -1 : block; then set _Event to 0 before returning
5339
//
5340
// _Event transitions in unpark()
5341
// 0 => 1 : just return
5342
// 1 => 1 : just return
5343
// -1 => either 0 or 1; must signal target thread
5344
// That is, we can safely transition _Event from -1 to either
5345
// 0 or 1.
5346
//
5347
// _Event serves as a restricted-range semaphore.
5348
// -1 : thread is blocked, i.e. there is a waiter
5349
// 0 : neutral: thread is running or ready,
5350
// could have been signaled after a wait started
5351
// 1 : signaled - thread is running or ready
5352
//
5353
// Another possible encoding of _Event would be with
5354
// explicit "PARKED" == 01b and "SIGNALED" == 10b bits.
5355
//
5356
5357
// Timed park: block for at most Millis milliseconds, or until unpark().
// Returns OS_OK if awoken by unpark() (or the event was pre-signaled),
// OS_TIMEOUT if the timeout expired.
int os::PlatformEvent::park(jlong Millis) {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL , "Invariant");
  guarantee(Millis > 0          , "Invariant");

  // CONSIDER: defer assigning a CreateEvent() handle to the Event until
  // the initial park() operation.
  // Consider: use atomic decrement instead of CAS-loop

  // Atomically consume a pending signal (1 -> 0) or register as a waiter
  // (0 -> -1).
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  if (v != 0) return OS_OK;  // signal was pending -- no need to block

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  //
  // We decompose long timeouts into series of shorter timed waits.
  // Evidently large timo values passed in WaitForSingleObject() are problematic on some
  // versions of Windows.  See EventWait() for details.  This may be superstition.  Or not.
  // We trust the WAIT_TIMEOUT indication and don't track the elapsed wait time
  // with os::javaTimeNanos().  Furthermore, we assume that spurious returns from
  // ::WaitForSingleObject() caused by latent ::setEvent() operations will tend
  // to happen early in the wait interval.  Specifically, after a spurious wakeup (rv ==
  // WAIT_OBJECT_0 but _Event is still < 0) we don't bother to recompute Millis to compensate
  // for the already waited time.  This policy does not admit any new outcomes.
  // In the future, however, we might want to track the accumulated wait time and
  // adjust Millis accordingly if we encounter a spurious wakeup.

  const int MAXTIMEOUT = 0x10000000;
  DWORD rv = WAIT_TIMEOUT;
  while (_Event < 0 && Millis > 0) {
    DWORD prd = Millis;  // set prd = MIN(Millis, MAXTIMEOUT)
    if (Millis > MAXTIMEOUT) {
      prd = MAXTIMEOUT;
    }
    // Temporarily raise the timer resolution for sub-10ms accuracy.
    HighResolutionInterval *phri = NULL;
    if (!ForceTimeHighResolution) {
      phri = new HighResolutionInterval(prd);
    }
    rv = ::WaitForSingleObject(_ParkHandle, prd);
    assert(rv == WAIT_OBJECT_0 || rv == WAIT_TIMEOUT, "WaitForSingleObject failed");
    if (rv == WAIT_TIMEOUT) {
      Millis -= prd;
    }
    delete phri; // if it is NULL, harmless
  }
  v = _Event;
  _Event = 0;
  // see comment at end of os::PlatformEvent::park() below:
  OrderAccess::fence();
  // If we encounter a nearly simultaneous timeout expiry and unpark()
  // we return OS_OK indicating we awoke via unpark().
  // Implementor's license -- returning OS_TIMEOUT would be equally valid, however.
  return (v >= 0) ? OS_OK : OS_TIMEOUT;
}
5421
5422
// Block the calling thread until unpark() is called (no timeout).
void os::PlatformEvent::park() {
  // Transitions for _Event:
  //   -1 => -1 : illegal
  //    1 =>  0 : pass - return immediately
  //    0 => -1 : block; then set _Event to 0 before returning

  guarantee(_ParkHandle != NULL, "Invariant");
  // Invariant: Only the thread associated with the Event/PlatformEvent
  // may call park().
  // Consider: use atomic decrement instead of CAS-loop
  int v;
  for (;;) {
    v = _Event;
    if (Atomic::cmpxchg(&_Event, v, v-1) == v) break;
  }
  guarantee((v == 0) || (v == 1), "invariant");
  // A permit was available: consume it and return without blocking.
  if (v != 0) return;

  // Do this the hard way by blocking ...
  // TODO: consider a brief spin here, gated on the success of recent
  // spin attempts by this thread.
  // Re-check _Event after each wakeup to guard against spurious returns.
  while (_Event < 0) {
    DWORD rv = ::WaitForSingleObject(_ParkHandle, INFINITE);
    assert(rv == WAIT_OBJECT_0, "WaitForSingleObject failed");
  }

  // Usually we'll find _Event == 0 at this point, but as
  // an optional optimization we clear it, just in case multiple
  // unpark() operations drove _Event up to 1.
  _Event = 0;
  OrderAccess::fence();
  guarantee(_Event >= 0, "invariant");
}
5455
5456
// Wake the thread associated with this event, if it is blocked in park();
// otherwise leave a permit so the next park() returns immediately.
void os::PlatformEvent::unpark() {
  guarantee(_ParkHandle != NULL, "Invariant");

  // Transitions for _Event:
  //    0 => 1 : just return
  //    1 => 1 : just return
  //   -1 => either 0 or 1; must signal target thread
  //         That is, we can safely transition _Event from -1 to either
  //         0 or 1.
  // See also: "Semaphores in Plan 9" by Mullender & Cox
  //
  // Note: Forcing a transition from "-1" to "1" on an unpark() means
  // that it will take two back-to-back park() calls for the owning
  // thread to block. This has the benefit of forcing a spurious return
  // from the first park() call after an unpark() call which will help
  // shake out uses of park() and unpark() without condition variables.

  // Previous value >= 0 means no thread was blocked; nothing to signal.
  if (Atomic::xchg(&_Event, 1) >= 0) return;

  // Previous value was -1: a thread is (or will be) blocked; wake it.
  ::SetEvent(_ParkHandle);
}
5477
5478
5479
// JSR166
// -------------------------------------------------------

// The Windows implementation of Park is very straightforward: Basic
// operations on Win32 Events turn out to have the right semantics to
// use them directly.

// Block the current thread until Parker::unpark() is called, the thread
// is interrupted, or the timeout expires.
// time < 0              : return immediately.
// time == 0, !isAbsolute: wait indefinitely.
// isAbsolute            : 'time' is a deadline in milliseconds since the epoch.
// otherwise             : 'time' is a relative timeout in nanoseconds.
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkHandle != NULL, "invariant");
  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) { // already elapsed
      return;
    }
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0) { // Wait for the minimal time unit if zero
      time = 1;
    }
  }

  JavaThread* thread = JavaThread::current();

  // Don't wait if interrupted or already triggered
  if (thread->is_interrupted(false) ||
      WaitForSingleObject(_ParkHandle, 0) == WAIT_OBJECT_0) {
    // Consume the event so a subsequent park() blocks again.
    ResetEvent(_ParkHandle);
    return;
  } else {
    // Transition to blocked state so safepoints can proceed while we wait.
    ThreadBlockInVM tbivm(thread);
    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);

    WaitForSingleObject(_ParkHandle, time);
    ResetEvent(_ParkHandle);
  }
}
5520
5521
void Parker::unpark() {
5522
guarantee(_ParkHandle != NULL, "invariant");
5523
SetEvent(_ParkHandle);
5524
}
5525
5526
// Platform Monitor implementation

// Must already be locked
// Wait on the monitor's condition variable for up to 'millis' milliseconds
// (0 means wait forever). Returns OS_OK when signaled, OS_TIMEOUT otherwise.
int os::PlatformMonitor::wait(jlong millis) {
  assert(millis >= 0, "negative timeout");
  int ret = OS_TIMEOUT;
  // SleepConditionVariableCS atomically releases _mutex while blocked and
  // re-acquires it before returning.
  int status = SleepConditionVariableCS(&_cond, &_mutex,
                                        millis == 0 ? INFINITE : millis);
  if (status != 0) {
    ret = OS_OK;
  }
#ifndef PRODUCT
  else {
    // On failure, anything other than a timeout indicates a real error.
    DWORD err = GetLastError();
    assert(err == ERROR_TIMEOUT, "SleepConditionVariableCS: %ld:", err);
  }
#endif
  return ret;
}
5545
5546
// Run the specified command in a separate process. Return its exit value,
// or -1 on failure (e.g. can't create a new process).
int os::fork_and_exec(const char* cmd, bool dummy /* ignored */) {
  STARTUPINFO si;
  PROCESS_INFORMATION pi;
  DWORD exit_code;

  // Build "cmd /C <cmd>" so the command line is interpreted by the shell.
  char * cmd_string;
  const char * cmd_prefix = "cmd /C ";
  size_t len = strlen(cmd) + strlen(cmd_prefix) + 1;
  cmd_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtInternal);
  if (cmd_string == NULL) {
    return -1;
  }
  cmd_string[0] = '\0';
  strcat(cmd_string, cmd_prefix);
  strcat(cmd_string, cmd);

  // now replace all '\n' with '&' (cmd.exe's command separator), so a
  // multi-line command runs as a sequence of commands
  char * substring = cmd_string;
  while ((substring = strchr(substring, '\n')) != NULL) {
    substring[0] = '&';
    substring++;
  }
  memset(&si, 0, sizeof(si));
  si.cb = sizeof(si);
  memset(&pi, 0, sizeof(pi));
  BOOL rslt = CreateProcess(NULL,       // executable name - use command line
                            cmd_string, // command line
                            NULL,       // process security attribute
                            NULL,       // thread security attribute
                            TRUE,       // inherits system handles
                            0,          // no creation flags
                            NULL,       // use parent's environment block
                            NULL,       // use parent's starting directory
                            &si,        // (in) startup information
                            &pi);       // (out) process information

  if (rslt) {
    // Wait until child process exits.
    WaitForSingleObject(pi.hProcess, INFINITE);

    GetExitCodeProcess(pi.hProcess, &exit_code);

    // Close process and thread handles.
    CloseHandle(pi.hProcess);
    CloseHandle(pi.hThread);
  } else {
    exit_code = -1;
  }

  FREE_C_HEAP_ARRAY(char, cmd_string);
  return (int)exit_code;
}
5600
5601
// Print the library (and, if resolvable, the function + offset) containing
// 'addr' to 'st'. Returns true if the address maps to a loaded library.
bool os::find(address addr, outputStream* st) {
  char buf[256];
  int offset = -1;
  if (!os::dll_address_to_library_name(addr, buf, sizeof(buf), &offset)) {
    return false;
  }
  st->print(PTR_FORMAT " ", addr);
  if (strlen(buf) < sizeof(buf) - 1) {
    // Print only the file name portion of the library path.
    const char* base = strrchr(buf, '\\');
    st->print("%s", (base != NULL) ? base + 1 : buf);
  } else {
    // The library name is probably truncated. Let's omit the library name.
    // See also JDK-8147512.
  }
  if (os::dll_address_to_function_name(addr, buf, sizeof(buf), &offset)) {
    st->print("::%s + 0x%x", buf, offset);
  }
  st->cr();
  return true;
}
5626
5627
// One-time Winsock (version 2.2) initialization.
// Returns JNI_OK on success, JNI_ERR otherwise.
static jint initSock() {
  WSADATA wsadata;

  if (WSAStartup(MAKEWORD(2,2), &wsadata) == 0) {
    return JNI_OK;
  }
  jio_fprintf(stderr, "Could not initialize Winsock (error: %d)\n",
              ::GetLastError());
  return JNI_ERR;
}
5637
5638
struct hostent* os::get_host_by_name(char* name) {
5639
return (struct hostent*)gethostbyname(name);
5640
}
5641
5642
int os::socket_close(int fd) {
5643
return ::closesocket(fd);
5644
}
5645
5646
// Create a socket; thin wrapper over the Winsock socket() call.
int os::socket(int domain, int type, int protocol) {
  const int sock = ::socket(domain, type, protocol);
  return sock;
}
5649
5650
// Connect a socket; thin wrapper over the Winsock connect() call.
int os::connect(int fd, struct sockaddr* him, socklen_t len) {
  const int status = ::connect(fd, him, len);
  return status;
}
5653
5654
// Receive from a socket. Winsock takes an int length, so the size_t
// byte count is narrowed explicitly.
int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
  const int count = (int)nBytes;
  return ::recv(fd, buf, count, flags);
}
5657
5658
// Send on a socket. Winsock takes an int length, so the size_t
// byte count is narrowed explicitly.
int os::send(int fd, char* buf, size_t nBytes, uint flags) {
  const int count = (int)nBytes;
  return ::send(fd, buf, count, flags);
}
5661
5662
int os::raw_send(int fd, char* buf, size_t nBytes, uint flags) {
5663
return ::send(fd, buf, (int)nBytes, flags);
5664
}
5665
5666
// WINDOWS CONTEXT Flags for THREAD_SAMPLING
// Selects which parts of a thread's CONTEXT GetThreadContext() should
// capture when sampling (see os::SuspendedThreadTask::internal_do_task).
#if defined(IA32)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined(AMD64) || defined(_M_ARM64)
  #define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
#endif
5672
5673
// returns true if thread could be suspended,
5674
// false otherwise
5675
static bool do_suspend(HANDLE* h) {
5676
if (h != NULL) {
5677
if (SuspendThread(*h) != ~0) {
5678
return true;
5679
}
5680
}
5681
return false;
5682
}
5683
5684
// resume the thread
5685
// calling resume on an active thread is a no-op
5686
static void do_resume(HANDLE* h) {
5687
if (h != NULL) {
5688
ResumeThread(*h);
5689
}
5690
}
5691
5692
// retrieve a suspend/resume context capable handle
5693
// from the tid. Caller validates handle return value.
5694
void get_thread_handle_for_extended_context(HANDLE* h,
5695
OSThread::thread_id_t tid) {
5696
if (h != NULL) {
5697
*h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
5698
}
5699
}
5700
5701
// Thread sampling implementation
//
// Suspend the target thread, capture its CPU context, hand the context to
// do_task() (the sampling callback), then resume the thread again.
void os::SuspendedThreadTask::internal_do_task() {
  CONTEXT ctxt;
  HANDLE h = NULL;

  // get context capable handle for thread
  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());

  // sanity
  if (h == NULL || h == INVALID_HANDLE_VALUE) {
    return;
  }

  // suspend the thread
  if (do_suspend(&h)) {
    ctxt.ContextFlags = sampling_context_flags;
    // get thread context
    GetThreadContext(h, &ctxt);
    SuspendedThreadTaskContext context(_thread, &ctxt);
    // pass context to Thread Sampling impl
    do_task(context);
    // resume thread
    do_resume(&h);
  }

  // close handle
  CloseHandle(h);
}
5730
5731
bool os::start_debugging(char *buf, int buflen) {
5732
int len = (int)strlen(buf);
5733
char *p = &buf[len];
5734
5735
jio_snprintf(p, buflen-len,
5736
"\n\n"
5737
"Do you want to debug the problem?\n\n"
5738
"To debug, attach Visual Studio to process %d; then switch to thread 0x%x\n"
5739
"Select 'Yes' to launch Visual Studio automatically (PATH must include msdev)\n"
5740
"Otherwise, select 'No' to abort...",
5741
os::current_process_id(), os::current_thread_id());
5742
5743
bool yes = os::message_box("Unexpected Error", buf);
5744
5745
if (yes) {
5746
// os::breakpoint() calls DebugBreak(), which causes a breakpoint
5747
// exception. If VM is running inside a debugger, the debugger will
5748
// catch the exception. Otherwise, the breakpoint exception will reach
5749
// the default windows exception handler, which can spawn a debugger and
5750
// automatically attach to the dying VM.
5751
os::breakpoint();
5752
yes = false;
5753
}
5754
return yes;
5755
}
5756
5757
void* os::get_default_process_handle() {
5758
return (void*)GetModuleHandle(NULL);
5759
}
5760
5761
// Builds a platform dependent Agent_OnLoad_<lib_name> function name
// which is used to find statically linked in agents.
// Additionally for windows, takes into account __stdcall names.
// Parameters:
//            sym_name: Symbol in library we are looking for
//            lib_name: Name of library to look in, NULL for shared libs.
//            is_absolute_path == true if lib_name is absolute path to agent
//                                     such as "C:/a/b/L.dll"
//                             == false if only the base name of the library is passed in
//                                     such as "L"
// Returns a newly C-heap-allocated string (ownership passes to the caller),
// or NULL on allocation failure or if the library name is too short to
// carry both the JNI prefix and suffix.
char* os::build_agent_function_name(const char *sym_name, const char *lib_name,
                                    bool is_absolute_path) {
  char *agent_entry_name;
  size_t len;
  size_t name_len;
  size_t prefix_len = strlen(JNI_LIB_PREFIX);
  size_t suffix_len = strlen(JNI_LIB_SUFFIX);
  const char *start;

  if (lib_name != NULL) {
    len = name_len = strlen(lib_name);
    if (is_absolute_path) {
      // Need to strip path, prefix and suffix
      if ((start = strrchr(lib_name, *os::file_separator())) != NULL) {
        lib_name = ++start;
      } else {
        // Need to check for drive prefix
        if ((start = strchr(lib_name, ':')) != NULL) {
          lib_name = ++start;
        }
      }
      if (len <= (prefix_len + suffix_len)) {
        return NULL;
      }
      lib_name += prefix_len;
      name_len = strlen(lib_name) - suffix_len;
    }
  }
  // Room for sym_name, the '_' separator, the optional lib name, and '\0'.
  len = (lib_name != NULL ? name_len : 0) + strlen(sym_name) + 2;
  agent_entry_name = NEW_C_HEAP_ARRAY_RETURN_NULL(char, len, mtThread);
  if (agent_entry_name == NULL) {
    return NULL;
  }
  if (lib_name != NULL) {
    const char *p = strrchr(sym_name, '@');
    if (p != NULL && p != sym_name) {
      // sym_name == _Agent_OnLoad@XX (an __stdcall-decorated name):
      // the "@XX" argument-size suffix must stay at the very end.
      strncpy(agent_entry_name, sym_name, (p - sym_name));
      agent_entry_name[(p-sym_name)] = '\0';
      // agent_entry_name == _Agent_OnLoad
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
      strcat(agent_entry_name, p);
      // agent_entry_name == _Agent_OnLoad_lib_name@XX
    } else {
      strcpy(agent_entry_name, sym_name);
      strcat(agent_entry_name, "_");
      strncat(agent_entry_name, lib_name, name_len);
    }
  } else {
    strcpy(agent_entry_name, sym_name);
  }
  return agent_entry_name;
}
5825
5826
/*
5827
All the defined signal names for Windows.
5828
5829
NOTE that not all of these names are accepted by FindSignal!
5830
5831
For various reasons some of these may be rejected at runtime.
5832
5833
Here are the names currently accepted by a user of sun.misc.Signal with
5834
1.4.1 (ignoring potential interaction with use of chaining, etc):
5835
5836
(LIST TBD)
5837
5838
*/
5839
int os::get_signal_number(const char* name) {
5840
static const struct {
5841
const char* name;
5842
int number;
5843
} siglabels [] =
5844
// derived from version 6.0 VC98/include/signal.h
5845
{"ABRT", SIGABRT, // abnormal termination triggered by abort cl
5846
"FPE", SIGFPE, // floating point exception
5847
"SEGV", SIGSEGV, // segment violation
5848
"INT", SIGINT, // interrupt
5849
"TERM", SIGTERM, // software term signal from kill
5850
"BREAK", SIGBREAK, // Ctrl-Break sequence
5851
"ILL", SIGILL}; // illegal instruction
5852
for (unsigned i = 0; i < ARRAY_SIZE(siglabels); ++i) {
5853
if (strcmp(name, siglabels[i].name) == 0) {
5854
return siglabels[i].number;
5855
}
5856
}
5857
return -1;
5858
}
5859
5860
// Fast current thread access

// Offset (from FS) at which the thread pointer lives; filled in lazily
// by initialize_thread_ptr_offset() below.
int os::win32::_thread_ptr_offset = 0;

// Empty call target: invoking it through os_exception_wrapper is enough
// for the wrapper to record the thread-pointer offset as a side effect.
static void call_wrapper_dummy() {}

// We need to call the os_exception_wrapper once so that it sets
// up the offset from FS of the thread pointer.
void os::win32::initialize_thread_ptr_offset() {
  os::os_exception_wrapper((java_call_t)call_wrapper_dummy,
                           NULL, methodHandle(), NULL, NULL);
}
5872
5873
// MAP_SYNC-style synchronously-mapped files are not supported on Windows.
bool os::supports_map_sync() {
  return false;
}
5876
5877
#ifdef ASSERT
5878
static void check_meminfo(MEMORY_BASIC_INFORMATION* minfo) {
5879
assert(minfo->State == MEM_FREE || minfo->State == MEM_COMMIT || minfo->State == MEM_RESERVE, "Invalid state");
5880
if (minfo->State != MEM_FREE) {
5881
assert(minfo->AllocationBase != NULL && minfo->BaseAddress >= minfo->AllocationBase, "Invalid pointers");
5882
assert(minfo->RegionSize > 0, "Invalid region size");
5883
}
5884
}
5885
#endif
5886
5887
5888
// VirtualQuery wrapper: zeroes *minfo first, and (in debug builds)
// sanity-checks the result. Returns true on a successful, full-size query.
static bool checkedVirtualQuery(address addr, MEMORY_BASIC_INFORMATION* minfo) {
  ZeroMemory(minfo, sizeof(MEMORY_BASIC_INFORMATION));
  const SIZE_T returned = ::VirtualQuery(addr, minfo, sizeof(MEMORY_BASIC_INFORMATION));
  if (returned != sizeof(MEMORY_BASIC_INFORMATION)) {
    return false;
  }
  DEBUG_ONLY(check_meminfo(minfo);)
  return true;
}
5896
5897
// Given a pointer pointing into an allocation (an area allocated with VirtualAlloc),
// return information about that allocation.
bool os::win32::find_mapping(address addr, mapping_info_t* mi) {
  // Query at addr to find allocation base; then, starting at allocation base,
  // query all regions, until we either find the next allocation or a free area.
  ZeroMemory(mi, sizeof(mapping_info_t));
  MEMORY_BASIC_INFORMATION minfo;
  address allocation_base = NULL;
  address allocation_end = NULL;
  bool rc = false;
  if (checkedVirtualQuery(addr, &minfo)) {
    if (minfo.State != MEM_FREE) {
      allocation_base = (address)minfo.AllocationBase;
      allocation_end = allocation_base;
      // Iterate through all regions in this allocation to find its end. While we are here, also count things.
      for (;;) {
        // Note: this inner 'rc' intentionally shadows the outer one; a
        // query failure here merely ends the region scan.
        bool rc = checkedVirtualQuery(allocation_end, &minfo);
        if (rc == false ||                                     // VirtualQuery error, end of allocation?
            minfo.State == MEM_FREE ||                         // end of allocation, free memory follows
            (address)minfo.AllocationBase != allocation_base)  // end of allocation, a new one starts
        {
          break;
        }
        const size_t region_size = minfo.RegionSize;
        mi->regions ++;
        if (minfo.State == MEM_COMMIT) {
          mi->committed_size += minfo.RegionSize;
        }
        allocation_end += region_size;
      }
      if (allocation_base != NULL && allocation_end > allocation_base) {
        mi->base = allocation_base;
        mi->size = allocation_end - allocation_base;
        rc = true;
      }
    }
  }
#ifdef ASSERT
  if (rc) {
    assert(mi->size > 0 && mi->size >= mi->committed_size, "Sanity");
    assert(addr >= mi->base && addr < mi->base + mi->size, "Sanity");
    assert(mi->regions > 0, "Sanity");
  }
#endif
  return rc;
}
5943
5944
// Helper for print_one_mapping: print n words, both as hex and ascii.
// Use Safefetch for all values.
static void print_snippet(const void* p, outputStream* st) {
  static const int num_words = LP64_ONLY(3) NOT_LP64(6);
  static const int num_bytes = num_words * sizeof(int);
  intptr_t v[num_words];
  const int errval = 0xDE210244;
  for (int i = 0; i < num_words; i++) {
    v[i] = SafeFetchN((intptr_t*)p + i, errval);
    // If the fetch returned the error value, probe again with a different
    // sentinel; getting both sentinels back means the memory really is
    // unreadable, so print nothing at all.
    if (v[i] == errval &&
        SafeFetchN((intptr_t*)p + i, ~errval) == ~errval) {
      return;
    }
  }
  st->put('[');
  for (int i = 0; i < num_words; i++) {
    st->print(INTPTR_FORMAT " ", v[i]);
  }
  const char* b = (char*)v;
  st->put('\"');
  for (int i = 0; i < num_bytes; i++) {
    // Non-printable bytes are shown as '.'.
    st->put(::isgraph(b[i]) ? b[i] : '.');
  }
  st->put('\"');
  st->put(']');
}
5970
5971
// Helper function for print_memory_mappings:
5972
// Given a MEMORY_BASIC_INFORMATION, containing information about a non-free region:
5973
// print out all regions in that allocation. If any of those regions
5974
// fall outside the given range [start, end), indicate that in the output.
5975
// Return the pointer to the end of the allocation.
5976
static address print_one_mapping(MEMORY_BASIC_INFORMATION* minfo, address start, address end, outputStream* st) {
5977
// Print it like this:
5978
//
5979
// Base: <xxxxx>: [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx (region 1)
5980
// [xxxx - xxxx], state=MEM_xxx, prot=x, type=MEM_xxx (region 2)
5981
assert(minfo->State != MEM_FREE, "Not inside an allocation.");
5982
address allocation_base = (address)minfo->AllocationBase;
5983
#define IS_IN(p) (p >= start && p < end)
5984
bool first_line = true;
5985
bool is_dll = false;
5986
for(;;) {
5987
if (first_line) {
5988
st->print("Base " PTR_FORMAT ": ", p2i(allocation_base));
5989
} else {
5990
st->print_raw(NOT_LP64 (" ")
5991
LP64_ONLY(" "));
5992
}
5993
address region_start = (address)minfo->BaseAddress;
5994
address region_end = region_start + minfo->RegionSize;
5995
assert(region_end > region_start, "Sanity");
5996
if (region_end <= start) {
5997
st->print("<outside range> ");
5998
} else if (region_start >= end) {
5999
st->print("<outside range> ");
6000
} else if (!IS_IN(region_start) || !IS_IN(region_end - 1)) {
6001
st->print("<partly outside range> ");
6002
}
6003
st->print("[" PTR_FORMAT "-" PTR_FORMAT "), state=", p2i(region_start), p2i(region_end));
6004
switch (minfo->State) {
6005
case MEM_COMMIT: st->print_raw("MEM_COMMIT "); break;
6006
case MEM_FREE: st->print_raw("MEM_FREE "); break;
6007
case MEM_RESERVE: st->print_raw("MEM_RESERVE"); break;
6008
default: st->print("%x?", (unsigned)minfo->State);
6009
}
6010
st->print(", prot=%3x, type=", (unsigned)minfo->Protect);
6011
switch (minfo->Type) {
6012
case MEM_IMAGE: st->print_raw("MEM_IMAGE "); break;
6013
case MEM_MAPPED: st->print_raw("MEM_MAPPED "); break;
6014
case MEM_PRIVATE: st->print_raw("MEM_PRIVATE"); break;
6015
default: st->print("%x?", (unsigned)minfo->State);
6016
}
6017
// At the start of every allocation, print some more information about this mapping.
6018
// Notes:
6019
// - this could be beefed up a lot, similar to os::print_location
6020
// - for now we just query the allocation start point. This may be confusing for cases where
6021
// the kernel merges multiple mappings.
6022
if (first_line) {
6023
char buf[MAX_PATH];
6024
if (os::dll_address_to_library_name(allocation_base, buf, sizeof(buf), nullptr)) {
6025
st->print(", %s", buf);
6026
is_dll = true;
6027
}
6028
}
6029
// If memory is accessible, and we do not know anything else about it, print a snippet
6030
if (!is_dll &&
6031
minfo->State == MEM_COMMIT &&
6032
!(minfo->Protect & PAGE_NOACCESS || minfo->Protect & PAGE_GUARD)) {
6033
st->print_raw(", ");
6034
print_snippet(region_start, st);
6035
}
6036
st->cr();
6037
// Next region...
6038
bool rc = checkedVirtualQuery(region_end, minfo);
6039
if (rc == false || // VirtualQuery error, end of allocation?
6040
(minfo->State == MEM_FREE) || // end of allocation, free memory follows
6041
((address)minfo->AllocationBase != allocation_base) || // end of allocation, a new one starts
6042
(region_end > end)) // end of range to print.
6043
{
6044
return region_end;
6045
}
6046
first_line = false;
6047
}
6048
#undef IS_IN
6049
ShouldNotReachHere();
6050
return NULL;
6051
}
6052
6053
// Walk the address range [addr, addr+bytes) and print every mapping found,
// skipping free regions and bailing out on suspicious VirtualQuery failures.
void os::print_memory_mappings(char* addr, size_t bytes, outputStream* st) {
  MEMORY_BASIC_INFORMATION minfo;
  address start = (address)addr;
  address end = start + bytes;
  address p = start;
  if (p == nullptr) { // Lets skip the zero pages.
    p += os::vm_allocation_granularity();
  }
  address p2 = p; // guard against wraparounds
  int fuse = 0;

  while (p < end && p >= p2) {
    p2 = p;
    // Probe for the next mapping.
    if (checkedVirtualQuery(p, &minfo)) {
      if (minfo.State != MEM_FREE) {
        // Found one. Print it out.
        // (Note: this inner p2 intentionally shadows the wraparound guard.)
        address p2 = print_one_mapping(&minfo, start, end, st);
        assert(p2 > p, "Sanity");
        p = p2;
      } else {
        // Note: for free regions, most of MEMORY_BASIC_INFORMATION is undefined.
        // Only region dimensions are not: use those to jump to the end of
        // the free range.
        address region_start = (address)minfo.BaseAddress;
        address region_end = region_start + minfo.RegionSize;
        assert(p >= region_start && p < region_end, "Sanity");
        p = region_end;
      }
    } else {
      // MSDN doc on VirtualQuery is unclear about what it means if it returns an error.
      // In particular, whether querying an address outside any mappings would report
      // a MEM_FREE region or just return an error. From experiments, it seems to return
      // a MEM_FREE region for unmapped areas in valid address space and an error if we
      // are outside valid address space.
      // Here, we advance the probe pointer by alloc granularity. But if the range to print
      // is large, this may take a long time. Therefore lets stop right away if the address
      // is outside of what we know are valid addresses on Windows. Also, add a loop fuse.
      static const address end_virt = (address)(LP64_ONLY(0x7ffffffffffULL) NOT_LP64(3*G));
      if (p >= end_virt) {
        break;
      } else {
        // Advance probe pointer, but with a fuse to break long loops.
        if (fuse++ == 100000) {
          break;
        }
        p += os::vm_allocation_granularity();
      }
    }
  }
}
6104
6105