GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/compiler/oopMap.cpp
/*
 * Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/lockFreeStack.hpp"
#ifdef COMPILER1
#include "c1/c1_Defs.hpp"
#endif
#ifdef COMPILER2
#include "opto/optoreg.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci_globals.hpp"
#endif

static_assert(sizeof(oop) == sizeof(intptr_t), "Derived pointer sanity check");

static inline intptr_t derived_pointer_value(derived_pointer p) {
  return static_cast<intptr_t>(p);
}

static inline derived_pointer to_derived_pointer(oop obj) {
  return static_cast<derived_pointer>(cast_from_oop<intptr_t>(obj));
}

static inline intptr_t operator-(derived_pointer p, derived_pointer p1) {
  return derived_pointer_value(p) - derived_pointer_value(p1);
}

static inline derived_pointer operator+(derived_pointer p, intptr_t offset) {
  return static_cast<derived_pointer>(derived_pointer_value(p) + offset);
}
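
// derived_pointer (declared in the included compiler/oopMap.hpp header) is
// evidently a strongly typed integer value holding an interior pointer, i.e.
// an address that points into the middle of an object rather than at its
// header. The helpers above provide the oop <-> derived_pointer conversions
// and the (derived - base) / (base + offset) arithmetic that the
// derived-pointer handling later in this file relies on.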

// OopMapStream

OopMapStream::OopMapStream(OopMap* oop_map) {
  _stream = new CompressedReadStream(oop_map->write_stream()->buffer());
  _size = oop_map->omv_count();
  _position = 0;
  _valid_omv = false;
}

OopMapStream::OopMapStream(const ImmutableOopMap* oop_map) {
  _stream = new CompressedReadStream(oop_map->data_addr());
  _size = oop_map->count();
  _position = 0;
  _valid_omv = false;
}

void OopMapStream::find_next() {
  if (_position++ < _size) {
    _omv.read_from(_stream);
    _valid_omv = true;
    return;
  }
  _valid_omv = false;
}

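// The stream-based accessors above are how every consumer in this file walks
// a map's entries; the recurring pattern is:
//
//   for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
//     OopMapValue omv = oms.current();
//     ... // inspect omv.type(), omv.reg(), omv.content_reg()
//   }
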
// OopMap

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
OopMap::OopMap(int frame_size, int arg_count) {
  // OopMaps are usually quite small, so pick a small initial size
  set_write_stream(new CompressedWriteStream(32));
  set_omv_count(0);

#ifdef ASSERT
  _locs_length = VMRegImpl::stack2reg(0)->value() + frame_size + arg_count;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif
}


OopMap::OopMap(OopMap::DeepCopyToken, OopMap* source) {
  // This constructor does a deep copy
  // of the source OopMap.
  set_write_stream(new CompressedWriteStream(source->omv_count() * 2));
  set_omv_count(0);
  set_offset(source->offset());

#ifdef ASSERT
  _locs_length = source->_locs_length;
  _locs_used = NEW_RESOURCE_ARRAY(OopMapValue::oop_types, _locs_length);
  for(int i = 0; i < _locs_length; i++) _locs_used[i] = OopMapValue::unused_value;
#endif

  // We need to copy the entries too.
  for (OopMapStream oms(source); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    omv.write_on(write_stream());
    increment_count();
  }
}


OopMap* OopMap::deep_copy() {
  return new OopMap(_deep_copy_token, this);
}

void OopMap::copy_data_to(address addr) const {
  memcpy(addr, write_stream()->buffer(), write_stream()->position());
}

int OopMap::heap_size() const {
  int size = sizeof(OopMap);
  int align = sizeof(void *) - 1;
  size += write_stream()->position();
  // Align to a reasonable ending point
  size = ((size+align) & ~align);
  return size;
}
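
// heap_size() example: a raw total of 61 bytes rounds up to 64 on a 64-bit VM
// (align == sizeof(void*) - 1 == 7, and (61 + 7) & ~7 == 64), so the returned
// size is always a multiple of the pointer size.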

// frame_size units are stack-slots (4 bytes) NOT intptr_t; we can name odd
// slots to hold 4-byte values like ints and floats in the LP64 build.
void OopMap::set_xxx(VMReg reg, OopMapValue::oop_types x, VMReg optional) {

  assert(reg->value() < _locs_length, "too big reg value for stack size");
  assert( _locs_used[reg->value()] == OopMapValue::unused_value, "cannot insert twice" );
  debug_only( _locs_used[reg->value()] = x; )

  OopMapValue o(reg, x, optional);
  o.write_on(write_stream());
  increment_count();
}


void OopMap::set_oop(VMReg reg) {
  set_xxx(reg, OopMapValue::oop_value, VMRegImpl::Bad());
}


void OopMap::set_narrowoop(VMReg reg) {
  set_xxx(reg, OopMapValue::narrowoop_value, VMRegImpl::Bad());
}


void OopMap::set_callee_saved(VMReg reg, VMReg caller_machine_register ) {
  set_xxx(reg, OopMapValue::callee_saved_value, caller_machine_register);
}


void OopMap::set_derived_oop(VMReg reg, VMReg derived_from_local_register ) {
  if( reg == derived_from_local_register ) {
    // Actually an oop, derived shares storage with base,
    set_oop(reg);
  } else {
    set_xxx(reg, OopMapValue::derived_oop_value, derived_from_local_register);
  }
}

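// As the setters above show, an OopMapValue records what a given stack slot or
// register holds at a safepoint: oop_value (an ordinary oop), narrowoop_value
// (a compressed oop), callee_saved_value (the saved copy of the caller register
// named by content_reg()), or derived_oop_value (an interior pointer whose base
// oop lives in the location named by content_reg()).
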
// OopMapSet

OopMapSet::OopMapSet() : _list(MinOopMapAllocation) {}

void OopMapSet::add_gc_map(int pc_offset, OopMap *map ) {
  map->set_offset(pc_offset);

#ifdef ASSERT
  if(_list.length() > 0) {
    OopMap* last = _list.last();
    if (last->offset() == map->offset() ) {
      fatal("OopMap inserted twice");
    }
    if (last->offset() > map->offset()) {
      tty->print_cr( "WARNING, maps not sorted: pc[%d]=%d, pc[%d]=%d",
                      _list.length(),last->offset(),_list.length()+1,map->offset());
    }
  }
#endif // ASSERT

  add(map);
}

static void add_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::add(derived, base);
#endif // COMPILER2_OR_JVMCI
}

static void ignore_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
}

static void process_derived_oop(oop* base, derived_pointer* derived, OopClosure* oop_fn) {
  // All derived pointers must be processed before the base pointer of any derived pointer is processed.
  // Otherwise, if two derived pointers share a base and that base is updated while the first derived
  // pointer is processed, the offset computed for the second derived pointer would be wrong.
  derived_pointer derived_base = to_derived_pointer(*base);
  intptr_t offset = *derived - derived_base;
  *derived = derived_base;
  oop_fn->do_oop((oop*)derived);
  *derived = *derived + offset;
}
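
// Worked example for process_derived_oop() (addresses made up for
// illustration): if *base is 0x1000 and *derived is 0x1010, the saved offset
// is 16. *derived is temporarily set to 0x1000 so the closure relocates it
// exactly like the base oop; if the object moves to 0x2000, re-adding the
// offset leaves *derived at 0x2010, still 16 bytes into the moved object.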


#ifndef PRODUCT
static void trace_codeblob_maps(const frame *fr, const RegisterMap *reg_map) {
  // Print oopmap and regmap
  tty->print_cr("------ ");
  CodeBlob* cb = fr->cb();
  const ImmutableOopMapSet* maps = cb->oop_maps();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  map->print();
  if( cb->is_nmethod() ) {
    nmethod* nm = (nmethod*)cb;
    // native wrappers have no scope data; it is implied
    if (nm->is_native_method()) {
      tty->print("bci: 0 (native)");
    } else {
      ScopeDesc* scope = nm->scope_desc_at(fr->pc());
      tty->print("bci: %d ",scope->bci());
    }
  }
  tty->cr();
  fr->print_on(tty);
  tty->print(" ");
  cb->print_value_on(tty); tty->cr();
  reg_map->print();
  tty->print_cr("------ ");

}
#endif // PRODUCT

void OopMapSet::oops_do(const frame *fr, const RegisterMap* reg_map, OopClosure* f, DerivedPointerIterationMode mode) {
  switch (mode) {
  case DerivedPointerIterationMode::_directly:
    all_do(fr, reg_map, f, process_derived_oop);
    break;
  case DerivedPointerIterationMode::_with_table:
    all_do(fr, reg_map, f, add_derived_oop);
    break;
  case DerivedPointerIterationMode::_ignore:
    all_do(fr, reg_map, f, ignore_derived_oop);
    break;
  }
}
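
// The three DerivedPointerIterationMode values select how the derived-oop
// callbacks above are used: _directly rewrites each derived pointer in place
// as the frame is walked (process_derived_oop); _with_table only records the
// (location, offset) pair in the DerivedPointerTable for a later fix-up in
// DerivedPointerTable::update_pointers() (add_derived_oop); and _ignore skips
// derived pointers altogether (ignore_derived_oop).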


void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
                       OopClosure* oop_fn, void derived_oop_fn(oop*, derived_pointer*, OopClosure*)) {
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  NOT_PRODUCT(if (TraceCodeBlobStacks) trace_codeblob_maps(fr, reg_map);)

  const ImmutableOopMap* map = cb->oop_map_for_return_address(fr->pc());
  assert(map != NULL, "no ptr map found");

  // handle derived pointers first (otherwise base pointer may be
  // changed before derived pointer offset has been collected)
  {
    for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
      OopMapValue omv = oms.current();
      if (omv.type() != OopMapValue::derived_oop_value) {
        continue;
      }

#ifndef COMPILER2
      COMPILER1_PRESENT(ShouldNotReachHere();)
#if INCLUDE_JVMCI
      if (UseJVMCICompiler) {
        ShouldNotReachHere();
      }
#endif
#endif // !COMPILER2
      derived_pointer* derived_loc = (derived_pointer*)fr->oopmapreg_to_location(omv.reg(),reg_map);
      guarantee(derived_loc != NULL, "missing saved register");
      oop* base_loc = fr->oopmapreg_to_oop_location(omv.content_reg(), reg_map);
      // Ignore NULL oops and decoded NULL narrow oops which equal
      // CompressedOops::base() when a narrow oop implicit null check
      // is used in compiled code.
      // The narrow_oop_base could be NULL or be the address
      // of the page below heap depending on compressed oops mode.
      if (base_loc != NULL && *base_loc != NULL && !CompressedOops::is_base(*base_loc)) {
        derived_oop_fn(base_loc, derived_loc, oop_fn);
      }
    }
  }

  {
    // We want coop and oop oop_types
    for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
      OopMapValue omv = oms.current();
      oop* loc = fr->oopmapreg_to_oop_location(omv.reg(),reg_map);
      // It should be an error if no location can be found for a
      // register mentioned as containing an oop of some kind. Maybe
      // this was allowed previously because value_value items might
      // be missing?
      guarantee(loc != NULL, "missing saved register");
      if ( omv.type() == OopMapValue::oop_value ) {
        oop val = *loc;
        if (val == NULL || CompressedOops::is_base(val)) {
          // Ignore NULL oops and decoded NULL narrow oops which equal
          // CompressedOops::base() when a narrow oop implicit null check
          // is used in compiled code.
          // The narrow_oop_base could be NULL or be the address
          // of the page below heap depending on compressed oops mode.
          continue;
        }
        oop_fn->do_oop(loc);
      } else if ( omv.type() == OopMapValue::narrowoop_value ) {
        narrowOop *nl = (narrowOop*)loc;
#ifndef VM_LITTLE_ENDIAN
        VMReg vmReg = omv.reg();
        if (!vmReg->is_stack()) {
          // compressed oops in registers only take up 4 bytes of an
          // 8 byte register but they are in the wrong part of the
          // word so adjust loc to point at the right place.
          nl = (narrowOop*)((address)nl + 4);
        }
#endif
        oop_fn->do_oop(nl);
      }
    }
  }
}


// Update callee-saved register info for the following frame
void OopMapSet::update_register_map(const frame *fr, RegisterMap *reg_map) {
  ResourceMark rm;
  CodeBlob* cb = fr->cb();
  assert(cb != NULL, "no codeblob");

  // Any reg might be saved by a safepoint handler (see generate_handler_blob).
  assert( reg_map->_update_for_id == NULL || fr->is_older(reg_map->_update_for_id),
          "already updated this map; do not 'update' it twice!" );
  debug_only(reg_map->_update_for_id = fr->id());

  // Check if caller must update oop argument
  assert((reg_map->include_argument_oops() ||
          !cb->caller_must_gc_arguments(reg_map->thread())),
         "include_argument_oops should already be set");

  // Scan through oopmap and find location of all callee-saved registers
  // (we do not do update in place, since info could be overwritten)

  address pc = fr->pc();
  const ImmutableOopMap* map = cb->oop_map_for_return_address(pc);
  assert(map != NULL, "no ptr map found");
  DEBUG_ONLY(int nof_callee = 0;)

  for (OopMapStream oms(map); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() == OopMapValue::callee_saved_value) {
      VMReg reg = omv.content_reg();
      oop* loc = fr->oopmapreg_to_oop_location(omv.reg(), reg_map);
      reg_map->set_location(reg, (address) loc);
      DEBUG_ONLY(nof_callee++;)
    }
  }

  // Check that runtime stubs save all callee-saved registers
#ifdef COMPILER2
  assert(cb->is_compiled_by_c1() || cb->is_compiled_by_jvmci() || !cb->is_runtime_stub() ||
         (nof_callee >= SAVED_ON_ENTRY_REG_COUNT || nof_callee >= C_SAVED_ON_ENTRY_REG_COUNT),
         "must save all");
#endif // COMPILER2
}
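
// update_register_map() is what lets a stack walk see through a frame that
// saved its caller's registers: for every callee_saved_value entry it records,
// in the RegisterMap, the stack address where this frame stored the register
// named by content_reg(), so oops held in registers by the caller can be
// located (and updated) when the caller's frame is processed.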

// Printing code is present in product build for -XX:+PrintAssembly.

static
void print_register_type(OopMapValue::oop_types x, VMReg optional,
                         outputStream* st) {
  switch( x ) {
  case OopMapValue::oop_value:
    st->print("Oop");
    break;
  case OopMapValue::narrowoop_value:
    st->print("NarrowOop");
    break;
  case OopMapValue::callee_saved_value:
    st->print("Callers_");
    optional->print_on(st);
    break;
  case OopMapValue::derived_oop_value:
    st->print("Derived_oop_");
    optional->print_on(st);
    break;
  default:
    ShouldNotReachHere();
  }
}

void OopMapValue::print_on(outputStream* st) const {
  reg()->print_on(st);
  st->print("=");
  print_register_type(type(),content_reg(),st);
  st->print(" ");
}

void OopMapValue::print() const { print_on(tty); }

void ImmutableOopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("ImmutableOopMap {");
  for(OopMapStream oms(this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  st->print("}");
}

void ImmutableOopMap::print() const { print_on(tty); }

void OopMap::print_on(outputStream* st) const {
  OopMapValue omv;
  st->print("OopMap {");
  for(OopMapStream oms((OopMap*)this); !oms.is_done(); oms.next()) {
    omv = oms.current();
    omv.print_on(st);
  }
  // Print hex offset in addition.
  st->print("off=%d/0x%x}", (int) offset(), (int) offset());
}

void OopMap::print() const { print_on(tty); }

void ImmutableOopMapSet::print_on(outputStream* st) const {
  const ImmutableOopMap* last = NULL;
  const int len = count();

  st->print_cr("ImmutableOopMapSet contains %d OopMaps", len);

  for (int i = 0; i < len; i++) {
    const ImmutableOopMapPair* pair = pair_at(i);
    const ImmutableOopMap* map = pair->get_from(this);
    if (map != last) {
      st->cr();
      map->print_on(st);
      st->print(" pc offsets: ");
    }
    last = map;
    st->print("%d ", pair->pc_offset());
  }
  st->cr();
}

void ImmutableOopMapSet::print() const { print_on(tty); }

void OopMapSet::print_on(outputStream* st) const {
  const int len = _list.length();

  st->print_cr("OopMapSet contains %d OopMaps", len);

  for( int i = 0; i < len; i++) {
    OopMap* m = at(i);
    st->print_cr("#%d ",i);
    m->print_on(st);
    st->cr();
  }
  st->cr();
}

void OopMapSet::print() const { print_on(tty); }

bool OopMap::equals(const OopMap* other) const {
  if (other->_omv_count != _omv_count) {
    return false;
  }
  if (other->write_stream()->position() != write_stream()->position()) {
    return false;
  }
  if (memcmp(other->write_stream()->buffer(), write_stream()->buffer(), write_stream()->position()) != 0) {
    return false;
  }
  return true;
}

const ImmutableOopMap* ImmutableOopMapSet::find_map_at_offset(int pc_offset) const {
  ImmutableOopMapPair* pairs = get_pairs();
  ImmutableOopMapPair* last = NULL;

  for (int i = 0; i < _count; ++i) {
    if (pairs[i].pc_offset() >= pc_offset) {
      last = &pairs[i];
      break;
    }
  }

  // Heal Coverity issue: potential index out of bounds access.
  guarantee(last != NULL, "last may not be null");
  assert(last->pc_offset() == pc_offset, "oopmap not found");
  return last->get_from(this);
}

const ImmutableOopMap* ImmutableOopMapPair::get_from(const ImmutableOopMapSet* set) const {
  return set->oopmap_at_offset(_oopmap_offset);
}

ImmutableOopMap::ImmutableOopMap(const OopMap* oopmap) : _count(oopmap->count()) {
  address addr = data_addr();
  oopmap->copy_data_to(addr);
}

#ifdef ASSERT
int ImmutableOopMap::nr_of_bytes() const {
  OopMapStream oms(this);

  while (!oms.is_done()) {
    oms.next();
  }
  return sizeof(ImmutableOopMap) + oms.stream_position();
}
#endif
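
// Layout of the immutable form (as built below): an ImmutableOopMapSet header,
// followed by an array of ImmutableOopMapPair entries (each mapping a pc offset
// to the offset of its ImmutableOopMap within the set's data), followed by the
// ImmutableOopMap bodies those offsets point into. find_map_at_offset() and
// get_from() above navigate exactly this layout.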

ImmutableOopMapBuilder::ImmutableOopMapBuilder(const OopMapSet* set) : _set(set), _empty(NULL), _last(NULL), _empty_offset(-1), _last_offset(-1), _offset(0), _required(-1), _new_set(NULL) {
  _mapping = NEW_RESOURCE_ARRAY(Mapping, _set->size());
}

int ImmutableOopMapBuilder::size_for(const OopMap* map) const {
  return align_up((int)sizeof(ImmutableOopMap) + map->data_size(), 8);
}

int ImmutableOopMapBuilder::heap_size() {
  int base = sizeof(ImmutableOopMapSet);
  base = align_up(base, 8);

  // all of our pc / offset pairs
  int pairs = _set->size() * sizeof(ImmutableOopMapPair);
  pairs = align_up(pairs, 8);

  for (int i = 0; i < _set->size(); ++i) {
    int size = 0;
    OopMap* map = _set->at(i);

    if (is_empty(map)) {
      /* only keep a single empty map in the set */
      if (has_empty()) {
        _mapping[i].set(Mapping::OOPMAP_EMPTY, _empty_offset, 0, map, _empty);
      } else {
        _empty_offset = _offset;
        _empty = map;
        size = size_for(map);
        _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      }
    } else if (is_last_duplicate(map)) {
      /* if this entry is identical to the previous one, just point it there */
      _mapping[i].set(Mapping::OOPMAP_DUPLICATE, _last_offset, 0, map, _last);
    } else {
      /* not empty, not an identical copy of the previous entry */
      size = size_for(map);
      _mapping[i].set(Mapping::OOPMAP_NEW, _offset, size, map);
      _last_offset = _offset;
      _last = map;
    }

    assert(_mapping[i]._map == map, "check");
    _offset += size;
  }

  int total = base + pairs + _offset;
  DEBUG_ONLY(total += 8);
  _required = total;
  return total;
}
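
// The build is two-phase: heap_size() above decides, per OopMap, whether it
// becomes a new ImmutableOopMap, reuses the single empty map, or points at the
// previous (identical) map, recording that decision plus the target offset in
// _mapping[]; fill() below then writes the pairs and map bodies according to
// those recorded decisions.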

void ImmutableOopMapBuilder::fill_pair(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  assert(offset < set->nr_of_bytes(), "check");
  new ((address) pair) ImmutableOopMapPair(map->offset(), offset);
}

int ImmutableOopMapBuilder::fill_map(ImmutableOopMapPair* pair, const OopMap* map, int offset, const ImmutableOopMapSet* set) {
  fill_pair(pair, map, offset, set);
  address addr = (address) pair->get_from(_new_set); // location of the ImmutableOopMap

  new (addr) ImmutableOopMap(map);
  return size_for(map);
}

void ImmutableOopMapBuilder::fill(ImmutableOopMapSet* set, int sz) {
  ImmutableOopMapPair* pairs = set->get_pairs();

  for (int i = 0; i < set->count(); ++i) {
    const OopMap* map = _mapping[i]._map;
    ImmutableOopMapPair* pair = NULL;
    int size = 0;

    if (_mapping[i]._kind == Mapping::OOPMAP_NEW) {
      size = fill_map(&pairs[i], map, _mapping[i]._offset, set);
    } else if (_mapping[i]._kind == Mapping::OOPMAP_DUPLICATE || _mapping[i]._kind == Mapping::OOPMAP_EMPTY) {
      fill_pair(&pairs[i], map, _mapping[i]._offset, set);
    }

    const ImmutableOopMap* nv = set->find_map_at_offset(map->offset());
    assert(memcmp(map->data(), nv->data_addr(), map->data_size()) == 0, "check identity");
  }
}

#ifdef ASSERT
void ImmutableOopMapBuilder::verify(address buffer, int size, const ImmutableOopMapSet* set) {
  for (int i = 0; i < 8; ++i) {
    assert(buffer[size - 8 + i] == (unsigned char) 0xff, "overwritten memory check");
  }

  for (int i = 0; i < set->count(); ++i) {
    const ImmutableOopMapPair* pair = set->pair_at(i);
    assert(pair->oopmap_offset() < set->nr_of_bytes(), "check size");
    const ImmutableOopMap* map = pair->get_from(set);
    int nr_of_bytes = map->nr_of_bytes();
    assert(pair->oopmap_offset() + nr_of_bytes <= set->nr_of_bytes(), "check size + size");
  }
}
#endif

ImmutableOopMapSet* ImmutableOopMapBuilder::generate_into(address buffer) {
  DEBUG_ONLY(memset(&buffer[_required-8], 0xff, 8));

  _new_set = new (buffer) ImmutableOopMapSet(_set, _required);
  fill(_new_set, _required);

  DEBUG_ONLY(verify(buffer, _required, _new_set));

  return _new_set;
}

ImmutableOopMapSet* ImmutableOopMapBuilder::build() {
  _required = heap_size();

  // We need to allocate a chunk big enough to hold the ImmutableOopMapSet and all of its ImmutableOopMaps
  address buffer = NEW_C_HEAP_ARRAY(unsigned char, _required, mtCode);
  return generate_into(buffer);
}

ImmutableOopMapSet* ImmutableOopMapSet::build_from(const OopMapSet* oopmap_set) {
  ResourceMark mark;
  ImmutableOopMapBuilder builder(oopmap_set);
  return builder.build();
}
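
// The 8 extra bytes that heap_size() adds in debug builds act as a red zone:
// generate_into() fills them with 0xff before building, and verify() checks
// they were left untouched, catching any overrun of the computed _required
// size.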


//------------------------------DerivedPointerTable---------------------------

#if COMPILER2_OR_JVMCI

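// Lifecycle of the table, as used elsewhere in the VM (see the comment in
// clear() below): clear() is called before derived pointers are collected and
// marks the table active; add() (reached via add_derived_oop() above) records
// each derived pointer's location and offset and redirects the location to the
// base; once the base oops have been moved, update_pointers() recomputes every
// derived pointer as new base + offset and deactivates the table.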
class DerivedPointerTable::Entry : public CHeapObj<mtCompiler> {
  derived_pointer* _location; // Location of derived pointer, also pointing to base
  intptr_t         _offset;   // Offset from base pointer
  Entry* volatile  _next;

  static Entry* volatile* next_ptr(Entry& entry) { return &entry._next; }

public:
  Entry(derived_pointer* location, intptr_t offset) :
    _location(location), _offset(offset), _next(NULL) {}

  derived_pointer* location() const { return _location; }
  intptr_t offset() const { return _offset; }
  Entry* next() const { return _next; }

  typedef LockFreeStack<Entry, &next_ptr> List;
  static List* _list;
};

DerivedPointerTable::Entry::List* DerivedPointerTable::Entry::_list = NULL;
bool DerivedPointerTable::_active = false;

bool DerivedPointerTable::is_empty() {
  return Entry::_list == NULL || Entry::_list->empty();
}

void DerivedPointerTable::clear() {
  // The first time, we create the list. Otherwise it should be
  // empty. If not, then we have probably forgotten to call
  // update_pointers after last GC/Scavenge.
  assert (!_active, "should not be active");
  assert(is_empty(), "table not empty");
  if (Entry::_list == NULL) {
    void* mem = NEW_C_HEAP_OBJ(Entry::List, mtCompiler);
    Entry::_list = ::new (mem) Entry::List();
  }
  _active = true;
}

void DerivedPointerTable::add(derived_pointer* derived_loc, oop *base_loc) {
  assert(Universe::heap()->is_in_or_null(*base_loc), "not an oop");
  assert(derived_loc != (void*)base_loc, "Base and derived in same location");
  derived_pointer base_loc_as_derived_pointer =
    static_cast<derived_pointer>(reinterpret_cast<intptr_t>(base_loc));
  assert(*derived_loc != base_loc_as_derived_pointer, "location already added");
  assert(Entry::_list != NULL, "list must exist");
  assert(is_active(), "table must be active here");
  intptr_t offset = *derived_loc - to_derived_pointer(*base_loc);
  // This assert is invalid because derived pointers can be
  // arbitrarily far away from their base.
  // assert(offset >= -1000000, "wrong derived pointer info");

  if (TraceDerivedPointers) {
    tty->print_cr(
      "Add derived pointer@" INTPTR_FORMAT
      " - Derived: " INTPTR_FORMAT
      " Base: " INTPTR_FORMAT " (@" INTPTR_FORMAT ") (Offset: " INTX_FORMAT ")",
      p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(*base_loc), p2i(base_loc), offset
    );
  }
  // Set derived oop location to point to base.
  *derived_loc = base_loc_as_derived_pointer;
  Entry* entry = new Entry(derived_loc, offset);
  Entry::_list->push(*entry);
}

void DerivedPointerTable::update_pointers() {
  assert(Entry::_list != NULL, "list must exist");
  Entry* entries = Entry::_list->pop_all();
  while (entries != NULL) {
    Entry* entry = entries;
    entries = entry->next();
    derived_pointer* derived_loc = entry->location();
    intptr_t offset = entry->offset();
    // The derived oop was set up to point to the location of the base
    oop base = **reinterpret_cast<oop**>(derived_loc);
    assert(Universe::heap()->is_in_or_null(base), "must be an oop");

    derived_pointer derived_base = to_derived_pointer(base);
    *derived_loc = derived_base + offset;
    assert(*derived_loc - derived_base == offset, "sanity check");

    if (TraceDerivedPointers) {
      tty->print_cr("Updating derived pointer@" INTPTR_FORMAT
                    " - Derived: " INTPTR_FORMAT " Base: " INTPTR_FORMAT " (Offset: " INTX_FORMAT ")",
                    p2i(derived_loc), derived_pointer_value(*derived_loc), p2i(base), offset);
    }

    // Delete entry
    delete entry;
  }
  assert(Entry::_list->empty(), "invariant");
  _active = false;
}

#endif // COMPILER2_OR_JVMCI