GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/code/codeCache.hpp
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the zone has a table for
//     locating a method given the address of an instruction.

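// Illustrative sketch (not part of the original header): the table mentioned
// above is what makes pc-to-blob lookups cheap. Assuming a caller holds an
// instruction address `pc` somewhere inside generated code, the Lookup API
// declared further down would typically be used roughly like this:
//
//   CodeBlob* cb = CodeCache::find_blob(pc);   // chunk whose code contains pc
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*) cb;             // a compiled Java method
//   }
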
class OopClosure;
class DepChange;
class ShenandoahParallelCodeCacheIterator;

class CodeCache : AllStatic {
  friend class VMStructs;
  friend class ShenandoahParallelCodeCacheIterator;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause a memory leak, but it is necessary for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap * _heap;
  static int _number_of_blobs;
  static int _number_of_adapters;
  static int _number_of_nmethods;
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  static int _codemem_full_count;

  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void prune_scavenge_root_nmethods();
  static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev);

 public:

  // Initialization
  static void initialize();

  static void report_codemem_full();

  // Allocation/administration
  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                   // called when the allocated CodeBlob has been filled
  static int alignment_unit();                        // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                      // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                     // frees a CodeBlob
  static void flush();                                // flushes all CodeBlobs
  static bool contains(void *p);                      // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));         // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);           // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));       // iterates over all nmethods
  static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods

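  // Illustrative sketch (not part of the original header), assuming the usual
  // allocate/fill/commit lifecycle implied by the declarations above: space is
  // reserved first, the caller emits code and metadata into it, and commit()
  // then makes the filled blob known to the cache's accounting.
  //
  //   CodeBlob* cb = CodeCache::allocate(size);   // may return NULL when the cache is full
  //   if (cb != NULL) {
  //     // ... construct the blob and emit code into it ...
  //     CodeCache::commit(cb);                    // blob is now filled in
  //   }
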
  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod* find_nmethod(void* start);

  // Lookup that does not fail if you look up a zombie method (if you call this, be sure to know
  // what you are doing)
  static CodeBlob* find_blob_unsafe(void* start) {
    // NMT can walk the stack before the code cache is created
    if (_heap == NULL) return NULL;

    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // This assert is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than
    // the CodeBlob itself. If you look up an address that is within
    // the heapblock but not in the CodeBlob, the assert fires.
    //
    // Most things will not look up such bad addresses. However,
    // AsyncGetCallTrace can see intermediate frames and get that kind
    // of invalid address, and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }

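  // Illustrative sketch (not part of the original header): a stack walker such
  // as AsyncGetCallTrace may probe addresses that belong to no blob at all, so
  // it would use the unsafe variant and tolerate a NULL result:
  //
  //   CodeBlob* cb = CodeCache::find_blob_unsafe(suspect_pc);  // suspect_pc is hypothetical
  //   if (cb == NULL) {
  //     // not generated code (or an address between blobs); skip this frame
  //   }
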
  // Iteration
  static CodeBlob* first();
  static CodeBlob* next (CodeBlob* cb);
  static CodeBlob* alive(CodeBlob *cb);
  static nmethod* alive_nmethod(CodeBlob *cb);
  static nmethod* first_nmethod();
  static nmethod* next_nmethod (CodeBlob* cb);
  static int nof_blobs()    { return _number_of_blobs; }
  static int nof_adapters() { return _number_of_adapters; }
  static int nof_nmethods() { return _number_of_nmethods; }

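  // Illustrative sketch (not part of the original header): first()/next() form
  // a simple cursor over every blob in the cache, with alive()/alive_nmethod()
  // narrowing the walk to blobs that are still live, so a full walk would look
  // roughly like:
  //
  //   for (CodeBlob* cb = CodeCache::first(); cb != NULL; cb = CodeCache::next(cb)) {
  //     // inspect cb
  //   }
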
  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;

  // Apply f to every live code blob in scavengable nmethods. Prune nmethods
  // from the list of scavengable nmethods if f->fix_relocations() and an nmethod
  // no longer has scavengable oops. If f->fix_relocations(), then f must copy
  // objects to their new location immediately to avoid fixing nmethods on the
  // basis of the old object locations.
  static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f);

  static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);

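  // Illustrative sketch (not part of the original header): the scavenge-root
  // nmethods form a singly linked list threaded through the nmethods
  // themselves (see _scavenge_root_nmethods above, linked via
  // nm->scavenge_root_link()), so walking the list would look roughly like:
  //
  //   for (nmethod* nm = CodeCache::scavenge_root_nmethods();
  //        nm != NULL;
  //        nm = nm->scavenge_root_link()) {
  //     // nm may hold scavengable oops that the GC must visit
  //   }
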
  // Printing/debugging
  static void print();            // prints summary
  static void print_internals();
  static void verify();           // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // prints a summary of the code cache usage
  static void log_state(outputStream* st);

  // The full limits of the codeCache
  static address low_bound()  { return (address) _heap->low_boundary(); }
  static address high_bound() { return (address) _heap->high_boundary(); }
  static address high()       { return (address) _heap->high(); }

  // Profiling
  static address first_address();       // first address used for CodeBlobs
  static address last_address();        // last address used for CodeBlobs
  static size_t capacity()              { return _heap->capacity(); }
  static size_t max_capacity()          { return _heap->max_capacity(); }
  static size_t unallocated_capacity()  { return _heap->unallocated_capacity(); }
  static double reverse_free_ratio();

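  // Illustrative sketch (not part of the original header): the capacity
  // accessors above combine in the obvious way, e.g. the space currently in
  // use is capacity() minus unallocated_capacity(). Assuming reverse_free_ratio()
  // relates max_capacity() to the remaining unallocated space (an assumption,
  // not stated in this header), a nearly full cache yields a large ratio,
  // which a compilation policy could use to throttle further compilation.
  //
  //   size_t used = CodeCache::capacity() - CodeCache::unallocated_capacity();
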
  static bool needs_cache_clean()           { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches();        // clear all inline caches

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(Method* dependee);
  static void make_marked_nmethods_not_entrant();

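  // Illustrative sketch (not part of the original header): deoptimization of
  // dependent code is a two-step protocol here; affected nmethods are first
  // marked, and only if something was marked are they actually made
  // not-entrant (typically at a safepoint), roughly:
  //
  //   int marked = CodeCache::mark_for_deoptimization(dependee);  // dependee is hypothetical
  //   if (marked > 0) {
  //     CodeCache::make_marked_nmethods_not_entrant();
  //   }
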
  // tells how many nmethods have dependencies
  static int  number_of_nmethods_with_dependencies();

  static int  get_codemem_full_count() { return _codemem_full_count; }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP