GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/hotspot/src/share/vm/runtime/fprofiler.cpp
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "code/vtableStubs.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "oops/symbol.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/task.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Static fields of FlatProfiler
int FlatProfiler::received_gc_ticks = 0;
int FlatProfiler::vm_operation_ticks = 0;
int FlatProfiler::threads_lock_ticks = 0;
int FlatProfiler::class_loader_ticks = 0;
int FlatProfiler::extra_ticks = 0;
int FlatProfiler::blocked_ticks = 0;
int FlatProfiler::deopt_ticks = 0;
int FlatProfiler::unknown_ticks = 0;
int FlatProfiler::interpreter_ticks = 0;
int FlatProfiler::compiler_ticks = 0;
int FlatProfiler::received_ticks = 0;
int FlatProfiler::delivered_ticks = 0;
int* FlatProfiler::bytecode_ticks = NULL;
int* FlatProfiler::bytecode_ticks_stub = NULL;
int FlatProfiler::all_int_ticks = 0;
int FlatProfiler::all_comp_ticks = 0;
int FlatProfiler::all_ticks = 0;
bool FlatProfiler::full_profile_flag = false;
ThreadProfiler* FlatProfiler::thread_profiler = NULL;
ThreadProfiler* FlatProfiler::vm_thread_profiler = NULL;
FlatProfilerTask* FlatProfiler::task = NULL;
elapsedTimer FlatProfiler::timer;
int FlatProfiler::interval_ticks_previous = 0;
IntervalData* FlatProfiler::interval_data = NULL;

ThreadProfiler::ThreadProfiler() {
  // Space for the ProfilerNodes
  const int area_size = 1 * ProfilerNodeSize * 1024;
  area_bottom = AllocateHeap(area_size, mtInternal);
  area_top = area_bottom;
  area_limit = area_bottom + area_size;

  // ProfilerNode pointer table
  table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal);
  initialize();
  engaged = false;
}

ThreadProfiler::~ThreadProfiler() {
  FreeHeap(area_bottom);
  area_bottom = NULL;
  area_top = NULL;
  area_limit = NULL;
  FreeHeap(table);
  table = NULL;
}

// Statics for ThreadProfiler
int ThreadProfiler::table_size = 1024;

int ThreadProfiler::entry(int value) {
  value = (value > 0) ? value : -value;
  return value % table_size;
}
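
// Note: entry() above folds a hash value of either sign into a bucket index
// in [0, table_size); the *_update methods use it to pick a hash chain.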

ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
  _r = r;
  _pp = NULL;
  assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
  Thread* tp = Thread::current();
  if (tp != NULL && tp->is_Java_thread()) {
    JavaThread* jtp = (JavaThread*) tp;
    ThreadProfiler* pp = jtp->get_thread_profiler();
    _pp = pp;
    if (pp != NULL) {
      pp->region_flag[r] = true;
    }
  }
}

ThreadProfilerMark::~ThreadProfilerMark() {
  if (_pp != NULL) {
    _pp->region_flag[_r] = false;
  }
  _pp = NULL;
}

// Random other statics
static const int col1 = 2;  // position of output column 1
static const int col2 = 11; // position of output column 2
static const int col3 = 25; // position of output column 3
static const int col4 = 55; // position of output column 4


// Used for detailed profiling of nmethods.
class PCRecorder : AllStatic {
 private:
  static int* counters;
  static address base;
  enum {
    bucket_size = 16
  };
  static int index_for(address pc) { return (pc - base)/bucket_size; }
  static address pc_for(int index) { return base + (index * bucket_size); }
  static int size() {
    return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
  }
 public:
  static address bucket_start_for(address pc) {
    if (counters == NULL) return NULL;
    return pc_for(index_for(pc));
  }
  static int bucket_count_for(address pc) { return counters[index_for(pc)]; }
  static void init();
  static void record(address pc);
  static void print();
  static void print_blobs(CodeBlob* cb);
};
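
// Each counter covers a bucket_size-byte (16-byte) slice of the code cache:
// index_for() and pc_for() convert between a PC and its bucket, so ticks at
// any PC within the same 16-byte range increment the same counter.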

int* PCRecorder::counters = NULL;
address PCRecorder::base = NULL;

void PCRecorder::init() {
  MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int s = size();
  counters = NEW_C_HEAP_ARRAY(int, s, mtInternal);
  for (int index = 0; index < s; index++) {
    counters[index] = 0;
  }
  base = CodeCache::first_address();
}
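
// base is captured once here, under the CodeCache_lock; index_for() then
// measures bucket offsets from the start of the code cache.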

void PCRecorder::record(address pc) {
  if (counters == NULL) return;
  assert(CodeCache::contains(pc), "must be in CodeCache");
  counters[index_for(pc)]++;
}


address FlatProfiler::bucket_start_for(address pc) {
  return PCRecorder::bucket_start_for(pc);
}

int FlatProfiler::bucket_count_for(address pc) {
  return PCRecorder::bucket_count_for(pc);
}

void PCRecorder::print() {
  if (counters == NULL) return;

  tty->cr();
  tty->print_cr("Printing compiled methods with PC buckets having more than %d ticks", ProfilerPCTickThreshold);
  tty->print_cr("===================================================================");
  tty->cr();

  GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);


  int s;
  {
    MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    s = size();
  }

  for (int index = 0; index < s; index++) {
    int count = counters[index];
    if (count > ProfilerPCTickThreshold) {
      address pc = pc_for(index);
      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
      if (cb != NULL && candidates->find(cb) < 0) {
        candidates->push(cb);
      }
    }
  }
  for (int i = 0; i < candidates->length(); i++) {
    print_blobs(candidates->at(i));
  }
}

void PCRecorder::print_blobs(CodeBlob* cb) {
  if (cb != NULL) {
    cb->print();
    if (cb->is_nmethod()) {
      ((nmethod*)cb)->print_code();
    }
    tty->cr();
  } else {
    tty->print_cr("stub code");
  }
}

class tick_counter { // holds tick info for one node
 public:
  int ticks_in_code;
  int ticks_in_native;

  tick_counter() { ticks_in_code = ticks_in_native = 0; }
  tick_counter(int code, int native) { ticks_in_code = code; ticks_in_native = native; }

  int total() const {
    return (ticks_in_code + ticks_in_native);
  }

  void add(tick_counter* a) {
    ticks_in_code += a->ticks_in_code;
    ticks_in_native += a->ticks_in_native;
  }

  void update(TickPosition where) {
    switch (where) {
      case tp_code:   ticks_in_code++;   break;
      case tp_native: ticks_in_native++; break;
    }
  }

  void print_code(outputStream* st, int total_ticks) {
    st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
  }

  void print_native(outputStream* st) {
    st->print(" + %5d ", ticks_in_native);
  }
};

class ProfilerNode {
 private:
  ProfilerNode* _next;
 public:
  tick_counter ticks;

 public:

  void* operator new(size_t size, ThreadProfiler* tp) throw();
  void operator delete(void* p);

  ProfilerNode() {
    _next = NULL;
  }

  virtual ~ProfilerNode() {
    if (_next)
      delete _next;
  }

  void set_next(ProfilerNode* n) { _next = n; }
  ProfilerNode* next() { return _next; }

  void update(TickPosition where) { ticks.update(where); }
  int total_ticks() { return ticks.total(); }

  virtual bool is_interpreted() const { return false; }
  virtual bool is_compiled() const { return false; }
  virtual bool is_stub() const { return false; }
  virtual bool is_runtime_stub() const { return false; }
  virtual void oops_do(OopClosure* f) = 0;

  virtual bool interpreted_match(Method* m) const { return false; }
  virtual bool compiled_match(Method* m) const { return false; }
  virtual bool stub_match(Method* m, const char* name) const { return false; }
  virtual bool adapter_match() const { return false; }
  virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
  virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }

  static void print_title(outputStream* st) {
    st->print(" + native");
    st->fill_to(col3);
    st->print("Method");
    st->fill_to(col4);
    st->cr();
  }

  static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
    t->print_code(st, total);
    st->fill_to(col2);
    t->print_native(st);
    st->fill_to(col3);
    st->print("%s", msg);
    st->cr();
  }

  virtual Method* method() = 0;

  virtual void print_method_on(outputStream* st) {
    int limit;
    int i;
    Method* m = method();
    Symbol* k = m->klass_name();
    // Print the class name with dots instead of slashes
    limit = k->utf8_length();
    for (i = 0 ; i < limit ; i += 1) {
      char c = (char) k->byte_at(i);
      if (c == '/') {
        c = '.';
      }
      st->print("%c", c);
    }
    if (limit > 0) {
      st->print(".");
    }
    Symbol* n = m->name();
    limit = n->utf8_length();
    for (i = 0 ; i < limit ; i += 1) {
      char c = (char) n->byte_at(i);
      st->print("%c", c);
    }
    if (Verbose || WizardMode) {
      // Disambiguate overloaded methods
      Symbol* sig = m->signature();
      sig->print_symbol_on(st);
    } else if (MethodHandles::is_signature_polymorphic(m->intrinsic_id()))
      // compare with Method::print_short_name
      MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true);
  }

  virtual void print(outputStream* st, int total_ticks) {
    ticks.print_code(st, total_ticks);
    st->fill_to(col2);
    ticks.print_native(st);
    st->fill_to(col3);
    print_method_on(st);
    st->cr();
  }

  // for hashing into the table
  static int hash(Method* method) {
    // The point here is to try to make something fairly unique
    // out of the fields we can read without grabbing any locks
    // since the method may be locked when we need the hash.
    return (
        method->code_size() ^
        method->max_stack() ^
        method->max_locals() ^
        method->size_of_parameters());
  }

  // for sorting
  static int compare(ProfilerNode** a, ProfilerNode** b) {
    return (*b)->total_ticks() - (*a)->total_ticks();
  }
};

void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
  void* result = (void*) tp->area_top;
  tp->area_top += size;

  if (tp->area_top > tp->area_limit) {
    fatal("flat profiler buffer overflow");
  }
  return result;
}
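
// Nodes are bump-allocated from the per-thread arena set up in the
// ThreadProfiler constructor, so operator delete below is deliberately a
// no-op: the whole arena is released at once in ~ThreadProfiler().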

void ProfilerNode::operator delete(void* p) {
}

class interpretedNode : public ProfilerNode {
 private:
  Method* _method;
  oop _class_loader; // needed to keep metadata for the method alive
 public:
  interpretedNode(Method* method, TickPosition where) : ProfilerNode() {
    _method = method;
    _class_loader = method->method_holder()->class_loader();
    update(where);
  }

  bool is_interpreted() const { return true; }

  bool interpreted_match(Method* m) const {
    return _method == m;
  }

  void oops_do(OopClosure* f) {
    f->do_oop(&_class_loader);
  }

  Method* method() { return _method; }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Interpreted");
    ProfilerNode::print_title(st);
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    ProfilerNode::print_method_on(st);
    MethodCounters* mcs = method()->method_counters();
    if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short();
  }
};

class compiledNode : public ProfilerNode {
 private:
  Method* _method;
  oop _class_loader; // needed to keep metadata for the method alive
 public:
  compiledNode(Method* method, TickPosition where) : ProfilerNode() {
    _method = method;
    _class_loader = method->method_holder()->class_loader();
    update(where);
  }
  bool is_compiled() const { return true; }

  bool compiled_match(Method* m) const {
    return _method == m;
  }

  Method* method() { return _method; }

  void oops_do(OopClosure* f) {
    f->do_oop(&_class_loader);
  }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Compiled");
    ProfilerNode::print_title(st);
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    ProfilerNode::print_method_on(st);
  }
};

class stubNode : public ProfilerNode {
 private:
  Method* _method;
  oop _class_loader; // needed to keep metadata for the method alive
  const char* _symbol; // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
 public:
  stubNode(Method* method, const char* name, TickPosition where) : ProfilerNode() {
    _method = method;
    _class_loader = method->method_holder()->class_loader();
    _symbol = name;
    update(where);
  }

  bool is_stub() const { return true; }

  void oops_do(OopClosure* f) {
    f->do_oop(&_class_loader);
  }

  bool stub_match(Method* m, const char* name) const {
    return (_method == m) && (_symbol == name);
  }

  Method* method() { return _method; }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Stub");
    ProfilerNode::print_title(st);
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    ProfilerNode::print_method_on(st);
    print_symbol_on(st);
  }

  void print_symbol_on(outputStream* st) {
    if (_symbol) {
      st->print(" (%s)", _symbol);
    }
  }
};

class adapterNode : public ProfilerNode {
 public:
  adapterNode(TickPosition where) : ProfilerNode() {
    update(where);
  }
  bool is_compiled() const { return true; }

  bool adapter_match() const { return true; }

  Method* method() { return NULL; }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", "adapters");
  }
};

class runtimeStubNode : public ProfilerNode {
 private:
  const CodeBlob* _stub;
  const char* _symbol; // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
 public:
  runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(stub), _symbol(name) {
    assert(stub->is_runtime_stub(), "wrong code blob");
    update(where);
  }

  bool is_runtime_stub() const { return true; }

  bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
    assert(stub->is_runtime_stub(), "wrong code blob");
    return ((RuntimeStub*)_stub)->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
           (_symbol == name);
  }

  Method* method() { return NULL; }

  static void print_title(outputStream* st) {
    st->fill_to(col1);
    st->print("%11s", "Runtime stub");
    ProfilerNode::print_title(st);
  }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", ((RuntimeStub*)_stub)->name());
    print_symbol_on(st);
  }

  void print_symbol_on(outputStream* st) {
    if (_symbol) {
      st->print(" (%s)", _symbol);
    }
  }
};


class unknown_compiledNode : public ProfilerNode {
  const char *_name;
 public:
  unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
    if (cb->is_buffer_blob())
      _name = ((BufferBlob*)cb)->name();
    else
      _name = ((SingletonBlob*)cb)->name();
    update(where);
  }
  bool is_compiled() const { return true; }

  bool unknown_compiled_match(const CodeBlob* cb) const {
    if (cb->is_buffer_blob())
      return !strcmp(((BufferBlob*)cb)->name(), _name);
    else
      return !strcmp(((SingletonBlob*)cb)->name(), _name);
  }

  Method* method() { return NULL; }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    st->print("%s", _name);
  }
};

class vmNode : public ProfilerNode {
 private:
  const char* _name; // "optional" name obtained by os means such as dll lookup
 public:
  vmNode(const TickPosition where) : ProfilerNode() {
    _name = NULL;
    update(where);
  }

  vmNode(const char* name, const TickPosition where) : ProfilerNode() {
    _name = name;
    update(where);
  }

  const char *name() const { return _name; }
  bool is_compiled() const { return true; }

  bool vm_match(const char* name) const { return strcmp(name, _name) == 0; }

  Method* method() { return NULL; }

  static int hash(const char* name) {
    // Compute a simple hash
    const char* cp = name;
    int h = 0;

    if (name != NULL) {
      while (*cp != '\0') {
        h = (h << 1) ^ *cp;
        cp++;
      }
    }
    return h;
  }

  void oops_do(OopClosure* f) {
    ;
  }

  void print(outputStream* st, int total_ticks) {
    ProfilerNode::print(st, total_ticks);
  }

  void print_method_on(outputStream* st) {
    if (_name == NULL) {
      st->print("%s", "unknown code");
    }
    else {
      st->print("%s", _name);
    }
  }
};

void ThreadProfiler::interpreted_update(Method* method, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) interpretedNode(method, where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (node->interpreted_match(method)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) interpretedNode(method, where));
  }
}
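
// compiled_update, stub_update, adapter_update, runtime_stub_update,
// unknown_compiled_update and vm_update below all follow the same pattern as
// interpreted_update: probe the hash chain for a matching node, bump its
// tick count if found, otherwise append a new node to the end of the chain.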

void ThreadProfiler::compiled_update(Method* method, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) compiledNode(method, where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (node->compiled_match(method)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) compiledNode(method, where));
  }
}

void ThreadProfiler::stub_update(Method* method, const char* name, TickPosition where) {
  int index = entry(ProfilerNode::hash(method));
  if (!table[index]) {
    table[index] = new (this) stubNode(method, name, where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (node->stub_match(method, name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) stubNode(method, name, where));
  }
}

void ThreadProfiler::adapter_update(TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) adapterNode(where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (node->adapter_match()) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) adapterNode(where));
  }
}

void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) runtimeStubNode(stub, name, where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (node->runtimeStub_match(stub, name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) runtimeStubNode(stub, name, where));
  }
}


void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
  int index = 0;
  if (!table[index]) {
    table[index] = new (this) unknown_compiledNode(cb, where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (node->unknown_compiled_match(cb)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) unknown_compiledNode(cb, where));
  }
}

void ThreadProfiler::vm_update(TickPosition where) {
  vm_update("", where);
}

void ThreadProfiler::vm_update(const char* name, TickPosition where) {
  int index = entry(vmNode::hash(name));
  assert(index >= 0, "Must be positive");
  // Note that we call strdup below since the symbol may be resource allocated
  if (!table[index]) {
    table[index] = new (this) vmNode(os::strdup(name), where);
  } else {
    ProfilerNode* prev = table[index];
    for (ProfilerNode* node = prev; node; node = node->next()) {
      if (((vmNode *)node)->vm_match(name)) {
        node->update(where);
        return;
      }
      prev = node;
    }
    prev->set_next(new (this) vmNode(os::strdup(name), where));
  }
}


class FlatProfilerTask : public PeriodicTask {
 public:
  FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
  void task();
};
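
// FlatProfilerTask is a PeriodicTask, so task() below runs on the
// WatcherThread at the interval passed to the constructor in
// FlatProfiler::engage().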

void FlatProfiler::record_vm_operation() {
  if (Universe::heap()->is_gc_active()) {
    FlatProfiler::received_gc_ticks += 1;
    return;
  }

  if (DeoptimizationMarker::is_active()) {
    FlatProfiler::deopt_ticks += 1;
    return;
  }

  FlatProfiler::vm_operation_ticks += 1;
}
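
// Note the classification priority above: a tick that arrives during an
// active GC counts as a GC tick even if a deoptimization is also in
// progress; only otherwise is it charged as a generic VM-operation tick.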

void FlatProfiler::record_vm_tick() {
  // Profile the VM Thread itself if needed
  // This is done without getting the Threads_lock and we can go deep
  // inside Safepoint, etc.
  if (ProfileVM) {
    ResourceMark rm;
    ExtendedPC epc;
    const char *name = NULL;
    char buf[256];
    buf[0] = '\0';

    vm_thread_profiler->inc_thread_ticks();

    // Get a snapshot of a current VMThread pc (and leave it running!)
    // The call may fail if, for instance the VM thread is interrupted while
    // holding the Interrupt_lock or for other reasons.
    epc = os::get_thread_pc(VMThread::vm_thread());
    if (epc.pc() != NULL) {
      if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
        name = buf;
      }
    }
    if (name != NULL) {
      vm_thread_profiler->vm_update(name, tp_native);
    }
  }
}

void FlatProfiler::record_thread_ticks() {

  int maxthreads, suspendedthreadcount;
  JavaThread** threadsList;
  bool interval_expired = false;

  if (ProfileIntervals &&
      (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
    interval_expired = true;
    interval_ticks_previous = FlatProfiler::received_ticks;
  }

  // Try not to wait for the Threads_lock
  if (Threads_lock->try_lock()) {
    { // Threads_lock scope
      maxthreads = Threads::number_of_threads();
      threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal);
      suspendedthreadcount = 0;
      for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
        if (tp->is_Compiler_thread()) {
          // Only record ticks for active compiler threads
          CompilerThread* cthread = (CompilerThread*)tp;
          if (cthread->task() != NULL) {
            // The compiler is active. If we need to access any of the fields
            // of the compiler task we should suspend the CompilerThread first.
            FlatProfiler::compiler_ticks += 1;
            continue;
          }
        }

        // First externally suspend all threads by marking each for
        // external suspension - so it will stop at its next transition
        // Then do a safepoint
        ThreadProfiler* pp = tp->get_thread_profiler();
        if (pp != NULL && pp->engaged) {
          MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
          if (!tp->is_external_suspend() && !tp->is_exiting()) {
            tp->set_external_suspend();
            threadsList[suspendedthreadcount++] = tp;
          }
        }
      }
      Threads_lock->unlock();
    }
    // Suspend each thread. This call should just return
    // for any threads that have already self-suspended
    // Net result should be one safepoint
    for (int j = 0; j < suspendedthreadcount; j++) {
      JavaThread *tp = threadsList[j];
      if (tp) {
        tp->java_suspend();
      }
    }

    // We are responsible for resuming any thread on this list
    for (int i = 0; i < suspendedthreadcount; i++) {
      JavaThread *tp = threadsList[i];
      if (tp) {
        ThreadProfiler* pp = tp->get_thread_profiler();
        if (pp != NULL && pp->engaged) {
          HandleMark hm;
          FlatProfiler::delivered_ticks += 1;
          if (interval_expired) {
            FlatProfiler::interval_record_thread(pp);
          }
          // This is the place where we check to see if a user thread is
          // blocked waiting for compilation.
          if (tp->blocked_on_compilation()) {
            pp->compiler_ticks += 1;
            pp->interval_data_ref()->inc_compiling();
          } else {
            pp->record_tick(tp);
          }
        }
        MutexLocker ml(Threads_lock);
        tp->java_resume();
      }
    }
    if (interval_expired) {
      FlatProfiler::interval_print();
      FlatProfiler::interval_reset();
    }

    FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
  } else {
    // Couldn't get the threads lock, just record that rather than blocking
    FlatProfiler::threads_lock_ticks += 1;
  }

}

void FlatProfilerTask::task() {
  FlatProfiler::received_ticks += 1;

  if (ProfileVM) {
    FlatProfiler::record_vm_tick();
  }

  VM_Operation* op = VMThread::vm_operation();
  if (op != NULL) {
    FlatProfiler::record_vm_operation();
    if (SafepointSynchronize::is_at_safepoint()) {
      return;
    }
  }
  FlatProfiler::record_thread_ticks();
}
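
// If a VM operation is running at a safepoint, the tick above is charged to
// the VM side only and per-thread sampling is skipped: the Java threads are
// stopped at the safepoint anyway.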

void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
  FlatProfiler::all_int_ticks++;
  if (!FlatProfiler::full_profile()) {
    return;
  }

  if (!fr.is_interpreted_frame_valid(thread)) {
    // tick came at a bad time
    interpreter_ticks += 1;
    FlatProfiler::interpreter_ticks += 1;
    return;
  }

  // The frame has been fully validated so we can trust the method and bci

  Method* method = *fr.interpreter_frame_method_addr();

  interpreted_update(method, where);

  // update byte code table
  InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
  if (desc != NULL && desc->bytecode() >= 0) {
    ticks[desc->bytecode()]++;
  }
}

void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
  const char *name = NULL;
  TickPosition localwhere = where;

  FlatProfiler::all_comp_ticks++;
  if (!FlatProfiler::full_profile()) return;

  CodeBlob* cb = fr.cb();

  // For runtime stubs, record as native rather than as compiled
  if (cb->is_runtime_stub()) {
    RegisterMap map(thread, false);
    fr = fr.sender(&map);
    cb = fr.cb();
    localwhere = tp_native;
  }
  Method* method = (cb->is_nmethod()) ? ((nmethod *)cb)->method() :
                                        (Method*)NULL;

  if (method == NULL) {
    if (cb->is_runtime_stub())
      runtime_stub_update(cb, name, localwhere);
    else
      unknown_compiled_update(cb, localwhere);
  }
  else {
    if (method->is_native()) {
      stub_update(method, name, localwhere);
    } else {
      compiled_update(method, localwhere);
    }
  }
}

extern "C" void find(int x);


void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
  // The tick happened in real code -> non VM code
  if (fr.is_interpreted_frame()) {
    interval_data_ref()->inc_interpreted();
    record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
    return;
  }

  if (CodeCache::contains(fr.pc())) {
    interval_data_ref()->inc_compiled();
    PCRecorder::record(fr.pc());
    record_compiled_tick(thread, fr, tp_code);
    return;
  }

  if (VtableStubs::stub_containing(fr.pc()) != NULL) {
    unknown_ticks_array[ut_vtable_stubs] += 1;
    return;
  }

  frame caller = fr.profile_find_Java_sender_frame(thread);

  if (caller.sp() != NULL && caller.pc() != NULL) {
    record_tick_for_calling_frame(thread, caller);
    return;
  }

  unknown_ticks_array[ut_running_frame] += 1;
  FlatProfiler::unknown_ticks += 1;
}

void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
  // The tick happened in VM code
  interval_data_ref()->inc_native();
  if (fr.is_interpreted_frame()) {
    record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
    return;
  }
  if (CodeCache::contains(fr.pc())) {
    record_compiled_tick(thread, fr, tp_native);
    return;
  }

  frame caller = fr.profile_find_Java_sender_frame(thread);

  if (caller.sp() != NULL && caller.pc() != NULL) {
    record_tick_for_calling_frame(thread, caller);
    return;
  }

  unknown_ticks_array[ut_calling_frame] += 1;
  FlatProfiler::unknown_ticks += 1;
}
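
// record_tick_for_running_frame attributes the tick as tp_code (the thread
// was executing that frame); record_tick_for_calling_frame attributes it as
// tp_native (the frame is merely the nearest Java frame beneath VM or native
// code). Both walk toward a Java sender frame when the PC is unrecognized.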

void ThreadProfiler::record_tick(JavaThread* thread) {
  FlatProfiler::all_ticks++;
  thread_ticks += 1;

  // Here's another way to track global state changes.
  // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader
  // and we check that here.
  // This is more direct, and more than one thread can be in the class loader at a time,
  // but it does mean the class loader has to know about the profiler.
  if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
    class_loader_ticks += 1;
    FlatProfiler::class_loader_ticks += 1;
    return;
  } else if (region_flag[ThreadProfilerMark::extraRegion]) {
    extra_ticks += 1;
    FlatProfiler::extra_ticks += 1;
    return;
  }
  // Note that the WatcherThread can now stop for safepoints
  uint32_t debug_bits = 0;
  if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
      SuspendRetryDelay, &debug_bits)) {
    unknown_ticks_array[ut_unknown_thread_state] += 1;
    FlatProfiler::unknown_ticks += 1;
    return;
  }

  frame fr;

  switch (thread->thread_state()) {
  case _thread_in_native:
  case _thread_in_native_trans:
  case _thread_in_vm:
  case _thread_in_vm_trans:
    if (thread->profile_last_Java_frame(&fr)) {
      if (fr.is_runtime_frame()) {
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
      }
      record_tick_for_calling_frame(thread, fr);
    } else {
      unknown_ticks_array[ut_no_last_Java_frame] += 1;
      FlatProfiler::unknown_ticks += 1;
    }
    break;
  // handle_special_runtime_exit_condition self-suspends threads in Java
  case _thread_in_Java:
  case _thread_in_Java_trans:
    if (thread->profile_last_Java_frame(&fr)) {
      if (fr.is_safepoint_blob_frame()) {
        RegisterMap map(thread, false);
        fr = fr.sender(&map);
      }
      record_tick_for_running_frame(thread, fr);
    } else {
      unknown_ticks_array[ut_no_last_Java_frame] += 1;
      FlatProfiler::unknown_ticks += 1;
    }
    break;
  case _thread_blocked:
  case _thread_blocked_trans:
    if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
      if (thread->profile_last_Java_frame(&fr)) {
        if (fr.is_safepoint_blob_frame()) {
          RegisterMap map(thread, false);
          fr = fr.sender(&map);
          record_tick_for_running_frame(thread, fr);
        } else {
          record_tick_for_calling_frame(thread, fr);
        }
      } else {
        unknown_ticks_array[ut_no_last_Java_frame] += 1;
        FlatProfiler::unknown_ticks += 1;
      }
    } else {
      blocked_ticks += 1;
      FlatProfiler::blocked_ticks += 1;
    }
    break;
  case _thread_uninitialized:
  case _thread_new:
  // not used, included for completeness
  case _thread_new_trans:
    unknown_ticks_array[ut_no_last_Java_frame] += 1;
    FlatProfiler::unknown_ticks += 1;
    break;
  default:
    unknown_ticks_array[ut_unknown_thread_state] += 1;
    FlatProfiler::unknown_ticks += 1;
    break;
  }
  return;
}

void ThreadProfiler::engage() {
  engaged = true;
  timer.start();
}

void ThreadProfiler::disengage() {
  engaged = false;
  timer.stop();
}

void ThreadProfiler::initialize() {
  for (int index = 0; index < table_size; index++) {
    table[index] = NULL;
  }
  thread_ticks = 0;
  blocked_ticks = 0;
  compiler_ticks = 0;
  interpreter_ticks = 0;
  for (int ut = 0; ut < ut_end; ut += 1) {
    unknown_ticks_array[ut] = 0;
  }
  region_flag[ThreadProfilerMark::classLoaderRegion] = false;
  class_loader_ticks = 0;
  region_flag[ThreadProfilerMark::extraRegion] = false;
  extra_ticks = 0;
  timer.start();
  interval_data_ref()->reset();
}

void ThreadProfiler::reset() {
  timer.stop();
  if (table != NULL) {
    for (int index = 0; index < table_size; index++) {
      ProfilerNode* n = table[index];
      if (n != NULL) {
        delete n;
      }
    }
  }
  initialize();
}

void FlatProfiler::allocate_table() {
  { // Bytecode table
    bytecode_ticks = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
    bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
    for (int index = 0; index < Bytecodes::number_of_codes; index++) {
      bytecode_ticks[index] = 0;
      bytecode_ticks_stub[index] = 0;
    }
  }

  if (ProfilerRecordPC) PCRecorder::init();

  interval_data = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal);
  FlatProfiler::interval_reset();
}
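
// allocate_table() is called lazily from engage() the first time profiling
// is turned on; the arrays are not freed on disengage() and are simply
// reset and reused across profiling runs.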

void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
  full_profile_flag = fullProfile;
  if (bytecode_ticks == NULL) {
    allocate_table();
  }
  if (ProfileVM && (vm_thread_profiler == NULL)) {
    vm_thread_profiler = new ThreadProfiler();
  }
  if (task == NULL) {
    task = new FlatProfilerTask(WatcherThread::delay_interval);
    task->enroll();
  }
  timer.start();
  if (mainThread != NULL) {
    // When mainThread was created, it might not have a ThreadProfiler
    ThreadProfiler* pp = mainThread->get_thread_profiler();
    if (pp == NULL) {
      mainThread->set_thread_profiler(new ThreadProfiler());
    } else {
      pp->reset();
    }
    mainThread->get_thread_profiler()->engage();
  }
  // This is where we would assign thread_profiler
  // if we wanted only one thread_profiler for all threads.
  thread_profiler = NULL;
}

void FlatProfiler::disengage() {
  if (!task) {
    return;
  }
  timer.stop();
  task->disenroll();
  delete task;
  task = NULL;
  if (thread_profiler != NULL) {
    thread_profiler->disengage();
  } else {
    MutexLocker tl(Threads_lock);
    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
      ThreadProfiler* pp = tp->get_thread_profiler();
      if (pp != NULL) {
        pp->disengage();
      }
    }
  }
}

void FlatProfiler::reset() {
  if (task) {
    disengage();
  }

  class_loader_ticks = 0;
  extra_ticks = 0;
  received_gc_ticks = 0;
  vm_operation_ticks = 0;
  compiler_ticks = 0;
  deopt_ticks = 0;
  interpreter_ticks = 0;
  blocked_ticks = 0;
  unknown_ticks = 0;
  received_ticks = 0;
  delivered_ticks = 0;
  timer.stop();
}

bool FlatProfiler::is_active() {
  return task != NULL;
}

void FlatProfiler::print_byte_code_statistics() {
  GrowableArray<ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);

  tty->print_cr(" Bytecode ticks:");
  for (int index = 0; index < Bytecodes::number_of_codes; index++) {
    if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
      tty->print_cr(" %4d %4d = %s",
          FlatProfiler::bytecode_ticks[index],
          FlatProfiler::bytecode_ticks_stub[index],
          Bytecodes::name((Bytecodes::Code) index));
    }
  }
  tty->cr();
}

void print_ticks(const char* title, int ticks, int total) {
  if (ticks > 0) {
    tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
    tty->fill_to(col3);
    tty->print("%s", title);
    tty->cr();
  }
}

void ThreadProfiler::print(const char* thread_name) {
  ResourceMark rm;
  MutexLocker ppl(ProfilePrint_lock);
  int index = 0; // Declared outside for loops for portability

  if (table == NULL) {
    return;
  }

  if (thread_ticks <= 0) {
    return;
  }

  const char* title = "too soon to tell";
  double secs = timer.seconds();

  GrowableArray<ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
  for (index = 0; index < table_size; index++) {
    for (ProfilerNode* node = table[index]; node; node = node->next())
      array->append(node);
  }

  array->sort(&ProfilerNode::compare);

  // compute total (sanity check)
  int active =
    class_loader_ticks +
    compiler_ticks +
    interpreter_ticks +
    unknown_ticks();
  for (index = 0; index < array->length(); index++) {
    active += array->at(index)->ticks.total();
  }
  int total = active + blocked_ticks;

  tty->cr();
  tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
  if (total != thread_ticks) {
    print_ticks("Lost ticks", thread_ticks - total, thread_ticks);
  }
  tty->cr();

  // print interpreted methods
  tick_counter interpreted_ticks;
  bool has_interpreted_ticks = false;
  int print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_interpreted()) {
      interpreted_ticks.add(&n->ticks);
      if (!has_interpreted_ticks) {
        interpretedNode::print_title(tty);
        has_interpreted_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfInterpretedMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_interpreted_ticks) {
    if (print_count <= ProfilerNumberOfInterpretedMethods) {
      title = "Total interpreted";
    } else {
      title = "Total interpreted (including elided)";
    }
    interpretedNode::print_total(tty, &interpreted_ticks, active, title);
    tty->cr();
  }

  // print compiled methods
  tick_counter compiled_ticks;
  bool has_compiled_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_compiled()) {
      compiled_ticks.add(&n->ticks);
      if (!has_compiled_ticks) {
        compiledNode::print_title(tty);
        has_compiled_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfCompiledMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_compiled_ticks) {
    if (print_count <= ProfilerNumberOfCompiledMethods) {
      title = "Total compiled";
    } else {
      title = "Total compiled (including elided)";
    }
    compiledNode::print_total(tty, &compiled_ticks, active, title);
    tty->cr();
  }

  // print stub methods
  tick_counter stub_ticks;
  bool has_stub_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_stub()) {
      stub_ticks.add(&n->ticks);
      if (!has_stub_ticks) {
        stubNode::print_title(tty);
        has_stub_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfStubMethods) {
        n->print(tty, active);
      }
    }
  }
  if (has_stub_ticks) {
    if (print_count <= ProfilerNumberOfStubMethods) {
      title = "Total stub";
    } else {
      title = "Total stub (including elided)";
    }
    stubNode::print_total(tty, &stub_ticks, active, title);
    tty->cr();
  }

  // print runtime stubs
  tick_counter runtime_stub_ticks;
  bool has_runtime_stub_ticks = false;
  print_count = 0;
  for (index = 0; index < array->length(); index++) {
    ProfilerNode* n = array->at(index);
    if (n->is_runtime_stub()) {
      runtime_stub_ticks.add(&n->ticks);
      if (!has_runtime_stub_ticks) {
        runtimeStubNode::print_title(tty);
        has_runtime_stub_ticks = true;
      }
      if (print_count++ < ProfilerNumberOfRuntimeStubNodes) {
        n->print(tty, active);
      }
    }
  }
  if (has_runtime_stub_ticks) {
    if (print_count <= ProfilerNumberOfRuntimeStubNodes) {
      title = "Total runtime stubs";
    } else {
      title = "Total runtime stubs (including elided)";
    }
    runtimeStubNode::print_total(tty, &runtime_stub_ticks, active, title);
    tty->cr();
  }

  if (blocked_ticks + class_loader_ticks + interpreter_ticks + compiler_ticks + unknown_ticks() != 0) {
    tty->fill_to(col1);
    tty->print_cr("Thread-local ticks:");
    print_ticks("Blocked (of total)", blocked_ticks, total);
    print_ticks("Class loader", class_loader_ticks, active);
    print_ticks("Extra", extra_ticks, active);
    print_ticks("Interpreter", interpreter_ticks, active);
    print_ticks("Compilation", compiler_ticks, active);
    print_ticks("Unknown: vtable stubs", unknown_ticks_array[ut_vtable_stubs], active);
    print_ticks("Unknown: null method", unknown_ticks_array[ut_null_method], active);
    print_ticks("Unknown: running frame", unknown_ticks_array[ut_running_frame], active);
    print_ticks("Unknown: calling frame", unknown_ticks_array[ut_calling_frame], active);
    print_ticks("Unknown: no pc", unknown_ticks_array[ut_no_pc], active);
    print_ticks("Unknown: no last frame", unknown_ticks_array[ut_no_last_Java_frame], active);
    print_ticks("Unknown: thread_state", unknown_ticks_array[ut_unknown_thread_state], active);
    tty->cr();
  }

  if (WizardMode) {
    tty->print_cr("Node area used: %dKb", (area_top - area_bottom) / 1024);
  }
  reset();
}

/*
ThreadProfiler::print_unknown(){
  if (table == NULL) {
    return;
  }

  if (thread_ticks <= 0) {
    return;
  }
} */

void FlatProfiler::print(int unused) {
  ResourceMark rm;
  if (thread_profiler != NULL) {
    thread_profiler->print("All threads");
  } else {
    MutexLocker tl(Threads_lock);
    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
      ThreadProfiler* pp = tp->get_thread_profiler();
      if (pp != NULL) {
        pp->print(tp->get_thread_name());
      }
    }
  }

  if (ProfilerPrintByteCodeStatistics) {
    print_byte_code_statistics();
  }

  if (non_method_ticks() > 0) {
    tty->cr();
    tty->print_cr("Global summary of %3.2f seconds:", timer.seconds());
    print_ticks("Received ticks", received_ticks, received_ticks);
    print_ticks("Received GC ticks", received_gc_ticks, received_ticks);
    print_ticks("Compilation", compiler_ticks, received_ticks);
    print_ticks("Deoptimization", deopt_ticks, received_ticks);
    print_ticks("Other VM operations", vm_operation_ticks, received_ticks);
#ifndef PRODUCT
    print_ticks("Blocked ticks", blocked_ticks, received_ticks);
    print_ticks("Threads_lock blocks", threads_lock_ticks, received_ticks);
    print_ticks("Delivered ticks", delivered_ticks, received_ticks);
    print_ticks("All ticks", all_ticks, received_ticks);
#endif
    print_ticks("Class loader", class_loader_ticks, received_ticks);
    print_ticks("Extra ", extra_ticks, received_ticks);
    print_ticks("Interpreter", interpreter_ticks, received_ticks);
    print_ticks("Unknown code", unknown_ticks, received_ticks);
  }

  PCRecorder::print();

  if (ProfileVM) {
    tty->cr();
    vm_thread_profiler->print("VM Thread");
  }
}

void IntervalData::print_header(outputStream* st) {
  st->print("i/c/n/g");
}

void IntervalData::print_data(outputStream* st) {
  st->print("%d/%d/%d/%d", interpreted(), compiled(), native(), compiling());
}

void FlatProfiler::interval_record_thread(ThreadProfiler* tp) {
  IntervalData id = tp->interval_data();
  int total = id.total();
  tp->interval_data_ref()->reset();

  // Insertion sort the data, if it's relevant.
  for (int i = 0; i < interval_print_size; i += 1) {
    if (total > interval_data[i].total()) {
      for (int j = interval_print_size - 1; j > i; j -= 1) {
        interval_data[j] = interval_data[j-1];
      }
      interval_data[i] = id;
      break;
    }
  }
}
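
// After the insertion above, interval_data[] holds the interval_print_size
// busiest per-thread samples seen since the last interval_reset(), in
// descending order of total ticks.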

void FlatProfiler::interval_print() {
  if ((interval_data[0].total() > 0)) {
    tty->stamp();
    tty->print("\t");
    IntervalData::print_header(tty);
    for (int i = 0; i < interval_print_size; i += 1) {
      if (interval_data[i].total() > 0) {
        tty->print("\t");
        interval_data[i].print_data(tty);
      }
    }
    tty->cr();
  }
}

void FlatProfiler::interval_reset() {
  for (int i = 0; i < interval_print_size; i += 1) {
    interval_data[i].reset();
  }
}

void ThreadProfiler::oops_do(OopClosure* f) {
  if (table == NULL) return;

  for (int index = 0; index < table_size; index++) {
    for (ProfilerNode* node = table[index]; node; node = node->next())
      node->oops_do(f);
  }
}

void FlatProfiler::oops_do(OopClosure* f) {
  if (thread_profiler != NULL) {
    thread_profiler->oops_do(f);
  } else {
    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
      ThreadProfiler* pp = tp->get_thread_profiler();
      if (pp != NULL) {
        pp->oops_do(f);
      }
    }
  }
}