Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/mobile
Path: blob/master/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
40949 views
1
/*
2
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
3
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4
*
5
* This code is free software; you can redistribute it and/or modify it
6
* under the terms of the GNU General Public License version 2 only, as
7
* published by the Free Software Foundation.
8
*
9
* This code is distributed in the hope that it will be useful, but WITHOUT
10
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12
* version 2 for more details (a copy is included in the LICENSE file that
13
* accompanied this code).
14
*
15
* You should have received a copy of the GNU General Public License version
16
* 2 along with this work; if not, write to the Free Software Foundation,
17
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18
*
19
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20
* or visit www.oracle.com if you need additional information or have any
21
* questions.
22
*
23
*/
24
25
#ifndef SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP
26
#define SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP
27
28
// No concurrentHashTableTasks.hpp
29
30
#include "runtime/atomic.hpp"
31
#include "utilities/globalDefinitions.hpp"
32
#include "utilities/concurrentHashTable.inline.hpp"
33
34
// This inline file contains BulkDeleteTask and GrowTask, which are both
// bucket operations and are therefore serialized with each other.
36
37
// Base class for pause and/or parallel bulk operations
// (BulkDeleteTask and GrowTask below). The table's buckets are divided
// into equally sized ranges ("tasks"); worker threads atomically claim
// one range at a time via claim(). The whole operation runs under the
// table's resize lock, which serializes bulk operations against each
// other; pause()/cont() drop and re-take the mutex across a safepoint
// while the calling thread keeps logical ownership (_resize_lock_owner).
template <typename CONFIG, MEMFLAGS F>
class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
 protected:
  // The table this operation works on (non-owning).
  ConcurrentHashTable<CONFIG, F>* _cht;

  // Default size of _task_size_log2: each claimed range covers
  // 2^12 = 4096 buckets unless the table is smaller (clamped in setup()).
  static const size_t DEFAULT_TASK_SIZE_LOG2 = 12;

  // The table is split into ranges, every increment is one range.
  volatile size_t _next_to_claim;   // Next unclaimed range index.
  size_t _task_size_log2; // Log2 of the number of buckets per range.
  size_t _stop_task;      // Number of ranges (first invalid range index).
  size_t _size_log2;      // Log2 of the table size, cached by setup().
  bool _is_mt;            // True when several threads share this operation.

  BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
    : _cht(cht), _next_to_claim(0), _task_size_log2(DEFAULT_TASK_SIZE_LOG2),
    _stop_task(0), _size_log2(0), _is_mt(is_mt) {}

  // Returns true if you succeeded to claim the range start -> (stop-1).
  // Thread-safe: the atomic fetch-and-add on _next_to_claim hands
  // disjoint ranges to concurrent callers.
  bool claim(size_t* start, size_t* stop) {
    size_t claimed = Atomic::fetch_and_add(&_next_to_claim, 1u);
    if (claimed >= _stop_task) {
      // Every range has already been handed out.
      return false;
    }
    *start = claimed * (((size_t)1) << _task_size_log2);
    *stop = ((*start) + (((size_t)1) << _task_size_log2));
    return true;
  }

  // Calculate starting values from the current table size.
  // Must be called while holding the resize lock (asserted).
  void setup(Thread* thread) {
    thread_owns_resize_lock(thread);
    _size_log2 = _cht->_table->_log2_size;
    // For small tables one range covers the whole table.
    _task_size_log2 = MIN2(_task_size_log2, _size_log2);
    size_t tmp = _size_log2 > _task_size_log2 ?
                 _size_log2 - _task_size_log2 : 0;
    _stop_task = (((size_t)1) << tmp);  // Number of ranges, >= 1.
  }

  // Returns true when every range has been claimed, i.e. there is no
  // work left to hand out.
  // NOTE(review): the name suggests the opposite polarity of what the
  // expression computes — confirm against callers before relying on it.
  bool have_more_work() {
    return Atomic::load_acquire(&_next_to_claim) >= _stop_task;
  }

  // Asserts that 'thread' is the logical owner AND holds the mutex.
  void thread_owns_resize_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock not held");
  }
  // Asserts that 'thread' is the logical owner but has released the
  // mutex (the state while paused over a safepoint).
  void thread_owns_only_state_lock(Thread* thread) {
    assert(BucketsOperation::_cht->_resize_lock_owner == thread,
           "Should be locked by me");
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
  }
  // Asserts that 'thread' neither holds the mutex nor is the logical owner.
  void thread_do_not_own_resize_lock(Thread* thread) {
    assert(!BucketsOperation::_cht->_resize_lock->owned_by_self(),
           "Operations lock held");
    assert(BucketsOperation::_cht->_resize_lock_owner != thread,
           "Should not be locked by me");
  }

 public:
  // Pauses for safepoint: releases the mutex but keeps logical
  // ownership, so other bulk operations still cannot start.
  void pause(Thread* thread) {
    // This leaves internal state locked.
    this->thread_owns_resize_lock(thread);
    BucketsOperation::_cht->_resize_lock->unlock();
    this->thread_owns_only_state_lock(thread);
  }

  // Continues after safepoint: re-takes the mutex.
  void cont(Thread* thread) {
    this->thread_owns_only_state_lock(thread);
    // If someone slips in here directly after safepoint, spin until the
    // mutex is ours again; logical ownership guarantees it will be freed.
    while (!BucketsOperation::_cht->_resize_lock->try_lock())
      { /* for ever */ };
    this->thread_owns_resize_lock(thread);
  }
};
120
121
// For doing pausable/parallel bulk delete.
122
template <typename CONFIG, MEMFLAGS F>
123
class ConcurrentHashTable<CONFIG, F>::BulkDeleteTask :
124
public BucketsOperation
125
{
126
public:
127
BulkDeleteTask(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
128
: BucketsOperation(cht, is_mt) {
129
}
130
// Before start prepare must be called.
131
bool prepare(Thread* thread) {
132
bool lock = BucketsOperation::_cht->try_resize_lock(thread);
133
if (!lock) {
134
return false;
135
}
136
this->setup(thread);
137
return true;
138
}
139
140
// Does one range destroying all matching EVALUATE_FUNC and
141
// DELETE_FUNC is called be destruction. Returns true if there is more work.
142
template <typename EVALUATE_FUNC, typename DELETE_FUNC>
143
bool do_task(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f) {
144
size_t start, stop;
145
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
146
"Should be locked");
147
if (!this->claim(&start, &stop)) {
148
return false;
149
}
150
BucketsOperation::_cht->do_bulk_delete_locked_for(thread, start, stop,
151
eval_f, del_f,
152
BucketsOperation::_is_mt);
153
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
154
"Should be locked");
155
return true;
156
}
157
158
// Must be called after ranges are done.
159
void done(Thread* thread) {
160
this->thread_owns_resize_lock(thread);
161
BucketsOperation::_cht->unlock_resize_lock(thread);
162
this->thread_do_not_own_resize_lock(thread);
163
}
164
};
165
166
template <typename CONFIG, MEMFLAGS F>
167
class ConcurrentHashTable<CONFIG, F>::GrowTask :
168
public BucketsOperation
169
{
170
public:
171
GrowTask(ConcurrentHashTable<CONFIG, F>* cht) : BucketsOperation(cht) {
172
}
173
// Before start prepare must be called.
174
bool prepare(Thread* thread) {
175
if (!BucketsOperation::_cht->internal_grow_prolog(
176
thread, BucketsOperation::_cht->_log2_size_limit)) {
177
return false;
178
}
179
this->setup(thread);
180
return true;
181
}
182
183
// Re-sizes a portion of the table. Returns true if there is more work.
184
bool do_task(Thread* thread) {
185
size_t start, stop;
186
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
187
"Should be locked");
188
if (!this->claim(&start, &stop)) {
189
return false;
190
}
191
BucketsOperation::_cht->internal_grow_range(thread, start, stop);
192
assert(BucketsOperation::_cht->_resize_lock_owner != NULL,
193
"Should be locked");
194
return true;
195
}
196
197
// Must be called after do_task returns false.
198
void done(Thread* thread) {
199
this->thread_owns_resize_lock(thread);
200
BucketsOperation::_cht->internal_grow_epilog(thread);
201
this->thread_do_not_own_resize_lock(thread);
202
}
203
};
204
205
#endif // SHARE_UTILITIES_CONCURRENTHASHTABLETASKS_INLINE_HPP
206
207