PojavLauncherTeam
GitHub Repository: PojavLauncherTeam/openjdk-multiarch-jdk8u
Path: blob/aarch64-shenandoah-jdk8u272-b10/jdk/src/share/classes/java/util/HashMap.java
/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package java.util;

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
import sun.misc.SharedSecrets;

/**
 * Hash table based implementation of the <tt>Map</tt> interface. This
 * implementation provides all of the optional map operations, and permits
 * <tt>null</tt> values and the <tt>null</tt> key. (The <tt>HashMap</tt>
 * class is roughly equivalent to <tt>Hashtable</tt>, except that it is
 * unsynchronized and permits nulls.) This class makes no guarantees as to
 * the order of the map; in particular, it does not guarantee that the order
 * will remain constant over time.
 *
 * <p>This implementation provides constant-time performance for the basic
 * operations (<tt>get</tt> and <tt>put</tt>), assuming the hash function
 * disperses the elements properly among the buckets. Iteration over
 * collection views requires time proportional to the "capacity" of the
 * <tt>HashMap</tt> instance (the number of buckets) plus its size (the number
 * of key-value mappings). Thus, it's very important not to set the initial
 * capacity too high (or the load factor too low) if iteration performance is
 * important.
 *
 * <p>An instance of <tt>HashMap</tt> has two parameters that affect its
 * performance: <i>initial capacity</i> and <i>load factor</i>. The
 * <i>capacity</i> is the number of buckets in the hash table, and the initial
 * capacity is simply the capacity at the time the hash table is created. The
 * <i>load factor</i> is a measure of how full the hash table is allowed to
 * get before its capacity is automatically increased. When the number of
 * entries in the hash table exceeds the product of the load factor and the
 * current capacity, the hash table is <i>rehashed</i> (that is, internal data
 * structures are rebuilt) so that the hash table has approximately twice the
 * number of buckets.
 *
 * <p>As a general rule, the default load factor (.75) offers a good
 * tradeoff between time and space costs. Higher values decrease the
 * space overhead but increase the lookup cost (reflected in most of
 * the operations of the <tt>HashMap</tt> class, including
 * <tt>get</tt> and <tt>put</tt>). The expected number of entries in
 * the map and its load factor should be taken into account when
 * setting its initial capacity, so as to minimize the number of
 * rehash operations. If the initial capacity is greater than the
 * maximum number of entries divided by the load factor, no rehash
 * operations will ever occur.
 *
 * <p>If many mappings are to be stored in a <tt>HashMap</tt>
 * instance, creating it with a sufficiently large capacity will allow
 * the mappings to be stored more efficiently than letting it perform
 * automatic rehashing as needed to grow the table. Note that using
 * many keys with the same {@code hashCode()} is a sure way to slow
 * down performance of any hash table. To ameliorate impact, when keys
 * are {@link Comparable}, this class may use comparison order among
 * keys to help break ties.
 *
 * <p><strong>Note that this implementation is not synchronized.</strong>
 * If multiple threads access a hash map concurrently, and at least one of
 * the threads modifies the map structurally, it <i>must</i> be
 * synchronized externally. (A structural modification is any operation
 * that adds or deletes one or more mappings; merely changing the value
 * associated with a key that an instance already contains is not a
 * structural modification.) This is typically accomplished by
 * synchronizing on some object that naturally encapsulates the map.
 *
 * If no such object exists, the map should be "wrapped" using the
 * {@link Collections#synchronizedMap Collections.synchronizedMap}
 * method. This is best done at creation time, to prevent accidental
 * unsynchronized access to the map:<pre>
 *   Map m = Collections.synchronizedMap(new HashMap(...));</pre>
 *
 * <p>The iterators returned by all of this class's "collection view methods"
 * are <i>fail-fast</i>: if the map is structurally modified at any time after
 * the iterator is created, in any way except through the iterator's own
 * <tt>remove</tt> method, the iterator will throw a
 * {@link ConcurrentModificationException}. Thus, in the face of concurrent
 * modification, the iterator fails quickly and cleanly, rather than risking
 * arbitrary, non-deterministic behavior at an undetermined time in the
 * future.
 *
 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed
 * as it is, generally speaking, impossible to make any hard guarantees in the
 * presence of unsynchronized concurrent modification. Fail-fast iterators
 * throw <tt>ConcurrentModificationException</tt> on a best-effort basis.
 * Therefore, it would be wrong to write a program that depended on this
 * exception for its correctness: <i>the fail-fast behavior of iterators
 * should be used only to detect bugs.</i>
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 *
 * @author  Doug Lea
 * @author  Josh Bloch
 * @author  Arthur van Hoff
 * @author  Neal Gafter
 * @see     Object#hashCode()
 * @see     Collection
 * @see     Map
 * @see     TreeMap
 * @see     Hashtable
 * @since   1.2
 */

public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;
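
    /*
     * Illustrative usage sketch (not part of the upstream source): the class
     * javadoc above recommends pre-sizing when many mappings are expected and
     * external synchronization for concurrent use. Assuming roughly 1000
     * expected entries and the default 0.75 load factor:
     *
     *   // 2048 >= 1000 / 0.75, so no rehash ever occurs while filling the map
     *   Map<String, Integer> scores = new HashMap<>(2048);
     *
     *   // wrap at creation time when several threads may modify the map
     *   Map<String, Integer> shared =
     *       Collections.synchronizedMap(new HashMap<String, Integer>());
     */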

    /*
     * Implementation notes.
     *
     * This map usually acts as a binned (bucketed) hash table, but
     * when bins get too large, they are transformed into bins of
     * TreeNodes, each structured similarly to those in
     * java.util.TreeMap. Most methods try to use normal bins, but
     * relay to TreeNode methods when applicable (simply by checking
     * instanceof a node). Bins of TreeNodes may be traversed and
     * used like any others, but additionally support faster lookup
     * when overpopulated. However, since the vast majority of bins in
     * normal use are not overpopulated, checking for existence of
     * tree bins may be delayed in the course of table methods.
     *
     * Tree bins (i.e., bins whose elements are all TreeNodes) are
     * ordered primarily by hashCode, but in the case of ties, if two
     * elements are of the same "class C implements Comparable<C>"
     * type, then their compareTo method is used for ordering. (We
     * conservatively check generic types via reflection to validate
     * this -- see method comparableClassFor). The added complexity
     * of tree bins is worthwhile in providing worst-case O(log n)
     * operations when keys either have distinct hashes or are
     * orderable. Thus, performance degrades gracefully under
     * accidental or malicious usages in which hashCode() methods
     * return values that are poorly distributed, as well as those in
     * which many keys share a hashCode, so long as they are also
     * Comparable. (If neither of these apply, we may waste about a
     * factor of two in time and space compared to taking no
     * precautions. But the only known cases stem from poor user
     * programming practices that are already so slow that this makes
     * little difference.)
     *
     * Because TreeNodes are about twice the size of regular nodes, we
     * use them only when bins contain enough nodes to warrant use
     * (see TREEIFY_THRESHOLD). And when they become too small (due to
     * removal or resizing) they are converted back to plain bins. In
     * usages with well-distributed user hashCodes, tree bins are
     * rarely used. Ideally, under random hashCodes, the frequency of
     * nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average for the default resizing
     * threshold of 0.75, although with a large variance because of
     * resizing granularity. Ignoring variance, the expected
     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
     * factorial(k)). The first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * The root of a tree bin is normally its first node. However,
     * sometimes (currently only upon Iterator.remove), the root might
     * be elsewhere, but can be recovered following parent links
     * (method TreeNode.root()).
     *
     * All applicable internal methods accept a hash code as an
     * argument (as normally supplied from a public method), allowing
     * them to call each other without recomputing user hashCodes.
     * Most internal methods also accept a "tab" argument, that is
     * normally the current table, but may be a new or old one when
     * resizing or converting.
     *
     * When bin lists are treeified, split, or untreeified, we keep
     * them in the same relative access/traversal order (i.e., field
     * Node.next) to better preserve locality, and to slightly
     * simplify handling of splits and traversals that invoke
     * iterator.remove. When using comparators on insertion, to keep a
     * total ordering (or as close as is required here) across
     * rebalancings, we compare classes and identityHashCodes as
     * tie-breakers.
     *
     * The use and transitions among plain vs tree modes are
     * complicated by the existence of subclass LinkedHashMap. See
     * below for hook methods defined to be invoked upon insertion,
     * removal and access that allow LinkedHashMap internals to
     * otherwise remain independent of these mechanics. (This also
     * requires that a map instance be passed to some utility methods
     * that may create new nodes.)
     *
     * The concurrent-programming-like SSA-based coding style helps
     * avoid aliasing errors amid all of the twisty pointer operations.
     */
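
    /*
     * Illustrative sketch (not part of the upstream source): the bin-size
     * frequencies quoted above follow the Poisson mass function
     * exp(-0.5) * pow(0.5, k) / factorial(k); a few lines of plain Java
     * reproduce the table:
     *
     *   double lambda = 0.5, p = Math.exp(-lambda);   // P(k = 0)
     *   for (int k = 0; k <= 8; k++) {
     *       System.out.printf("%d: %.8f%n", k, p);
     *       p *= lambda / (k + 1);                    // next Poisson term
     *   }
     *
     * k == 8 already comes out near 6e-8, matching the "less than 1 in ten
     * million" remark.
     */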

    /**
     * The default initial capacity - MUST be a power of two.
     */
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    /**
     * The maximum capacity, used if a higher value is implicitly specified
     * by either of the constructors with arguments.
     * MUST be a power of two <= 1<<30.
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The load factor used when none specified in constructor.
     */
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin. Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2 and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
     * between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;
274
275
/**
276
* Basic hash bin node, used for most entries. (See below for
277
* TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
278
*/
279
static class Node<K,V> implements Map.Entry<K,V> {
280
final int hash;
281
final K key;
282
V value;
283
Node<K,V> next;
284
285
Node(int hash, K key, V value, Node<K,V> next) {
286
this.hash = hash;
287
this.key = key;
288
this.value = value;
289
this.next = next;
290
}
291
292
public final K getKey() { return key; }
293
public final V getValue() { return value; }
294
public final String toString() { return key + "=" + value; }
295
296
public final int hashCode() {
297
return Objects.hashCode(key) ^ Objects.hashCode(value);
298
}
299
300
public final V setValue(V newValue) {
301
V oldValue = value;
302
value = newValue;
303
return oldValue;
304
}
305
306
public final boolean equals(Object o) {
307
if (o == this)
308
return true;
309
if (o instanceof Map.Entry) {
310
Map.Entry<?,?> e = (Map.Entry<?,?>)o;
311
if (Objects.equals(key, e.getKey()) &&
312
Objects.equals(value, e.getValue()))
313
return true;
314
}
315
return false;
316
}
317
}
318
319
/* ---------------- Static utilities -------------- */
320
321
/**
322
* Computes key.hashCode() and spreads (XORs) higher bits of hash
323
* to lower. Because the table uses power-of-two masking, sets of
324
* hashes that vary only in bits above the current mask will
325
* always collide. (Among known examples are sets of Float keys
326
* holding consecutive whole numbers in small tables.) So we
327
* apply a transform that spreads the impact of higher bits
328
* downward. There is a tradeoff between speed, utility, and
329
* quality of bit-spreading. Because many common sets of hashes
330
* are already reasonably distributed (so don't benefit from
331
* spreading), and because we use trees to handle large sets of
332
* collisions in bins, we just XOR some shifted bits in the
333
* cheapest possible way to reduce systematic lossage, as well as
334
* to incorporate impact of the highest bits that would otherwise
335
* never be used in index calculations because of table bounds.
336
*/
337
static final int hash(Object key) {
338
int h;
339
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
340
}
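
    /*
     * Worked example (hypothetical values): with the default 16-bucket table
     * only the low 4 bits of the hash select a bucket, so keys that differ
     * only in higher bits would always collide without the spreading above:
     *
     *   int h1 = 0x00010001, h2 = 0x00020001;     // differ only above bit 15
     *   // raw:    (h1 & 15) == 1, (h2 & 15) == 1             -> same bucket
     *   // spread: ((h1 ^ (h1 >>> 16)) & 15) == 0,
     *   //         ((h2 ^ (h2 >>> 16)) & 15) == 3             -> different buckets
     */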
341
342
/**
343
* Returns x's Class if it is of the form "class C implements
344
* Comparable<C>", else null.
345
*/
346
static Class<?> comparableClassFor(Object x) {
347
if (x instanceof Comparable) {
348
Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
349
if ((c = x.getClass()) == String.class) // bypass checks
350
return c;
351
if ((ts = c.getGenericInterfaces()) != null) {
352
for (int i = 0; i < ts.length; ++i) {
353
if (((t = ts[i]) instanceof ParameterizedType) &&
354
((p = (ParameterizedType)t).getRawType() ==
355
Comparable.class) &&
356
(as = p.getActualTypeArguments()) != null &&
357
as.length == 1 && as[0] == c) // type arg is c
358
return c;
359
}
360
}
361
}
362
return null;
363
}
364
365
/**
366
* Returns k.compareTo(x) if x matches kc (k's screened comparable
367
* class), else 0.
368
*/
369
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
370
static int compareComparables(Class<?> kc, Object k, Object x) {
371
return (x == null || x.getClass() != kc ? 0 :
372
((Comparable)k).compareTo(x));
373
}
374
375
/**
376
* Returns a power of two size for the given target capacity.
377
*/
378
static final int tableSizeFor(int cap) {
379
int n = cap - 1;
380
n |= n >>> 1;
381
n |= n >>> 2;
382
n |= n >>> 4;
383
n |= n >>> 8;
384
n |= n >>> 16;
385
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
386
}
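
    /*
     * Worked examples (illustrative): the shifts above smear the highest set
     * bit of cap-1 into every lower position, so adding one yields the
     * smallest power of two >= cap:
     *
     *   tableSizeFor(0)    == 1      // the (n < 0) guard
     *   tableSizeFor(16)   == 16     // already a power of two
     *   tableSizeFor(17)   == 32
     *   tableSizeFor(1000) == 1024
     */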
387
388
/* ---------------- Fields -------------- */
389
390
/**
391
* The table, initialized on first use, and resized as
392
* necessary. When allocated, length is always a power of two.
393
* (We also tolerate length zero in some operations to allow
394
* bootstrapping mechanics that are currently not needed.)
395
*/
396
transient Node<K,V>[] table;
397
398
/**
399
* Holds cached entrySet(). Note that AbstractMap fields are used
400
* for keySet() and values().
401
*/
402
transient Set<Map.Entry<K,V>> entrySet;
403
404
/**
405
* The number of key-value mappings contained in this map.
406
*/
407
transient int size;
408
409
/**
410
* The number of times this HashMap has been structurally modified.
411
* Structural modifications are those that change the number of mappings in
412
* the HashMap or otherwise modify its internal structure (e.g.,
413
* rehash). This field is used to make iterators on Collection-views of
414
* the HashMap fail-fast. (See ConcurrentModificationException).
415
*/
416
transient int modCount;
417
418
/**
419
* The next size value at which to resize (capacity * load factor).
420
*
421
* @serial
422
*/
423
// (The javadoc description is true upon serialization.
424
// Additionally, if the table array has not been allocated, this
425
// field holds the initial array capacity, or zero signifying
426
// DEFAULT_INITIAL_CAPACITY.)
427
int threshold;
428
429
/**
430
* The load factor for the hash table.
431
*
432
* @serial
433
*/
434
final float loadFactor;
435
436
/* ---------------- Public operations -------------- */
437
438
/**
439
* Constructs an empty <tt>HashMap</tt> with the specified initial
440
* capacity and load factor.
441
*
442
* @param initialCapacity the initial capacity
443
* @param loadFactor the load factor
444
* @throws IllegalArgumentException if the initial capacity is negative
445
* or the load factor is nonpositive
446
*/
447
public HashMap(int initialCapacity, float loadFactor) {
448
if (initialCapacity < 0)
449
throw new IllegalArgumentException("Illegal initial capacity: " +
450
initialCapacity);
451
if (initialCapacity > MAXIMUM_CAPACITY)
452
initialCapacity = MAXIMUM_CAPACITY;
453
if (loadFactor <= 0 || Float.isNaN(loadFactor))
454
throw new IllegalArgumentException("Illegal load factor: " +
455
loadFactor);
456
this.loadFactor = loadFactor;
457
this.threshold = tableSizeFor(initialCapacity);
458
}
459
460
/**
461
* Constructs an empty <tt>HashMap</tt> with the specified initial
462
* capacity and the default load factor (0.75).
463
*
464
* @param initialCapacity the initial capacity.
465
* @throws IllegalArgumentException if the initial capacity is negative.
466
*/
467
public HashMap(int initialCapacity) {
468
this(initialCapacity, DEFAULT_LOAD_FACTOR);
469
}
470
471
/**
472
* Constructs an empty <tt>HashMap</tt> with the default initial capacity
473
* (16) and the default load factor (0.75).
474
*/
475
public HashMap() {
476
this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
477
}
478
479
/**
480
* Constructs a new <tt>HashMap</tt> with the same mappings as the
481
* specified <tt>Map</tt>. The <tt>HashMap</tt> is created with
482
* default load factor (0.75) and an initial capacity sufficient to
483
* hold the mappings in the specified <tt>Map</tt>.
484
*
485
* @param m the map whose mappings are to be placed in this map
486
* @throws NullPointerException if the specified map is null
487
*/
488
public HashMap(Map<? extends K, ? extends V> m) {
489
this.loadFactor = DEFAULT_LOAD_FACTOR;
490
putMapEntries(m, false);
491
}
492
493
/**
494
* Implements Map.putAll and Map constructor.
495
*
496
* @param m the map
497
* @param evict false when initially constructing this map, else
498
* true (relayed to method afterNodeInsertion).
499
*/
500
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
501
int s = m.size();
502
if (s > 0) {
503
if (table == null) { // pre-size
504
float ft = ((float)s / loadFactor) + 1.0F;
505
int t = ((ft < (float)MAXIMUM_CAPACITY) ?
506
(int)ft : MAXIMUM_CAPACITY);
507
if (t > threshold)
508
threshold = tableSizeFor(t);
509
}
510
else if (s > threshold)
511
resize();
512
for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
513
K key = e.getKey();
514
V value = e.getValue();
515
putVal(hash(key), key, value, false, evict);
516
}
517
}
518
}
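
    /*
     * Sizing sketch (illustrative): copying a 100-entry map into a freshly
     * constructed HashMap with the default 0.75 load factor computes
     * ft = 100 / 0.75 + 1 = 134.3..., so t == 134 and threshold becomes
     * tableSizeFor(134) == 256. The first insertion then allocates a
     * 256-bucket table, so the copy itself never triggers a resize.
     */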
519
520
/**
521
* Returns the number of key-value mappings in this map.
522
*
523
* @return the number of key-value mappings in this map
524
*/
525
public int size() {
526
return size;
527
}
528
529
/**
530
* Returns <tt>true</tt> if this map contains no key-value mappings.
531
*
532
* @return <tt>true</tt> if this map contains no key-value mappings
533
*/
534
public boolean isEmpty() {
535
return size == 0;
536
}
537
538
/**
539
* Returns the value to which the specified key is mapped,
540
* or {@code null} if this map contains no mapping for the key.
541
*
542
* <p>More formally, if this map contains a mapping from a key
543
* {@code k} to a value {@code v} such that {@code (key==null ? k==null :
544
* key.equals(k))}, then this method returns {@code v}; otherwise
545
* it returns {@code null}. (There can be at most one such mapping.)
546
*
547
* <p>A return value of {@code null} does not <i>necessarily</i>
548
* indicate that the map contains no mapping for the key; it's also
549
* possible that the map explicitly maps the key to {@code null}.
550
* The {@link #containsKey containsKey} operation may be used to
551
* distinguish these two cases.
552
*
553
* @see #put(Object, Object)
554
*/
555
public V get(Object key) {
556
Node<K,V> e;
557
return (e = getNode(hash(key), key)) == null ? null : e.value;
558
}
559
560
/**
561
* Implements Map.get and related methods.
562
*
563
* @param hash hash for key
564
* @param key the key
565
* @return the node, or null if none
566
*/
567
final Node<K,V> getNode(int hash, Object key) {
568
Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
569
if ((tab = table) != null && (n = tab.length) > 0 &&
570
(first = tab[(n - 1) & hash]) != null) {
571
if (first.hash == hash && // always check first node
572
((k = first.key) == key || (key != null && key.equals(k))))
573
return first;
574
if ((e = first.next) != null) {
575
if (first instanceof TreeNode)
576
return ((TreeNode<K,V>)first).getTreeNode(hash, key);
577
do {
578
if (e.hash == hash &&
579
((k = e.key) == key || (key != null && key.equals(k))))
580
return e;
581
} while ((e = e.next) != null);
582
}
583
}
584
return null;
585
}
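
    /*
     * Lookup sketch (hypothetical values): with n == 16 buckets the index is
     * (n - 1) & hash, i.e. the low four bits of the spread hash:
     *
     *   int hash = 0x2A;                 // binary 101010
     *   int index = (16 - 1) & hash;     // == 10
     *
     * Only the chain (or tree) rooted in bucket 10 is then searched,
     * comparing first by hash and then by == or equals().
     */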
586
587
/**
588
* Returns <tt>true</tt> if this map contains a mapping for the
589
* specified key.
590
*
591
* @param key The key whose presence in this map is to be tested
592
* @return <tt>true</tt> if this map contains a mapping for the specified
593
* key.
594
*/
595
public boolean containsKey(Object key) {
596
return getNode(hash(key), key) != null;
597
}
598
599
/**
600
* Associates the specified value with the specified key in this map.
601
* If the map previously contained a mapping for the key, the old
602
* value is replaced.
603
*
604
* @param key key with which the specified value is to be associated
605
* @param value value to be associated with the specified key
606
* @return the previous value associated with <tt>key</tt>, or
607
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
608
* (A <tt>null</tt> return can also indicate that the map
609
* previously associated <tt>null</tt> with <tt>key</tt>.)
610
*/
611
public V put(K key, V value) {
612
return putVal(hash(key), key, value, false, true);
613
}
614
615
/**
616
* Implements Map.put and related methods.
617
*
618
* @param hash hash for key
619
* @param key the key
620
* @param value the value to put
621
* @param onlyIfAbsent if true, don't change existing value
622
* @param evict if false, the table is in creation mode.
623
* @return previous value, or null if none
624
*/
625
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
626
boolean evict) {
627
Node<K,V>[] tab; Node<K,V> p; int n, i;
628
if ((tab = table) == null || (n = tab.length) == 0)
629
n = (tab = resize()).length;
630
if ((p = tab[i = (n - 1) & hash]) == null)
631
tab[i] = newNode(hash, key, value, null);
632
else {
633
Node<K,V> e; K k;
634
if (p.hash == hash &&
635
((k = p.key) == key || (key != null && key.equals(k))))
636
e = p;
637
else if (p instanceof TreeNode)
638
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
639
else {
640
for (int binCount = 0; ; ++binCount) {
641
if ((e = p.next) == null) {
642
p.next = newNode(hash, key, value, null);
643
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
644
treeifyBin(tab, hash);
645
break;
646
}
647
if (e.hash == hash &&
648
((k = e.key) == key || (key != null && key.equals(k))))
649
break;
650
p = e;
651
}
652
}
653
if (e != null) { // existing mapping for key
654
V oldValue = e.value;
655
if (!onlyIfAbsent || oldValue == null)
656
e.value = value;
657
afterNodeAccess(e);
658
return oldValue;
659
}
660
}
661
++modCount;
662
if (++size > threshold)
663
resize();
664
afterNodeInsertion(evict);
665
return null;
666
}
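
    /*
     * Usage sketch (illustrative): put returns the previous value, or null
     * if the key was absent (or was explicitly mapped to null):
     *
     *   HashMap<String, Integer> m = new HashMap<>();
     *   m.put("a", 1);     // returns null
     *   m.put("a", 2);     // returns 1, value replaced
     *
     * As the loop above shows, adding to a bin that already holds
     * TREEIFY_THRESHOLD (8) nodes calls treeifyBin, which converts the chain
     * to a tree only once the table has MIN_TREEIFY_CAPACITY (64) buckets
     * and resizes otherwise.
     */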
667
668
/**
669
* Initializes or doubles table size. If null, allocates in
670
* accord with initial capacity target held in field threshold.
671
* Otherwise, because we are using power-of-two expansion, the
672
* elements from each bin must either stay at same index, or move
673
* with a power of two offset in the new table.
674
*
675
* @return the table
676
*/
677
final Node<K,V>[] resize() {
678
Node<K,V>[] oldTab = table;
679
int oldCap = (oldTab == null) ? 0 : oldTab.length;
680
int oldThr = threshold;
681
int newCap, newThr = 0;
682
if (oldCap > 0) {
683
if (oldCap >= MAXIMUM_CAPACITY) {
684
threshold = Integer.MAX_VALUE;
685
return oldTab;
686
}
687
else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
688
oldCap >= DEFAULT_INITIAL_CAPACITY)
689
newThr = oldThr << 1; // double threshold
690
}
691
else if (oldThr > 0) // initial capacity was placed in threshold
692
newCap = oldThr;
693
else { // zero initial threshold signifies using defaults
694
newCap = DEFAULT_INITIAL_CAPACITY;
695
newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
696
}
697
if (newThr == 0) {
698
float ft = (float)newCap * loadFactor;
699
newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
700
(int)ft : Integer.MAX_VALUE);
701
}
702
threshold = newThr;
703
@SuppressWarnings({"rawtypes","unchecked"})
704
Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
705
table = newTab;
706
if (oldTab != null) {
707
for (int j = 0; j < oldCap; ++j) {
708
Node<K,V> e;
709
if ((e = oldTab[j]) != null) {
710
oldTab[j] = null;
711
if (e.next == null)
712
newTab[e.hash & (newCap - 1)] = e;
713
else if (e instanceof TreeNode)
714
((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
715
else { // preserve order
716
Node<K,V> loHead = null, loTail = null;
717
Node<K,V> hiHead = null, hiTail = null;
718
Node<K,V> next;
719
do {
720
next = e.next;
721
if ((e.hash & oldCap) == 0) {
722
if (loTail == null)
723
loHead = e;
724
else
725
loTail.next = e;
726
loTail = e;
727
}
728
else {
729
if (hiTail == null)
730
hiHead = e;
731
else
732
hiTail.next = e;
733
hiTail = e;
734
}
735
} while ((e = next) != null);
736
if (loTail != null) {
737
loTail.next = null;
738
newTab[j] = loHead;
739
}
740
if (hiTail != null) {
741
hiTail.next = null;
742
newTab[j + oldCap] = hiHead;
743
}
744
}
745
}
746
}
747
}
748
return newTab;
749
}
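
    /*
     * Split sketch (illustrative): growing from oldCap == 16 to 32 buckets,
     * each node either keeps its index j or moves to j + oldCap, decided by
     * the single bit (e.hash & oldCap):
     *
     *   hash 0x05: 0x05 & 16 == 0   -> stays in bucket 5
     *   hash 0x15: 0x15 & 16 != 0   -> moves to bucket 5 + 16 == 21
     *
     * This is why power-of-two expansion never needs to recompute hashes.
     */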
750
751
/**
752
* Replaces all linked nodes in bin at index for given hash unless
753
* table is too small, in which case resizes instead.
754
*/
755
final void treeifyBin(Node<K,V>[] tab, int hash) {
756
int n, index; Node<K,V> e;
757
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
758
resize();
759
else if ((e = tab[index = (n - 1) & hash]) != null) {
760
TreeNode<K,V> hd = null, tl = null;
761
do {
762
TreeNode<K,V> p = replacementTreeNode(e, null);
763
if (tl == null)
764
hd = p;
765
else {
766
p.prev = tl;
767
tl.next = p;
768
}
769
tl = p;
770
} while ((e = e.next) != null);
771
if ((tab[index] = hd) != null)
772
hd.treeify(tab);
773
}
774
}
775
776
/**
777
* Copies all of the mappings from the specified map to this map.
778
* These mappings will replace any mappings that this map had for
779
* any of the keys currently in the specified map.
780
*
781
* @param m mappings to be stored in this map
782
* @throws NullPointerException if the specified map is null
783
*/
784
public void putAll(Map<? extends K, ? extends V> m) {
785
putMapEntries(m, true);
786
}
787
788
/**
789
* Removes the mapping for the specified key from this map if present.
790
*
791
* @param key key whose mapping is to be removed from the map
792
* @return the previous value associated with <tt>key</tt>, or
793
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
794
* (A <tt>null</tt> return can also indicate that the map
795
* previously associated <tt>null</tt> with <tt>key</tt>.)
796
*/
797
public V remove(Object key) {
798
Node<K,V> e;
799
return (e = removeNode(hash(key), key, null, false, true)) == null ?
800
null : e.value;
801
}
802
803
/**
804
* Implements Map.remove and related methods.
805
*
806
* @param hash hash for key
807
* @param key the key
808
* @param value the value to match if matchValue, else ignored
809
* @param matchValue if true only remove if value is equal
810
* @param movable if false do not move other nodes while removing
811
* @return the node, or null if none
812
*/
813
final Node<K,V> removeNode(int hash, Object key, Object value,
814
boolean matchValue, boolean movable) {
815
Node<K,V>[] tab; Node<K,V> p; int n, index;
816
if ((tab = table) != null && (n = tab.length) > 0 &&
817
(p = tab[index = (n - 1) & hash]) != null) {
818
Node<K,V> node = null, e; K k; V v;
819
if (p.hash == hash &&
820
((k = p.key) == key || (key != null && key.equals(k))))
821
node = p;
822
else if ((e = p.next) != null) {
823
if (p instanceof TreeNode)
824
node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
825
else {
826
do {
827
if (e.hash == hash &&
828
((k = e.key) == key ||
829
(key != null && key.equals(k)))) {
830
node = e;
831
break;
832
}
833
p = e;
834
} while ((e = e.next) != null);
835
}
836
}
837
if (node != null && (!matchValue || (v = node.value) == value ||
838
(value != null && value.equals(v)))) {
839
if (node instanceof TreeNode)
840
((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
841
else if (node == p)
842
tab[index] = node.next;
843
else
844
p.next = node.next;
845
++modCount;
846
--size;
847
afterNodeRemoval(node);
848
return node;
849
}
850
}
851
return null;
852
}
853
854
/**
855
* Removes all of the mappings from this map.
856
* The map will be empty after this call returns.
857
*/
858
public void clear() {
859
Node<K,V>[] tab;
860
modCount++;
861
if ((tab = table) != null && size > 0) {
862
size = 0;
863
for (int i = 0; i < tab.length; ++i)
864
tab[i] = null;
865
}
866
}
867
868
/**
869
* Returns <tt>true</tt> if this map maps one or more keys to the
870
* specified value.
871
*
872
* @param value value whose presence in this map is to be tested
873
* @return <tt>true</tt> if this map maps one or more keys to the
874
* specified value
875
*/
876
public boolean containsValue(Object value) {
877
Node<K,V>[] tab; V v;
878
if ((tab = table) != null && size > 0) {
879
for (int i = 0; i < tab.length; ++i) {
880
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
881
if ((v = e.value) == value ||
882
(value != null && value.equals(v)))
883
return true;
884
}
885
}
886
}
887
return false;
888
}
889
890
/**
891
* Returns a {@link Set} view of the keys contained in this map.
892
* The set is backed by the map, so changes to the map are
893
* reflected in the set, and vice-versa. If the map is modified
894
* while an iteration over the set is in progress (except through
895
* the iterator's own <tt>remove</tt> operation), the results of
896
* the iteration are undefined. The set supports element removal,
897
* which removes the corresponding mapping from the map, via the
898
* <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
899
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
900
* operations. It does not support the <tt>add</tt> or <tt>addAll</tt>
901
* operations.
902
*
903
* @return a set view of the keys contained in this map
904
*/
905
public Set<K> keySet() {
906
Set<K> ks = keySet;
907
if (ks == null) {
908
ks = new KeySet();
909
keySet = ks;
910
}
911
return ks;
912
}
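
    /*
     * View sketch (illustrative): keySet(), values() and entrySet() are live
     * views, so removal through a view removes the mapping itself:
     *
     *   HashMap<String, Integer> m = new HashMap<>();
     *   m.put("a", 1);
     *   m.keySet().remove("a");       // m is now empty
     *   m.put("b", 2);
     *   m.values().contains(2);       // true, the view reflects the map
     */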
913
914
final class KeySet extends AbstractSet<K> {
915
public final int size() { return size; }
916
public final void clear() { HashMap.this.clear(); }
917
public final Iterator<K> iterator() { return new KeyIterator(); }
918
public final boolean contains(Object o) { return containsKey(o); }
919
public final boolean remove(Object key) {
920
return removeNode(hash(key), key, null, false, true) != null;
921
}
922
public final Spliterator<K> spliterator() {
923
return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
924
}
925
public final void forEach(Consumer<? super K> action) {
926
Node<K,V>[] tab;
927
if (action == null)
928
throw new NullPointerException();
929
if (size > 0 && (tab = table) != null) {
930
int mc = modCount;
931
for (int i = 0; i < tab.length; ++i) {
932
for (Node<K,V> e = tab[i]; e != null; e = e.next)
933
action.accept(e.key);
934
}
935
if (modCount != mc)
936
throw new ConcurrentModificationException();
937
}
938
}
939
}
940
941
/**
942
* Returns a {@link Collection} view of the values contained in this map.
943
* The collection is backed by the map, so changes to the map are
944
* reflected in the collection, and vice-versa. If the map is
945
* modified while an iteration over the collection is in progress
946
* (except through the iterator's own <tt>remove</tt> operation),
947
* the results of the iteration are undefined. The collection
948
* supports element removal, which removes the corresponding
949
* mapping from the map, via the <tt>Iterator.remove</tt>,
950
* <tt>Collection.remove</tt>, <tt>removeAll</tt>,
951
* <tt>retainAll</tt> and <tt>clear</tt> operations. It does not
952
* support the <tt>add</tt> or <tt>addAll</tt> operations.
953
*
954
* @return a view of the values contained in this map
955
*/
956
public Collection<V> values() {
957
Collection<V> vs = values;
958
if (vs == null) {
959
vs = new Values();
960
values = vs;
961
}
962
return vs;
963
}
964
965
final class Values extends AbstractCollection<V> {
966
public final int size() { return size; }
967
public final void clear() { HashMap.this.clear(); }
968
public final Iterator<V> iterator() { return new ValueIterator(); }
969
public final boolean contains(Object o) { return containsValue(o); }
970
public final Spliterator<V> spliterator() {
971
return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0);
972
}
973
public final void forEach(Consumer<? super V> action) {
974
Node<K,V>[] tab;
975
if (action == null)
976
throw new NullPointerException();
977
if (size > 0 && (tab = table) != null) {
978
int mc = modCount;
979
for (int i = 0; i < tab.length; ++i) {
980
for (Node<K,V> e = tab[i]; e != null; e = e.next)
981
action.accept(e.value);
982
}
983
if (modCount != mc)
984
throw new ConcurrentModificationException();
985
}
986
}
987
}
988
989
/**
990
* Returns a {@link Set} view of the mappings contained in this map.
991
* The set is backed by the map, so changes to the map are
992
* reflected in the set, and vice-versa. If the map is modified
993
* while an iteration over the set is in progress (except through
994
* the iterator's own <tt>remove</tt> operation, or through the
995
* <tt>setValue</tt> operation on a map entry returned by the
996
* iterator) the results of the iteration are undefined. The set
997
* supports element removal, which removes the corresponding
998
* mapping from the map, via the <tt>Iterator.remove</tt>,
999
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
1000
* <tt>clear</tt> operations. It does not support the
1001
* <tt>add</tt> or <tt>addAll</tt> operations.
1002
*
1003
* @return a set view of the mappings contained in this map
1004
*/
1005
public Set<Map.Entry<K,V>> entrySet() {
1006
Set<Map.Entry<K,V>> es;
1007
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
1008
}
1009
1010
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
1011
public final int size() { return size; }
1012
public final void clear() { HashMap.this.clear(); }
1013
public final Iterator<Map.Entry<K,V>> iterator() {
1014
return new EntryIterator();
1015
}
1016
public final boolean contains(Object o) {
1017
if (!(o instanceof Map.Entry))
1018
return false;
1019
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
1020
Object key = e.getKey();
1021
Node<K,V> candidate = getNode(hash(key), key);
1022
return candidate != null && candidate.equals(e);
1023
}
1024
public final boolean remove(Object o) {
1025
if (o instanceof Map.Entry) {
1026
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
1027
Object key = e.getKey();
1028
Object value = e.getValue();
1029
return removeNode(hash(key), key, value, true, true) != null;
1030
}
1031
return false;
1032
}
1033
public final Spliterator<Map.Entry<K,V>> spliterator() {
1034
return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
1035
}
1036
public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
1037
Node<K,V>[] tab;
1038
if (action == null)
1039
throw new NullPointerException();
1040
if (size > 0 && (tab = table) != null) {
1041
int mc = modCount;
1042
for (int i = 0; i < tab.length; ++i) {
1043
for (Node<K,V> e = tab[i]; e != null; e = e.next)
1044
action.accept(e);
1045
}
1046
if (modCount != mc)
1047
throw new ConcurrentModificationException();
1048
}
1049
}
1050
}
1051
1052
// Overrides of JDK8 Map extension methods
1053
1054
@Override
1055
public V getOrDefault(Object key, V defaultValue) {
1056
Node<K,V> e;
1057
return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
1058
}
1059
1060
@Override
1061
public V putIfAbsent(K key, V value) {
1062
return putVal(hash(key), key, value, true, true);
1063
}
1064
1065
@Override
1066
public boolean remove(Object key, Object value) {
1067
return removeNode(hash(key), key, value, true, true) != null;
1068
}
1069
1070
@Override
1071
public boolean replace(K key, V oldValue, V newValue) {
1072
Node<K,V> e; V v;
1073
if ((e = getNode(hash(key), key)) != null &&
1074
((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
1075
e.value = newValue;
1076
afterNodeAccess(e);
1077
return true;
1078
}
1079
return false;
1080
}
1081
1082
@Override
1083
public V replace(K key, V value) {
1084
Node<K,V> e;
1085
if ((e = getNode(hash(key), key)) != null) {
1086
V oldValue = e.value;
1087
e.value = value;
1088
afterNodeAccess(e);
1089
return oldValue;
1090
}
1091
return null;
1092
}
1093
1094
@Override
1095
public V computeIfAbsent(K key,
1096
Function<? super K, ? extends V> mappingFunction) {
1097
if (mappingFunction == null)
1098
throw new NullPointerException();
1099
int hash = hash(key);
1100
Node<K,V>[] tab; Node<K,V> first; int n, i;
1101
int binCount = 0;
1102
TreeNode<K,V> t = null;
1103
Node<K,V> old = null;
1104
if (size > threshold || (tab = table) == null ||
1105
(n = tab.length) == 0)
1106
n = (tab = resize()).length;
1107
if ((first = tab[i = (n - 1) & hash]) != null) {
1108
if (first instanceof TreeNode)
1109
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1110
else {
1111
Node<K,V> e = first; K k;
1112
do {
1113
if (e.hash == hash &&
1114
((k = e.key) == key || (key != null && key.equals(k)))) {
1115
old = e;
1116
break;
1117
}
1118
++binCount;
1119
} while ((e = e.next) != null);
1120
}
1121
V oldValue;
1122
if (old != null && (oldValue = old.value) != null) {
1123
afterNodeAccess(old);
1124
return oldValue;
1125
}
1126
}
1127
V v = mappingFunction.apply(key);
1128
if (v == null) {
1129
return null;
1130
} else if (old != null) {
1131
old.value = v;
1132
afterNodeAccess(old);
1133
return v;
1134
}
1135
else if (t != null)
1136
t.putTreeVal(this, tab, hash, key, v);
1137
else {
1138
tab[i] = newNode(hash, key, v, first);
1139
if (binCount >= TREEIFY_THRESHOLD - 1)
1140
treeifyBin(tab, hash);
1141
}
1142
++modCount;
1143
++size;
1144
afterNodeInsertion(true);
1145
return v;
1146
}
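
    /*
     * Usage sketch (illustrative): computeIfAbsent builds values on demand,
     * and, as the code above shows, a mapping function that returns null
     * leaves the map unchanged:
     *
     *   HashMap<String, List<Integer>> byKey = new HashMap<>();
     *   byKey.computeIfAbsent("a", k -> new ArrayList<>()).add(1);
     *   byKey.computeIfAbsent("a", k -> new ArrayList<>()).add(2);
     *   // byKey.get("a") is [1, 2]; the list was created only once
     */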
1147
1148
@Override
public V computeIfPresent(K key,
1149
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1150
if (remappingFunction == null)
1151
throw new NullPointerException();
1152
Node<K,V> e; V oldValue;
1153
int hash = hash(key);
1154
if ((e = getNode(hash, key)) != null &&
1155
(oldValue = e.value) != null) {
1156
V v = remappingFunction.apply(key, oldValue);
1157
if (v != null) {
1158
e.value = v;
1159
afterNodeAccess(e);
1160
return v;
1161
}
1162
else
1163
removeNode(hash, key, null, false, true);
1164
}
1165
return null;
1166
}
1167
1168
@Override
1169
public V compute(K key,
1170
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1171
if (remappingFunction == null)
1172
throw new NullPointerException();
1173
int hash = hash(key);
1174
Node<K,V>[] tab; Node<K,V> first; int n, i;
1175
int binCount = 0;
1176
TreeNode<K,V> t = null;
1177
Node<K,V> old = null;
1178
if (size > threshold || (tab = table) == null ||
1179
(n = tab.length) == 0)
1180
n = (tab = resize()).length;
1181
if ((first = tab[i = (n - 1) & hash]) != null) {
1182
if (first instanceof TreeNode)
1183
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1184
else {
1185
Node<K,V> e = first; K k;
1186
do {
1187
if (e.hash == hash &&
1188
((k = e.key) == key || (key != null && key.equals(k)))) {
1189
old = e;
1190
break;
1191
}
1192
++binCount;
1193
} while ((e = e.next) != null);
1194
}
1195
}
1196
V oldValue = (old == null) ? null : old.value;
1197
V v = remappingFunction.apply(key, oldValue);
1198
if (old != null) {
1199
if (v != null) {
1200
old.value = v;
1201
afterNodeAccess(old);
1202
}
1203
else
1204
removeNode(hash, key, null, false, true);
1205
}
1206
else if (v != null) {
1207
if (t != null)
1208
t.putTreeVal(this, tab, hash, key, v);
1209
else {
1210
tab[i] = newNode(hash, key, v, first);
1211
if (binCount >= TREEIFY_THRESHOLD - 1)
1212
treeifyBin(tab, hash);
1213
}
1214
++modCount;
1215
++size;
1216
afterNodeInsertion(true);
1217
}
1218
return v;
1219
}
1220
1221
@Override
1222
public V merge(K key, V value,
1223
BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1224
if (value == null)
1225
throw new NullPointerException();
1226
if (remappingFunction == null)
1227
throw new NullPointerException();
1228
int hash = hash(key);
1229
Node<K,V>[] tab; Node<K,V> first; int n, i;
1230
int binCount = 0;
1231
TreeNode<K,V> t = null;
1232
Node<K,V> old = null;
1233
if (size > threshold || (tab = table) == null ||
1234
(n = tab.length) == 0)
1235
n = (tab = resize()).length;
1236
if ((first = tab[i = (n - 1) & hash]) != null) {
1237
if (first instanceof TreeNode)
1238
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
1239
else {
1240
Node<K,V> e = first; K k;
1241
do {
1242
if (e.hash == hash &&
1243
((k = e.key) == key || (key != null && key.equals(k)))) {
1244
old = e;
1245
break;
1246
}
1247
++binCount;
1248
} while ((e = e.next) != null);
1249
}
1250
}
1251
if (old != null) {
1252
V v;
1253
if (old.value != null)
1254
v = remappingFunction.apply(old.value, value);
1255
else
1256
v = value;
1257
if (v != null) {
1258
old.value = v;
1259
afterNodeAccess(old);
1260
}
1261
else
1262
removeNode(hash, key, null, false, true);
1263
return v;
1264
}
1265
if (value != null) {
1266
if (t != null)
1267
t.putTreeVal(this, tab, hash, key, value);
1268
else {
1269
tab[i] = newNode(hash, key, value, first);
1270
if (binCount >= TREEIFY_THRESHOLD - 1)
1271
treeifyBin(tab, hash);
1272
}
1273
++modCount;
1274
++size;
1275
afterNodeInsertion(true);
1276
}
1277
return value;
1278
}
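
    /*
     * Usage sketch (illustrative): merge is the idiomatic accumulator; a
     * remapping function returning null removes the entry, mirroring the
     * removeNode call above:
     *
     *   HashMap<String, Integer> counts = new HashMap<>();
     *   for (String w : new String[] { "a", "b", "a" })
     *       counts.merge(w, 1, Integer::sum);
     *   // counts is now {a=2, b=1}
     */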
1279
1280
@Override
1281
public void forEach(BiConsumer<? super K, ? super V> action) {
1282
Node<K,V>[] tab;
1283
if (action == null)
1284
throw new NullPointerException();
1285
if (size > 0 && (tab = table) != null) {
1286
int mc = modCount;
1287
for (int i = 0; i < tab.length; ++i) {
1288
for (Node<K,V> e = tab[i]; e != null; e = e.next)
1289
action.accept(e.key, e.value);
1290
}
1291
if (modCount != mc)
1292
throw new ConcurrentModificationException();
1293
}
1294
}
1295
1296
@Override
1297
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1298
Node<K,V>[] tab;
1299
if (function == null)
1300
throw new NullPointerException();
1301
if (size > 0 && (tab = table) != null) {
1302
int mc = modCount;
1303
for (int i = 0; i < tab.length; ++i) {
1304
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
1305
e.value = function.apply(e.key, e.value);
1306
}
1307
}
1308
if (modCount != mc)
1309
throw new ConcurrentModificationException();
1310
}
1311
}
1312
1313
/* ------------------------------------------------------------ */
1314
// Cloning and serialization
1315
1316
/**
1317
* Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and
1318
* values themselves are not cloned.
1319
*
1320
* @return a shallow copy of this map
1321
*/
1322
@SuppressWarnings("unchecked")
1323
@Override
1324
public Object clone() {
1325
HashMap<K,V> result;
1326
try {
1327
result = (HashMap<K,V>)super.clone();
1328
} catch (CloneNotSupportedException e) {
1329
// this shouldn't happen, since we are Cloneable
1330
throw new InternalError(e);
1331
}
1332
result.reinitialize();
1333
result.putMapEntries(this, false);
1334
return result;
1335
}
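
    /*
     * Usage sketch (illustrative): clone() copies the mappings but neither
     * the keys nor the values, so mutable values stay shared:
     *
     *   HashMap<String, StringBuilder> a = new HashMap<>();
     *   a.put("k", new StringBuilder("x"));
     *   HashMap<String, StringBuilder> b =
     *       (HashMap<String, StringBuilder>) a.clone();   // unchecked cast
     *   b.get("k").append("y");     // a.get("k") now also reads "xy"
     */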
1336
1337
// These methods are also used when serializing HashSets
1338
final float loadFactor() { return loadFactor; }
1339
final int capacity() {
1340
return (table != null) ? table.length :
1341
(threshold > 0) ? threshold :
1342
DEFAULT_INITIAL_CAPACITY;
1343
}
1344
1345
/**
1346
* Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
1347
* serialize it).
1348
*
1349
* @serialData The <i>capacity</i> of the HashMap (the length of the
1350
* bucket array) is emitted (int), followed by the
1351
* <i>size</i> (an int, the number of key-value
1352
* mappings), followed by the key (Object) and value (Object)
1353
* for each key-value mapping. The key-value mappings are
1354
* emitted in no particular order.
1355
*/
1356
private void writeObject(java.io.ObjectOutputStream s)
1357
throws IOException {
1358
int buckets = capacity();
1359
// Write out the threshold, loadfactor, and any hidden stuff
1360
s.defaultWriteObject();
1361
s.writeInt(buckets);
1362
s.writeInt(size);
1363
internalWriteEntries(s);
1364
}
1365
1366
/**
1367
* Reconstitutes this map from a stream (that is, deserializes it).
1368
* @param s the stream
1369
* @throws ClassNotFoundException if the class of a serialized object
1370
* could not be found
1371
* @throws IOException if an I/O error occurs
1372
*/
1373
private void readObject(java.io.ObjectInputStream s)
1374
throws IOException, ClassNotFoundException {
1375
// Read in the threshold (ignored), loadfactor, and any hidden stuff
1376
s.defaultReadObject();
1377
reinitialize();
1378
if (loadFactor <= 0 || Float.isNaN(loadFactor))
1379
throw new InvalidObjectException("Illegal load factor: " +
1380
loadFactor);
1381
s.readInt(); // Read and ignore number of buckets
1382
int mappings = s.readInt(); // Read number of mappings (size)
1383
if (mappings < 0)
1384
throw new InvalidObjectException("Illegal mappings count: " +
1385
mappings);
1386
else if (mappings > 0) { // (if zero, use defaults)
1387
// Size the table using given load factor only if within
1388
// range of 0.25...4.0
1389
float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
1390
float fc = (float)mappings / lf + 1.0f;
1391
int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
1392
DEFAULT_INITIAL_CAPACITY :
1393
(fc >= MAXIMUM_CAPACITY) ?
1394
MAXIMUM_CAPACITY :
1395
tableSizeFor((int)fc));
1396
float ft = (float)cap * lf;
1397
threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
1398
(int)ft : Integer.MAX_VALUE);
1399
1400
// Check Map.Entry[].class since it's the nearest public type to
1401
// what we're actually creating.
1402
SharedSecrets.getJavaOISAccess().checkArray(s, Map.Entry[].class, cap);
1403
@SuppressWarnings({"rawtypes","unchecked"})
1404
Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
1405
table = tab;
1406
1407
// Read the keys and values, and put the mappings in the HashMap
1408
for (int i = 0; i < mappings; i++) {
1409
@SuppressWarnings("unchecked")
1410
K key = (K) s.readObject();
1411
@SuppressWarnings("unchecked")
1412
V value = (V) s.readObject();
1413
putVal(hash(key), key, value, false, false);
1414
}
1415
}
1416
}
1417
1418
/* ------------------------------------------------------------ */
1419
// iterators
1420
1421
abstract class HashIterator {
1422
Node<K,V> next; // next entry to return
1423
Node<K,V> current; // current entry
1424
int expectedModCount; // for fast-fail
1425
int index; // current slot
1426
1427
HashIterator() {
1428
expectedModCount = modCount;
1429
Node<K,V>[] t = table;
1430
current = next = null;
1431
index = 0;
1432
if (t != null && size > 0) { // advance to first entry
1433
do {} while (index < t.length && (next = t[index++]) == null);
1434
}
1435
}
1436
1437
public final boolean hasNext() {
1438
return next != null;
1439
}
1440
1441
final Node<K,V> nextNode() {
1442
Node<K,V>[] t;
1443
Node<K,V> e = next;
1444
if (modCount != expectedModCount)
1445
throw new ConcurrentModificationException();
1446
if (e == null)
1447
throw new NoSuchElementException();
1448
if ((next = (current = e).next) == null && (t = table) != null) {
1449
do {} while (index < t.length && (next = t[index++]) == null);
1450
}
1451
return e;
1452
}
1453
1454
public final void remove() {
1455
Node<K,V> p = current;
1456
if (p == null)
1457
throw new IllegalStateException();
1458
if (modCount != expectedModCount)
1459
throw new ConcurrentModificationException();
1460
current = null;
1461
K key = p.key;
1462
removeNode(hash(key), key, null, false, false);
1463
expectedModCount = modCount;
1464
}
1465
}
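
    /*
     * Fail-fast sketch (illustrative; shouldDrop is a hypothetical
     * predicate): removing through the iterator resynchronizes
     * expectedModCount, whereas structural modification through the map
     * itself makes the next call on the iterator throw:
     *
     *   Iterator<String> it = map.keySet().iterator();
     *   while (it.hasNext()) {
     *       if (shouldDrop(it.next()))
     *           it.remove();         // safe: expectedModCount is updated
     *   }
     *   // calling map.remove(...) inside the loop instead would typically
     *   // make the next it.next() throw ConcurrentModificationException
     */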
1466
1467
final class KeyIterator extends HashIterator
1468
implements Iterator<K> {
1469
public final K next() { return nextNode().key; }
1470
}
1471
1472
final class ValueIterator extends HashIterator
1473
implements Iterator<V> {
1474
public final V next() { return nextNode().value; }
1475
}
1476
1477
final class EntryIterator extends HashIterator
1478
implements Iterator<Map.Entry<K,V>> {
1479
public final Map.Entry<K,V> next() { return nextNode(); }
1480
}
1481
1482
/* ------------------------------------------------------------ */
1483
// spliterators
1484
1485
static class HashMapSpliterator<K,V> {
1486
final HashMap<K,V> map;
1487
Node<K,V> current; // current node
1488
int index; // current index, modified on advance/split
1489
int fence; // one past last index
1490
int est; // size estimate
1491
int expectedModCount; // for comodification checks
1492
1493
HashMapSpliterator(HashMap<K,V> m, int origin,
1494
int fence, int est,
1495
int expectedModCount) {
1496
this.map = m;
1497
this.index = origin;
1498
this.fence = fence;
1499
this.est = est;
1500
this.expectedModCount = expectedModCount;
1501
}
1502
1503
final int getFence() { // initialize fence and size on first use
1504
int hi;
1505
if ((hi = fence) < 0) {
1506
HashMap<K,V> m = map;
1507
est = m.size;
1508
expectedModCount = m.modCount;
1509
Node<K,V>[] tab = m.table;
1510
hi = fence = (tab == null) ? 0 : tab.length;
1511
}
1512
return hi;
1513
}
1514
1515
public final long estimateSize() {
1516
getFence(); // force init
1517
return (long) est;
1518
}
1519
}
1520
1521
static final class KeySpliterator<K,V>
1522
extends HashMapSpliterator<K,V>
1523
implements Spliterator<K> {
1524
KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
1525
int expectedModCount) {
1526
super(m, origin, fence, est, expectedModCount);
1527
}
1528
1529
public KeySpliterator<K,V> trySplit() {
1530
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
1531
return (lo >= mid || current != null) ? null :
1532
new KeySpliterator<>(map, lo, index = mid, est >>>= 1,
1533
expectedModCount);
1534
}
1535
1536
public void forEachRemaining(Consumer<? super K> action) {
1537
int i, hi, mc;
1538
if (action == null)
1539
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.key);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.value);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }

    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<>(map, lo, index = mid, est >>>= 1,
                                       expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

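    /*
     * Usage sketch (illustrative): callers never construct these spliterators
     * directly; they are handed out by the collection views and drive the
     * Stream machinery, e.g.
     *
     *   HashMap<String,Integer> m = new HashMap<>();
     *   Spliterator<String> keys = m.keySet().spliterator();   // KeySpliterator
     *   long n = m.entrySet().parallelStream().count();        // EntrySpliterator, split via trySplit()
     *
     * SIZED is reported only while the size estimate still matches map.size,
     * and DISTINCT is reported for keys and entries but not for values, which
     * may contain duplicates.
     */
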
    /* ------------------------------------------------------------ */
    // LinkedHashMap support


    /*
     * The following package-protected methods are designed to be
     * overridden by LinkedHashMap, but not by any other subclass.
     * Nearly all other internal methods are also package-protected
     * but are declared final, so can be used by LinkedHashMap, view
     * classes, and HashSet.
     */

    // Create a regular (non-tree) node
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }

    // For conversion from TreeNodes to plain nodes
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }

    // Create a tree bin node
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }

    // For treeifyBin
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }

    /**
     * Reset to initial default state. Called by clone and readObject.
     */
    void reinitialize() {
        table = null;
        entrySet = null;
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }

    // Callbacks to allow LinkedHashMap post-actions
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }

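    /*
     * Illustrative note: LinkedHashMap relies on the hooks above to keep its
     * doubly-linked entry list consistent -- newNode/newTreeNode append the
     * new entry to that list, afterNodeAccess moves an entry to the tail in
     * access-order mode, afterNodeInsertion gives removeEldestEntry a chance
     * to evict, and afterNodeRemoval unlinks the removed entry.  The familiar
     * LRU-cache idiom is built entirely on this machinery (the capacity bound
     * of 100 below is just an example):
     *
     *   Map<String,String> lru = new LinkedHashMap<String,String>(16, 0.75f, true) {
     *       protected boolean removeEldestEntry(Map.Entry<String,String> eldest) {
     *           return size() > 100;
     *       }
     *   };
     */
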
    // Called only from writeObject, to ensure compatible ordering.
    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        Node<K,V>[] tab;
        if (size > 0 && (tab = table) != null) {
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next) {
                    s.writeObject(e.key);
                    s.writeObject(e.value);
                }
            }
        }
    }

    /* ------------------------------------------------------------ */
    // Tree bins

    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

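        /*
         * Illustrative sketch: tree bins are what keep lookups near O(log n)
         * under heavy hash collisions, provided the keys are mutually
         * Comparable.  A hypothetical key type such as the one below (constant
         * hashCode, natural ordering on its id) forces every entry into a
         * single bin; once that bin reaches TREEIFY_THRESHOLD (8) entries in a
         * table of at least MIN_TREEIFY_CAPACITY (64) slots, getTreeNode
         * navigates by compareTo instead of scanning a linked list.
         *
         *   final class CollidingKey implements Comparable<CollidingKey> {
         *       final int id;
         *       CollidingKey(int id) { this.id = id; }
         *       public int hashCode() { return 42; }   // every instance collides
         *       public boolean equals(Object o) {
         *           return o instanceof CollidingKey && ((CollidingKey) o).id == id;
         *       }
         *       public int compareTo(CollidingKey k) { return Integer.compare(id, k.id); }
         *   }
         */
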
        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

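        /*
         * Worked example (illustrative): for two distinct keys with equal
         * hashes that are not mutually Comparable, tieBreakOrder first
         * compares class names, so a colliding Integer key and String key
         * order by "java.lang.Integer" vs. "java.lang.String".  Only when the
         * class names match (or either argument is null) does it fall back to
         * System.identityHashCode, returning -1 or 1 but never 0, so an
         * insertion always descends into a definite left or right subtree.
         */
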
        /**
         * Forms tree of the nodes linked from this node.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    x.parent = null;
                    x.red = false;
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null;
                }
            }
        }

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            if (root == null
                || (movable
                    && (root.right == null
                        || (rl = root.left) == null
                        || rl.left == null))) {
                tab[index] = first.untreeify(map); // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) { // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
        }

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * see above discussion about split bits and indices.
         *
         * @param map the map
         * @param tab the table for recording bin heads
         * @param index the index of the table being split
         * @param bit the bit of hash to split on
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }

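        /*
         * Worked example (illustrative): resize doubles the table and invokes
         * split with bit == oldCap.  With oldCap == 16 and a tree bin at index
         * 5, every node whose hash has the 16 bit clear ((hash & 16) == 0)
         * stays at index 5, and the rest move to index 5 + 16 == 21.  Each
         * half is untreeified if it now holds at most UNTREEIFY_THRESHOLD (6)
         * nodes, and is re-treeified otherwise (unless the other half is
         * empty, in which case the existing tree structure is still valid).
         */
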
        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

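        /*
         * Illustrative sketch of rotateLeft(root, p): with r == p.right, r
         * takes p's place under p's former parent (becoming the black root if
         * that parent was null), p becomes r's left child, and r's old left
         * subtree rl is re-attached as p's right subtree.  rotateRight is the
         * mirror image.
         *
         *        pp                  pp
         *        |                   |
         *        p                   r
         *       / \       ==>       / \
         *      a   r               p   c
         *         / \             / \
         *        rl  c           a   rl
         */
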
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }

}