1 /*
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation. Oracle designates this
7 * particular file as subject to the "Classpath" exception as provided
8 * by Oracle in the LICENSE file that accompanied this code.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 */
24
25 /*
26 * This file is available under and governed by the GNU General Public
27 * License version 2 only, as published by the Free Software Foundation.
28 * However, the following notice accompanied the original version of this
29 * file:
30 *
31 * Written by Doug Lea with assistance from members of JCP JSR-166
32 * Expert Group and released to the public domain, as explained at
33 * http://creativecommons.org/publicdomain/zero/1.0/
34 */
35
36 package java.util.concurrent;
37
38 import java.io.ObjectStreamField;
39 import java.io.Serializable;
40 import java.lang.reflect.ParameterizedType;
41 import java.lang.reflect.Type;
42 import java.util.AbstractMap;
43 import java.util.Arrays;
44 import java.util.Collection;
45 import java.util.Enumeration;
46 import java.util.HashMap;
47 import java.util.Hashtable;
48 import java.util.Iterator;
49 import java.util.Map;
50 import java.util.NoSuchElementException;
51 import java.util.Set;
52 import java.util.Spliterator;
53 import java.util.concurrent.atomic.AtomicReference;
54 import java.util.concurrent.locks.LockSupport;
55 import java.util.concurrent.locks.ReentrantLock;
56 import java.util.function.BiConsumer;
57 import java.util.function.BiFunction;
58 import java.util.function.Consumer;
59 import java.util.function.DoubleBinaryOperator;
60 import java.util.function.Function;
61 import java.util.function.IntBinaryOperator;
62 import java.util.function.LongBinaryOperator;
63 import java.util.function.Predicate;
64 import java.util.function.ToDoubleBiFunction;
65 import java.util.function.ToDoubleFunction;
66 import java.util.function.ToIntBiFunction;
67 import java.util.function.ToIntFunction;
68 import java.util.function.ToLongBiFunction;
69 import java.util.function.ToLongFunction;
70 import java.util.stream.Stream;
71 import jdk.internal.misc.Unsafe;
72
73 /**
74 * A hash table supporting full concurrency of retrievals and
75 * high expected concurrency for updates. This class obeys the
76 * same functional specification as {@link java.util.Hashtable}, and
77 * includes versions of methods corresponding to each method of
78 * {@code Hashtable}. However, even though all operations are
79 * thread-safe, retrieval operations do <em>not</em> entail locking,
80 * and there is <em>not</em> any support for locking the entire table
81 * in a way that prevents all access. This class is fully
82 * interoperable with {@code Hashtable} in programs that rely on its
83 * thread safety but not on its synchronization details.
84 *
85 * <p>Retrieval operations (including {@code get}) generally do not
86 * block, so may overlap with update operations (including {@code put}
87 * and {@code remove}). Retrievals reflect the results of the most
88 * recently <em>completed</em> update operations holding upon their
89 * onset. (More formally, an update operation for a given key bears a
90 * <em>happens-before</em> relation with any (non-null) retrieval for
91 * that key reporting the updated value.) For aggregate operations
92 * such as {@code putAll} and {@code clear}, concurrent retrievals may
93 * reflect insertion or removal of only some entries. Similarly,
94 * Iterators, Spliterators and Enumerations return elements reflecting the
95 * state of the hash table at some point at or since the creation of the
96 * iterator/enumeration. They do <em>not</em> throw {@link
97 * java.util.ConcurrentModificationException ConcurrentModificationException}.
98 * However, iterators are designed to be used by only one thread at a time.
99 * Bear in mind that the results of aggregate status methods including
100 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
101 * useful only when a map is not undergoing concurrent updates in other threads.
102 * Otherwise the results of these methods reflect transient states
103 * that may be adequate for monitoring or estimation purposes, but not
104 * for program control.
105 *
106 * <p>The table is dynamically expanded when there are too many
107 * collisions (i.e., keys that have distinct hash codes but fall into
108 * the same slot modulo the table size), with the expected average
109 * effect of maintaining roughly two bins per mapping (corresponding
110 * to a 0.75 load factor threshold for resizing). There may be much
111 * variance around this average as mappings are added and removed, but
112 * overall, this maintains a commonly accepted time/space tradeoff for
113 * hash tables. However, resizing this or any other kind of hash
114 * table may be a relatively slow operation. When possible, it is a
115 * good idea to provide a size estimate as an optional {@code
116 * initialCapacity} constructor argument. An additional optional
117 * {@code loadFactor} constructor argument provides a further means of
118 * customizing initial table capacity by specifying the table density
119 * to be used in calculating the amount of space to allocate for the
120 * given number of elements. Also, for compatibility with previous
121 * versions of this class, constructors may optionally specify an
122 * expected {@code concurrencyLevel} as an additional hint for
123 * internal sizing. Note that using many keys with exactly the same
124 * {@code hashCode()} is a sure way to slow down performance of any
125 * hash table. To ameliorate impact, when keys are {@link Comparable},
126 * this class may use comparison order among keys to help break ties.
127 *
128 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
129 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
130 * (using {@link #keySet(Object)}) when only keys are of interest, and the
131 * mapped values are (perhaps transiently) not used or all take the
132 * same mapping value.
133 *
134 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
135 * form of histogram or multiset) by using {@link
136 * java.util.concurrent.atomic.LongAdder} values and initializing via
137 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
138 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
139 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
140 *
141 * <p>This class and its views and iterators implement all of the
142 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
143 * interfaces.
144 *
145 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
146 * does <em>not</em> allow {@code null} to be used as a key or value.
147 *
148 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
149 * operations that, unlike most {@link Stream} methods, are designed
150 * to be safely, and often sensibly, applied even with maps that are
151 * being concurrently updated by other threads; for example, when
152 * computing a snapshot summary of the values in a shared registry.
153 * There are three kinds of operation, each with four forms, accepting
154 * functions with keys, values, entries, and (key, value) pairs as
155 * arguments and/or return values. Because the elements of a
156 * ConcurrentHashMap are not ordered in any particular way, and may be
157 * processed in different orders in different parallel executions, the
158 * correctness of supplied functions should not depend on any
159 * ordering, or on any other objects or values that may transiently
160 * change while computation is in progress; and except for forEach
161 * actions, should ideally be side-effect-free. Bulk operations on
162 * {@link Map.Entry} objects do not support method {@code setValue}.
163 *
164 * <ul>
165 * <li>forEach: Performs a given action on each element.
166 * A variant form applies a given transformation on each element
167 * before performing the action.
168 *
169 * <li>search: Returns the first available non-null result of
170 * applying a given function on each element; skipping further
171 * search when a result is found.
172 *
173 * <li>reduce: Accumulates each element. The supplied reduction
174 * function cannot rely on ordering (more formally, it should be
175 * both associative and commutative). There are five variants:
176 *
177 * <ul>
178 *
179 * <li>Plain reductions. (There is not a form of this method for
180 * (key, value) function arguments since there is no corresponding
181 * return type.)
182 *
183 * <li>Mapped reductions that accumulate the results of a given
184 * function applied to each element.
185 *
186 * <li>Reductions to scalar doubles, longs, and ints, using a
187 * given basis value.
188 *
189 * </ul>
190 * </ul>
191 *
192 * <p>These bulk operations accept a {@code parallelismThreshold}
193 * argument. Methods proceed sequentially if the current map size is
194 * estimated to be less than the given threshold. Using a value of
195 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
196 * of {@code 1} results in maximal parallelism by partitioning into
197 * enough subtasks to fully utilize the {@link
198 * ForkJoinPool#commonPool()} that is used for all parallel
199 * computations. Normally, you would initially choose one of these
200 * extreme values, and then measure performance of using in-between
201 * values that trade off overhead versus throughput.
202 *
203 * <p>The concurrency properties of bulk operations follow
204 * from those of ConcurrentHashMap: Any non-null result returned
205 * from {@code get(key)} and related access methods bears a
206 * happens-before relation with the associated insertion or
207 * update. The result of any bulk operation reflects the
208 * composition of these per-element relations (but is not
209 * necessarily atomic with respect to the map as a whole unless it
210 * is somehow known to be quiescent). Conversely, because keys
211 * and values in the map are never null, null serves as a reliable
212 * atomic indicator of the current lack of any result. To
213 * maintain this property, null serves as an implicit basis for
214 * all non-scalar reduction operations. For the double, long, and
215 * int versions, the basis should be one that, when combined with
216 * any other value, returns that other value (more formally, it
217 * should be the identity element for the reduction). Most common
218 * reductions have these properties; for example, computing a sum
219 * with basis 0 or a minimum with basis MAX_VALUE.
220 *
221 * <p>Search and transformation functions provided as arguments
222 * should similarly return null to indicate the lack of any result
223 * (in which case it is not used). In the case of mapped
224 * reductions, this also enables transformations to serve as
225 * filters, returning null (or, in the case of primitive
226 * specializations, the identity basis) if the element should not
227 * be combined. You can create compound transformations and
228 * filterings by composing them yourself under this "null means
229 * there is nothing there now" rule before using them in search or
230 * reduce operations.
231 *
232 * <p>Methods accepting and/or returning Entry arguments maintain
233 * key-value associations. They may be useful for example when
234 * finding the key for the greatest value. Note that "plain" Entry
235 * arguments can be supplied using {@code new
236 * AbstractMap.SimpleEntry(k,v)}.
237 *
238 * <p>Bulk operations may complete abruptly, throwing an
239 * exception encountered in the application of a supplied
240 * function. Bear in mind when handling such exceptions that other
241 * concurrently executing functions could also have thrown
242 * exceptions, or would have done so if the first exception had
243 * not occurred.
244 *
245 * <p>Speedups for parallel compared to sequential forms are common
246 * but not guaranteed. Parallel operations involving brief functions
247 * on small maps may execute more slowly than sequential forms if the
248 * underlying work to parallelize the computation is more expensive
249 * than the computation itself. Similarly, parallelization may not
250 * lead to much actual parallelism if all processors are busy
251 * performing unrelated tasks.
252 *
253 * <p>All arguments to all task methods must be non-null.
254 *
255 * <p>This class is a member of the
256 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
257 * Java Collections Framework</a>.
258 *
259 * @since 1.5
260 * @author Doug Lea
261 * @param <K> the type of keys maintained by this map
262 * @param <V> the type of mapped values
263 */
264 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
265 implements ConcurrentMap<K,V>, Serializable {
266 private static final long serialVersionUID = 7249069246763182397L;
267
268 /*
269 * Overview:
270 *
271 * The primary design goal of this hash table is to maintain
272 * concurrent readability (typically method get(), but also
273 * iterators and related methods) while minimizing update
274 * contention. Secondary goals are to keep space consumption about
275 * the same or better than java.util.HashMap, and to support high
276 * initial insertion rates on an empty table by many threads.
277 *
278 * This map usually acts as a binned (bucketed) hash table. Each
279 * key-value mapping is held in a Node. Most nodes are instances
280 * of the basic Node class with hash, key, value, and next
281 * fields. However, various subclasses exist: TreeNodes are
282 * arranged in balanced trees, not lists. TreeBins hold the roots
283 * of sets of TreeNodes. ForwardingNodes are placed at the heads
284 * of bins during resizing. ReservationNodes are used as
285 * placeholders while establishing values in computeIfAbsent and
286 * related methods. The types TreeBin, ForwardingNode, and
287 * ReservationNode do not hold normal user keys, values, or
288 * hashes, and are readily distinguishable during search etc
289 * because they have negative hash fields and null key and value
290 * fields. (These special nodes are either uncommon or transient,
291 * so the impact of carrying around some unused fields is
292 * insignificant.)
293 *
294 * The table is lazily initialized to a power-of-two size upon the
295 * first insertion. Each bin in the table normally contains a
296 * list of Nodes (most often, the list has only zero or one Node).
297 * Table accesses require volatile/atomic reads, writes, and
298 * CASes. Because there is no other way to arrange this without
299 * adding further indirections, we use intrinsics
300 * (jdk.internal.misc.Unsafe) operations.
301 *
302 * We use the top (sign) bit of Node hash fields for control
303 * purposes -- it is available anyway because of addressing
304 * constraints. Nodes with negative hash fields are specially
305 * handled or ignored in map methods.
306 *
307 * Insertion (via put or its variants) of the first node in an
308 * empty bin is performed by just CASing it to the bin. This is
309 * by far the most common case for put operations under most
310 * key/hash distributions. Other update operations (insert,
311 * delete, and replace) require locks. We do not want to waste
312 * the space required to associate a distinct lock object with
313 * each bin, so instead use the first node of a bin list itself as
314 * a lock. Locking support for these locks relies on builtin
315 * "synchronized" monitors.
316 *
317 * Using the first node of a list as a lock does not by itself
318 * suffice though: When a node is locked, any update must first
319 * validate that it is still the first node after locking it, and
320 * retry if not. Because new nodes are always appended to lists,
321 * once a node is first in a bin, it remains first until deleted
322 * or the bin becomes invalidated (upon resizing).
323 *
324 * The main disadvantage of per-bin locks is that other update
325 * operations on other nodes in a bin list protected by the same
326 * lock can stall, for example when user equals() or mapping
327 * functions take a long time. However, statistically, under
328 * random hash codes, this is not a common problem. Ideally, the
329 * frequency of nodes in bins follows a Poisson distribution
330 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
331 * parameter of about 0.5 on average, given the resizing threshold
332 * of 0.75, although with a large variance because of resizing
333 * granularity. Ignoring variance, the expected occurrences of
334 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
335 * first values are:
336 *
337 * 0: 0.60653066
338 * 1: 0.30326533
339 * 2: 0.07581633
340 * 3: 0.01263606
341 * 4: 0.00157952
342 * 5: 0.00015795
343 * 6: 0.00001316
344 * 7: 0.00000094
345 * 8: 0.00000006
346 * more: less than 1 in ten million
347 *
348 * Lock contention probability for two threads accessing distinct
349 * elements is roughly 1 / (8 * #elements) under random hashes.
350 *
351 * Actual hash code distributions encountered in practice
352 * sometimes deviate significantly from uniform randomness. This
353 * includes the case when N > (1<<30), so some keys MUST collide.
354 * Similarly for dumb or hostile usages in which multiple keys are
355 * designed to have identical hash codes or ones that differ only
356 * in masked-out high bits. So we use a secondary strategy that
357 * applies when the number of nodes in a bin exceeds a
358 * threshold. These TreeBins use a balanced tree to hold nodes (a
359 * specialized form of red-black trees), bounding search time to
360 * O(log N). Each search step in a TreeBin is at least twice as
361 * slow as in a regular list, but given that N cannot exceed
362 * (1<<64) (before running out of addresses) this bounds search
363 * steps, lock hold times, etc, to reasonable constants (roughly
364 * 100 nodes inspected per operation worst case) so long as keys
365 * are Comparable (which is very common -- String, Long, etc).
366 * TreeBin nodes (TreeNodes) also maintain the same "next"
367 * traversal pointers as regular nodes, so can be traversed in
368 * iterators in the same way.
369 *
370 * The table is resized when occupancy exceeds a percentage
371 * threshold (nominally, 0.75, but see below). Any thread
372 * noticing an overfull bin may assist in resizing after the
373 * initiating thread allocates and sets up the replacement array.
374 * However, rather than stalling, these other threads may proceed
375 * with insertions etc. The use of TreeBins shields us from the
376 * worst case effects of overfilling while resizes are in
377 * progress. Resizing proceeds by transferring bins, one by one,
378 * from the table to the next table. However, threads claim small
379 * blocks of indices to transfer (via field transferIndex) before
380 * doing so, reducing contention. A generation stamp in field
381 * sizeCtl ensures that resizings do not overlap. Because we are
382 * using power-of-two expansion, the elements from each bin must
383 * either stay at same index, or move with a power of two
384 * offset. We eliminate unnecessary node creation by catching
385 * cases where old nodes can be reused because their next fields
386 * won't change. On average, only about one-sixth of them need
387 * cloning when a table doubles. The nodes they replace will be
388 * garbage collectable as soon as they are no longer referenced by
389 * any reader thread that may be in the midst of concurrently
390 * traversing table. Upon transfer, the old table bin contains
391 * only a special forwarding node (with hash field "MOVED") that
392 * contains the next table as its key. On encountering a
393 * forwarding node, access and update operations restart, using
394 * the new table.
395 *
396 * Each bin transfer requires its bin lock, which can stall
397 * waiting for locks while resizing. However, because other
398 * threads can join in and help resize rather than contend for
399 * locks, average aggregate waits become shorter as resizing
400 * progresses. The transfer operation must also ensure that all
401 * accessible bins in both the old and new table are usable by any
402 * traversal. This is arranged in part by proceeding from the
403 * last bin (table.length - 1) up towards the first. Upon seeing
404 * a forwarding node, traversals (see class Traverser) arrange to
405 * move to the new table without revisiting nodes. To ensure that
406 * no intervening nodes are skipped even when moved out of order,
407 * a stack (see class TableStack) is created on first encounter of
408 * a forwarding node during a traversal, to maintain its place if
409 * later processing the current table. The need for these
410 * save/restore mechanics is relatively rare, but when one
411 * forwarding node is encountered, typically many more will be.
412 * So Traversers use a simple caching scheme to avoid creating so
413 * many new TableStack nodes. (Thanks to Peter Levart for
414 * suggesting use of a stack here.)
415 *
416 * The traversal scheme also applies to partial traversals of
417 * ranges of bins (via an alternate Traverser constructor)
418 * to support partitioned aggregate operations. Also, read-only
419 * operations give up if ever forwarded to a null table, which
420 * provides support for shutdown-style clearing, which is also not
421 * currently implemented.
422 *
423 * Lazy table initialization minimizes footprint until first use,
424 * and also avoids resizings when the first operation is from a
425 * putAll, constructor with map argument, or deserialization.
426 * These cases attempt to override the initial capacity settings,
427 * but harmlessly fail to take effect in cases of races.
428 *
429 * The element count is maintained using a specialization of
430 * LongAdder. We need to incorporate a specialization rather than
431 * just use a LongAdder in order to access implicit
432 * contention-sensing that leads to creation of multiple
433 * CounterCells. The counter mechanics avoid contention on
434 * updates but can encounter cache thrashing if read too
435 * frequently during concurrent access. To avoid reading so often,
436 * resizing under contention is attempted only upon adding to a
437 * bin already holding two or more nodes. Under uniform hash
438 * distributions, the probability of this occurring at threshold
439 * is around 13%, meaning that only about 1 in 8 puts check
440 * threshold (and after resizing, many fewer do so).
441 *
442 * TreeBins use a special form of comparison for search and
443 * related operations (which is the main reason we cannot use
444 * existing collections such as TreeMaps). TreeBins contain
445 * Comparable elements, but may contain others, as well as
446 * elements that are Comparable but not necessarily Comparable for
447 * the same T, so we cannot invoke compareTo among them. To handle
448 * this, the tree is ordered primarily by hash value, then by
449 * Comparable.compareTo order if applicable. On lookup at a node,
450 * if elements are not comparable or compare as 0 then both left
451 * and right children may need to be searched in the case of tied
452 * hash values. (This corresponds to the full list search that
453 * would be necessary if all elements were non-Comparable and had
454 * tied hashes.) On insertion, to keep a total ordering (or as
455 * close as is required here) across rebalancings, we compare
456 * classes and identityHashCodes as tie-breakers. The red-black
457 * balancing code is updated from pre-jdk-collections
458 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
459 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
460 * Algorithms" (CLR).
461 *
462 * TreeBins also require an additional locking mechanism. While
463 * list traversal is always possible by readers even during
464 * updates, tree traversal is not, mainly because of tree-rotations
465 * that may change the root node and/or its linkages. TreeBins
466 * include a simple read-write lock mechanism parasitic on the
467 * main bin-synchronization strategy: Structural adjustments
468 * associated with an insertion or removal are already bin-locked
469 * (and so cannot conflict with other writers) but must wait for
470 * ongoing readers to finish. Since there can be only one such
471 * waiter, we use a simple scheme using a single "waiter" field to
472 * block writers. However, readers need never block. If the root
473 * lock is held, they proceed along the slow traversal path (via
474 * next-pointers) until the lock becomes available or the list is
475 * exhausted, whichever comes first. These cases are not fast, but
476 * maximize aggregate expected throughput.
477 *
478 * Maintaining API and serialization compatibility with previous
479 * versions of this class introduces several oddities. Mainly: We
480 * leave untouched but unused constructor arguments referring to
481 * concurrencyLevel. We accept a loadFactor constructor argument,
482 * but apply it only to initial table capacity (which is the only
483 * time that we can guarantee to honor it.) We also declare an
484 * unused "Segment" class that is instantiated in minimal form
485 * only when serializing.
486 *
487 * Also, solely for compatibility with previous versions of this
488 * class, it extends AbstractMap, even though all of its methods
489 * are overridden, so it is just useless baggage.
490 *
491 * This file is organized to make things a little easier to follow
492 * while reading than they might otherwise: First the main static
493 * declarations and utilities, then fields, then main public
494 * methods (with a few factorings of multiple public methods into
495 * internal ones), then sizing methods, trees, traversers, and
496 * bulk operations.
497 */
498
499 /* ---------------- Constants -------------- */
500
501 /**
502 * The largest possible table capacity. This value must be
503 * exactly 1<<30 to stay within Java array allocation and indexing
504 * bounds for power of two table sizes, and is further required
505 * because the top two bits of 32bit hash fields are used for
506 * control purposes.
507 */
508 private static final int MAXIMUM_CAPACITY = 1 << 30;
509
510 /**
511 * The default initial table capacity. Must be a power of 2
512 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
513 */
514 private static final int DEFAULT_CAPACITY = 16;
515
516 /**
517 * The largest possible (non-power of two) array size.
518 * Needed by toArray and related methods.
519 */
520 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
521
522 /**
523 * The default concurrency level for this table. Unused but
524 * defined for compatibility with previous versions of this class.
525 */
526 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
527
528 /**
529 * The load factor for this table. Overrides of this value in
530 * constructors affect only the initial table capacity. The
531 * actual floating point value isn't normally used -- it is
532 * simpler to use expressions such as {@code n - (n >>> 2)} for
533 * the associated resizing threshold.
534 */
535 private static final float LOAD_FACTOR = 0.75f;
536
537 /**
538 * The bin count threshold for using a tree rather than list for a
539 * bin. Bins are converted to trees when adding an element to a
540 * bin with at least this many nodes. The value must be greater
541 * than 2, and should be at least 8 to mesh with assumptions in
542 * tree removal about conversion back to plain bins upon
543 * shrinkage.
544 */
545 static final int TREEIFY_THRESHOLD = 8;
546
547 /**
548 * The bin count threshold for untreeifying a (split) bin during a
549 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
550 * most 6 to mesh with shrinkage detection under removal.
551 */
552 static final int UNTREEIFY_THRESHOLD = 6;
553
554 /**
555 * The smallest table capacity for which bins may be treeified.
556 * (Otherwise the table is resized if too many nodes in a bin.)
557 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
558 * conflicts between resizing and treeification thresholds.
559 */
560 static final int MIN_TREEIFY_CAPACITY = 64;
561
562 /**
563 * Minimum number of rebinnings per transfer step. Ranges are
564 * subdivided to allow multiple resizer threads. This value
565 * serves as a lower bound to avoid resizers encountering
566 * excessive memory contention. The value should be at least
567 * DEFAULT_CAPACITY.
568 */
569 private static final int MIN_TRANSFER_STRIDE = 16;
570
571 /**
572 * The number of bits used for generation stamp in sizeCtl.
573 * Must be at least 6 for 32bit arrays.
574 */
575 private static final int RESIZE_STAMP_BITS = 16;
576
577 /**
578 * The maximum number of threads that can help resize.
579 * Must fit in 32 - RESIZE_STAMP_BITS bits.
580 */
581 private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
582
583 /**
584 * The bit shift for recording size stamp in sizeCtl.
585 */
586 private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
587
588 /*
589 * Encodings for Node hash fields. See above for explanation.
590 */
591 static final int MOVED = -1; // hash for forwarding nodes
592 static final int TREEBIN = -2; // hash for roots of trees
593 static final int RESERVED = -3; // hash for transient reservations
594 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
595
596 /** Number of CPUS, to place bounds on some sizings */
597 static final int NCPU = Runtime.getRuntime().availableProcessors();
598
599 /**
600 * Serialized pseudo-fields, provided only for jdk7 compatibility.
601 * @serialField segments Segment[]
602 * The segments, each of which is a specialized hash table.
603 * @serialField segmentMask int
604 * Mask value for indexing into segments. The upper bits of a
605 * key's hash code are used to choose the segment.
606 * @serialField segmentShift int
607 * Shift value for indexing within segments.
608 */
609 private static final ObjectStreamField[] serialPersistentFields = {
610 new ObjectStreamField("segments", Segment[].class),
611 new ObjectStreamField("segmentMask", Integer.TYPE),
612 new ObjectStreamField("segmentShift", Integer.TYPE),
613 };
614
615 /* ---------------- Nodes -------------- */
616
617 /**
618 * Key-value entry. This class is never exported out as a
619 * user-mutable Map.Entry (i.e., one supporting setValue; see
620 * MapEntry below), but can be used for read-only traversals used
621 * in bulk tasks. Subclasses of Node with a negative hash field
622 * are special, and contain null keys and values (but are never
623 * exported). Otherwise, keys and vals are never null.
624 */
625 static class Node<K,V> implements Map.Entry<K,V> {
626 final int hash;
627 final K key;
628 volatile V val;
629 volatile Node<K,V> next;
630
631 Node(int hash, K key, V val) {
632 this.hash = hash;
633 this.key = key;
634 this.val = val;
635 }
636
637 Node(int hash, K key, V val, Node<K,V> next) {
638 this(hash, key, val);
639 this.next = next;
640 }
641
642 public final K getKey() { return key; }
643 public final V getValue() { return val; }
644 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
645 public final String toString() {
646 return Helpers.mapEntryToString(key, val);
647 }
648 public final V setValue(V value) {
649 throw new UnsupportedOperationException();
650 }
651
652 public final boolean equals(Object o) {
653 Object k, v, u; Map.Entry<?,?> e;
654 return ((o instanceof Map.Entry) &&
655 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
656 (v = e.getValue()) != null &&
657 (k == key || k.equals(key)) &&
658 (v == (u = val) || v.equals(u)));
659 }
660
661 /**
662 * Virtualized support for map.get(); overridden in subclasses.
663 */
664 Node<K,V> find(int h, Object k) {
665 Node<K,V> e = this;
666 if (k != null) {
667 do {
668 K ek;
669 if (e.hash == h &&
670 ((ek = e.key) == k || (ek != null && k.equals(ek))))
671 return e;
672 } while ((e = e.next) != null);
673 }
674 return null;
675 }
676 }
677
678 /* ---------------- Static utilities -------------- */
679
680 /**
681 * Spreads (XORs) higher bits of hash to lower and also forces top
682 * bit to 0. Because the table uses power-of-two masking, sets of
683 * hashes that vary only in bits above the current mask will
684 * always collide. (Among known examples are sets of Float keys
685 * holding consecutive whole numbers in small tables.) So we
686 * apply a transform that spreads the impact of higher bits
687 * downward. There is a tradeoff between speed, utility, and
688 * quality of bit-spreading. Because many common sets of hashes
689 * are already reasonably distributed (so don't benefit from
690 * spreading), and because we use trees to handle large sets of
691 * collisions in bins, we just XOR some shifted bits in the
692 * cheapest possible way to reduce systematic lossage, as well as
693 * to incorporate impact of the highest bits that would otherwise
694 * never be used in index calculations because of table bounds.
695 */
696 static final int spread(int h) {
697 return (h ^ (h >>> 16)) & HASH_BITS;
698 }
699
700 /**
701 * Returns a power of two table size for the given desired capacity.
702 * See Hackers Delight, sec 3.2
703 */
704 private static final int tableSizeFor(int c) {
705 int n = -1 >>> Integer.numberOfLeadingZeros(c - 1);
706 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
707 }
708
709 /**
710 * Returns x's Class if it is of the form "class C implements
711 * Comparable<C>", else null.
712 */
713 static Class<?> comparableClassFor(Object x) {
714 if (x instanceof Comparable) {
715 Class<?> c; Type[] ts, as; ParameterizedType p;
716 if ((c = x.getClass()) == String.class) // bypass checks
717 return c;
718 if ((ts = c.getGenericInterfaces()) != null) {
719 for (Type t : ts) {
720 if ((t instanceof ParameterizedType) &&
721 ((p = (ParameterizedType)t).getRawType() ==
722 Comparable.class) &&
723 (as = p.getActualTypeArguments()) != null &&
724 as.length == 1 && as[0] == c) // type arg is c
725 return c;
726 }
727 }
728 }
729 return null;
730 }
731
732 /**
733 * Returns k.compareTo(x) if x matches kc (k's screened comparable
734 * class), else 0.
735 */
736 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
737 static int compareComparables(Class<?> kc, Object k, Object x) {
738 return (x == null || x.getClass() != kc ? 0 :
739 ((Comparable)k).compareTo(x));
740 }
741
742 /* ---------------- Table element access -------------- */
743
744 /*
745 * Atomic access methods are used for table elements as well as
746 * elements of in-progress next table while resizing. All uses of
747 * the tab arguments must be null checked by callers. All callers
748 * also paranoically precheck that tab's length is not zero (or an
749 * equivalent check), thus ensuring that any index argument taking
750 * the form of a hash value anded with (length - 1) is a valid
751 * index. Note that, to be correct wrt arbitrary concurrency
752 * errors by users, these checks must operate on local variables,
753 * which accounts for some odd-looking inline assignments below.
754 * Note that calls to setTabAt always occur within locked regions,
755 * and so require only release ordering.
756 */
757
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        // Acquire-mode volatile read of tab[i]: pairs with the release/CAS
        // writes in setTabAt/casTabAt so a newly published node's fields
        // are visible to lock-free readers.
        return (Node<K,V>)U.getObjectAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }
762
    /** Atomically installs v at tab[i] iff the slot currently holds c. */
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSetObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
767
    /**
     * Stores v at tab[i]. Release ordering suffices because (per the
     * access notes above) all calls occur within locked regions.
     */
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }
771
772 /* ---------------- Fields -------------- */
773
774 /**
775 * The array of bins. Lazily initialized upon first insertion.
776 * Size is always a power of two. Accessed directly by iterators.
777 */
778 transient volatile Node<K,V>[] table;
779
780 /**
781 * The next table to use; non-null only while resizing.
782 */
783 private transient volatile Node<K,V>[] nextTable;
784
785 /**
786 * Base counter value, used mainly when there is no contention,
787 * but also as a fallback during table initialization
788 * races. Updated via CAS.
789 */
790 private transient volatile long baseCount;
791
792 /**
793 * Table initialization and resizing control. When negative, the
794 * table is being initialized or resized: -1 for initialization,
795 * else -(1 + the number of active resizing threads). Otherwise,
796 * when table is null, holds the initial table size to use upon
797 * creation, or 0 for default. After initialization, holds the
798 * next element count value upon which to resize the table.
799 */
800 private transient volatile int sizeCtl;
801
802 /**
803 * The next table index (plus one) to split while resizing.
804 */
805 private transient volatile int transferIndex;
806
807 /**
808 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
809 */
810 private transient volatile int cellsBusy;
811
812 /**
813 * Table of counter cells. When non-null, size is a power of 2.
814 */
815 private transient volatile CounterCell[] counterCells;
816
817 // views
818 private transient KeySetView<K,V> keySet;
819 private transient ValuesView<K,V> values;
820 private transient EntrySetView<K,V> entrySet;
821
822
823 /* ---------------- Public operations -------------- */
824
825 /**
826 * Creates a new, empty map with the default initial table size (16).
827 */
828 public ConcurrentHashMap() {
829 }
830
831 /**
832 * Creates a new, empty map with an initial table size
833 * accommodating the specified number of elements without the need
834 * to dynamically resize.
835 *
836 * @param initialCapacity The implementation performs internal
837 * sizing to accommodate this many elements.
838 * @throws IllegalArgumentException if the initial capacity of
839 * elements is negative
840 */
841 public ConcurrentHashMap(int initialCapacity) {
842 this(initialCapacity, LOAD_FACTOR, 1);
843 }
844
845 /**
846 * Creates a new map with the same mappings as the given map.
847 *
848 * @param m the map
849 */
850 public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
851 this.sizeCtl = DEFAULT_CAPACITY;
852 putAll(m);
853 }
854
855 /**
856 * Creates a new, empty map with an initial table size based on
857 * the given number of elements ({@code initialCapacity}) and
858 * initial table density ({@code loadFactor}).
859 *
860 * @param initialCapacity the initial capacity. The implementation
861 * performs internal sizing to accommodate this many elements,
862 * given the specified load factor.
863 * @param loadFactor the load factor (table density) for
864 * establishing the initial table size
865 * @throws IllegalArgumentException if the initial capacity of
866 * elements is negative or the load factor is nonpositive
867 *
868 * @since 1.6
869 */
870 public ConcurrentHashMap(int initialCapacity, float loadFactor) {
871 this(initialCapacity, loadFactor, 1);
872 }
873
874 /**
875 * Creates a new, empty map with an initial table size based on
876 * the given number of elements ({@code initialCapacity}), initial
877 * table density ({@code loadFactor}), and number of concurrently
878 * updating threads ({@code concurrencyLevel}).
879 *
880 * @param initialCapacity the initial capacity. The implementation
881 * performs internal sizing to accommodate this many elements,
882 * given the specified load factor.
883 * @param loadFactor the load factor (table density) for
884 * establishing the initial table size
885 * @param concurrencyLevel the estimated number of concurrently
886 * updating threads. The implementation may use this value as
887 * a sizing hint.
888 * @throws IllegalArgumentException if the initial capacity is
889 * negative or the load factor or concurrencyLevel are
890 * nonpositive
891 */
892 public ConcurrentHashMap(int initialCapacity,
893 float loadFactor, int concurrencyLevel) {
894 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
895 throw new IllegalArgumentException();
896 if (initialCapacity < concurrencyLevel) // Use at least as many bins
897 initialCapacity = concurrencyLevel; // as estimated threads
898 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
899 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
900 MAXIMUM_CAPACITY : tableSizeFor((int)size);
901 this.sizeCtl = cap;
902 }
903
904 // Original (since JDK1.2) Map methods
905
906 /**
907 * {@inheritDoc}
908 */
909 public int size() {
910 long n = sumCount();
911 return ((n < 0L) ? 0 :
912 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
913 (int)n);
914 }
915
916 /**
917 * {@inheritDoc}
918 */
919 public boolean isEmpty() {
920 return sumCount() <= 0L; // ignore transient negative values
921 }
922
923 /**
924 * Returns the value to which the specified key is mapped,
925 * or {@code null} if this map contains no mapping for the key.
926 *
927 * <p>More formally, if this map contains a mapping from a key
928 * {@code k} to a value {@code v} such that {@code key.equals(k)},
929 * then this method returns {@code v}; otherwise it returns
930 * {@code null}. (There can be at most one such mapping.)
931 *
932 * @throws NullPointerException if the specified key is null
933 */
934 public V get(Object key) {
935 Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
936 int h = spread(key.hashCode());
937 if ((tab = table) != null && (n = tab.length) > 0 &&
938 (e = tabAt(tab, (n - 1) & h)) != null) {
939 if ((eh = e.hash) == h) {
940 if ((ek = e.key) == key || (ek != null && key.equals(ek)))
941 return e.val;
942 }
943 else if (eh < 0)
944 return (p = e.find(h, key)) != null ? p.val : null;
945 while ((e = e.next) != null) {
946 if (e.hash == h &&
947 ((ek = e.key) == key || (ek != null && key.equals(ek))))
948 return e.val;
949 }
950 }
951 return null;
952 }
953
954 /**
955 * Tests if the specified object is a key in this table.
956 *
957 * @param key possible key
958 * @return {@code true} if and only if the specified object
959 * is a key in this table, as determined by the
960 * {@code equals} method; {@code false} otherwise
961 * @throws NullPointerException if the specified key is null
962 */
963 public boolean containsKey(Object key) {
964 return get(key) != null;
965 }
966
967 /**
968 * Returns {@code true} if this map maps one or more keys to the
969 * specified value. Note: This method may require a full traversal
970 * of the map, and is much slower than method {@code containsKey}.
971 *
972 * @param value value whose presence in this map is to be tested
973 * @return {@code true} if this map maps one or more keys to the
974 * specified value
975 * @throws NullPointerException if the specified value is null
976 */
977 public boolean containsValue(Object value) {
978 if (value == null)
979 throw new NullPointerException();
980 Node<K,V>[] t;
981 if ((t = table) != null) {
982 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
983 for (Node<K,V> p; (p = it.advance()) != null; ) {
984 V v;
985 if ((v = p.val) == value || (v != null && value.equals(v)))
986 return true;
987 }
988 }
989 return false;
990 }
991
992 /**
993 * Maps the specified key to the specified value in this table.
994 * Neither the key nor the value can be null.
995 *
996 * <p>The value can be retrieved by calling the {@code get} method
997 * with a key that is equal to the original key.
998 *
999 * @param key key with which the specified value is to be associated
1000 * @param value value to be associated with the specified key
1001 * @return the previous value associated with {@code key}, or
1002 * {@code null} if there was no mapping for {@code key}
1003 * @throws NullPointerException if the specified key or value is null
1004 */
1005 public V put(K key, V value) {
1006 return putVal(key, value, false);
1007 }
1008
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param key non-null key
     * @param value non-null value
     * @param onlyIfAbsent if true, keep any existing value
     * @return the previous value, or null if none
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0; // nodes seen in the bin; drives treeification below
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable(); // lazy table creation, then retry
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f); // resize in progress: help, then retry
            else if (onlyIfAbsent // check first node without acquiring lock
                     && fh == hash
                     && ((fk = f.key) == key || (fk != null && key.equals(fk)))
                     && (fv = f.val) != null)
                return fv;
            else {
                V oldVal = null;
                synchronized (f) { // lock the bin via its first node
                    if (tabAt(tab, i) == f) { // recheck: head may have changed
                        if (fh >= 0) { // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // reached tail without a match: append
                                    pred.next = new Node<K,V>(hash, key, value);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2; // any value below TREEIFY_THRESHOLD
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                                  value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            // A compute-family method on this key is in flight
                            // on this thread; re-entrant update is illegal.
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i); // long list: convert to tree bin
                    if (oldVal != null)
                        return oldVal;      // replaced: count unchanged
                    break;
                }
                // binCount == 0: head changed while locking; retry
            }
        }
        addCount(1L, binCount); // new mapping added; may trigger a resize
        return null;
    }
1078
1079 /**
1080 * Copies all of the mappings from the specified map to this one.
1081 * These mappings replace any mappings that this map had for any of the
1082 * keys currently in the specified map.
1083 *
1084 * @param m mappings to be stored in this map
1085 */
1086 public void putAll(Map<? extends K, ? extends V> m) {
1087 tryPresize(m.size());
1088 for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1089 putVal(e.getKey(), e.getValue(), false);
1090 }
1091
1092 /**
1093 * Removes the key (and its corresponding value) from this map.
1094 * This method does nothing if the key is not in the map.
1095 *
1096 * @param key the key that needs to be removed
1097 * @return the previous value associated with {@code key}, or
1098 * {@code null} if there was no mapping for {@code key}
1099 * @throws NullPointerException if the specified key is null
1100 */
1101 public V remove(Object key) {
1102 return replaceNode(key, null, null);
1103 }
1104
1105 /**
1106 * Implementation for the four public remove/replace methods:
1107 * Replaces node value with v, conditional upon match of cv if
1108 * non-null. If resulting value is null, delete.
1109 */
1110 final V replaceNode(Object key, V value, Object cv) {
1111 int hash = spread(key.hashCode());
1112 for (Node<K,V>[] tab = table;;) {
1113 Node<K,V> f; int n, i, fh;
1114 if (tab == null || (n = tab.length) == 0 ||
1115 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1116 break;
1117 else if ((fh = f.hash) == MOVED)
1118 tab = helpTransfer(tab, f);
1119 else {
1120 V oldVal = null;
1121 boolean validated = false;
1122 synchronized (f) {
1123 if (tabAt(tab, i) == f) {
1124 if (fh >= 0) {
1125 validated = true;
1126 for (Node<K,V> e = f, pred = null;;) {
1127 K ek;
1128 if (e.hash == hash &&
1129 ((ek = e.key) == key ||
1130 (ek != null && key.equals(ek)))) {
1131 V ev = e.val;
1132 if (cv == null || cv == ev ||
1133 (ev != null && cv.equals(ev))) {
1134 oldVal = ev;
1135 if (value != null)
1136 e.val = value;
1137 else if (pred != null)
1138 pred.next = e.next;
1139 else
1140 setTabAt(tab, i, e.next);
1141 }
1142 break;
1143 }
1144 pred = e;
1145 if ((e = e.next) == null)
1146 break;
1147 }
1148 }
1149 else if (f instanceof TreeBin) {
1150 validated = true;
1151 TreeBin<K,V> t = (TreeBin<K,V>)f;
1152 TreeNode<K,V> r, p;
1153 if ((r = t.root) != null &&
1154 (p = r.findTreeNode(hash, key, null)) != null) {
1155 V pv = p.val;
1156 if (cv == null || cv == pv ||
1157 (pv != null && cv.equals(pv))) {
1158 oldVal = pv;
1159 if (value != null)
1160 p.val = value;
1161 else if (t.removeTreeNode(p))
1162 setTabAt(tab, i, untreeify(t.first));
1163 }
1164 }
1165 }
1166 else if (f instanceof ReservationNode)
1167 throw new IllegalStateException("Recursive update");
1168 }
1169 }
1170 if (validated) {
1171 if (oldVal != null) {
1172 if (value == null)
1173 addCount(-1L, -1);
1174 return oldVal;
1175 }
1176 break;
1177 }
1178 }
1179 }
1180 return null;
1181 }
1182
1183 /**
1184 * Removes all of the mappings from this map.
1185 */
1186 public void clear() {
1187 long delta = 0L; // negative number of deletions
1188 int i = 0;
1189 Node<K,V>[] tab = table;
1190 while (tab != null && i < tab.length) {
1191 int fh;
1192 Node<K,V> f = tabAt(tab, i);
1193 if (f == null)
1194 ++i;
1195 else if ((fh = f.hash) == MOVED) {
1196 tab = helpTransfer(tab, f);
1197 i = 0; // restart
1198 }
1199 else {
1200 synchronized (f) {
1201 if (tabAt(tab, i) == f) {
1202 Node<K,V> p = (fh >= 0 ? f :
1203 (f instanceof TreeBin) ?
1204 ((TreeBin<K,V>)f).first : null);
1205 while (p != null) {
1206 --delta;
1207 p = p.next;
1208 }
1209 setTabAt(tab, i++, null);
1210 }
1211 }
1212 }
1213 }
1214 if (delta != 0L)
1215 addCount(delta, -1);
1216 }
1217
1218 /**
1219 * Returns a {@link Set} view of the keys contained in this map.
1220 * The set is backed by the map, so changes to the map are
1221 * reflected in the set, and vice-versa. The set supports element
1222 * removal, which removes the corresponding mapping from this map,
1223 * via the {@code Iterator.remove}, {@code Set.remove},
1224 * {@code removeAll}, {@code retainAll}, and {@code clear}
1225 * operations. It does not support the {@code add} or
1226 * {@code addAll} operations.
1227 *
1228 * <p>The view's iterators and spliterators are
1229 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1230 *
1231 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1232 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1233 *
1234 * @return the set view
1235 */
1236 public KeySetView<K,V> keySet() {
1237 KeySetView<K,V> ks;
1238 if ((ks = keySet) != null) return ks;
1239 return keySet = new KeySetView<K,V>(this, null);
1240 }
1241
1242 /**
1243 * Returns a {@link Collection} view of the values contained in this map.
1244 * The collection is backed by the map, so changes to the map are
1245 * reflected in the collection, and vice-versa. The collection
1246 * supports element removal, which removes the corresponding
1247 * mapping from this map, via the {@code Iterator.remove},
1248 * {@code Collection.remove}, {@code removeAll},
1249 * {@code retainAll}, and {@code clear} operations. It does not
1250 * support the {@code add} or {@code addAll} operations.
1251 *
1252 * <p>The view's iterators and spliterators are
1253 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1254 *
1255 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1256 * and {@link Spliterator#NONNULL}.
1257 *
1258 * @return the collection view
1259 */
1260 public Collection<V> values() {
1261 ValuesView<K,V> vs;
1262 if ((vs = values) != null) return vs;
1263 return values = new ValuesView<K,V>(this);
1264 }
1265
1266 /**
1267 * Returns a {@link Set} view of the mappings contained in this map.
1268 * The set is backed by the map, so changes to the map are
1269 * reflected in the set, and vice-versa. The set supports element
1270 * removal, which removes the corresponding mapping from the map,
1271 * via the {@code Iterator.remove}, {@code Set.remove},
1272 * {@code removeAll}, {@code retainAll}, and {@code clear}
1273 * operations.
1274 *
1275 * <p>The view's iterators and spliterators are
1276 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1277 *
1278 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1279 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1280 *
1281 * @return the set view
1282 */
1283 public Set<Map.Entry<K,V>> entrySet() {
1284 EntrySetView<K,V> es;
1285 if ((es = entrySet) != null) return es;
1286 return entrySet = new EntrySetView<K,V>(this);
1287 }
1288
1289 /**
1290 * Returns the hash code value for this {@link Map}, i.e.,
1291 * the sum of, for each key-value pair in the map,
1292 * {@code key.hashCode() ^ value.hashCode()}.
1293 *
1294 * @return the hash code value for this map
1295 */
1296 public int hashCode() {
1297 int h = 0;
1298 Node<K,V>[] t;
1299 if ((t = table) != null) {
1300 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1301 for (Node<K,V> p; (p = it.advance()) != null; )
1302 h += p.key.hashCode() ^ p.val.hashCode();
1303 }
1304 return h;
1305 }
1306
1307 /**
1308 * Returns a string representation of this map. The string
1309 * representation consists of a list of key-value mappings (in no
1310 * particular order) enclosed in braces ("{@code {}}"). Adjacent
1311 * mappings are separated by the characters {@code ", "} (comma
1312 * and space). Each key-value mapping is rendered as the key
1313 * followed by an equals sign ("{@code =}") followed by the
1314 * associated value.
1315 *
1316 * @return a string representation of this map
1317 */
1318 public String toString() {
1319 Node<K,V>[] t;
1320 int f = (t = table) == null ? 0 : t.length;
1321 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1322 StringBuilder sb = new StringBuilder();
1323 sb.append('{');
1324 Node<K,V> p;
1325 if ((p = it.advance()) != null) {
1326 for (;;) {
1327 K k = p.key;
1328 V v = p.val;
1329 sb.append(k == this ? "(this Map)" : k);
1330 sb.append('=');
1331 sb.append(v == this ? "(this Map)" : v);
1332 if ((p = it.advance()) == null)
1333 break;
1334 sb.append(',').append(' ');
1335 }
1336 }
1337 return sb.append('}').toString();
1338 }
1339
1340 /**
1341 * Compares the specified object with this map for equality.
1342 * Returns {@code true} if the given object is a map with the same
1343 * mappings as this map. This operation may return misleading
1344 * results if either map is concurrently modified during execution
1345 * of this method.
1346 *
1347 * @param o object to be compared for equality with this map
1348 * @return {@code true} if the specified object is equal to this map
1349 */
1350 public boolean equals(Object o) {
1351 if (o != this) {
1352 if (!(o instanceof Map))
1353 return false;
1354 Map<?,?> m = (Map<?,?>) o;
1355 Node<K,V>[] t;
1356 int f = (t = table) == null ? 0 : t.length;
1357 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1358 for (Node<K,V> p; (p = it.advance()) != null; ) {
1359 V val = p.val;
1360 Object v = m.get(p.key);
1361 if (v == null || (v != val && !v.equals(val)))
1362 return false;
1363 }
1364 for (Map.Entry<?,?> e : m.entrySet()) {
1365 Object mk, mv, v;
1366 if ((mk = e.getKey()) == null ||
1367 (mv = e.getValue()) == null ||
1368 (v = get(mk)) == null ||
1369 (mv != v && !mv.equals(v)))
1370 return false;
1371 }
1372 }
1373 return true;
1374 }
1375
1376 /**
1377 * Stripped-down version of helper class used in previous version,
1378 * declared for the sake of serialization compatibility.
1379 */
1380 static class Segment<K,V> extends ReentrantLock implements Serializable {
1381 private static final long serialVersionUID = 2249069246763182397L;
1382 final float loadFactor;
1383 Segment(float lf) { this.loadFactor = lf; }
1384 }
1385
1386 /**
1387 * Saves this map to a stream (that is, serializes it).
1388 *
1389 * @param s the stream
1390 * @throws java.io.IOException if an I/O error occurs
1391 * @serialData
1392 * the serialized fields, followed by the key (Object) and value
1393 * (Object) for each key-value mapping, followed by a null pair.
1394 * The key-value mappings are emitted in no particular order.
1395 */
1396 private void writeObject(java.io.ObjectOutputStream s)
1397 throws java.io.IOException {
1398 // For serialization compatibility
1399 // Emulate segment calculation from previous version of this class
1400 int sshift = 0;
1401 int ssize = 1;
1402 while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1403 ++sshift;
1404 ssize <<= 1;
1405 }
1406 int segmentShift = 32 - sshift;
1407 int segmentMask = ssize - 1;
1408 @SuppressWarnings("unchecked")
1409 Segment<K,V>[] segments = (Segment<K,V>[])
1410 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1411 for (int i = 0; i < segments.length; ++i)
1412 segments[i] = new Segment<K,V>(LOAD_FACTOR);
1413 java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1414 streamFields.put("segments", segments);
1415 streamFields.put("segmentShift", segmentShift);
1416 streamFields.put("segmentMask", segmentMask);
1417 s.writeFields();
1418
1419 Node<K,V>[] t;
1420 if ((t = table) != null) {
1421 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1422 for (Node<K,V> p; (p = it.advance()) != null; ) {
1423 s.writeObject(p.key);
1424 s.writeObject(p.val);
1425 }
1426 }
1427 s.writeObject(null);
1428 s.writeObject(null);
1429 }
1430
1431 /**
1432 * Reconstitutes this map from a stream (that is, deserializes it).
1433 * @param s the stream
1434 * @throws ClassNotFoundException if the class of a serialized object
1435 * could not be found
1436 * @throws java.io.IOException if an I/O error occurs
1437 */
1438 private void readObject(java.io.ObjectInputStream s)
1439 throws java.io.IOException, ClassNotFoundException {
1440 /*
1441 * To improve performance in typical cases, we create nodes
1442 * while reading, then place in table once size is known.
1443 * However, we must also validate uniqueness and deal with
1444 * overpopulated bins while doing so, which requires
1445 * specialized versions of putVal mechanics.
1446 */
1447 sizeCtl = -1; // force exclusion for table construction
1448 s.defaultReadObject();
1449 long size = 0L;
1450 Node<K,V> p = null;
1451 for (;;) {
1452 @SuppressWarnings("unchecked")
1453 K k = (K) s.readObject();
1454 @SuppressWarnings("unchecked")
1455 V v = (V) s.readObject();
1456 if (k != null && v != null) {
1457 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1458 ++size;
1459 }
1460 else
1461 break;
1462 }
1463 if (size == 0L)
1464 sizeCtl = 0;
1465 else {
1466 long ts = (long)(1.0 + size / LOAD_FACTOR);
1467 int n = (ts >= (long)MAXIMUM_CAPACITY) ?
1468 MAXIMUM_CAPACITY : tableSizeFor((int)ts);
1469 @SuppressWarnings("unchecked")
1470 Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1471 int mask = n - 1;
1472 long added = 0L;
1473 while (p != null) {
1474 boolean insertAtFront;
1475 Node<K,V> next = p.next, first;
1476 int h = p.hash, j = h & mask;
1477 if ((first = tabAt(tab, j)) == null)
1478 insertAtFront = true;
1479 else {
1480 K k = p.key;
1481 if (first.hash < 0) {
1482 TreeBin<K,V> t = (TreeBin<K,V>)first;
1483 if (t.putTreeVal(h, k, p.val) == null)
1484 ++added;
1485 insertAtFront = false;
1486 }
1487 else {
1488 int binCount = 0;
1489 insertAtFront = true;
1490 Node<K,V> q; K qk;
1491 for (q = first; q != null; q = q.next) {
1492 if (q.hash == h &&
1493 ((qk = q.key) == k ||
1494 (qk != null && k.equals(qk)))) {
1495 insertAtFront = false;
1496 break;
1497 }
1498 ++binCount;
1499 }
1500 if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1501 insertAtFront = false;
1502 ++added;
1503 p.next = first;
1504 TreeNode<K,V> hd = null, tl = null;
1505 for (q = p; q != null; q = q.next) {
1506 TreeNode<K,V> t = new TreeNode<K,V>
1507 (q.hash, q.key, q.val, null, null);
1508 if ((t.prev = tl) == null)
1509 hd = t;
1510 else
1511 tl.next = t;
1512 tl = t;
1513 }
1514 setTabAt(tab, j, new TreeBin<K,V>(hd));
1515 }
1516 }
1517 }
1518 if (insertAtFront) {
1519 ++added;
1520 p.next = first;
1521 setTabAt(tab, j, p);
1522 }
1523 p = next;
1524 }
1525 table = tab;
1526 sizeCtl = n - (n >>> 2);
1527 baseCount = added;
1528 }
1529 }
1530
1531 // ConcurrentMap methods
1532
1533 /**
1534 * {@inheritDoc}
1535 *
1536 * @return the previous value associated with the specified key,
1537 * or {@code null} if there was no mapping for the key
1538 * @throws NullPointerException if the specified key or value is null
1539 */
1540 public V putIfAbsent(K key, V value) {
1541 return putVal(key, value, true);
1542 }
1543
1544 /**
1545 * {@inheritDoc}
1546 *
1547 * @throws NullPointerException if the specified key is null
1548 */
1549 public boolean remove(Object key, Object value) {
1550 if (key == null)
1551 throw new NullPointerException();
1552 return value != null && replaceNode(key, null, value) != null;
1553 }
1554
1555 /**
1556 * {@inheritDoc}
1557 *
1558 * @throws NullPointerException if any of the arguments are null
1559 */
1560 public boolean replace(K key, V oldValue, V newValue) {
1561 if (key == null || oldValue == null || newValue == null)
1562 throw new NullPointerException();
1563 return replaceNode(key, newValue, oldValue) != null;
1564 }
1565
1566 /**
1567 * {@inheritDoc}
1568 *
1569 * @return the previous value associated with the specified key,
1570 * or {@code null} if there was no mapping for the key
1571 * @throws NullPointerException if the specified key or value is null
1572 */
1573 public V replace(K key, V value) {
1574 if (key == null || value == null)
1575 throw new NullPointerException();
1576 return replaceNode(key, value, null);
1577 }
1578
1579 // Overrides of JDK8+ Map extension method defaults
1580
1581 /**
1582 * Returns the value to which the specified key is mapped, or the
1583 * given default value if this map contains no mapping for the
1584 * key.
1585 *
1586 * @param key the key whose associated value is to be returned
1587 * @param defaultValue the value to return if this map contains
1588 * no mapping for the given key
1589 * @return the mapping for the key, if present; else the default value
1590 * @throws NullPointerException if the specified key is null
1591 */
1592 public V getOrDefault(Object key, V defaultValue) {
1593 V v;
1594 return (v = get(key)) == null ? defaultValue : v;
1595 }
1596
1597 public void forEach(BiConsumer<? super K, ? super V> action) {
1598 if (action == null) throw new NullPointerException();
1599 Node<K,V>[] t;
1600 if ((t = table) != null) {
1601 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1602 for (Node<K,V> p; (p = it.advance()) != null; ) {
1603 action.accept(p.key, p.val);
1604 }
1605 }
1606 }
1607
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                // CAS-style retry: replaceNode succeeds only if the value is
                // still oldValue; otherwise re-read the current value and try
                // again until the mapping is replaced or disappears.
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1626
1627 /**
1628 * Helper method for EntrySetView.removeIf.
1629 */
1630 boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1631 if (function == null) throw new NullPointerException();
1632 Node<K,V>[] t;
1633 boolean removed = false;
1634 if ((t = table) != null) {
1635 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1636 for (Node<K,V> p; (p = it.advance()) != null; ) {
1637 K k = p.key;
1638 V v = p.val;
1639 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1640 if (function.test(e) && replaceNode(k, null, v) != null)
1641 removed = true;
1642 }
1643 }
1644 return removed;
1645 }
1646
1647 /**
1648 * Helper method for ValuesView.removeIf.
1649 */
1650 boolean removeValueIf(Predicate<? super V> function) {
1651 if (function == null) throw new NullPointerException();
1652 Node<K,V>[] t;
1653 boolean removed = false;
1654 if ((t = table) != null) {
1655 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1656 for (Node<K,V> p; (p = it.advance()) != null; ) {
1657 K k = p.key;
1658 V v = p.val;
1659 if (function.test(v) && replaceNode(k, null, v) != null)
1660 removed = true;
1661 }
1662 }
1663 return removed;
1664 }
1665
1666 /**
1667 * If the specified key is not already associated with a value,
1668 * attempts to compute its value using the given mapping function
1669 * and enters it into this map unless {@code null}. The entire
1670 * method invocation is performed atomically, so the function is
1671 * applied at most once per key. Some attempted update operations
1672 * on this map by other threads may be blocked while computation
1673 * is in progress, so the computation should be short and simple,
1674 * and must not attempt to update any other mappings of this map.
1675 *
1676 * @param key key with which the specified value is to be associated
1677 * @param mappingFunction the function to compute a value
1678 * @return the current (existing or computed) value associated with
1679 * the specified key, or null if the computed value is null
1680 * @throws NullPointerException if the specified key or mappingFunction
1681 * is null
1682 * @throws IllegalStateException if the computation detectably
1683 * attempts a recursive update to this map that would
1684 * otherwise never complete
1685 * @throws RuntimeException or Error if the mappingFunction does so,
1686 * in which case the mapping is left unestablished
1687 */
1688 public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1689 if (key == null || mappingFunction == null)
1690 throw new NullPointerException();
1691 int h = spread(key.hashCode());
1692 V val = null;
1693 int binCount = 0;
1694 for (Node<K,V>[] tab = table;;) {
1695 Node<K,V> f; int n, i, fh; K fk; V fv;
1696 if (tab == null || (n = tab.length) == 0)
1697 tab = initTable();
1698 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1699 Node<K,V> r = new ReservationNode<K,V>();
1700 synchronized (r) {
1701 if (casTabAt(tab, i, null, r)) {
1702 binCount = 1;
1703 Node<K,V> node = null;
1704 try {
1705 if ((val = mappingFunction.apply(key)) != null)
1706 node = new Node<K,V>(h, key, val);
1707 } finally {
1708 setTabAt(tab, i, node);
1709 }
1710 }
1711 }
1712 if (binCount != 0)
1713 break;
1714 }
1715 else if ((fh = f.hash) == MOVED)
1716 tab = helpTransfer(tab, f);
1717 else if (fh == h // check first node without acquiring lock
1718 && ((fk = f.key) == key || (fk != null && key.equals(fk)))
1719 && (fv = f.val) != null)
1720 return fv;
1721 else {
1722 boolean added = false;
1723 synchronized (f) {
1724 if (tabAt(tab, i) == f) {
1725 if (fh >= 0) {
1726 binCount = 1;
1727 for (Node<K,V> e = f;; ++binCount) {
1728 K ek;
1729 if (e.hash == h &&
1730 ((ek = e.key) == key ||
1731 (ek != null && key.equals(ek)))) {
1732 val = e.val;
1733 break;
1734 }
1735 Node<K,V> pred = e;
1736 if ((e = e.next) == null) {
1737 if ((val = mappingFunction.apply(key)) != null) {
1738 if (pred.next != null)
1739 throw new IllegalStateException("Recursive update");
1740 added = true;
1741 pred.next = new Node<K,V>(h, key, val);
1742 }
1743 break;
1744 }
1745 }
1746 }
1747 else if (f instanceof TreeBin) {
1748 binCount = 2;
1749 TreeBin<K,V> t = (TreeBin<K,V>)f;
1750 TreeNode<K,V> r, p;
1751 if ((r = t.root) != null &&
1752 (p = r.findTreeNode(h, key, null)) != null)
1753 val = p.val;
1754 else if ((val = mappingFunction.apply(key)) != null) {
1755 added = true;
1756 t.putTreeVal(h, key, val);
1757 }
1758 }
1759 else if (f instanceof ReservationNode)
1760 throw new IllegalStateException("Recursive update");
1761 }
1762 }
1763 if (binCount != 0) {
1764 if (binCount >= TREEIFY_THRESHOLD)
1765 treeifyBin(tab, i);
1766 if (!added)
1767 return val;
1768 break;
1769 }
1770 }
1771 }
1772 if (val != null)
1773 addCount(1L, binCount);
1774 return val;
1775 }
1776
1777 /**
1778 * If the value for the specified key is present, attempts to
1779 * compute a new mapping given the key and its current mapped
1780 * value. The entire method invocation is performed atomically.
1781 * Some attempted update operations on this map by other threads
1782 * may be blocked while computation is in progress, so the
1783 * computation should be short and simple, and must not attempt to
1784 * update any other mappings of this map.
1785 *
1786 * @param key key with which a value may be associated
1787 * @param remappingFunction the function to compute a value
1788 * @return the new value associated with the specified key, or null if none
1789 * @throws NullPointerException if the specified key or remappingFunction
1790 * is null
1791 * @throws IllegalStateException if the computation detectably
1792 * attempts a recursive update to this map that would
1793 * otherwise never complete
1794 * @throws RuntimeException or Error if the remappingFunction does so,
1795 * in which case the mapping is unchanged
1796 */
1797 public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1798 if (key == null || remappingFunction == null)
1799 throw new NullPointerException();
1800 int h = spread(key.hashCode());
1801 V val = null;
1802 int delta = 0;
1803 int binCount = 0;
1804 for (Node<K,V>[] tab = table;;) {
1805 Node<K,V> f; int n, i, fh;
1806 if (tab == null || (n = tab.length) == 0)
1807 tab = initTable();
1808 else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1809 break;
1810 else if ((fh = f.hash) == MOVED)
1811 tab = helpTransfer(tab, f);
1812 else {
1813 synchronized (f) {
1814 if (tabAt(tab, i) == f) {
1815 if (fh >= 0) {
1816 binCount = 1;
1817 for (Node<K,V> e = f, pred = null;; ++binCount) {
1818 K ek;
1819 if (e.hash == h &&
1820 ((ek = e.key) == key ||
1821 (ek != null && key.equals(ek)))) {
1822 val = remappingFunction.apply(key, e.val);
1823 if (val != null)
1824 e.val = val;
1825 else {
1826 delta = -1;
1827 Node<K,V> en = e.next;
1828 if (pred != null)
1829 pred.next = en;
1830 else
1831 setTabAt(tab, i, en);
1832 }
1833 break;
1834 }
1835 pred = e;
1836 if ((e = e.next) == null)
1837 break;
1838 }
1839 }
1840 else if (f instanceof TreeBin) {
1841 binCount = 2;
1842 TreeBin<K,V> t = (TreeBin<K,V>)f;
1843 TreeNode<K,V> r, p;
1844 if ((r = t.root) != null &&
1845 (p = r.findTreeNode(h, key, null)) != null) {
1846 val = remappingFunction.apply(key, p.val);
1847 if (val != null)
1848 p.val = val;
1849 else {
1850 delta = -1;
1851 if (t.removeTreeNode(p))
1852 setTabAt(tab, i, untreeify(t.first));
1853 }
1854 }
1855 }
1856 else if (f instanceof ReservationNode)
1857 throw new IllegalStateException("Recursive update");
1858 }
1859 }
1860 if (binCount != 0)
1861 break;
1862 }
1863 }
1864 if (delta != 0)
1865 addCount((long)delta, binCount);
1866 return val;
1867 }
1868
1869 /**
1870 * Attempts to compute a mapping for the specified key and its
1871 * current mapped value (or {@code null} if there is no current
1872 * mapping). The entire method invocation is performed atomically.
1873 * Some attempted update operations on this map by other threads
1874 * may be blocked while computation is in progress, so the
1875 * computation should be short and simple, and must not attempt to
1876 * update any other mappings of this Map.
1877 *
1878 * @param key key with which the specified value is to be associated
1879 * @param remappingFunction the function to compute a value
1880 * @return the new value associated with the specified key, or null if none
1881 * @throws NullPointerException if the specified key or remappingFunction
1882 * is null
1883 * @throws IllegalStateException if the computation detectably
1884 * attempts a recursive update to this map that would
1885 * otherwise never complete
1886 * @throws RuntimeException or Error if the remappingFunction does so,
1887 * in which case the mapping is unchanged
1888 */
1889 public V compute(K key,
1890 BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1891 if (key == null || remappingFunction == null)
1892 throw new NullPointerException();
1893 int h = spread(key.hashCode());
1894 V val = null;
1895 int delta = 0;
1896 int binCount = 0;
1897 for (Node<K,V>[] tab = table;;) {
1898 Node<K,V> f; int n, i, fh;
1899 if (tab == null || (n = tab.length) == 0)
1900 tab = initTable();
1901 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1902 Node<K,V> r = new ReservationNode<K,V>();
1903 synchronized (r) {
1904 if (casTabAt(tab, i, null, r)) {
1905 binCount = 1;
1906 Node<K,V> node = null;
1907 try {
1908 if ((val = remappingFunction.apply(key, null)) != null) {
1909 delta = 1;
1910 node = new Node<K,V>(h, key, val);
1911 }
1912 } finally {
1913 setTabAt(tab, i, node);
1914 }
1915 }
1916 }
1917 if (binCount != 0)
1918 break;
1919 }
1920 else if ((fh = f.hash) == MOVED)
1921 tab = helpTransfer(tab, f);
1922 else {
1923 synchronized (f) {
1924 if (tabAt(tab, i) == f) {
1925 if (fh >= 0) {
1926 binCount = 1;
1927 for (Node<K,V> e = f, pred = null;; ++binCount) {
1928 K ek;
1929 if (e.hash == h &&
1930 ((ek = e.key) == key ||
1931 (ek != null && key.equals(ek)))) {
1932 val = remappingFunction.apply(key, e.val);
1933 if (val != null)
1934 e.val = val;
1935 else {
1936 delta = -1;
1937 Node<K,V> en = e.next;
1938 if (pred != null)
1939 pred.next = en;
1940 else
1941 setTabAt(tab, i, en);
1942 }
1943 break;
1944 }
1945 pred = e;
1946 if ((e = e.next) == null) {
1947 val = remappingFunction.apply(key, null);
1948 if (val != null) {
1949 if (pred.next != null)
1950 throw new IllegalStateException("Recursive update");
1951 delta = 1;
1952 pred.next = new Node<K,V>(h, key, val);
1953 }
1954 break;
1955 }
1956 }
1957 }
1958 else if (f instanceof TreeBin) {
1959 binCount = 1;
1960 TreeBin<K,V> t = (TreeBin<K,V>)f;
1961 TreeNode<K,V> r, p;
1962 if ((r = t.root) != null)
1963 p = r.findTreeNode(h, key, null);
1964 else
1965 p = null;
1966 V pv = (p == null) ? null : p.val;
1967 val = remappingFunction.apply(key, pv);
1968 if (val != null) {
1969 if (p != null)
1970 p.val = val;
1971 else {
1972 delta = 1;
1973 t.putTreeVal(h, key, val);
1974 }
1975 }
1976 else if (p != null) {
1977 delta = -1;
1978 if (t.removeTreeNode(p))
1979 setTabAt(tab, i, untreeify(t.first));
1980 }
1981 }
1982 else if (f instanceof ReservationNode)
1983 throw new IllegalStateException("Recursive update");
1984 }
1985 }
1986 if (binCount != 0) {
1987 if (binCount >= TREEIFY_THRESHOLD)
1988 treeifyBin(tab, i);
1989 break;
1990 }
1991 }
1992 }
1993 if (delta != 0)
1994 addCount((long)delta, binCount);
1995 return val;
1996 }
1997
1998 /**
1999 * If the specified key is not already associated with a
2000 * (non-null) value, associates it with the given value.
2001 * Otherwise, replaces the value with the results of the given
2002 * remapping function, or removes if {@code null}. The entire
2003 * method invocation is performed atomically. Some attempted
2004 * update operations on this map by other threads may be blocked
2005 * while computation is in progress, so the computation should be
2006 * short and simple, and must not attempt to update any other
2007 * mappings of this Map.
2008 *
2009 * @param key key with which the specified value is to be associated
2010 * @param value the value to use if absent
2011 * @param remappingFunction the function to recompute a value if present
2012 * @return the new value associated with the specified key, or null if none
2013 * @throws NullPointerException if the specified key or the
2014 * remappingFunction is null
2015 * @throws RuntimeException or Error if the remappingFunction does so,
2016 * in which case the mapping is unchanged
2017 */
2018 public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2019 if (key == null || value == null || remappingFunction == null)
2020 throw new NullPointerException();
2021 int h = spread(key.hashCode());
2022 V val = null;
2023 int delta = 0;
2024 int binCount = 0;
2025 for (Node<K,V>[] tab = table;;) {
2026 Node<K,V> f; int n, i, fh;
2027 if (tab == null || (n = tab.length) == 0)
2028 tab = initTable();
2029 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
2030 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
2031 delta = 1;
2032 val = value;
2033 break;
2034 }
2035 }
2036 else if ((fh = f.hash) == MOVED)
2037 tab = helpTransfer(tab, f);
2038 else {
2039 synchronized (f) {
2040 if (tabAt(tab, i) == f) {
2041 if (fh >= 0) {
2042 binCount = 1;
2043 for (Node<K,V> e = f, pred = null;; ++binCount) {
2044 K ek;
2045 if (e.hash == h &&
2046 ((ek = e.key) == key ||
2047 (ek != null && key.equals(ek)))) {
2048 val = remappingFunction.apply(e.val, value);
2049 if (val != null)
2050 e.val = val;
2051 else {
2052 delta = -1;
2053 Node<K,V> en = e.next;
2054 if (pred != null)
2055 pred.next = en;
2056 else
2057 setTabAt(tab, i, en);
2058 }
2059 break;
2060 }
2061 pred = e;
2062 if ((e = e.next) == null) {
2063 delta = 1;
2064 val = value;
2065 pred.next = new Node<K,V>(h, key, val);
2066 break;
2067 }
2068 }
2069 }
2070 else if (f instanceof TreeBin) {
2071 binCount = 2;
2072 TreeBin<K,V> t = (TreeBin<K,V>)f;
2073 TreeNode<K,V> r = t.root;
2074 TreeNode<K,V> p = (r == null) ? null :
2075 r.findTreeNode(h, key, null);
2076 val = (p == null) ? value :
2077 remappingFunction.apply(p.val, value);
2078 if (val != null) {
2079 if (p != null)
2080 p.val = val;
2081 else {
2082 delta = 1;
2083 t.putTreeVal(h, key, val);
2084 }
2085 }
2086 else if (p != null) {
2087 delta = -1;
2088 if (t.removeTreeNode(p))
2089 setTabAt(tab, i, untreeify(t.first));
2090 }
2091 }
2092 else if (f instanceof ReservationNode)
2093 throw new IllegalStateException("Recursive update");
2094 }
2095 }
2096 if (binCount != 0) {
2097 if (binCount >= TREEIFY_THRESHOLD)
2098 treeifyBin(tab, i);
2099 break;
2100 }
2101 }
2102 }
2103 if (delta != 0)
2104 addCount((long)delta, binCount);
2105 return val;
2106 }
2107
2108 // Hashtable legacy methods
2109
2110 /**
2111 * Tests if some key maps into the specified value in this table.
2112 *
2113 * <p>Note that this method is identical in functionality to
2114 * {@link #containsValue(Object)}, and exists solely to ensure
2115 * full compatibility with class {@link java.util.Hashtable},
2116 * which supported this method prior to introduction of the
2117 * Java Collections Framework.
2118 *
2119 * @param value a value to search for
2120 * @return {@code true} if and only if some key maps to the
2121 * {@code value} argument in this table as
2122 * determined by the {@code equals} method;
2123 * {@code false} otherwise
2124 * @throws NullPointerException if the specified value is null
2125 */
2126 public boolean contains(Object value) {
2127 return containsValue(value);
2128 }
2129
2130 /**
2131 * Returns an enumeration of the keys in this table.
2132 *
2133 * @return an enumeration of the keys in this table
2134 * @see #keySet()
2135 */
2136 public Enumeration<K> keys() {
2137 Node<K,V>[] t;
2138 int f = (t = table) == null ? 0 : t.length;
2139 return new KeyIterator<K,V>(t, f, 0, f, this);
2140 }
2141
2142 /**
2143 * Returns an enumeration of the values in this table.
2144 *
2145 * @return an enumeration of the values in this table
2146 * @see #values()
2147 */
2148 public Enumeration<V> elements() {
2149 Node<K,V>[] t;
2150 int f = (t = table) == null ? 0 : t.length;
2151 return new ValueIterator<K,V>(t, f, 0, f, this);
2152 }
2153
2154 // ConcurrentHashMap-only methods
2155
2156 /**
2157 * Returns the number of mappings. This method should be used
2158 * instead of {@link #size} because a ConcurrentHashMap may
2159 * contain more mappings than can be represented as an int. The
2160 * value returned is an estimate; the actual count may differ if
2161 * there are concurrent insertions or removals.
2162 *
2163 * @return the number of mappings
2164 * @since 1.8
2165 */
2166 public long mappingCount() {
2167 long n = sumCount();
2168 return (n < 0L) ? 0L : n; // ignore transient negative values
2169 }
2170
2171 /**
2172 * Creates a new {@link Set} backed by a ConcurrentHashMap
2173 * from the given type to {@code Boolean.TRUE}.
2174 *
2175 * @param <K> the element type of the returned set
2176 * @return the new set
2177 * @since 1.8
2178 */
2179 public static <K> KeySetView<K,Boolean> newKeySet() {
2180 return new KeySetView<K,Boolean>
2181 (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2182 }
2183
2184 /**
2185 * Creates a new {@link Set} backed by a ConcurrentHashMap
2186 * from the given type to {@code Boolean.TRUE}.
2187 *
2188 * @param initialCapacity The implementation performs internal
2189 * sizing to accommodate this many elements.
2190 * @param <K> the element type of the returned set
2191 * @return the new set
2192 * @throws IllegalArgumentException if the initial capacity of
2193 * elements is negative
2194 * @since 1.8
2195 */
2196 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2197 return new KeySetView<K,Boolean>
2198 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2199 }
2200
2201 /**
2202 * Returns a {@link Set} view of the keys in this map, using the
2203 * given common mapped value for any additions (i.e., {@link
2204 * Collection#add} and {@link Collection#addAll(Collection)}).
2205 * This is of course only appropriate if it is acceptable to use
2206 * the same value for all additions from this view.
2207 *
2208 * @param mappedValue the mapped value to use for any additions
2209 * @return the set view
2210 * @throws NullPointerException if the mappedValue is null
2211 */
2212 public KeySetView<K,V> keySet(V mappedValue) {
2213 if (mappedValue == null)
2214 throw new NullPointerException();
2215 return new KeySetView<K,V>(this, mappedValue);
2216 }
2217
2218 /* ---------------- Special Nodes -------------- */
2219
2220 /**
2221 * A node inserted at head of bins during transfer operations.
2222 */
2223 static final class ForwardingNode<K,V> extends Node<K,V> {
2224 final Node<K,V>[] nextTable;
2225 ForwardingNode(Node<K,V>[] tab) {
2226 super(MOVED, null, null);
2227 this.nextTable = tab;
2228 }
2229
2230 Node<K,V> find(int h, Object k) {
2231 // loop to avoid arbitrarily deep recursion on forwarding nodes
2232 outer: for (Node<K,V>[] tab = nextTable;;) {
2233 Node<K,V> e; int n;
2234 if (k == null || tab == null || (n = tab.length) == 0 ||
2235 (e = tabAt(tab, (n - 1) & h)) == null)
2236 return null;
2237 for (;;) {
2238 int eh; K ek;
2239 if ((eh = e.hash) == h &&
2240 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2241 return e;
2242 if (eh < 0) {
2243 if (e instanceof ForwardingNode) {
2244 tab = ((ForwardingNode<K,V>)e).nextTable;
2245 continue outer;
2246 }
2247 else
2248 return e.find(h, k);
2249 }
2250 if ((e = e.next) == null)
2251 return null;
2252 }
2253 }
2254 }
2255 }
2256
2257 /**
2258 * A place-holder node used in computeIfAbsent and compute.
2259 */
2260 static final class ReservationNode<K,V> extends Node<K,V> {
2261 ReservationNode() {
2262 super(RESERVED, null, null);
2263 }
2264
2265 Node<K,V> find(int h, Object k) {
2266 return null;
2267 }
2268 }
2269
2270 /* ---------------- Table Initialization and Resizing -------------- */
2271
2272 /**
2273 * Returns the stamp bits for resizing a table of size n.
2274 * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2275 */
2276 static final int resizeStamp(int n) {
2277 return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2278 }
2279
2280 /**
2281 * Initializes table, using the size recorded in sizeCtl.
2282 */
2283 private final Node<K,V>[] initTable() {
2284 Node<K,V>[] tab; int sc;
2285 while ((tab = table) == null || tab.length == 0) {
2286 if ((sc = sizeCtl) < 0)
2287 Thread.yield(); // lost initialization race; just spin
2288 else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2289 try {
2290 if ((tab = table) == null || tab.length == 0) {
2291 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2292 @SuppressWarnings("unchecked")
2293 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2294 table = tab = nt;
2295 sc = n - (n >>> 2);
2296 }
2297 } finally {
2298 sizeCtl = sc;
2299 }
2300 break;
2301 }
2302 }
2303 return tab;
2304 }
2305
2306 /**
2307 * Adds to count, and if table is too small and not already
2308 * resizing, initiates transfer. If already resizing, helps
2309 * perform transfer if work is available. Rechecks occupancy
2310 * after a transfer to see if another resize is already needed
2311 * because resizings are lagging additions.
2312 *
2313 * @param x the count to add
2314 * @param check if <0, don't check resize, if <= 1 only check if uncontended
2315 */
2316 private final void addCount(long x, int check) {
2317 CounterCell[] cs; long b, s;
2318 if ((cs = counterCells) != null ||
2319 !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2320 CounterCell c; long v; int m;
2321 boolean uncontended = true;
2322 if (cs == null || (m = cs.length - 1) < 0 ||
2323 (c = cs[ThreadLocalRandom.getProbe() & m]) == null ||
2324 !(uncontended =
2325 U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))) {
2326 fullAddCount(x, uncontended);
2327 return;
2328 }
2329 if (check <= 1)
2330 return;
2331 s = sumCount();
2332 }
2333 if (check >= 0) {
2334 Node<K,V>[] tab, nt; int n, sc;
2335 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2336 (n = tab.length) < MAXIMUM_CAPACITY) {
2337 int rs = resizeStamp(n);
2338 if (sc < 0) {
2339 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2340 sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2341 transferIndex <= 0)
2342 break;
2343 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
2344 transfer(tab, nt);
2345 }
2346 else if (U.compareAndSetInt(this, SIZECTL, sc,
2347 (rs << RESIZE_STAMP_SHIFT) + 2))
2348 transfer(tab, null);
2349 s = sumCount();
2350 }
2351 }
2352 }
2353
2354 /**
2355 * Helps transfer if a resize is in progress.
2356 */
2357 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2358 Node<K,V>[] nextTab; int sc;
2359 if (tab != null && (f instanceof ForwardingNode) &&
2360 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2361 int rs = resizeStamp(tab.length);
2362 while (nextTab == nextTable && table == tab &&
2363 (sc = sizeCtl) < 0) {
2364 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2365 sc == rs + MAX_RESIZERS || transferIndex <= 0)
2366 break;
2367 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
2368 transfer(tab, nextTab);
2369 break;
2370 }
2371 }
2372 return nextTab;
2373 }
2374 return table;
2375 }
2376
2377 /**
2378 * Tries to presize table to accommodate the given number of elements.
2379 *
2380 * @param size number of elements (doesn't need to be perfectly accurate)
2381 */
2382 private final void tryPresize(int size) {
2383 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2384 tableSizeFor(size + (size >>> 1) + 1);
2385 int sc;
2386 while ((sc = sizeCtl) >= 0) {
2387 Node<K,V>[] tab = table; int n;
2388 if (tab == null || (n = tab.length) == 0) {
2389 n = (sc > c) ? sc : c;
2390 if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2391 try {
2392 if (table == tab) {
2393 @SuppressWarnings("unchecked")
2394 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2395 table = nt;
2396 sc = n - (n >>> 2);
2397 }
2398 } finally {
2399 sizeCtl = sc;
2400 }
2401 }
2402 }
2403 else if (c <= sc || n >= MAXIMUM_CAPACITY)
2404 break;
2405 else if (tab == table) {
2406 int rs = resizeStamp(n);
2407 if (U.compareAndSetInt(this, SIZECTL, sc,
2408 (rs << RESIZE_STAMP_SHIFT) + 2))
2409 transfer(tab, null);
2410 }
2411 }
2412 }
2413
2414 /**
2415 * Moves and/or copies the nodes in each bin to new table. See
2416 * above for explanation.
2417 */
2418 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2419 int n = tab.length, stride;
2420 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2421 stride = MIN_TRANSFER_STRIDE; // subdivide range
2422 if (nextTab == null) { // initiating
2423 try {
2424 @SuppressWarnings("unchecked")
2425 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2426 nextTab = nt;
2427 } catch (Throwable ex) { // try to cope with OOME
2428 sizeCtl = Integer.MAX_VALUE;
2429 return;
2430 }
2431 nextTable = nextTab;
2432 transferIndex = n;
2433 }
2434 int nextn = nextTab.length;
2435 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2436 boolean advance = true;
2437 boolean finishing = false; // to ensure sweep before committing nextTab
2438 for (int i = 0, bound = 0;;) {
2439 Node<K,V> f; int fh;
2440 while (advance) {
2441 int nextIndex, nextBound;
2442 if (--i >= bound || finishing)
2443 advance = false;
2444 else if ((nextIndex = transferIndex) <= 0) {
2445 i = -1;
2446 advance = false;
2447 }
2448 else if (U.compareAndSetInt
2449 (this, TRANSFERINDEX, nextIndex,
2450 nextBound = (nextIndex > stride ?
2451 nextIndex - stride : 0))) {
2452 bound = nextBound;
2453 i = nextIndex - 1;
2454 advance = false;
2455 }
2456 }
2457 if (i < 0 || i >= n || i + n >= nextn) {
2458 int sc;
2459 if (finishing) {
2460 nextTable = null;
2461 table = nextTab;
2462 sizeCtl = (n << 1) - (n >>> 1);
2463 return;
2464 }
2465 if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2466 if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2467 return;
2468 finishing = advance = true;
2469 i = n; // recheck before commit
2470 }
2471 }
2472 else if ((f = tabAt(tab, i)) == null)
2473 advance = casTabAt(tab, i, null, fwd);
2474 else if ((fh = f.hash) == MOVED)
2475 advance = true; // already processed
2476 else {
2477 synchronized (f) {
2478 if (tabAt(tab, i) == f) {
2479 Node<K,V> ln, hn;
2480 if (fh >= 0) {
2481 int runBit = fh & n;
2482 Node<K,V> lastRun = f;
2483 for (Node<K,V> p = f.next; p != null; p = p.next) {
2484 int b = p.hash & n;
2485 if (b != runBit) {
2486 runBit = b;
2487 lastRun = p;
2488 }
2489 }
2490 if (runBit == 0) {
2491 ln = lastRun;
2492 hn = null;
2493 }
2494 else {
2495 hn = lastRun;
2496 ln = null;
2497 }
2498 for (Node<K,V> p = f; p != lastRun; p = p.next) {
2499 int ph = p.hash; K pk = p.key; V pv = p.val;
2500 if ((ph & n) == 0)
2501 ln = new Node<K,V>(ph, pk, pv, ln);
2502 else
2503 hn = new Node<K,V>(ph, pk, pv, hn);
2504 }
2505 setTabAt(nextTab, i, ln);
2506 setTabAt(nextTab, i + n, hn);
2507 setTabAt(tab, i, fwd);
2508 advance = true;
2509 }
2510 else if (f instanceof TreeBin) {
2511 TreeBin<K,V> t = (TreeBin<K,V>)f;
2512 TreeNode<K,V> lo = null, loTail = null;
2513 TreeNode<K,V> hi = null, hiTail = null;
2514 int lc = 0, hc = 0;
2515 for (Node<K,V> e = t.first; e != null; e = e.next) {
2516 int h = e.hash;
2517 TreeNode<K,V> p = new TreeNode<K,V>
2518 (h, e.key, e.val, null, null);
2519 if ((h & n) == 0) {
2520 if ((p.prev = loTail) == null)
2521 lo = p;
2522 else
2523 loTail.next = p;
2524 loTail = p;
2525 ++lc;
2526 }
2527 else {
2528 if ((p.prev = hiTail) == null)
2529 hi = p;
2530 else
2531 hiTail.next = p;
2532 hiTail = p;
2533 ++hc;
2534 }
2535 }
2536 ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2537 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2538 hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2539 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2540 setTabAt(nextTab, i, ln);
2541 setTabAt(nextTab, i + n, hn);
2542 setTabAt(tab, i, fwd);
2543 advance = true;
2544 }
2545 }
2546 }
2547 }
2548 }
2549 }
2550
2551 /* ---------------- Counter support -------------- */
2552
2553 /**
2554 * A padded cell for distributing counts. Adapted from LongAdder
2555 * and Striped64. See their internal docs for explanation.
2556 */
2557 @jdk.internal.vm.annotation.Contended static final class CounterCell {
2558 volatile long value;
2559 CounterCell(long x) { value = x; }
2560 }
2561
2562 final long sumCount() {
2563 CounterCell[] cs = counterCells;
2564 long sum = baseCount;
2565 if (cs != null) {
2566 for (CounterCell c : cs)
2567 if (c != null)
2568 sum += c.value;
2569 }
2570 return sum;
2571 }
2572
    // See LongAdder version for explanation
    /**
     * Slow path for addCount: initializes, populates, or expands the
     * counterCells table under the cellsBusy spinlock, rehashing the
     * thread's probe on contention; falls back to the base count.
     */
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true; // probe was invalid; prior CAS failure moot
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] cs; CounterCell c; int n; long v;
            if ((cs = counterCells) != null && (n = cs.length) > 0) {
                if ((c = cs[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))
                    break;
                else if (counterCells != cs || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true; // remember collision; expand on the next one
                else if (cellsBusy == 0 &&
                         U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == cs) // Expand table unless stale
                            counterCells = Arrays.copyOf(cs, n << 1);
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h); // rehash to a new slot
            }
            else if (cellsBusy == 0 && counterCells == cs &&
                     U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == cs) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2650
2651 /* ---------------- Conversion from/to TreeBins -------------- */
2652
2653 /**
2654 * Replaces all linked nodes in bin at given index unless table is
2655 * too small, in which case resizes instead.
2656 */
2657 private final void treeifyBin(Node<K,V>[] tab, int index) {
2658 Node<K,V> b; int n;
2659 if (tab != null) {
2660 if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2661 tryPresize(n << 1);
2662 else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2663 synchronized (b) {
2664 if (tabAt(tab, index) == b) {
2665 TreeNode<K,V> hd = null, tl = null;
2666 for (Node<K,V> e = b; e != null; e = e.next) {
2667 TreeNode<K,V> p =
2668 new TreeNode<K,V>(e.hash, e.key, e.val,
2669 null, null);
2670 if ((p.prev = tl) == null)
2671 hd = p;
2672 else
2673 tl.next = p;
2674 tl = p;
2675 }
2676 setTabAt(tab, index, new TreeBin<K,V>(hd));
2677 }
2678 }
2679 }
2680 }
2681 }
2682
2683 /**
2684 * Returns a list of non-TreeNodes replacing those in given list.
2685 */
2686 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2687 Node<K,V> hd = null, tl = null;
2688 for (Node<K,V> q = b; q != null; q = q.next) {
2689 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2690 if (tl == null)
2691 hd = p;
2692 else
2693 tl.next = p;
2694 tl = p;
2695 }
2696 return hd;
2697 }
2698
2699 /* ---------------- TreeNodes -------------- */
2700
2701 /**
2702 * Nodes for use in TreeBins.
2703 */
2704 static final class TreeNode<K,V> extends Node<K,V> {
2705 TreeNode<K,V> parent; // red-black tree links
2706 TreeNode<K,V> left;
2707 TreeNode<K,V> right;
2708 TreeNode<K,V> prev; // needed to unlink next upon deletion
2709 boolean red;
2710
2711 TreeNode(int hash, K key, V val, Node<K,V> next,
2712 TreeNode<K,V> parent) {
2713 super(hash, key, val, next);
2714 this.parent = parent;
2715 }
2716
2717 Node<K,V> find(int h, Object k) {
2718 return findTreeNode(h, k, null);
2719 }
2720
2721 /**
2722 * Returns the TreeNode (or null if not found) for the given key
2723 * starting at given root.
2724 */
2725 final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2726 if (k != null) {
2727 TreeNode<K,V> p = this;
2728 do {
2729 int ph, dir; K pk; TreeNode<K,V> q;
2730 TreeNode<K,V> pl = p.left, pr = p.right;
2731 if ((ph = p.hash) > h)
2732 p = pl;
2733 else if (ph < h)
2734 p = pr;
2735 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2736 return p;
2737 else if (pl == null)
2738 p = pr;
2739 else if (pr == null)
2740 p = pl;
2741 else if ((kc != null ||
2742 (kc = comparableClassFor(k)) != null) &&
2743 (dir = compareComparables(kc, k, pk)) != 0)
2744 p = (dir < 0) ? pl : pr;
2745 else if ((q = pr.findTreeNode(h, k, kc)) != null)
2746 return q;
2747 else
2748 p = pl;
2749 } while (p != null);
2750 }
2751 return null;
2752 }
2753 }
2754
2755 /* ---------------- TreeBins -------------- */
2756
2757 /**
2758 * TreeNodes used at the heads of bins. TreeBins do not hold user
2759 * keys or values, but instead point to list of TreeNodes and
2760 * their root. They also maintain a parasitic read-write lock
2761 * forcing writers (who hold bin lock) to wait for readers (who do
2762 * not) to complete before tree restructuring operations.
2763 */
2764 static final class TreeBin<K,V> extends Node<K,V> {
2765 TreeNode<K,V> root;
2766 volatile TreeNode<K,V> first;
2767 volatile Thread waiter;
2768 volatile int lockState;
2769 // values for lockState
2770 static final int WRITER = 1; // set while holding write lock
2771 static final int WAITER = 2; // set when waiting for write lock
2772 static final int READER = 4; // increment value for setting read lock
2773
2774 /**
2775 * Tie-breaking utility for ordering insertions when equal
2776 * hashCodes and non-comparable. We don't require a total
2777 * order, just a consistent insertion rule to maintain
2778 * equivalence across rebalancings. Tie-breaking further than
2779 * necessary simplifies testing a bit.
2780 */
2781 static int tieBreakOrder(Object a, Object b) {
2782 int d;
2783 if (a == null || b == null ||
2784 (d = a.getClass().getName().
2785 compareTo(b.getClass().getName())) == 0)
2786 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2787 -1 : 1);
2788 return d;
2789 }
2790
2791 /**
2792 * Creates bin with initial set of nodes headed by b.
2793 */
2794 TreeBin(TreeNode<K,V> b) {
2795 super(TREEBIN, null, null);
2796 this.first = b;
2797 TreeNode<K,V> r = null;
2798 for (TreeNode<K,V> x = b, next; x != null; x = next) {
2799 next = (TreeNode<K,V>)x.next;
2800 x.left = x.right = null;
2801 if (r == null) {
2802 x.parent = null;
2803 x.red = false;
2804 r = x;
2805 }
2806 else {
2807 K k = x.key;
2808 int h = x.hash;
2809 Class<?> kc = null;
2810 for (TreeNode<K,V> p = r;;) {
2811 int dir, ph;
2812 K pk = p.key;
2813 if ((ph = p.hash) > h)
2814 dir = -1;
2815 else if (ph < h)
2816 dir = 1;
2817 else if ((kc == null &&
2818 (kc = comparableClassFor(k)) == null) ||
2819 (dir = compareComparables(kc, k, pk)) == 0)
2820 dir = tieBreakOrder(k, pk);
2821 TreeNode<K,V> xp = p;
2822 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2823 x.parent = xp;
2824 if (dir <= 0)
2825 xp.left = x;
2826 else
2827 xp.right = x;
2828 r = balanceInsertion(r, x);
2829 break;
2830 }
2831 }
2832 }
2833 }
2834 this.root = r;
2835 assert checkInvariants(root);
2836 }
2837
2838 /**
2839 * Acquires write lock for tree restructuring.
2840 */
2841 private final void lockRoot() {
2842 if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
2843 contendedLock(); // offload to separate method
2844 }
2845
2846 /**
2847 * Releases write lock for tree restructuring.
2848 */
2849 private final void unlockRoot() {
2850 lockState = 0;
2851 }
2852
2853 /**
2854 * Possibly blocks awaiting root lock.
2855 */
2856 private final void contendedLock() {
2857 boolean waiting = false;
2858 for (int s;;) {
2859 if (((s = lockState) & ~WAITER) == 0) {
2860 if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
2861 if (waiting)
2862 waiter = null;
2863 return;
2864 }
2865 }
2866 else if ((s & WAITER) == 0) {
2867 if (U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER)) {
2868 waiting = true;
2869 waiter = Thread.currentThread();
2870 }
2871 }
2872 else if (waiting)
2873 LockSupport.park(this);
2874 }
2875 }
2876
2877 /**
2878 * Returns matching node or null if none. Tries to search
2879 * using tree comparisons from root, but continues linear
2880 * search when lock not available.
2881 */
2882 final Node<K,V> find(int h, Object k) {
2883 if (k != null) {
2884 for (Node<K,V> e = first; e != null; ) {
2885 int s; K ek;
2886 if (((s = lockState) & (WAITER|WRITER)) != 0) {
2887 if (e.hash == h &&
2888 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2889 return e;
2890 e = e.next;
2891 }
2892 else if (U.compareAndSetInt(this, LOCKSTATE, s,
2893 s + READER)) {
2894 TreeNode<K,V> r, p;
2895 try {
2896 p = ((r = root) == null ? null :
2897 r.findTreeNode(h, k, null));
2898 } finally {
2899 Thread w;
2900 if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2901 (READER|WAITER) && (w = waiter) != null)
2902 LockSupport.unpark(w);
2903 }
2904 return p;
2905 }
2906 }
2907 }
2908 return null;
2909 }
2910
2911 /**
2912 * Finds or adds a node.
2913 * @return null if added
2914 */
2915 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2916 Class<?> kc = null;
2917 boolean searched = false;
2918 for (TreeNode<K,V> p = root;;) {
2919 int dir, ph; K pk;
2920 if (p == null) {
2921 first = root = new TreeNode<K,V>(h, k, v, null, null);
2922 break;
2923 }
2924 else if ((ph = p.hash) > h)
2925 dir = -1;
2926 else if (ph < h)
2927 dir = 1;
2928 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2929 return p;
2930 else if ((kc == null &&
2931 (kc = comparableClassFor(k)) == null) ||
2932 (dir = compareComparables(kc, k, pk)) == 0) {
2933 if (!searched) {
2934 TreeNode<K,V> q, ch;
2935 searched = true;
2936 if (((ch = p.left) != null &&
2937 (q = ch.findTreeNode(h, k, kc)) != null) ||
2938 ((ch = p.right) != null &&
2939 (q = ch.findTreeNode(h, k, kc)) != null))
2940 return q;
2941 }
2942 dir = tieBreakOrder(k, pk);
2943 }
2944
2945 TreeNode<K,V> xp = p;
2946 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2947 TreeNode<K,V> x, f = first;
2948 first = x = new TreeNode<K,V>(h, k, v, f, xp);
2949 if (f != null)
2950 f.prev = x;
2951 if (dir <= 0)
2952 xp.left = x;
2953 else
2954 xp.right = x;
2955 if (!xp.red)
2956 x.red = true;
2957 else {
2958 lockRoot();
2959 try {
2960 root = balanceInsertion(root, x);
2961 } finally {
2962 unlockRoot();
2963 }
2964 }
2965 break;
2966 }
2967 }
2968 assert checkInvariants(root);
2969 return null;
2970 }
2971
2972 /**
2973 * Removes the given node, that must be present before this
2974 * call. This is messier than typical red-black deletion code
2975 * because we cannot swap the contents of an interior node
2976 * with a leaf successor that is pinned by "next" pointers
2977 * that are accessible independently of lock. So instead we
2978 * swap the tree linkages.
2979 *
2980 * @return true if now too small, so should be untreeified
2981 */
2982 final boolean removeTreeNode(TreeNode<K,V> p) {
2983 TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2984 TreeNode<K,V> pred = p.prev; // unlink traversal pointers
2985 TreeNode<K,V> r, rl;
2986 if (pred == null)
2987 first = next;
2988 else
2989 pred.next = next;
2990 if (next != null)
2991 next.prev = pred;
2992 if (first == null) {
2993 root = null;
2994 return true;
2995 }
2996 if ((r = root) == null || r.right == null || // too small
2997 (rl = r.left) == null || rl.left == null)
2998 return true;
2999 lockRoot();
3000 try {
3001 TreeNode<K,V> replacement;
3002 TreeNode<K,V> pl = p.left;
3003 TreeNode<K,V> pr = p.right;
3004 if (pl != null && pr != null) {
3005 TreeNode<K,V> s = pr, sl;
3006 while ((sl = s.left) != null) // find successor
3007 s = sl;
3008 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
3009 TreeNode<K,V> sr = s.right;
3010 TreeNode<K,V> pp = p.parent;
3011 if (s == pr) { // p was s's direct parent
3012 p.parent = s;
3013 s.right = p;
3014 }
3015 else {
3016 TreeNode<K,V> sp = s.parent;
3017 if ((p.parent = sp) != null) {
3018 if (s == sp.left)
3019 sp.left = p;
3020 else
3021 sp.right = p;
3022 }
3023 if ((s.right = pr) != null)
3024 pr.parent = s;
3025 }
3026 p.left = null;
3027 if ((p.right = sr) != null)
3028 sr.parent = p;
3029 if ((s.left = pl) != null)
3030 pl.parent = s;
3031 if ((s.parent = pp) == null)
3032 r = s;
3033 else if (p == pp.left)
3034 pp.left = s;
3035 else
3036 pp.right = s;
3037 if (sr != null)
3038 replacement = sr;
3039 else
3040 replacement = p;
3041 }
3042 else if (pl != null)
3043 replacement = pl;
3044 else if (pr != null)
3045 replacement = pr;
3046 else
3047 replacement = p;
3048 if (replacement != p) {
3049 TreeNode<K,V> pp = replacement.parent = p.parent;
3050 if (pp == null)
3051 r = replacement;
3052 else if (p == pp.left)
3053 pp.left = replacement;
3054 else
3055 pp.right = replacement;
3056 p.left = p.right = p.parent = null;
3057 }
3058
3059 root = (p.red) ? r : balanceDeletion(r, replacement);
3060
3061 if (p == replacement) { // detach pointers
3062 TreeNode<K,V> pp;
3063 if ((pp = p.parent) != null) {
3064 if (p == pp.left)
3065 pp.left = null;
3066 else if (p == pp.right)
3067 pp.right = null;
3068 p.parent = null;
3069 }
3070 }
3071 } finally {
3072 unlockRoot();
3073 }
3074 assert checkInvariants(root);
3075 return false;
3076 }
3077
3078 /* ------------------------------------------------------------ */
3079 // Red-black tree methods, all adapted from CLR
3080
3081 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3082 TreeNode<K,V> p) {
3083 TreeNode<K,V> r, pp, rl;
3084 if (p != null && (r = p.right) != null) {
3085 if ((rl = p.right = r.left) != null)
3086 rl.parent = p;
3087 if ((pp = r.parent = p.parent) == null)
3088 (root = r).red = false;
3089 else if (pp.left == p)
3090 pp.left = r;
3091 else
3092 pp.right = r;
3093 r.left = p;
3094 p.parent = r;
3095 }
3096 return root;
3097 }
3098
3099 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3100 TreeNode<K,V> p) {
3101 TreeNode<K,V> l, pp, lr;
3102 if (p != null && (l = p.left) != null) {
3103 if ((lr = p.left = l.right) != null)
3104 lr.parent = p;
3105 if ((pp = l.parent = p.parent) == null)
3106 (root = l).red = false;
3107 else if (pp.right == p)
3108 pp.right = l;
3109 else
3110 pp.left = l;
3111 l.right = p;
3112 p.parent = l;
3113 }
3114 return root;
3115 }
3116
3117 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3118 TreeNode<K,V> x) {
3119 x.red = true;
3120 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3121 if ((xp = x.parent) == null) {
3122 x.red = false;
3123 return x;
3124 }
3125 else if (!xp.red || (xpp = xp.parent) == null)
3126 return root;
3127 if (xp == (xppl = xpp.left)) {
3128 if ((xppr = xpp.right) != null && xppr.red) {
3129 xppr.red = false;
3130 xp.red = false;
3131 xpp.red = true;
3132 x = xpp;
3133 }
3134 else {
3135 if (x == xp.right) {
3136 root = rotateLeft(root, x = xp);
3137 xpp = (xp = x.parent) == null ? null : xp.parent;
3138 }
3139 if (xp != null) {
3140 xp.red = false;
3141 if (xpp != null) {
3142 xpp.red = true;
3143 root = rotateRight(root, xpp);
3144 }
3145 }
3146 }
3147 }
3148 else {
3149 if (xppl != null && xppl.red) {
3150 xppl.red = false;
3151 xp.red = false;
3152 xpp.red = true;
3153 x = xpp;
3154 }
3155 else {
3156 if (x == xp.left) {
3157 root = rotateRight(root, x = xp);
3158 xpp = (xp = x.parent) == null ? null : xp.parent;
3159 }
3160 if (xp != null) {
3161 xp.red = false;
3162 if (xpp != null) {
3163 xpp.red = true;
3164 root = rotateLeft(root, xpp);
3165 }
3166 }
3167 }
3168 }
3169 }
3170 }
3171
3172 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3173 TreeNode<K,V> x) {
3174 for (TreeNode<K,V> xp, xpl, xpr;;) {
3175 if (x == null || x == root)
3176 return root;
3177 else if ((xp = x.parent) == null) {
3178 x.red = false;
3179 return x;
3180 }
3181 else if (x.red) {
3182 x.red = false;
3183 return root;
3184 }
3185 else if ((xpl = xp.left) == x) {
3186 if ((xpr = xp.right) != null && xpr.red) {
3187 xpr.red = false;
3188 xp.red = true;
3189 root = rotateLeft(root, xp);
3190 xpr = (xp = x.parent) == null ? null : xp.right;
3191 }
3192 if (xpr == null)
3193 x = xp;
3194 else {
3195 TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3196 if ((sr == null || !sr.red) &&
3197 (sl == null || !sl.red)) {
3198 xpr.red = true;
3199 x = xp;
3200 }
3201 else {
3202 if (sr == null || !sr.red) {
3203 if (sl != null)
3204 sl.red = false;
3205 xpr.red = true;
3206 root = rotateRight(root, xpr);
3207 xpr = (xp = x.parent) == null ?
3208 null : xp.right;
3209 }
3210 if (xpr != null) {
3211 xpr.red = (xp == null) ? false : xp.red;
3212 if ((sr = xpr.right) != null)
3213 sr.red = false;
3214 }
3215 if (xp != null) {
3216 xp.red = false;
3217 root = rotateLeft(root, xp);
3218 }
3219 x = root;
3220 }
3221 }
3222 }
3223 else { // symmetric
3224 if (xpl != null && xpl.red) {
3225 xpl.red = false;
3226 xp.red = true;
3227 root = rotateRight(root, xp);
3228 xpl = (xp = x.parent) == null ? null : xp.left;
3229 }
3230 if (xpl == null)
3231 x = xp;
3232 else {
3233 TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3234 if ((sl == null || !sl.red) &&
3235 (sr == null || !sr.red)) {
3236 xpl.red = true;
3237 x = xp;
3238 }
3239 else {
3240 if (sl == null || !sl.red) {
3241 if (sr != null)
3242 sr.red = false;
3243 xpl.red = true;
3244 root = rotateLeft(root, xpl);
3245 xpl = (xp = x.parent) == null ?
3246 null : xp.left;
3247 }
3248 if (xpl != null) {
3249 xpl.red = (xp == null) ? false : xp.red;
3250 if ((sl = xpl.left) != null)
3251 sl.red = false;
3252 }
3253 if (xp != null) {
3254 xp.red = false;
3255 root = rotateRight(root, xp);
3256 }
3257 x = root;
3258 }
3259 }
3260 }
3261 }
3262 }
3263
3264 /**
3265 * Checks invariants recursively for the tree of Nodes rooted at t.
3266 */
3267 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3268 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3269 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3270 if (tb != null && tb.next != t)
3271 return false;
3272 if (tn != null && tn.prev != t)
3273 return false;
3274 if (tp != null && t != tp.left && t != tp.right)
3275 return false;
3276 if (tl != null && (tl.parent != t || tl.hash > t.hash))
3277 return false;
3278 if (tr != null && (tr.parent != t || tr.hash < t.hash))
3279 return false;
3280 if (t.red && tl != null && tl.red && tr != null && tr.red)
3281 return false;
3282 if (tl != null && !checkInvariants(tl))
3283 return false;
3284 if (tr != null && !checkInvariants(tr))
3285 return false;
3286 return true;
3287 }
3288
3289 private static final Unsafe U = Unsafe.getUnsafe();
3290 private static final long LOCKSTATE
3291 = U.objectFieldOffset(TreeBin.class, "lockState");
3292 }
3293
3294 /* ----------------Table Traversal -------------- */
3295
3296 /**
3297 * Records the table, its length, and current traversal index for a
3298 * traverser that must process a region of a forwarded table before
3299 * proceeding with current table.
3300 */
3301 static final class TableStack<K,V> {
3302 int length;
3303 int index;
3304 Node<K,V>[] tab;
3305 TableStack<K,V> next;
3306 }
3307
3308 /**
3309 * Encapsulates traversal for methods such as containsValue; also
3310 * serves as a base class for other iterators and spliterators.
3311 *
3312 * Method advance visits once each still-valid node that was
3313 * reachable upon iterator construction. It might miss some that
3314 * were added to a bin after the bin was visited, which is OK wrt
3315 * consistency guarantees. Maintaining this property in the face
3316 * of possible ongoing resizes requires a fair amount of
3317 * bookkeeping state that is difficult to optimize away amidst
3318 * volatile accesses. Even so, traversal maintains reasonable
3319 * throughput.
3320 *
3321 * Normally, iteration proceeds bin-by-bin traversing lists.
3322 * However, if the table has been resized, then all future steps
3323 * must traverse both the bin at the current index as well as at
3324 * (index + baseSize); and so on for further resizings. To
3325 * paranoically cope with potential sharing by users of iterators
3326 * across threads, iteration terminates if a bounds checks fails
3327 * for a table read.
3328 */
3329 static class Traverser<K,V> {
3330 Node<K,V>[] tab; // current table; updated if resized
3331 Node<K,V> next; // the next entry to use
3332 TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3333 int index; // index of bin to use next
3334 int baseIndex; // current index of initial table
3335 int baseLimit; // index bound for initial table
3336 final int baseSize; // initial table size
3337
3338 Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3339 this.tab = tab;
3340 this.baseSize = size;
3341 this.baseIndex = this.index = index;
3342 this.baseLimit = limit;
3343 this.next = null;
3344 }
3345
3346 /**
3347 * Advances if possible, returning next valid node, or null if none.
3348 */
3349 final Node<K,V> advance() {
3350 Node<K,V> e;
3351 if ((e = next) != null)
3352 e = e.next;
3353 for (;;) {
3354 Node<K,V>[] t; int i, n; // must use locals in checks
3355 if (e != null)
3356 return next = e;
3357 if (baseIndex >= baseLimit || (t = tab) == null ||
3358 (n = t.length) <= (i = index) || i < 0)
3359 return next = null;
3360 if ((e = tabAt(t, i)) != null && e.hash < 0) {
3361 if (e instanceof ForwardingNode) {
3362 tab = ((ForwardingNode<K,V>)e).nextTable;
3363 e = null;
3364 pushState(t, i, n);
3365 continue;
3366 }
3367 else if (e instanceof TreeBin)
3368 e = ((TreeBin<K,V>)e).first;
3369 else
3370 e = null;
3371 }
3372 if (stack != null)
3373 recoverState(n);
3374 else if ((index = i + baseSize) >= n)
3375 index = ++baseIndex; // visit upper slots if present
3376 }
3377 }
3378
3379 /**
3380 * Saves traversal state upon encountering a forwarding node.
3381 */
3382 private void pushState(Node<K,V>[] t, int i, int n) {
3383 TableStack<K,V> s = spare; // reuse if possible
3384 if (s != null)
3385 spare = s.next;
3386 else
3387 s = new TableStack<K,V>();
3388 s.tab = t;
3389 s.length = n;
3390 s.index = i;
3391 s.next = stack;
3392 stack = s;
3393 }
3394
3395 /**
3396 * Possibly pops traversal state.
3397 *
3398 * @param n length of current table
3399 */
3400 private void recoverState(int n) {
3401 TableStack<K,V> s; int len;
3402 while ((s = stack) != null && (index += (len = s.length)) >= n) {
3403 n = len;
3404 index = s.index;
3405 tab = s.tab;
3406 s.tab = null;
3407 TableStack<K,V> next = s.next;
3408 s.next = spare; // save for reuse
3409 stack = next;
3410 spare = s;
3411 }
3412 if (s == null && (index += baseSize) >= n)
3413 index = ++baseIndex;
3414 }
3415 }
3416
3417 /**
3418 * Base of key, value, and entry Iterators. Adds fields to
3419 * Traverser to support iterator.remove.
3420 */
3421 static class BaseIterator<K,V> extends Traverser<K,V> {
3422 final ConcurrentHashMap<K,V> map;
3423 Node<K,V> lastReturned;
3424 BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3425 ConcurrentHashMap<K,V> map) {
3426 super(tab, size, index, limit);
3427 this.map = map;
3428 advance();
3429 }
3430
3431 public final boolean hasNext() { return next != null; }
3432 public final boolean hasMoreElements() { return next != null; }
3433
3434 public final void remove() {
3435 Node<K,V> p;
3436 if ((p = lastReturned) == null)
3437 throw new IllegalStateException();
3438 lastReturned = null;
3439 map.replaceNode(p.key, null, null);
3440 }
3441 }
3442
3443 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3444 implements Iterator<K>, Enumeration<K> {
3445 KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
3446 ConcurrentHashMap<K,V> map) {
3447 super(tab, size, index, limit, map);
3448 }
3449
3450 public final K next() {
3451 Node<K,V> p;
3452 if ((p = next) == null)
3453 throw new NoSuchElementException();
3454 K k = p.key;
3455 lastReturned = p;
3456 advance();
3457 return k;
3458 }
3459
3460 public final K nextElement() { return next(); }
3461 }
3462
3463 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3464 implements Iterator<V>, Enumeration<V> {
3465 ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
3466 ConcurrentHashMap<K,V> map) {
3467 super(tab, size, index, limit, map);
3468 }
3469
3470 public final V next() {
3471 Node<K,V> p;
3472 if ((p = next) == null)
3473 throw new NoSuchElementException();
3474 V v = p.val;
3475 lastReturned = p;
3476 advance();
3477 return v;
3478 }
3479
3480 public final V nextElement() { return next(); }
3481 }
3482
3483 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3484 implements Iterator<Map.Entry<K,V>> {
3485 EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
3486 ConcurrentHashMap<K,V> map) {
3487 super(tab, size, index, limit, map);
3488 }
3489
3490 public final Map.Entry<K,V> next() {
3491 Node<K,V> p;
3492 if ((p = next) == null)
3493 throw new NoSuchElementException();
3494 K k = p.key;
3495 V v = p.val;
3496 lastReturned = p;
3497 advance();
3498 return new MapEntry<K,V>(k, v, map);
3499 }
3500 }
3501
3502 /**
3503 * Exported Entry for EntryIterator.
3504 */
3505 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3506 final K key; // non-null
3507 V val; // non-null
3508 final ConcurrentHashMap<K,V> map;
3509 MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3510 this.key = key;
3511 this.val = val;
3512 this.map = map;
3513 }
3514 public K getKey() { return key; }
3515 public V getValue() { return val; }
3516 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3517 public String toString() {
3518 return Helpers.mapEntryToString(key, val);
3519 }
3520
3521 public boolean equals(Object o) {
3522 Object k, v; Map.Entry<?,?> e;
3523 return ((o instanceof Map.Entry) &&
3524 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3525 (v = e.getValue()) != null &&
3526 (k == key || k.equals(key)) &&
3527 (v == val || v.equals(val)));
3528 }
3529
3530 /**
3531 * Sets our entry's value and writes through to the map. The
3532 * value to return is somewhat arbitrary here. Since we do not
3533 * necessarily track asynchronous changes, the most recent
3534 * "previous" value could be different from what we return (or
3535 * could even have been removed, in which case the put will
3536 * re-establish). We do not and cannot guarantee more.
3537 */
3538 public V setValue(V value) {
3539 if (value == null) throw new NullPointerException();
3540 V v = val;
3541 val = value;
3542 map.put(key, value);
3543 return v;
3544 }
3545 }
3546
3547 static final class KeySpliterator<K,V> extends Traverser<K,V>
3548 implements Spliterator<K> {
3549 long est; // size estimate
3550 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3551 long est) {
3552 super(tab, size, index, limit);
3553 this.est = est;
3554 }
3555
3556 public KeySpliterator<K,V> trySplit() {
3557 int i, f, h;
3558 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3559 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3560 f, est >>>= 1);
3561 }
3562
3563 public void forEachRemaining(Consumer<? super K> action) {
3564 if (action == null) throw new NullPointerException();
3565 for (Node<K,V> p; (p = advance()) != null;)
3566 action.accept(p.key);
3567 }
3568
3569 public boolean tryAdvance(Consumer<? super K> action) {
3570 if (action == null) throw new NullPointerException();
3571 Node<K,V> p;
3572 if ((p = advance()) == null)
3573 return false;
3574 action.accept(p.key);
3575 return true;
3576 }
3577
3578 public long estimateSize() { return est; }
3579
3580 public int characteristics() {
3581 return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3582 Spliterator.NONNULL;
3583 }
3584 }
3585
3586 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3587 implements Spliterator<V> {
3588 long est; // size estimate
3589 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3590 long est) {
3591 super(tab, size, index, limit);
3592 this.est = est;
3593 }
3594
3595 public ValueSpliterator<K,V> trySplit() {
3596 int i, f, h;
3597 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3598 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3599 f, est >>>= 1);
3600 }
3601
3602 public void forEachRemaining(Consumer<? super V> action) {
3603 if (action == null) throw new NullPointerException();
3604 for (Node<K,V> p; (p = advance()) != null;)
3605 action.accept(p.val);
3606 }
3607
3608 public boolean tryAdvance(Consumer<? super V> action) {
3609 if (action == null) throw new NullPointerException();
3610 Node<K,V> p;
3611 if ((p = advance()) == null)
3612 return false;
3613 action.accept(p.val);
3614 return true;
3615 }
3616
3617 public long estimateSize() { return est; }
3618
3619 public int characteristics() {
3620 return Spliterator.CONCURRENT | Spliterator.NONNULL;
3621 }
3622 }
3623
3624 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3625 implements Spliterator<Map.Entry<K,V>> {
3626 final ConcurrentHashMap<K,V> map; // To export MapEntry
3627 long est; // size estimate
3628 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3629 long est, ConcurrentHashMap<K,V> map) {
3630 super(tab, size, index, limit);
3631 this.map = map;
3632 this.est = est;
3633 }
3634
3635 public EntrySpliterator<K,V> trySplit() {
3636 int i, f, h;
3637 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3638 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3639 f, est >>>= 1, map);
3640 }
3641
3642 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3643 if (action == null) throw new NullPointerException();
3644 for (Node<K,V> p; (p = advance()) != null; )
3645 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3646 }
3647
3648 public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3649 if (action == null) throw new NullPointerException();
3650 Node<K,V> p;
3651 if ((p = advance()) == null)
3652 return false;
3653 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3654 return true;
3655 }
3656
3657 public long estimateSize() { return est; }
3658
3659 public int characteristics() {
3660 return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3661 Spliterator.NONNULL;
3662 }
3663 }
3664
3665 // Parallel bulk operations
3666
3667 /**
3668 * Computes initial batch value for bulk tasks. The returned value
3669 * is approximately exp2 of the number of times (minus one) to
3670 * split task by two before executing leaf action. This value is
3671 * faster to compute and more convenient to use as a guide to
3672 * splitting than is the depth, since it is used while dividing by
3673 * two anyway.
3674 */
3675 final int batchFor(long b) {
3676 long n;
3677 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3678 return 0;
3679 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3680 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3681 }
3682
3683 /**
3684 * Performs the given action for each (key, value).
3685 *
3686 * @param parallelismThreshold the (estimated) number of elements
3687 * needed for this operation to be executed in parallel
3688 * @param action the action
3689 * @since 1.8
3690 */
3691 public void forEach(long parallelismThreshold,
3692 BiConsumer<? super K,? super V> action) {
3693 if (action == null) throw new NullPointerException();
3694 new ForEachMappingTask<K,V>
3695 (null, batchFor(parallelismThreshold), 0, 0, table,
3696 action).invoke();
3697 }
3698
3699 /**
3700 * Performs the given action for each non-null transformation
3701 * of each (key, value).
3702 *
3703 * @param parallelismThreshold the (estimated) number of elements
3704 * needed for this operation to be executed in parallel
3705 * @param transformer a function returning the transformation
3706 * for an element, or null if there is no transformation (in
3707 * which case the action is not applied)
3708 * @param action the action
3709 * @param <U> the return type of the transformer
3710 * @since 1.8
3711 */
3712 public <U> void forEach(long parallelismThreshold,
3713 BiFunction<? super K, ? super V, ? extends U> transformer,
3714 Consumer<? super U> action) {
3715 if (transformer == null || action == null)
3716 throw new NullPointerException();
3717 new ForEachTransformedMappingTask<K,V,U>
3718 (null, batchFor(parallelismThreshold), 0, 0, table,
3719 transformer, action).invoke();
3720 }
3721
3722 /**
3723 * Returns a non-null result from applying the given search
3724 * function on each (key, value), or null if none. Upon
3725 * success, further element processing is suppressed and the
3726 * results of any other parallel invocations of the search
3727 * function are ignored.
3728 *
3729 * @param parallelismThreshold the (estimated) number of elements
3730 * needed for this operation to be executed in parallel
3731 * @param searchFunction a function returning a non-null
3732 * result on success, else null
3733 * @param <U> the return type of the search function
3734 * @return a non-null result from applying the given search
3735 * function on each (key, value), or null if none
3736 * @since 1.8
3737 */
3738 public <U> U search(long parallelismThreshold,
3739 BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3740 if (searchFunction == null) throw new NullPointerException();
3741 return new SearchMappingsTask<K,V,U>
3742 (null, batchFor(parallelismThreshold), 0, 0, table,
3743 searchFunction, new AtomicReference<U>()).invoke();
3744 }
3745
3746 /**
3747 * Returns the result of accumulating the given transformation
3748 * of all (key, value) pairs using the given reducer to
3749 * combine values, or null if none.
3750 *
3751 * @param parallelismThreshold the (estimated) number of elements
3752 * needed for this operation to be executed in parallel
3753 * @param transformer a function returning the transformation
3754 * for an element, or null if there is no transformation (in
3755 * which case it is not combined)
3756 * @param reducer a commutative associative combining function
3757 * @param <U> the return type of the transformer
3758 * @return the result of accumulating the given transformation
3759 * of all (key, value) pairs
3760 * @since 1.8
3761 */
3762 public <U> U reduce(long parallelismThreshold,
3763 BiFunction<? super K, ? super V, ? extends U> transformer,
3764 BiFunction<? super U, ? super U, ? extends U> reducer) {
3765 if (transformer == null || reducer == null)
3766 throw new NullPointerException();
3767 return new MapReduceMappingsTask<K,V,U>
3768 (null, batchFor(parallelismThreshold), 0, 0, table,
3769 null, transformer, reducer).invoke();
3770 }
3771
3772 /**
3773 * Returns the result of accumulating the given transformation
3774 * of all (key, value) pairs using the given reducer to
3775 * combine values, and the given basis as an identity value.
3776 *
3777 * @param parallelismThreshold the (estimated) number of elements
3778 * needed for this operation to be executed in parallel
3779 * @param transformer a function returning the transformation
3780 * for an element
3781 * @param basis the identity (initial default value) for the reduction
3782 * @param reducer a commutative associative combining function
3783 * @return the result of accumulating the given transformation
3784 * of all (key, value) pairs
3785 * @since 1.8
3786 */
3787 public double reduceToDouble(long parallelismThreshold,
3788 ToDoubleBiFunction<? super K, ? super V> transformer,
3789 double basis,
3790 DoubleBinaryOperator reducer) {
3791 if (transformer == null || reducer == null)
3792 throw new NullPointerException();
3793 return new MapReduceMappingsToDoubleTask<K,V>
3794 (null, batchFor(parallelismThreshold), 0, 0, table,
3795 null, transformer, basis, reducer).invoke();
3796 }
3797
3798 /**
3799 * Returns the result of accumulating the given transformation
3800 * of all (key, value) pairs using the given reducer to
3801 * combine values, and the given basis as an identity value.
3802 *
3803 * @param parallelismThreshold the (estimated) number of elements
3804 * needed for this operation to be executed in parallel
3805 * @param transformer a function returning the transformation
3806 * for an element
3807 * @param basis the identity (initial default value) for the reduction
3808 * @param reducer a commutative associative combining function
3809 * @return the result of accumulating the given transformation
3810 * of all (key, value) pairs
3811 * @since 1.8
3812 */
3813 public long reduceToLong(long parallelismThreshold,
3814 ToLongBiFunction<? super K, ? super V> transformer,
3815 long basis,
3816 LongBinaryOperator reducer) {
3817 if (transformer == null || reducer == null)
3818 throw new NullPointerException();
3819 return new MapReduceMappingsToLongTask<K,V>
3820 (null, batchFor(parallelismThreshold), 0, 0, table,
3821 null, transformer, basis, reducer).invoke();
3822 }
3823
3824 /**
3825 * Returns the result of accumulating the given transformation
3826 * of all (key, value) pairs using the given reducer to
3827 * combine values, and the given basis as an identity value.
3828 *
3829 * @param parallelismThreshold the (estimated) number of elements
3830 * needed for this operation to be executed in parallel
3831 * @param transformer a function returning the transformation
3832 * for an element
3833 * @param basis the identity (initial default value) for the reduction
3834 * @param reducer a commutative associative combining function
3835 * @return the result of accumulating the given transformation
3836 * of all (key, value) pairs
3837 * @since 1.8
3838 */
3839 public int reduceToInt(long parallelismThreshold,
3840 ToIntBiFunction<? super K, ? super V> transformer,
3841 int basis,
3842 IntBinaryOperator reducer) {
3843 if (transformer == null || reducer == null)
3844 throw new NullPointerException();
3845 return new MapReduceMappingsToIntTask<K,V>
3846 (null, batchFor(parallelismThreshold), 0, 0, table,
3847 null, transformer, basis, reducer).invoke();
3848 }
3849
3850 /**
3851 * Performs the given action for each key.
3852 *
3853 * @param parallelismThreshold the (estimated) number of elements
3854 * needed for this operation to be executed in parallel
3855 * @param action the action
3856 * @since 1.8
3857 */
3858 public void forEachKey(long parallelismThreshold,
3859 Consumer<? super K> action) {
3860 if (action == null) throw new NullPointerException();
3861 new ForEachKeyTask<K,V>
3862 (null, batchFor(parallelismThreshold), 0, 0, table,
3863 action).invoke();
3864 }
3865
3866 /**
3867 * Performs the given action for each non-null transformation
3868 * of each key.
3869 *
3870 * @param parallelismThreshold the (estimated) number of elements
3871 * needed for this operation to be executed in parallel
3872 * @param transformer a function returning the transformation
3873 * for an element, or null if there is no transformation (in
3874 * which case the action is not applied)
3875 * @param action the action
3876 * @param <U> the return type of the transformer
3877 * @since 1.8
3878 */
3879 public <U> void forEachKey(long parallelismThreshold,
3880 Function<? super K, ? extends U> transformer,
3881 Consumer<? super U> action) {
3882 if (transformer == null || action == null)
3883 throw new NullPointerException();
3884 new ForEachTransformedKeyTask<K,V,U>
3885 (null, batchFor(parallelismThreshold), 0, 0, table,
3886 transformer, action).invoke();
3887 }
3888
3889 /**
3890 * Returns a non-null result from applying the given search
3891 * function on each key, or null if none. Upon success,
3892 * further element processing is suppressed and the results of
3893 * any other parallel invocations of the search function are
3894 * ignored.
3895 *
3896 * @param parallelismThreshold the (estimated) number of elements
3897 * needed for this operation to be executed in parallel
3898 * @param searchFunction a function returning a non-null
3899 * result on success, else null
3900 * @param <U> the return type of the search function
3901 * @return a non-null result from applying the given search
3902 * function on each key, or null if none
3903 * @since 1.8
3904 */
3905 public <U> U searchKeys(long parallelismThreshold,
3906 Function<? super K, ? extends U> searchFunction) {
3907 if (searchFunction == null) throw new NullPointerException();
3908 return new SearchKeysTask<K,V,U>
3909 (null, batchFor(parallelismThreshold), 0, 0, table,
3910 searchFunction, new AtomicReference<U>()).invoke();
3911 }
3912
3913 /**
3914 * Returns the result of accumulating all keys using the given
3915 * reducer to combine values, or null if none.
3916 *
3917 * @param parallelismThreshold the (estimated) number of elements
3918 * needed for this operation to be executed in parallel
3919 * @param reducer a commutative associative combining function
3920 * @return the result of accumulating all keys using the given
3921 * reducer to combine values, or null if none
3922 * @since 1.8
3923 */
3924 public K reduceKeys(long parallelismThreshold,
3925 BiFunction<? super K, ? super K, ? extends K> reducer) {
3926 if (reducer == null) throw new NullPointerException();
3927 return new ReduceKeysTask<K,V>
3928 (null, batchFor(parallelismThreshold), 0, 0, table,
3929 null, reducer).invoke();
3930 }
3931
3932 /**
3933 * Returns the result of accumulating the given transformation
3934 * of all keys using the given reducer to combine values, or
3935 * null if none.
3936 *
3937 * @param parallelismThreshold the (estimated) number of elements
3938 * needed for this operation to be executed in parallel
3939 * @param transformer a function returning the transformation
3940 * for an element, or null if there is no transformation (in
3941 * which case it is not combined)
3942 * @param reducer a commutative associative combining function
3943 * @param <U> the return type of the transformer
3944 * @return the result of accumulating the given transformation
3945 * of all keys
3946 * @since 1.8
3947 */
3948 public <U> U reduceKeys(long parallelismThreshold,
3949 Function<? super K, ? extends U> transformer,
3950 BiFunction<? super U, ? super U, ? extends U> reducer) {
3951 if (transformer == null || reducer == null)
3952 throw new NullPointerException();
3953 return new MapReduceKeysTask<K,V,U>
3954 (null, batchFor(parallelismThreshold), 0, 0, table,
3955 null, transformer, reducer).invoke();
3956 }
3957
3958 /**
3959 * Returns the result of accumulating the given transformation
3960 * of all keys using the given reducer to combine values, and
3961 * the given basis as an identity value.
3962 *
3963 * @param parallelismThreshold the (estimated) number of elements
3964 * needed for this operation to be executed in parallel
3965 * @param transformer a function returning the transformation
3966 * for an element
3967 * @param basis the identity (initial default value) for the reduction
3968 * @param reducer a commutative associative combining function
3969 * @return the result of accumulating the given transformation
3970 * of all keys
3971 * @since 1.8
3972 */
3973 public double reduceKeysToDouble(long parallelismThreshold,
3974 ToDoubleFunction<? super K> transformer,
3975 double basis,
3976 DoubleBinaryOperator reducer) {
3977 if (transformer == null || reducer == null)
3978 throw new NullPointerException();
3979 return new MapReduceKeysToDoubleTask<K,V>
3980 (null, batchFor(parallelismThreshold), 0, 0, table,
3981 null, transformer, basis, reducer).invoke();
3982 }
3983
3984 /**
3985 * Returns the result of accumulating the given transformation
3986 * of all keys using the given reducer to combine values, and
3987 * the given basis as an identity value.
3988 *
3989 * @param parallelismThreshold the (estimated) number of elements
3990 * needed for this operation to be executed in parallel
3991 * @param transformer a function returning the transformation
3992 * for an element
3993 * @param basis the identity (initial default value) for the reduction
3994 * @param reducer a commutative associative combining function
3995 * @return the result of accumulating the given transformation
3996 * of all keys
3997 * @since 1.8
3998 */
3999 public long reduceKeysToLong(long parallelismThreshold,
4000 ToLongFunction<? super K> transformer,
4001 long basis,
4002 LongBinaryOperator reducer) {
4003 if (transformer == null || reducer == null)
4004 throw new NullPointerException();
4005 return new MapReduceKeysToLongTask<K,V>
4006 (null, batchFor(parallelismThreshold), 0, 0, table,
4007 null, transformer, basis, reducer).invoke();
4008 }
4009
4010 /**
4011 * Returns the result of accumulating the given transformation
4012 * of all keys using the given reducer to combine values, and
4013 * the given basis as an identity value.
4014 *
4015 * @param parallelismThreshold the (estimated) number of elements
4016 * needed for this operation to be executed in parallel
4017 * @param transformer a function returning the transformation
4018 * for an element
4019 * @param basis the identity (initial default value) for the reduction
4020 * @param reducer a commutative associative combining function
4021 * @return the result of accumulating the given transformation
4022 * of all keys
4023 * @since 1.8
4024 */
4025 public int reduceKeysToInt(long parallelismThreshold,
4026 ToIntFunction<? super K> transformer,
4027 int basis,
4028 IntBinaryOperator reducer) {
4029 if (transformer == null || reducer == null)
4030 throw new NullPointerException();
4031 return new MapReduceKeysToIntTask<K,V>
4032 (null, batchFor(parallelismThreshold), 0, 0, table,
4033 null, transformer, basis, reducer).invoke();
4034 }
4035
4036 /**
4037 * Performs the given action for each value.
4038 *
4039 * @param parallelismThreshold the (estimated) number of elements
4040 * needed for this operation to be executed in parallel
4041 * @param action the action
4042 * @since 1.8
4043 */
4044 public void forEachValue(long parallelismThreshold,
4045 Consumer<? super V> action) {
4046 if (action == null)
4047 throw new NullPointerException();
4048 new ForEachValueTask<K,V>
4049 (null, batchFor(parallelismThreshold), 0, 0, table,
4050 action).invoke();
4051 }
4052
4053 /**
4054 * Performs the given action for each non-null transformation
4055 * of each value.
4056 *
4057 * @param parallelismThreshold the (estimated) number of elements
4058 * needed for this operation to be executed in parallel
4059 * @param transformer a function returning the transformation
4060 * for an element, or null if there is no transformation (in
4061 * which case the action is not applied)
4062 * @param action the action
4063 * @param <U> the return type of the transformer
4064 * @since 1.8
4065 */
4066 public <U> void forEachValue(long parallelismThreshold,
4067 Function<? super V, ? extends U> transformer,
4068 Consumer<? super U> action) {
4069 if (transformer == null || action == null)
4070 throw new NullPointerException();
4071 new ForEachTransformedValueTask<K,V,U>
4072 (null, batchFor(parallelismThreshold), 0, 0, table,
4073 transformer, action).invoke();
4074 }
4075
4076 /**
4077 * Returns a non-null result from applying the given search
4078 * function on each value, or null if none. Upon success,
4079 * further element processing is suppressed and the results of
4080 * any other parallel invocations of the search function are
4081 * ignored.
4082 *
4083 * @param parallelismThreshold the (estimated) number of elements
4084 * needed for this operation to be executed in parallel
4085 * @param searchFunction a function returning a non-null
4086 * result on success, else null
4087 * @param <U> the return type of the search function
4088 * @return a non-null result from applying the given search
4089 * function on each value, or null if none
4090 * @since 1.8
4091 */
4092 public <U> U searchValues(long parallelismThreshold,
4093 Function<? super V, ? extends U> searchFunction) {
4094 if (searchFunction == null) throw new NullPointerException();
4095 return new SearchValuesTask<K,V,U>
4096 (null, batchFor(parallelismThreshold), 0, 0, table,
4097 searchFunction, new AtomicReference<U>()).invoke();
4098 }
4099
4100 /**
4101 * Returns the result of accumulating all values using the
4102 * given reducer to combine values, or null if none.
4103 *
4104 * @param parallelismThreshold the (estimated) number of elements
4105 * needed for this operation to be executed in parallel
4106 * @param reducer a commutative associative combining function
4107 * @return the result of accumulating all values
4108 * @since 1.8
4109 */
4110 public V reduceValues(long parallelismThreshold,
4111 BiFunction<? super V, ? super V, ? extends V> reducer) {
4112 if (reducer == null) throw new NullPointerException();
4113 return new ReduceValuesTask<K,V>
4114 (null, batchFor(parallelismThreshold), 0, 0, table,
4115 null, reducer).invoke();
4116 }
4117
4118 /**
4119 * Returns the result of accumulating the given transformation
4120 * of all values using the given reducer to combine values, or
4121 * null if none.
4122 *
4123 * @param parallelismThreshold the (estimated) number of elements
4124 * needed for this operation to be executed in parallel
4125 * @param transformer a function returning the transformation
4126 * for an element, or null if there is no transformation (in
4127 * which case it is not combined)
4128 * @param reducer a commutative associative combining function
4129 * @param <U> the return type of the transformer
4130 * @return the result of accumulating the given transformation
4131 * of all values
4132 * @since 1.8
4133 */
4134 public <U> U reduceValues(long parallelismThreshold,
4135 Function<? super V, ? extends U> transformer,
4136 BiFunction<? super U, ? super U, ? extends U> reducer) {
4137 if (transformer == null || reducer == null)
4138 throw new NullPointerException();
4139 return new MapReduceValuesTask<K,V,U>
4140 (null, batchFor(parallelismThreshold), 0, 0, table,
4141 null, transformer, reducer).invoke();
4142 }
4143
4144 /**
4145 * Returns the result of accumulating the given transformation
4146 * of all values using the given reducer to combine values,
4147 * and the given basis as an identity value.
4148 *
4149 * @param parallelismThreshold the (estimated) number of elements
4150 * needed for this operation to be executed in parallel
4151 * @param transformer a function returning the transformation
4152 * for an element
4153 * @param basis the identity (initial default value) for the reduction
4154 * @param reducer a commutative associative combining function
4155 * @return the result of accumulating the given transformation
4156 * of all values
4157 * @since 1.8
4158 */
4159 public double reduceValuesToDouble(long parallelismThreshold,
4160 ToDoubleFunction<? super V> transformer,
4161 double basis,
4162 DoubleBinaryOperator reducer) {
4163 if (transformer == null || reducer == null)
4164 throw new NullPointerException();
4165 return new MapReduceValuesToDoubleTask<K,V>
4166 (null, batchFor(parallelismThreshold), 0, 0, table,
4167 null, transformer, basis, reducer).invoke();
4168 }
4169
4170 /**
4171 * Returns the result of accumulating the given transformation
4172 * of all values using the given reducer to combine values,
4173 * and the given basis as an identity value.
4174 *
4175 * @param parallelismThreshold the (estimated) number of elements
4176 * needed for this operation to be executed in parallel
4177 * @param transformer a function returning the transformation
4178 * for an element
4179 * @param basis the identity (initial default value) for the reduction
4180 * @param reducer a commutative associative combining function
4181 * @return the result of accumulating the given transformation
4182 * of all values
4183 * @since 1.8
4184 */
4185 public long reduceValuesToLong(long parallelismThreshold,
4186 ToLongFunction<? super V> transformer,
4187 long basis,
4188 LongBinaryOperator reducer) {
4189 if (transformer == null || reducer == null)
4190 throw new NullPointerException();
4191 return new MapReduceValuesToLongTask<K,V>
4192 (null, batchFor(parallelismThreshold), 0, 0, table,
4193 null, transformer, basis, reducer).invoke();
4194 }
4195
4196 /**
4197 * Returns the result of accumulating the given transformation
4198 * of all values using the given reducer to combine values,
4199 * and the given basis as an identity value.
4200 *
4201 * @param parallelismThreshold the (estimated) number of elements
4202 * needed for this operation to be executed in parallel
4203 * @param transformer a function returning the transformation
4204 * for an element
4205 * @param basis the identity (initial default value) for the reduction
4206 * @param reducer a commutative associative combining function
4207 * @return the result of accumulating the given transformation
4208 * of all values
4209 * @since 1.8
4210 */
4211 public int reduceValuesToInt(long parallelismThreshold,
4212 ToIntFunction<? super V> transformer,
4213 int basis,
4214 IntBinaryOperator reducer) {
4215 if (transformer == null || reducer == null)
4216 throw new NullPointerException();
4217 return new MapReduceValuesToIntTask<K,V>
4218 (null, batchFor(parallelismThreshold), 0, 0, table,
4219 null, transformer, basis, reducer).invoke();
4220 }
4221
4222 /**
4223 * Performs the given action for each entry.
4224 *
4225 * @param parallelismThreshold the (estimated) number of elements
4226 * needed for this operation to be executed in parallel
4227 * @param action the action
4228 * @since 1.8
4229 */
4230 public void forEachEntry(long parallelismThreshold,
4231 Consumer<? super Map.Entry<K,V>> action) {
4232 if (action == null) throw new NullPointerException();
4233 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4234 action).invoke();
4235 }
4236
4237 /**
4238 * Performs the given action for each non-null transformation
4239 * of each entry.
4240 *
4241 * @param parallelismThreshold the (estimated) number of elements
4242 * needed for this operation to be executed in parallel
4243 * @param transformer a function returning the transformation
4244 * for an element, or null if there is no transformation (in
4245 * which case the action is not applied)
4246 * @param action the action
4247 * @param <U> the return type of the transformer
4248 * @since 1.8
4249 */
4250 public <U> void forEachEntry(long parallelismThreshold,
4251 Function<Map.Entry<K,V>, ? extends U> transformer,
4252 Consumer<? super U> action) {
4253 if (transformer == null || action == null)
4254 throw new NullPointerException();
4255 new ForEachTransformedEntryTask<K,V,U>
4256 (null, batchFor(parallelismThreshold), 0, 0, table,
4257 transformer, action).invoke();
4258 }
4259
4260 /**
4261 * Returns a non-null result from applying the given search
4262 * function on each entry, or null if none. Upon success,
4263 * further element processing is suppressed and the results of
4264 * any other parallel invocations of the search function are
4265 * ignored.
4266 *
4267 * @param parallelismThreshold the (estimated) number of elements
4268 * needed for this operation to be executed in parallel
4269 * @param searchFunction a function returning a non-null
4270 * result on success, else null
4271 * @param <U> the return type of the search function
4272 * @return a non-null result from applying the given search
4273 * function on each entry, or null if none
4274 * @since 1.8
4275 */
4276 public <U> U searchEntries(long parallelismThreshold,
4277 Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4278 if (searchFunction == null) throw new NullPointerException();
4279 return new SearchEntriesTask<K,V,U>
4280 (null, batchFor(parallelismThreshold), 0, 0, table,
4281 searchFunction, new AtomicReference<U>()).invoke();
4282 }
4283
4284 /**
4285 * Returns the result of accumulating all entries using the
4286 * given reducer to combine values, or null if none.
4287 *
4288 * @param parallelismThreshold the (estimated) number of elements
4289 * needed for this operation to be executed in parallel
4290 * @param reducer a commutative associative combining function
4291 * @return the result of accumulating all entries
4292 * @since 1.8
4293 */
4294 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4295 BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4296 if (reducer == null) throw new NullPointerException();
4297 return new ReduceEntriesTask<K,V>
4298 (null, batchFor(parallelismThreshold), 0, 0, table,
4299 null, reducer).invoke();
4300 }
4301
4302 /**
4303 * Returns the result of accumulating the given transformation
4304 * of all entries using the given reducer to combine values,
4305 * or null if none.
4306 *
4307 * @param parallelismThreshold the (estimated) number of elements
4308 * needed for this operation to be executed in parallel
4309 * @param transformer a function returning the transformation
4310 * for an element, or null if there is no transformation (in
4311 * which case it is not combined)
4312 * @param reducer a commutative associative combining function
4313 * @param <U> the return type of the transformer
4314 * @return the result of accumulating the given transformation
4315 * of all entries
4316 * @since 1.8
4317 */
4318 public <U> U reduceEntries(long parallelismThreshold,
4319 Function<Map.Entry<K,V>, ? extends U> transformer,
4320 BiFunction<? super U, ? super U, ? extends U> reducer) {
4321 if (transformer == null || reducer == null)
4322 throw new NullPointerException();
4323 return new MapReduceEntriesTask<K,V,U>
4324 (null, batchFor(parallelismThreshold), 0, 0, table,
4325 null, transformer, reducer).invoke();
4326 }
4327
4328 /**
4329 * Returns the result of accumulating the given transformation
4330 * of all entries using the given reducer to combine values,
4331 * and the given basis as an identity value.
4332 *
4333 * @param parallelismThreshold the (estimated) number of elements
4334 * needed for this operation to be executed in parallel
4335 * @param transformer a function returning the transformation
4336 * for an element
4337 * @param basis the identity (initial default value) for the reduction
4338 * @param reducer a commutative associative combining function
4339 * @return the result of accumulating the given transformation
4340 * of all entries
4341 * @since 1.8
4342 */
4343 public double reduceEntriesToDouble(long parallelismThreshold,
4344 ToDoubleFunction<Map.Entry<K,V>> transformer,
4345 double basis,
4346 DoubleBinaryOperator reducer) {
4347 if (transformer == null || reducer == null)
4348 throw new NullPointerException();
4349 return new MapReduceEntriesToDoubleTask<K,V>
4350 (null, batchFor(parallelismThreshold), 0, 0, table,
4351 null, transformer, basis, reducer).invoke();
4352 }
4353
4354 /**
4355 * Returns the result of accumulating the given transformation
4356 * of all entries using the given reducer to combine values,
4357 * and the given basis as an identity value.
4358 *
4359 * @param parallelismThreshold the (estimated) number of elements
4360 * needed for this operation to be executed in parallel
4361 * @param transformer a function returning the transformation
4362 * for an element
4363 * @param basis the identity (initial default value) for the reduction
4364 * @param reducer a commutative associative combining function
4365 * @return the result of accumulating the given transformation
4366 * of all entries
4367 * @since 1.8
4368 */
4369 public long reduceEntriesToLong(long parallelismThreshold,
4370 ToLongFunction<Map.Entry<K,V>> transformer,
4371 long basis,
4372 LongBinaryOperator reducer) {
4373 if (transformer == null || reducer == null)
4374 throw new NullPointerException();
4375 return new MapReduceEntriesToLongTask<K,V>
4376 (null, batchFor(parallelismThreshold), 0, 0, table,
4377 null, transformer, basis, reducer).invoke();
4378 }
4379
4380 /**
4381 * Returns the result of accumulating the given transformation
4382 * of all entries using the given reducer to combine values,
4383 * and the given basis as an identity value.
4384 *
4385 * @param parallelismThreshold the (estimated) number of elements
4386 * needed for this operation to be executed in parallel
4387 * @param transformer a function returning the transformation
4388 * for an element
4389 * @param basis the identity (initial default value) for the reduction
4390 * @param reducer a commutative associative combining function
4391 * @return the result of accumulating the given transformation
4392 * of all entries
4393 * @since 1.8
4394 */
4395 public int reduceEntriesToInt(long parallelismThreshold,
4396 ToIntFunction<Map.Entry<K,V>> transformer,
4397 int basis,
4398 IntBinaryOperator reducer) {
4399 if (transformer == null || reducer == null)
4400 throw new NullPointerException();
4401 return new MapReduceEntriesToIntTask<K,V>
4402 (null, batchFor(parallelismThreshold), 0, 0, table,
4403 null, transformer, basis, reducer).invoke();
4404 }
4405
4406
4407 /* ----------------Views -------------- */
4408
4409 /**
4410 * Base class for views.
4411 */
4412 abstract static class CollectionView<K,V,E>
4413 implements Collection<E>, java.io.Serializable {
4414 private static final long serialVersionUID = 7249069246763182397L;
4415 final ConcurrentHashMap<K,V> map;
4416 CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }
4417
4418 /**
4419 * Returns the map backing this view.
4420 *
4421 * @return the map backing this view
4422 */
4423 public ConcurrentHashMap<K,V> getMap() { return map; }
4424
4425 /**
4426 * Removes all of the elements from this view, by removing all
4427 * the mappings from the map backing this view.
4428 */
4429 public final void clear() { map.clear(); }
4430 public final int size() { return map.size(); }
4431 public final boolean isEmpty() { return map.isEmpty(); }
4432
4433 // implementations below rely on concrete classes supplying these
4434 // abstract methods
4435 /**
4436 * Returns an iterator over the elements in this collection.
4437 *
4438 * <p>The returned iterator is
4439 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4440 *
4441 * @return an iterator over the elements in this collection
4442 */
4443 public abstract Iterator<E> iterator();
4444 public abstract boolean contains(Object o);
4445 public abstract boolean remove(Object o);
4446
4447 private static final String OOME_MSG = "Required array size too large";
4448
4449 public final Object[] toArray() {
4450 long sz = map.mappingCount();
4451 if (sz > MAX_ARRAY_SIZE)
4452 throw new OutOfMemoryError(OOME_MSG);
4453 int n = (int)sz;
4454 Object[] r = new Object[n];
4455 int i = 0;
4456 for (E e : this) {
4457 if (i == n) {
4458 if (n >= MAX_ARRAY_SIZE)
4459 throw new OutOfMemoryError(OOME_MSG);
4460 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4461 n = MAX_ARRAY_SIZE;
4462 else
4463 n += (n >>> 1) + 1;
4464 r = Arrays.copyOf(r, n);
4465 }
4466 r[i++] = e;
4467 }
4468 return (i == n) ? r : Arrays.copyOf(r, i);
4469 }
4470
4471 @SuppressWarnings("unchecked")
4472 public final <T> T[] toArray(T[] a) {
4473 long sz = map.mappingCount();
4474 if (sz > MAX_ARRAY_SIZE)
4475 throw new OutOfMemoryError(OOME_MSG);
4476 int m = (int)sz;
4477 T[] r = (a.length >= m) ? a :
4478 (T[])java.lang.reflect.Array
4479 .newInstance(a.getClass().getComponentType(), m);
4480 int n = r.length;
4481 int i = 0;
4482 for (E e : this) {
4483 if (i == n) {
4484 if (n >= MAX_ARRAY_SIZE)
4485 throw new OutOfMemoryError(OOME_MSG);
4486 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4487 n = MAX_ARRAY_SIZE;
4488 else
4489 n += (n >>> 1) + 1;
4490 r = Arrays.copyOf(r, n);
4491 }
4492 r[i++] = (T)e;
4493 }
4494 if (a == r && i < n) {
4495 r[i] = null; // null-terminate
4496 return r;
4497 }
4498 return (i == n) ? r : Arrays.copyOf(r, i);
4499 }
4500
4501 /**
4502 * Returns a string representation of this collection.
4503 * The string representation consists of the string representations
4504 * of the collection's elements in the order they are returned by
4505 * its iterator, enclosed in square brackets ({@code "[]"}).
4506 * Adjacent elements are separated by the characters {@code ", "}
4507 * (comma and space). Elements are converted to strings as by
4508 * {@link String#valueOf(Object)}.
4509 *
4510 * @return a string representation of this collection
4511 */
4512 public final String toString() {
4513 StringBuilder sb = new StringBuilder();
4514 sb.append('[');
4515 Iterator<E> it = iterator();
4516 if (it.hasNext()) {
4517 for (;;) {
4518 Object e = it.next();
4519 sb.append(e == this ? "(this Collection)" : e);
4520 if (!it.hasNext())
4521 break;
4522 sb.append(',').append(' ');
4523 }
4524 }
4525 return sb.append(']').toString();
4526 }
4527
4528 public final boolean containsAll(Collection<?> c) {
4529 if (c != this) {
4530 for (Object e : c) {
4531 if (e == null || !contains(e))
4532 return false;
4533 }
4534 }
4535 return true;
4536 }
4537
4538 public boolean removeAll(Collection<?> c) {
4539 if (c == null) throw new NullPointerException();
4540 boolean modified = false;
4541 // Use (c instanceof Set) as a hint that lookup in c is as
4542 // efficient as this view
4543 Node<K,V>[] t;
4544 if ((t = map.table) == null) {
4545 return false;
4546 } else if (c instanceof Set<?> && c.size() > t.length) {
4547 for (Iterator<?> it = iterator(); it.hasNext(); ) {
4548 if (c.contains(it.next())) {
4549 it.remove();
4550 modified = true;
4551 }
4552 }
4553 } else {
4554 for (Object e : c)
4555 modified |= remove(e);
4556 }
4557 return modified;
4558 }
4559
4560 public final boolean retainAll(Collection<?> c) {
4561 if (c == null) throw new NullPointerException();
4562 boolean modified = false;
4563 for (Iterator<E> it = iterator(); it.hasNext();) {
4564 if (!c.contains(it.next())) {
4565 it.remove();
4566 modified = true;
4567 }
4568 }
4569 return modified;
4570 }
4571
4572 }
4573
4574 /**
4575 * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4576 * which additions may optionally be enabled by mapping to a
4577 * common value. This class cannot be directly instantiated.
4578 * See {@link #keySet() keySet()},
4579 * {@link #keySet(Object) keySet(V)},
4580 * {@link #newKeySet() newKeySet()},
4581 * {@link #newKeySet(int) newKeySet(int)}.
4582 *
4583 * @since 1.8
4584 */
public static class KeySetView<K,V> extends CollectionView<K,V,K>
    implements Set<K>, java.io.Serializable {
    private static final long serialVersionUID = 7249069246763182397L;
    private final V value;  // default value used by add(); null disables additions
    KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
        super(map);
        this.value = value;
    }

    /**
     * Returns the default mapped value for additions,
     * or {@code null} if additions are not supported.
     *
     * @return the default mapped value for additions, or {@code null}
     * if not supported
     */
    public V getMappedValue() { return value; }

    /**
     * {@inheritDoc}
     * @throws NullPointerException if the specified key is null
     */
    public boolean contains(Object o) { return map.containsKey(o); }

    /**
     * Removes the key from this map view, by removing the key (and its
     * corresponding value) from the backing map. This method does
     * nothing if the key is not in the map.
     *
     * @param o the key to be removed from the backing map
     * @return {@code true} if the backing map contained the specified key
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object o) { return map.remove(o) != null; }

    /**
     * @return an iterator over the keys of the backing map
     */
    public Iterator<K> iterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        int f = (t = m.table) == null ? 0 : t.length;
        return new KeyIterator<K,V>(t, f, 0, f, m);
    }

    /**
     * Adds the specified key to this set view by mapping the key to
     * the default mapped value in the backing map, if defined.
     *
     * @param e key to be added
     * @return {@code true} if this set changed as a result of the call
     * @throws NullPointerException if the specified key is null
     * @throws UnsupportedOperationException if no default mapped value
     * for additions was provided
     */
    public boolean add(K e) {
        V v;
        if ((v = value) == null)
            throw new UnsupportedOperationException();
        // onlyIfAbsent insertion: returns null (=> added) only when absent.
        return map.putVal(e, v, true) == null;
    }

    /**
     * Adds all of the elements in the specified collection to this set,
     * as if by calling {@link #add} on each one.
     *
     * @param c the elements to be inserted into this set
     * @return {@code true} if this set changed as a result of the call
     * @throws NullPointerException if the collection or any of its
     * elements are {@code null}
     * @throws UnsupportedOperationException if no default mapped value
     * for additions was provided
     */
    public boolean addAll(Collection<? extends K> c) {
        boolean added = false;
        V v;
        if ((v = value) == null)
            throw new UnsupportedOperationException();
        for (K e : c) {
            if (map.putVal(e, v, true) == null)
                added = true;
        }
        return added;
    }

    // Sum of element hash codes, per the Set.hashCode contract.
    public int hashCode() {
        int h = 0;
        for (K e : this)
            h += e.hashCode();
        return h;
    }

    // Set equality: same identity, or mutual containment with another Set.
    public boolean equals(Object o) {
        Set<?> c;
        return ((o instanceof Set) &&
                ((c = (Set<?>)o) == this ||
                 (containsAll(c) && c.containsAll(this))));
    }

    // Spliterator over keys; size estimate is clamped to be non-negative.
    public Spliterator<K> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
    }

    // Applies action to each key via a weakly-consistent Traverser.
    public void forEach(Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(p.key);
        }
    }
}
4702
4703 /**
4704 * A view of a ConcurrentHashMap as a {@link Collection} of
4705 * values, in which additions are disabled. This class cannot be
4706 * directly instantiated. See {@link #values()}.
4707 */
static final class ValuesView<K,V> extends CollectionView<K,V,V>
    implements Collection<V>, java.io.Serializable {
    private static final long serialVersionUID = 2249069246763182397L;
    ValuesView(ConcurrentHashMap<K,V> map) { super(map); }

    // Containment delegates to a full value scan of the backing map.
    public final boolean contains(Object o) {
        return map.containsValue(o);
    }

    // Removes a single mapping whose value equals o; null never matches.
    public final boolean remove(Object o) {
        if (o != null) {
            for (Iterator<V> it = iterator(); it.hasNext();) {
                if (o.equals(it.next())) {
                    it.remove();
                    return true;
                }
            }
        }
        return false;
    }

    // Weakly-consistent iterator over the values of the backing map.
    public final Iterator<V> iterator() {
        ConcurrentHashMap<K,V> m = map;
        Node<K,V>[] t;
        int f = (t = m.table) == null ? 0 : t.length;
        return new ValueIterator<K,V>(t, f, 0, f, m);
    }

    // Additions are disabled for value views (no key to associate).
    public final boolean add(V e) {
        throw new UnsupportedOperationException();
    }
    public final boolean addAll(Collection<? extends V> c) {
        throw new UnsupportedOperationException();
    }

    // Removes every mapping whose value is contained in c.
    @Override public boolean removeAll(Collection<?> c) {
        if (c == null) throw new NullPointerException();
        boolean modified = false;
        for (Iterator<V> it = iterator(); it.hasNext();) {
            if (c.contains(it.next())) {
                it.remove();
                modified = true;
            }
        }
        return modified;
    }

    // Delegates to the map's atomic per-entry value-predicate removal.
    public boolean removeIf(Predicate<? super V> filter) {
        return map.removeValueIf(filter);
    }

    // Spliterator over values; size estimate clamped to be non-negative.
    public Spliterator<V> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
    }

    // Applies action to each value via a weakly-consistent Traverser.
    public void forEach(Consumer<? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(p.val);
        }
    }
}
4776
4777 /**
4778 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4779 * entries. This class cannot be directly instantiated. See
4780 * {@link #entrySet()}.
4781 */
static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
    implements Set<Map.Entry<K,V>>, java.io.Serializable {
    private static final long serialVersionUID = 2249069246763182397L;
    EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

    // True iff o is a Map.Entry whose non-null key maps (by equals)
    // to an equal non-null value in the backing map.
    public boolean contains(Object o) {
        Object k, v, r; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (r = map.get(k)) != null &&
                (v = e.getValue()) != null &&
                (v == r || v.equals(r)));
    }

    // Removes the mapping only if both key and value match (map.remove(k,v)).
    public boolean remove(Object o) {
        Object k, v; Map.Entry<?,?> e;
        return ((o instanceof Map.Entry) &&
                (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                (v = e.getValue()) != null &&
                map.remove(k, v));
    }

    /**
     * @return an iterator over the entries of the backing map
     */
    public Iterator<Map.Entry<K,V>> iterator() {
        ConcurrentHashMap<K,V> m = map;
        Node<K,V>[] t;
        int f = (t = m.table) == null ? 0 : t.length;
        return new EntryIterator<K,V>(t, f, 0, f, m);
    }

    // Inserts the entry's mapping; returns true only if the key was absent.
    public boolean add(Entry<K,V> e) {
        return map.putVal(e.getKey(), e.getValue(), false) == null;
    }

    public boolean addAll(Collection<? extends Entry<K,V>> c) {
        boolean added = false;
        for (Entry<K,V> e : c) {
            if (add(e))
                added = true;
        }
        return added;
    }

    // Delegates to the map's atomic per-entry predicate removal.
    public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
        return map.removeEntryIf(filter);
    }

    // Sum of entry hash codes, per the Set.hashCode contract, computed
    // with a weakly-consistent Traverser.
    public final int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                h += p.hashCode();
            }
        }
        return h;
    }

    // Set equality: same identity, or mutual containment with another Set.
    public final boolean equals(Object o) {
        Set<?> c;
        return ((o instanceof Set) &&
                ((c = (Set<?>)o) == this ||
                 (containsAll(c) && c.containsAll(this))));
    }

    // Spliterator over entries; size estimate clamped to be non-negative.
    public Spliterator<Map.Entry<K,V>> spliterator() {
        Node<K,V>[] t;
        ConcurrentHashMap<K,V> m = map;
        long n = m.sumCount();
        int f = (t = m.table) == null ? 0 : t.length;
        return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
    }

    // Applies action to each entry, wrapping nodes as MapEntry so that
    // Entry.setValue writes through to the backing map.
    public void forEach(Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = map.table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                action.accept(new MapEntry<K,V>(p.key, p.val, map));
        }
    }

}
4869
4870 // -------------------------------------------------------
4871
4872 /**
4873 * Base class for bulk tasks. Repeats some fields and code from
4874 * class Traverser, because we need to subclass CountedCompleter.
4875 */
@SuppressWarnings("serial")
abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
    Node<K,V>[] tab;        // same as Traverser
    Node<K,V> next;         // next node to visit within current bin
    TableStack<K,V> stack, spare;  // saved/reusable states for nested forwarded tables
    int index;              // current slot in tab
    int baseIndex;          // current index of initial table
    int baseLimit;          // index bound of initial table for this task
    final int baseSize;     // initial table size
    int batch;              // split control

    BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
        super(par);
        this.batch = b;
        this.index = this.baseIndex = i;
        if ((this.tab = t) == null)
            this.baseSize = this.baseLimit = 0;
        else if (par == null)
            // Root task covers the whole table.
            this.baseSize = this.baseLimit = t.length;
        else {
            // Subtask: bounded by f, sharing the parent's base size.
            this.baseLimit = f;
            this.baseSize = par.baseSize;
        }
    }

    /**
     * Same as Traverser version.
     * Advances to the next node, if any, following forwarding nodes
     * into next tables during a resize (saving traversal state on the
     * stack) and descending into TreeBin node lists.
     */
    final Node<K,V> advance() {
        Node<K,V> e;
        if ((e = next) != null)
            e = e.next;
        for (;;) {
            Node<K,V>[] t; int i, n;
            if (e != null)
                return next = e;
            if (baseIndex >= baseLimit || (t = tab) == null ||
                (n = t.length) <= (i = index) || i < 0)
                return next = null;  // traversal exhausted
            if ((e = tabAt(t, i)) != null && e.hash < 0) {
                // Negative hash marks a special node.
                if (e instanceof ForwardingNode) {
                    // Bin moved during resize: follow into the next table,
                    // remembering where we were.
                    tab = ((ForwardingNode<K,V>)e).nextTable;
                    e = null;
                    pushState(t, i, n);
                    continue;
                }
                else if (e instanceof TreeBin)
                    e = ((TreeBin<K,V>)e).first;  // traverse the tree's node list
                else
                    e = null;  // reservation or other placeholder
            }
            if (stack != null)
                recoverState(n);  // possibly pop back to an outer table
            else if ((index = i + baseSize) >= n)
                index = ++baseIndex;  // visit upper slots, then next base slot
        }
    }

    /**
     * Saves traversal state upon encountering a forwarding node,
     * reusing a spare TableStack node when available.
     */
    private void pushState(Node<K,V>[] t, int i, int n) {
        TableStack<K,V> s = spare;
        if (s != null)
            spare = s.next;
        else
            s = new TableStack<K,V>();
        s.tab = t;
        s.length = n;
        s.index = i;
        s.next = stack;
        stack = s;
    }

    /**
     * Possibly pops saved traversal state: unwinds completed inner
     * tables, returning popped nodes to the spare list for reuse.
     */
    private void recoverState(int n) {
        TableStack<K,V> s; int len;
        while ((s = stack) != null && (index += (len = s.length)) >= n) {
            n = len;
            index = s.index;
            tab = s.tab;
            s.tab = null;
            TableStack<K,V> next = s.next;
            s.next = spare; // save for reuse
            stack = next;
            spare = s;
        }
        if (s == null && (index += baseSize) >= n)
            index = ++baseIndex;
    }
}
4963
4964 /*
4965 * Task classes. Coded in a regular but ugly format/style to
4966 * simplify checks that each variant differs in the right way from
4967 * others. The null screenings exist because compilers cannot tell
4968 * that we've already null-checked task arguments, so we force
4969 * simplest hoisted bypass to help avoid convoluted traps.
4970 */
@SuppressWarnings("serial")
static final class ForEachKeyTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super K> action;
    ForEachKeyTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super K> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<? super K> action;
        if ((action = this.action) != null) {
            // Split: repeatedly fork the right half [h,f) while batch
            // remains, narrowing this task to the left half via the
            // embedded assignments (batch >>>= 1, baseLimit = h).
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachKeyTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            // Apply the action to each key in this task's remaining range.
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.key);
            propagateCompletion();
        }
    }
}
4997
@SuppressWarnings("serial")
static final class ForEachValueTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super V> action;
    ForEachValueTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super V> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<? super V> action;
        if ((action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachValueTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            // Apply the action to each value in this task's remaining range.
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.val);
            propagateCompletion();
        }
    }
}
5024
@SuppressWarnings("serial")
static final class ForEachEntryTask<K,V>
    extends BulkTask<K,V,Void> {
    final Consumer<? super Entry<K,V>> action;
    ForEachEntryTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Consumer<? super Entry<K,V>> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final Consumer<? super Entry<K,V>> action;
        if ((action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachEntryTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            // Nodes themselves serve as the Map.Entry arguments.
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(p);
            propagateCompletion();
        }
    }
}
5051
@SuppressWarnings("serial")
static final class ForEachMappingTask<K,V>
    extends BulkTask<K,V,Void> {
    final BiConsumer<? super K, ? super V> action;
    ForEachMappingTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiConsumer<? super K,? super V> action) {
        super(p, b, i, f, t);
        this.action = action;
    }
    public final void compute() {
        final BiConsumer<? super K, ? super V> action;
        if ((action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachMappingTask<K,V>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     action).fork();
            }
            // Apply the action to each (key, value) pair in range.
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(p.key, p.val);
            propagateCompletion();
        }
    }
}
5078
@SuppressWarnings("serial")
static final class ForEachTransformedKeyTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final Function<? super K, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedKeyTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedKeyTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // Null transformer results are skipped (treated as "no element").
                if ((u = transformer.apply(p.key)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}
5111
@SuppressWarnings("serial")
static final class ForEachTransformedValueTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final Function<? super V, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedValueTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedValueTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // Null transformer results are skipped (treated as "no element").
                if ((u = transformer.apply(p.val)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}
5144
@SuppressWarnings("serial")
static final class ForEachTransformedEntryTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final Function<Map.Entry<K,V>, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedEntryTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedEntryTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // Null transformer results are skipped (treated as "no element").
                if ((u = transformer.apply(p)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}
5177
@SuppressWarnings("serial")
static final class ForEachTransformedMappingTask<K,V,U>
    extends BulkTask<K,V,Void> {
    final BiFunction<? super K, ? super V, ? extends U> transformer;
    final Consumer<? super U> action;
    ForEachTransformedMappingTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiFunction<? super K, ? super V, ? extends U> transformer,
         Consumer<? super U> action) {
        super(p, b, i, f, t);
        this.transformer = transformer; this.action = action;
    }
    public final void compute() {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        if ((transformer = this.transformer) != null &&
            (action = this.action) != null) {
            // Split: fork the right half while batch remains; the embedded
            // assignments narrow this task to the left half.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                new ForEachTransformedMappingTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     transformer, action).fork();
            }
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                // Null transformer results are skipped (treated as "no element").
                if ((u = transformer.apply(p.key, p.val)) != null)
                    action.accept(u);
            }
            propagateCompletion();
        }
    }
}
5211
@SuppressWarnings("serial")
static final class SearchKeysTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super K, ? extends U> searchFunction;
    final AtomicReference<U> result;  // shared across all subtasks; first non-null wins
    SearchKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super K, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                // Stop splitting as soon as any task found a result.
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchKeysTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan until exhausted or some task (possibly another) succeeds.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.key)) != null) {
                    // Publish the result once; winner completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}
5255
@SuppressWarnings("serial")
static final class SearchValuesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super V, ? extends U> searchFunction;
    final AtomicReference<U> result;  // shared across all subtasks; first non-null wins
    SearchValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<? super V, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                // Stop splitting as soon as any task found a result.
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchValuesTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan until exhausted or some task (possibly another) succeeds.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.val)) != null) {
                    // Publish the result once; winner completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}
5299
@SuppressWarnings("serial")
static final class SearchEntriesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<Entry<K,V>, ? extends U> searchFunction;
    final AtomicReference<U> result;  // shared across all subtasks; first non-null wins
    SearchEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         Function<Entry<K,V>, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                // Stop splitting as soon as any task found a result.
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchEntriesTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan until exhausted or some task (possibly another) succeeds.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p)) != null) {
                    // Publish the result once; winner completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    return;  // exit compute entirely once a result is published
                }
            }
        }
    }
}
5343
@SuppressWarnings("serial")
static final class SearchMappingsTask<K,V,U>
    extends BulkTask<K,V,U> {
    final BiFunction<? super K, ? super V, ? extends U> searchFunction;
    final AtomicReference<U> result;  // shared across all subtasks; first non-null wins
    SearchMappingsTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         BiFunction<? super K, ? super V, ? extends U> searchFunction,
         AtomicReference<U> result) {
        super(p, b, i, f, t);
        this.searchFunction = searchFunction; this.result = result;
    }
    public final U getRawResult() { return result.get(); }
    public final void compute() {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        if ((searchFunction = this.searchFunction) != null &&
            (result = this.result) != null) {
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                // Stop splitting as soon as any task found a result.
                if (result.get() != null)
                    return;
                addToPendingCount(1);
                new SearchMappingsTask<K,V,U>
                    (this, batch >>>= 1, baseLimit = h, f, tab,
                     searchFunction, result).fork();
            }
            // Scan until exhausted or some task (possibly another) succeeds.
            while (result.get() == null) {
                U u;
                Node<K,V> p;
                if ((p = advance()) == null) {
                    propagateCompletion();
                    break;
                }
                if ((u = searchFunction.apply(p.key, p.val)) != null) {
                    // Publish the result once; winner completes the root.
                    if (result.compareAndSet(null, u))
                        quietlyCompleteRoot();
                    break;
                }
            }
        }
    }
}
5387
@SuppressWarnings("serial")
static final class ReduceKeysTask<K,V>
    extends BulkTask<K,V,K> {
    final BiFunction<? super K, ? super K, ? extends K> reducer;
    K result;                          // this task's partial reduction
    ReduceKeysTask<K,V> rights, nextRight;  // linked list of forked right-hand subtasks
    ReduceKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceKeysTask<K,V> nextRight,
         BiFunction<? super K, ? super K, ? extends K> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final K getRawResult() { return result; }
    public final void compute() {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        if ((reducer = this.reducer) != null) {
            // Split: fork right halves, threading them onto the rights list
            // so results can be merged on completion.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceKeysTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            // Locally reduce this task's portion of keys.
            K r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                K u = p.key;
                r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
            }
            result = r;
            // On completion, merge results of finished right-hand subtasks
            // up the CountedCompleter chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                ReduceKeysTask<K,V>
                    t = (ReduceKeysTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    K tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
5435
@SuppressWarnings("serial")
static final class ReduceValuesTask<K,V>
    extends BulkTask<K,V,V> {
    final BiFunction<? super V, ? super V, ? extends V> reducer;
    V result;                          // this task's partial reduction
    ReduceValuesTask<K,V> rights, nextRight;  // linked list of forked right-hand subtasks
    ReduceValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceValuesTask<K,V> nextRight,
         BiFunction<? super V, ? super V, ? extends V> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final V getRawResult() { return result; }
    public final void compute() {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        if ((reducer = this.reducer) != null) {
            // Split: fork right halves, threading them onto the rights list
            // so results can be merged on completion.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceValuesTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            // Locally reduce this task's portion of values.
            V r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                V v = p.val;
                r = (r == null) ? v : reducer.apply(r, v);
            }
            result = r;
            // On completion, merge results of finished right-hand subtasks
            // up the CountedCompleter chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                ReduceValuesTask<K,V>
                    t = (ReduceValuesTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    V tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
5483
@SuppressWarnings("serial")
static final class ReduceEntriesTask<K,V>
    extends BulkTask<K,V,Map.Entry<K,V>> {
    final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
    Map.Entry<K,V> result;                  // this task's partial reduction
    ReduceEntriesTask<K,V> rights, nextRight;  // linked list of forked right-hand subtasks
    ReduceEntriesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         ReduceEntriesTask<K,V> nextRight,
         BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.reducer = reducer;
    }
    public final Map.Entry<K,V> getRawResult() { return result; }
    public final void compute() {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        if ((reducer = this.reducer) != null) {
            // Split: fork right halves, threading them onto the rights list
            // so results can be merged on completion.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new ReduceEntriesTask<K,V>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, reducer)).fork();
            }
            // Locally reduce this task's portion; nodes act as entries.
            Map.Entry<K,V> r = null;
            for (Node<K,V> p; (p = advance()) != null; )
                r = (r == null) ? p : reducer.apply(r, p);
            result = r;
            // On completion, merge results of finished right-hand subtasks
            // up the CountedCompleter chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                ReduceEntriesTask<K,V>
                    t = (ReduceEntriesTask<K,V>)c,
                    s = t.rights;
                while (s != null) {
                    Map.Entry<K,V> tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
5529
@SuppressWarnings("serial")
static final class MapReduceKeysTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super K, ? extends U> transformer;
    final BiFunction<? super U, ? super U, ? extends U> reducer;
    U result;                              // this task's partial map-reduce result
    MapReduceKeysTask<K,V,U> rights, nextRight;  // linked list of forked right-hand subtasks
    MapReduceKeysTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceKeysTask<K,V,U> nextRight,
         Function<? super K, ? extends U> transformer,
         BiFunction<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            // Split: fork right halves, threading them onto the rights list
            // so results can be merged on completion.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceKeysTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            // Locally transform each key and fold non-null results.
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.key)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            // On completion, merge results of finished right-hand subtasks
            // up the CountedCompleter chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                MapReduceKeysTask<K,V,U>
                    t = (MapReduceKeysTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
5583
@SuppressWarnings("serial")
static final class MapReduceValuesTask<K,V,U>
    extends BulkTask<K,V,U> {
    final Function<? super V, ? extends U> transformer;
    final BiFunction<? super U, ? super U, ? extends U> reducer;
    U result;                              // this task's partial map-reduce result
    MapReduceValuesTask<K,V,U> rights, nextRight;  // linked list of forked right-hand subtasks
    MapReduceValuesTask
        (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
         MapReduceValuesTask<K,V,U> nextRight,
         Function<? super V, ? extends U> transformer,
         BiFunction<? super U, ? super U, ? extends U> reducer) {
        super(p, b, i, f, t); this.nextRight = nextRight;
        this.transformer = transformer;
        this.reducer = reducer;
    }
    public final U getRawResult() { return result; }
    public final void compute() {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        if ((transformer = this.transformer) != null &&
            (reducer = this.reducer) != null) {
            // Split: fork right halves, threading them onto the rights list
            // so results can be merged on completion.
            for (int i = baseIndex, f, h; batch > 0 &&
                     (h = ((f = baseLimit) + i) >>> 1) > i;) {
                addToPendingCount(1);
                (rights = new MapReduceValuesTask<K,V,U>
                 (this, batch >>>= 1, baseLimit = h, f, tab,
                  rights, transformer, reducer)).fork();
            }
            // Locally transform each value and fold non-null results.
            U r = null;
            for (Node<K,V> p; (p = advance()) != null; ) {
                U u;
                if ((u = transformer.apply(p.val)) != null)
                    r = (r == null) ? u : reducer.apply(r, u);
            }
            result = r;
            // On completion, merge results of finished right-hand subtasks
            // up the CountedCompleter chain.
            CountedCompleter<?> c;
            for (c = firstComplete(); c != null; c = c.nextComplete()) {
                @SuppressWarnings("unchecked")
                MapReduceValuesTask<K,V,U>
                    t = (MapReduceValuesTask<K,V,U>)c,
                    s = t.rights;
                while (s != null) {
                    U tr, sr;
                    if ((sr = s.result) != null)
                        t.result = (((tr = t.result) == null) ? sr :
                                    reducer.apply(tr, sr));
                    s = t.rights = s.nextRight;
                }
            }
        }
    }
}
5637
    /**
     * ForkJoin task that maps each entry (the {@code Node} itself, which
     * implements {@code Map.Entry}) through {@code transformer} and reduces
     * the non-null results with {@code reducer}.  Splits its range in half
     * per fork, processes its own portion, then merges subtask results as
     * completions propagate upward.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;   // reduced value for this subtree; null if no non-null mappings
        MapReduceEntriesTask<K,V,U> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Null transformer results are skipped, not reduced.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5691
    /**
     * ForkJoin task that maps each (key, value) pair through
     * {@code transformer} and reduces the non-null results with
     * {@code reducer}.  Splits its range in half per fork, processes its
     * own portion, then merges subtask results as completions propagate
     * upward.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;   // reduced value for this subtree; null if no non-null mappings
        MapReduceMappingsTask<K,V,U> rights, nextRight; // stack of forked right-hand subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; this task keeps the left half.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Null transformer results are skipped, not reduced.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5745
    /**
     * ForkJoin task that maps each key to a {@code double} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Unlike the reference
     * versions, every element contributes (there is no null-skip), and
     * subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;     // identity element for the reduction
        double result;
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;   // stack of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5795
    /**
     * ForkJoin task that maps each value to a {@code double} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;     // identity element for the reduction
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // stack of forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5845
    /**
     * ForkJoin task that maps each entry to a {@code double} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;     // identity element for the reduction
        double result;
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;    // stack of forked right-hand subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5895
    /**
     * ForkJoin task that maps each (key, value) pair to a {@code double}
     * via {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;     // identity element for the reduction
        double result;
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;   // stack of forked right-hand subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5945
    /**
     * ForkJoin task that maps each key to a {@code long} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;
        MapReduceKeysToLongTask<K,V> rights, nextRight;     // stack of forked right-hand subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5995
    /**
     * ForkJoin task that maps each value to a {@code long} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;
        MapReduceValuesToLongTask<K,V> rights, nextRight;   // stack of forked right-hand subtasks
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6045
    /**
     * ForkJoin task that maps each entry to a {@code long} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;
        MapReduceEntriesToLongTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6095
    /**
     * ForkJoin task that maps each (key, value) pair to a {@code long}
     * via {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;   // identity element for the reduction
        long result;
        MapReduceMappingsToLongTask<K,V> rights, nextRight;     // stack of forked right-hand subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6145
    /**
     * ForkJoin task that maps each key to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity element for the reduction
        int result;
        MapReduceKeysToIntTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6195
    /**
     * ForkJoin task that maps each value to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity element for the reduction
        int result;
        MapReduceValuesToIntTask<K,V> rights, nextRight;    // stack of forked right-hand subtasks
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6245
    /**
     * ForkJoin task that maps each entry to an {@code int} via
     * {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity element for the reduction
        int result;
        MapReduceEntriesToIntTask<K,V> rights, nextRight;   // stack of forked right-hand subtasks
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6295
    /**
     * ForkJoin task that maps each (key, value) pair to an {@code int}
     * via {@code transformer} and combines results with {@code reducer},
     * starting from identity value {@code basis}.  Every element
     * contributes; subtask results are folded in unconditionally.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity element for the reduction
        int result;
        MapReduceMappingsToIntTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Ordered side effects: batch halved, baseLimit lowered
                // before each fork; subtasks inherit r as their basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                // Fold completed right subtasks' results up the completer tree.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6345
    // Unsafe mechanics
    // Field offsets used for the CAS/volatile accesses elsewhere in this
    // class (sizeCtl, transferIndex, baseCount, cellsBusy, CounterCell.value)
    // plus the base offset / element shift for indexing Node[] arrays.
    private static final Unsafe U = Unsafe.getUnsafe();
    private static final long SIZECTL;
    private static final long TRANSFERINDEX;
    private static final long BASECOUNT;
    private static final long CELLSBUSY;
    private static final long CELLVALUE;
    private static final int ABASE;
    private static final int ASHIFT;

    static {
        SIZECTL = U.objectFieldOffset
            (ConcurrentHashMap.class, "sizeCtl");
        TRANSFERINDEX = U.objectFieldOffset
            (ConcurrentHashMap.class, "transferIndex");
        BASECOUNT = U.objectFieldOffset
            (ConcurrentHashMap.class, "baseCount");
        CELLSBUSY = U.objectFieldOffset
            (ConcurrentHashMap.class, "cellsBusy");

        CELLVALUE = U.objectFieldOffset
            (CounterCell.class, "value");

        ABASE = U.arrayBaseOffset(Node[].class);
        int scale = U.arrayIndexScale(Node[].class);
        // ASHIFT (log2 of the element size) only works if the scale is a
        // power of two; fail fast at class-initialization time otherwise.
        if ((scale & (scale - 1)) != 0)
            throw new ExceptionInInitializerError("array index scale not a power of two");
        ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;

        // Eager class load observed to help JIT during startup
        ensureLoaded = ReservationNode.class;
    }
6382 }
6383