View Javadoc
1   /*
2    * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3    *
4    * This code is free software; you can redistribute it and/or modify it
5    * under the terms of the GNU General Public License version 2 only, as
6    * published by the Free Software Foundation.  Oracle designates this
7    * particular file as subject to the "Classpath" exception as provided
8    * by Oracle in the LICENSE file that accompanied this code.
9    *
10   * This code is distributed in the hope that it will be useful, but WITHOUT
11   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13   * version 2 for more details (a copy is included in the LICENSE file that
14   * accompanied this code).
15   *
16   * You should have received a copy of the GNU General Public License version
17   * 2 along with this work; if not, write to the Free Software Foundation,
18   * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19   *
20   * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21   * or visit www.oracle.com if you need additional information or have any
22   * questions.
23   */
24  
25  /*
26   * This file is available under and governed by the GNU General Public
27   * License version 2 only, as published by the Free Software Foundation.
28   * However, the following notice accompanied the original version of this
29   * file:
30   *
31   * Written by Doug Lea with assistance from members of JCP JSR-166
32   * Expert Group and released to the public domain, as explained at
33   * http://creativecommons.org/publicdomain/zero/1.0/
34   */
35  
36  package java.util.concurrent;
37  
38  import java.io.ObjectStreamField;
39  import java.io.Serializable;
40  import java.lang.reflect.ParameterizedType;
41  import java.lang.reflect.Type;
42  import java.util.AbstractMap;
43  import java.util.Arrays;
44  import java.util.Collection;
45  import java.util.Comparator;
46  import java.util.Enumeration;
47  import java.util.HashMap;
48  import java.util.Hashtable;
49  import java.util.Iterator;
50  import java.util.Map;
51  import java.util.NoSuchElementException;
52  import java.util.Set;
53  import java.util.Spliterator;
54  import java.util.concurrent.ConcurrentMap;
55  import java.util.concurrent.ForkJoinPool;
56  import java.util.concurrent.atomic.AtomicReference;
57  import java.util.concurrent.locks.LockSupport;
58  import java.util.concurrent.locks.ReentrantLock;
59  import java.util.function.BiConsumer;
60  import java.util.function.BiFunction;
61  import java.util.function.BinaryOperator;
62  import java.util.function.Consumer;
63  import java.util.function.DoubleBinaryOperator;
64  import java.util.function.Function;
65  import java.util.function.IntBinaryOperator;
66  import java.util.function.LongBinaryOperator;
67  import java.util.function.ToDoubleBiFunction;
68  import java.util.function.ToDoubleFunction;
69  import java.util.function.ToIntBiFunction;
70  import java.util.function.ToIntFunction;
71  import java.util.function.ToLongBiFunction;
72  import java.util.function.ToLongFunction;
73  import java.util.stream.Stream;
74  
75  /**
76   * A hash table supporting full concurrency of retrievals and
77   * high expected concurrency for updates. This class obeys the
78   * same functional specification as {@link java.util.Hashtable}, and
79   * includes versions of methods corresponding to each method of
80   * {@code Hashtable}. However, even though all operations are
81   * thread-safe, retrieval operations do <em>not</em> entail locking,
82   * and there is <em>not</em> any support for locking the entire table
83   * in a way that prevents all access.  This class is fully
84   * interoperable with {@code Hashtable} in programs that rely on its
85   * thread safety but not on its synchronization details.
86   *
87   * <p>Retrieval operations (including {@code get}) generally do not
88   * block, so may overlap with update operations (including {@code put}
89   * and {@code remove}). Retrievals reflect the results of the most
90   * recently <em>completed</em> update operations holding upon their
91   * onset. (More formally, an update operation for a given key bears a
92   * <em>happens-before</em> relation with any (non-null) retrieval for
93   * that key reporting the updated value.)  For aggregate operations
94   * such as {@code putAll} and {@code clear}, concurrent retrievals may
95   * reflect insertion or removal of only some entries.  Similarly,
96   * Iterators, Spliterators and Enumerations return elements reflecting the
97   * state of the hash table at some point at or since the creation of the
98   * iterator/enumeration.  They do <em>not</em> throw {@link
99   * java.util.ConcurrentModificationException ConcurrentModificationException}.
100  * However, iterators are designed to be used by only one thread at a time.
101  * Bear in mind that the results of aggregate status methods including
102  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
103  * useful only when a map is not undergoing concurrent updates in other threads.
104  * Otherwise the results of these methods reflect transient states
105  * that may be adequate for monitoring or estimation purposes, but not
106  * for program control.
107  *
108  * <p>The table is dynamically expanded when there are too many
109  * collisions (i.e., keys that have distinct hash codes but fall into
110  * the same slot modulo the table size), with the expected average
111  * effect of maintaining roughly two bins per mapping (corresponding
112  * to a 0.75 load factor threshold for resizing). There may be much
113  * variance around this average as mappings are added and removed, but
114  * overall, this maintains a commonly accepted time/space tradeoff for
115  * hash tables.  However, resizing this or any other kind of hash
116  * table may be a relatively slow operation. When possible, it is a
117  * good idea to provide a size estimate as an optional {@code
118  * initialCapacity} constructor argument. An additional optional
119  * {@code loadFactor} constructor argument provides a further means of
120  * customizing initial table capacity by specifying the table density
121  * to be used in calculating the amount of space to allocate for the
122  * given number of elements.  Also, for compatibility with previous
123  * versions of this class, constructors may optionally specify an
124  * expected {@code concurrencyLevel} as an additional hint for
125  * internal sizing.  Note that using many keys with exactly the same
126  * {@code hashCode()} is a sure way to slow down performance of any
127  * hash table. To ameliorate impact, when keys are {@link Comparable},
128  * this class may use comparison order among keys to help break ties.
129  *
130  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
131  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
132  * (using {@link #keySet(Object)}) when only keys are of interest, and the
133  * mapped values are (perhaps transiently) not used or all take the
134  * same mapping value.
135  *
136  * <p>A ConcurrentHashMap can be used as scalable frequency map (a
137  * form of histogram or multiset) by using {@link
138  * java.util.concurrent.atomic.LongAdder} values and initializing via
139  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
140  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
141  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
142  *
143  * <p>This class and its views and iterators implement all of the
144  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
145  * interfaces.
146  *
147  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
148  * does <em>not</em> allow {@code null} to be used as a key or value.
149  *
150  * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
151  * operations that, unlike most {@link Stream} methods, are designed
152  * to be safely, and often sensibly, applied even with maps that are
153  * being concurrently updated by other threads; for example, when
154  * computing a snapshot summary of the values in a shared registry.
155  * There are three kinds of operation, each with four forms, accepting
156  * functions with Keys, Values, Entries, and (Key, Value) arguments
157  * and/or return values. Because the elements of a ConcurrentHashMap
158  * are not ordered in any particular way, and may be processed in
159  * different orders in different parallel executions, the correctness
160  * of supplied functions should not depend on any ordering, or on any
161  * other objects or values that may transiently change while
162  * computation is in progress; and except for forEach actions, should
163  * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
164  * objects do not support method {@code setValue}.
165  *
166  * <ul>
167  * <li> forEach: Perform a given action on each element.
168  * A variant form applies a given transformation on each element
169  * before performing the action.</li>
170  *
171  * <li> search: Return the first available non-null result of
172  * applying a given function on each element; skipping further
173  * search when a result is found.</li>
174  *
175  * <li> reduce: Accumulate each element.  The supplied reduction
176  * function cannot rely on ordering (more formally, it should be
177  * both associative and commutative).  There are five variants:
178  *
179  * <ul>
180  *
181  * <li> Plain reductions. (There is not a form of this method for
182  * (key, value) function arguments since there is no corresponding
183  * return type.)</li>
184  *
185  * <li> Mapped reductions that accumulate the results of a given
186  * function applied to each element.</li>
187  *
188  * <li> Reductions to scalar doubles, longs, and ints, using a
189  * given basis value.</li>
190  *
191  * </ul>
192  * </li>
193  * </ul>
194  *
195  * <p>These bulk operations accept a {@code parallelismThreshold}
196  * argument. Methods proceed sequentially if the current map size is
197  * estimated to be less than the given threshold. Using a value of
198  * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
199  * of {@code 1} results in maximal parallelism by partitioning into
200  * enough subtasks to fully utilize the {@link
201  * ForkJoinPool#commonPool()} that is used for all parallel
202  * computations. Normally, you would initially choose one of these
203  * extreme values, and then measure performance of using in-between
204  * values that trade off overhead versus throughput.
205  *
206  * <p>The concurrency properties of bulk operations follow
207  * from those of ConcurrentHashMap: Any non-null result returned
208  * from {@code get(key)} and related access methods bears a
209  * happens-before relation with the associated insertion or
210  * update.  The result of any bulk operation reflects the
211  * composition of these per-element relations (but is not
212  * necessarily atomic with respect to the map as a whole unless it
213  * is somehow known to be quiescent).  Conversely, because keys
214  * and values in the map are never null, null serves as a reliable
215  * atomic indicator of the current lack of any result.  To
216  * maintain this property, null serves as an implicit basis for
217  * all non-scalar reduction operations. For the double, long, and
218  * int versions, the basis should be one that, when combined with
219  * any other value, returns that other value (more formally, it
220  * should be the identity element for the reduction). Most common
221  * reductions have these properties; for example, computing a sum
222  * with basis 0 or a minimum with basis MAX_VALUE.
223  *
224  * <p>Search and transformation functions provided as arguments
225  * should similarly return null to indicate the lack of any result
226  * (in which case it is not used). In the case of mapped
227  * reductions, this also enables transformations to serve as
228  * filters, returning null (or, in the case of primitive
229  * specializations, the identity basis) if the element should not
230  * be combined. You can create compound transformations and
231  * filterings by composing them yourself under this "null means
232  * there is nothing there now" rule before using them in search or
233  * reduce operations.
234  *
235  * <p>Methods accepting and/or returning Entry arguments maintain
236  * key-value associations. They may be useful for example when
237  * finding the key for the greatest value. Note that "plain" Entry
238  * arguments can be supplied using {@code new
239  * AbstractMap.SimpleEntry(k,v)}.
240  *
241  * <p>Bulk operations may complete abruptly, throwing an
242  * exception encountered in the application of a supplied
243  * function. Bear in mind when handling such exceptions that other
244  * concurrently executing functions could also have thrown
245  * exceptions, or would have done so if the first exception had
246  * not occurred.
247  *
248  * <p>Speedups for parallel compared to sequential forms are common
249  * but not guaranteed.  Parallel operations involving brief functions
250  * on small maps may execute more slowly than sequential forms if the
251  * underlying work to parallelize the computation is more expensive
252  * than the computation itself.  Similarly, parallelization may not
253  * lead to much actual parallelism if all processors are busy
254  * performing unrelated tasks.
255  *
256  * <p>All arguments to all task methods must be non-null.
257  *
258  * <p>This class is a member of the
259  * <a href="{@docRoot}/../technotes/guides/collections/index.html">
260  * Java Collections Framework</a>.
261  *
262  * @since 1.5
263  * @author Doug Lea
264  * @param <K> the type of keys maintained by this map
265  * @param <V> the type of mapped values
266  */
267 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
268     implements ConcurrentMap<K,V>, Serializable {
269     private static final long serialVersionUID = 7249069246763182397L;
270 
271     /*
272      * Overview:
273      *
274      * The primary design goal of this hash table is to maintain
275      * concurrent readability (typically method get(), but also
276      * iterators and related methods) while minimizing update
277      * contention. Secondary goals are to keep space consumption about
278      * the same or better than java.util.HashMap, and to support high
279      * initial insertion rates on an empty table by many threads.
280      *
281      * This map usually acts as a binned (bucketed) hash table.  Each
282      * key-value mapping is held in a Node.  Most nodes are instances
283      * of the basic Node class with hash, key, value, and next
284      * fields. However, various subclasses exist: TreeNodes are
285      * arranged in balanced trees, not lists.  TreeBins hold the roots
286      * of sets of TreeNodes. ForwardingNodes are placed at the heads
287      * of bins during resizing. ReservationNodes are used as
288      * placeholders while establishing values in computeIfAbsent and
289      * related methods.  The types TreeBin, ForwardingNode, and
290      * ReservationNode do not hold normal user keys, values, or
291      * hashes, and are readily distinguishable during search etc
292      * because they have negative hash fields and null key and value
293      * fields. (These special nodes are either uncommon or transient,
294      * so the impact of carrying around some unused fields is
295      * insignificant.)
296      *
297      * The table is lazily initialized to a power-of-two size upon the
298      * first insertion.  Each bin in the table normally contains a
299      * list of Nodes (most often, the list has only zero or one Node).
300      * Table accesses require volatile/atomic reads, writes, and
301      * CASes.  Because there is no other way to arrange this without
302      * adding further indirections, we use intrinsics
303      * (sun.misc.Unsafe) operations.
304      *
305      * We use the top (sign) bit of Node hash fields for control
306      * purposes -- it is available anyway because of addressing
307      * constraints.  Nodes with negative hash fields are specially
308      * handled or ignored in map methods.
309      *
310      * Insertion (via put or its variants) of the first node in an
311      * empty bin is performed by just CASing it to the bin.  This is
312      * by far the most common case for put operations under most
313      * key/hash distributions.  Other update operations (insert,
314      * delete, and replace) require locks.  We do not want to waste
315      * the space required to associate a distinct lock object with
316      * each bin, so instead use the first node of a bin list itself as
317      * a lock. Locking support for these locks relies on builtin
318      * "synchronized" monitors.
319      *
320      * Using the first node of a list as a lock does not by itself
321      * suffice though: When a node is locked, any update must first
322      * validate that it is still the first node after locking it, and
323      * retry if not. Because new nodes are always appended to lists,
324      * once a node is first in a bin, it remains first until deleted
325      * or the bin becomes invalidated (upon resizing).
326      *
327      * The main disadvantage of per-bin locks is that other update
328      * operations on other nodes in a bin list protected by the same
329      * lock can stall, for example when user equals() or mapping
330      * functions take a long time.  However, statistically, under
331      * random hash codes, this is not a common problem.  Ideally, the
332      * frequency of nodes in bins follows a Poisson distribution
333      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
334      * parameter of about 0.5 on average, given the resizing threshold
335      * of 0.75, although with a large variance because of resizing
336      * granularity. Ignoring variance, the expected occurrences of
337      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
338      * first values are:
339      *
340      * 0:    0.60653066
341      * 1:    0.30326533
342      * 2:    0.07581633
343      * 3:    0.01263606
344      * 4:    0.00157952
345      * 5:    0.00015795
346      * 6:    0.00001316
347      * 7:    0.00000094
348      * 8:    0.00000006
349      * more: less than 1 in ten million
350      *
351      * Lock contention probability for two threads accessing distinct
352      * elements is roughly 1 / (8 * #elements) under random hashes.
353      *
354      * Actual hash code distributions encountered in practice
355      * sometimes deviate significantly from uniform randomness.  This
356      * includes the case when N > (1<<30), so some keys MUST collide.
357      * Similarly for dumb or hostile usages in which multiple keys are
358      * designed to have identical hash codes or ones that differ only
359      * in masked-out high bits. So we use a secondary strategy that
360      * applies when the number of nodes in a bin exceeds a
361      * threshold. These TreeBins use a balanced tree to hold nodes (a
362      * specialized form of red-black trees), bounding search time to
363      * O(log N).  Each search step in a TreeBin is at least twice as
364      * slow as in a regular list, but given that N cannot exceed
365      * (1<<64) (before running out of addresses) this bounds search
366      * steps, lock hold times, etc, to reasonable constants (roughly
367      * 100 nodes inspected per operation worst case) so long as keys
368      * are Comparable (which is very common -- String, Long, etc).
369      * TreeBin nodes (TreeNodes) also maintain the same "next"
370      * traversal pointers as regular nodes, so can be traversed in
371      * iterators in the same way.
372      *
373      * The table is resized when occupancy exceeds a percentage
374      * threshold (nominally, 0.75, but see below).  Any thread
375      * noticing an overfull bin may assist in resizing after the
376      * initiating thread allocates and sets up the replacement array.
377      * However, rather than stalling, these other threads may proceed
378      * with insertions etc.  The use of TreeBins shields us from the
379      * worst case effects of overfilling while resizes are in
380      * progress.  Resizing proceeds by transferring bins, one by one,
381      * from the table to the next table. However, threads claim small
382      * blocks of indices to transfer (via field transferIndex) before
383      * doing so, reducing contention.  A generation stamp in field
384      * sizeCtl ensures that resizings do not overlap. Because we are
385      * using power-of-two expansion, the elements from each bin must
386      * either stay at same index, or move with a power of two
387      * offset. We eliminate unnecessary node creation by catching
388      * cases where old nodes can be reused because their next fields
389      * won't change.  On average, only about one-sixth of them need
390      * cloning when a table doubles. The nodes they replace will be
391      * garbage collectable as soon as they are no longer referenced by
392      * any reader thread that may be in the midst of concurrently
393      * traversing table.  Upon transfer, the old table bin contains
394      * only a special forwarding node (with hash field "MOVED") that
395      * contains the next table as its key. On encountering a
396      * forwarding node, access and update operations restart, using
397      * the new table.
398      *
399      * Each bin transfer requires its bin lock, which can stall
400      * waiting for locks while resizing. However, because other
401      * threads can join in and help resize rather than contend for
402      * locks, average aggregate waits become shorter as resizing
403      * progresses.  The transfer operation must also ensure that all
404      * accessible bins in both the old and new table are usable by any
405      * traversal.  This is arranged in part by proceeding from the
406      * last bin (table.length - 1) up towards the first.  Upon seeing
407      * a forwarding node, traversals (see class Traverser) arrange to
408      * move to the new table without revisiting nodes.  To ensure that
409      * no intervening nodes are skipped even when moved out of order,
410      * a stack (see class TableStack) is created on first encounter of
411      * a forwarding node during a traversal, to maintain its place if
412      * later processing the current table. The need for these
413      * save/restore mechanics is relatively rare, but when one
414      * forwarding node is encountered, typically many more will be.
415      * So Traversers use a simple caching scheme to avoid creating so
416      * many new TableStack nodes. (Thanks to Peter Levart for
417      * suggesting use of a stack here.)
418      *
419      * The traversal scheme also applies to partial traversals of
420      * ranges of bins (via an alternate Traverser constructor)
421      * to support partitioned aggregate operations.  Also, read-only
422      * operations give up if ever forwarded to a null table, which
423      * provides support for shutdown-style clearing, which is also not
424      * currently implemented.
425      *
426      * Lazy table initialization minimizes footprint until first use,
427      * and also avoids resizings when the first operation is from a
428      * putAll, constructor with map argument, or deserialization.
429      * These cases attempt to override the initial capacity settings,
430      * but harmlessly fail to take effect in cases of races.
431      *
432      * The element count is maintained using a specialization of
433      * LongAdder. We need to incorporate a specialization rather than
434      * just use a LongAdder in order to access implicit
435      * contention-sensing that leads to creation of multiple
436      * CounterCells.  The counter mechanics avoid contention on
437      * updates but can encounter cache thrashing if read too
438      * frequently during concurrent access. To avoid reading so often,
439      * resizing under contention is attempted only upon adding to a
440      * bin already holding two or more nodes. Under uniform hash
441      * distributions, the probability of this occurring at threshold
442      * is around 13%, meaning that only about 1 in 8 puts check
443      * threshold (and after resizing, many fewer do so).
444      *
445      * TreeBins use a special form of comparison for search and
446      * related operations (which is the main reason we cannot use
447      * existing collections such as TreeMaps). TreeBins contain
448      * Comparable elements, but may contain others, as well as
449      * elements that are Comparable but not necessarily Comparable for
450      * the same T, so we cannot invoke compareTo among them. To handle
451      * this, the tree is ordered primarily by hash value, then by
452      * Comparable.compareTo order if applicable.  On lookup at a node,
453      * if elements are not comparable or compare as 0 then both left
454      * and right children may need to be searched in the case of tied
455      * hash values. (This corresponds to the full list search that
456      * would be necessary if all elements were non-Comparable and had
457      * tied hashes.) On insertion, to keep a total ordering (or as
458      * close as is required here) across rebalancings, we compare
459      * classes and identityHashCodes as tie-breakers. The red-black
460      * balancing code is updated from pre-jdk-collections
461      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
462      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
463      * Algorithms" (CLR).
464      *
465      * TreeBins also require an additional locking mechanism.  While
466      * list traversal is always possible by readers even during
467      * updates, tree traversal is not, mainly because of tree-rotations
468      * that may change the root node and/or its linkages.  TreeBins
469      * include a simple read-write lock mechanism parasitic on the
470      * main bin-synchronization strategy: Structural adjustments
471      * associated with an insertion or removal are already bin-locked
472      * (and so cannot conflict with other writers) but must wait for
473      * ongoing readers to finish. Since there can be only one such
474      * waiter, we use a simple scheme using a single "waiter" field to
475      * block writers.  However, readers need never block.  If the root
476      * lock is held, they proceed along the slow traversal path (via
477      * next-pointers) until the lock becomes available or the list is
478      * exhausted, whichever comes first. These cases are not fast, but
479      * maximize aggregate expected throughput.
480      *
481      * Maintaining API and serialization compatibility with previous
482      * versions of this class introduces several oddities. Mainly: We
483      * leave untouched but unused constructor arguments referring to
484      * concurrencyLevel. We accept a loadFactor constructor argument,
485      * but apply it only to initial table capacity (which is the only
486      * time that we can guarantee to honor it.) We also declare an
487      * unused "Segment" class that is instantiated in minimal form
488      * only when serializing.
489      *
490      * Also, solely for compatibility with previous versions of this
491      * class, it extends AbstractMap, even though all of its methods
492      * are overridden, so it is just useless baggage.
493      *
494      * This file is organized to make things a little easier to follow
495      * while reading than they might otherwise: First the main static
496      * declarations and utilities, then fields, then main public
497      * methods (with a few factorings of multiple public methods into
498      * internal ones), then sizing methods, trees, traversers, and
499      * bulk operations.
500      */
501 
502     /* ---------------- Constants -------------- */
503 
504     /**
505      * The largest possible table capacity.  This value must be
506      * exactly 1<<30 to stay within Java array allocation and indexing
507      * bounds for power of two table sizes, and is further required
508      * because the top two bits of 32bit hash fields are used for
509      * control purposes.
510      */
511     private static final int MAXIMUM_CAPACITY = 1 << 30;
512 
513     /**
514      * The default initial table capacity.  Must be a power of 2
515      * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
516      */
517     private static final int DEFAULT_CAPACITY = 16;
518 
519     /**
520      * The largest possible (non-power of two) array size.
521      * Needed by toArray and related methods.
522      */
523     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
524 
525     /**
526      * The default concurrency level for this table. Unused but
527      * defined for compatibility with previous versions of this class.
528      */
529     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
530 
531     /**
532      * The load factor for this table. Overrides of this value in
533      * constructors affect only the initial table capacity.  The
534      * actual floating point value isn't normally used -- it is
535      * simpler to use expressions such as {@code n - (n >>> 2)} for
536      * the associated resizing threshold.
537      */
538     private static final float LOAD_FACTOR = 0.75f;
539 
540     /**
541      * The bin count threshold for using a tree rather than list for a
542      * bin.  Bins are converted to trees when adding an element to a
543      * bin with at least this many nodes. The value must be greater
544      * than 2, and should be at least 8 to mesh with assumptions in
545      * tree removal about conversion back to plain bins upon
546      * shrinkage.
547      */
548     static final int TREEIFY_THRESHOLD = 8;
549 
550     /**
551      * The bin count threshold for untreeifying a (split) bin during a
552      * resize operation. Should be less than TREEIFY_THRESHOLD, and at
553      * most 6 to mesh with shrinkage detection under removal.
554      */
555     static final int UNTREEIFY_THRESHOLD = 6;
556 
557     /**
558      * The smallest table capacity for which bins may be treeified.
559      * (Otherwise the table is resized if too many nodes in a bin.)
560      * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
561      * conflicts between resizing and treeification thresholds.
562      */
563     static final int MIN_TREEIFY_CAPACITY = 64;
564 
565     /**
566      * Minimum number of rebinnings per transfer step. Ranges are
567      * subdivided to allow multiple resizer threads.  This value
568      * serves as a lower bound to avoid resizers encountering
569      * excessive memory contention.  The value should be at least
570      * DEFAULT_CAPACITY.
571      */
572     private static final int MIN_TRANSFER_STRIDE = 16;
573 
574     /**
575      * The number of bits used for generation stamp in sizeCtl.
576      * Must be at least 6 for 32bit arrays.
577      */
578     private static int RESIZE_STAMP_BITS = 16;
579 
580     /**
581      * The maximum number of threads that can help resize.
582      * Must fit in 32 - RESIZE_STAMP_BITS bits.
583      */
584     private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
585 
586     /**
587      * The bit shift for recording size stamp in sizeCtl.
588      */
589     private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
590 
    /*
     * Encodings for Node hash fields. See above for explanation.
     * Negative values mark special internal node types; ordinary
     * hashes are forced non-negative by spread() (mask HASH_BITS).
     */
    static final int MOVED     = -1; // hash for forwarding nodes
    static final int TREEBIN   = -2; // hash for roots of trees
    static final int RESERVED  = -3; // hash for transient reservations
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();
601 
    /** For serialization compatibility. */
    // Names the fields of the previous segment-based implementation
    // (see the Segment class below, kept only for this purpose) so
    // serialized forms interoperate across JDK versions.
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };
608 
609     /* ---------------- Nodes -------------- */
610 
611     /**
612      * Key-value entry.  This class is never exported out as a
613      * user-mutable Map.Entry (i.e., one supporting setValue; see
614      * MapEntry below), but can be used for read-only traversals used
615      * in bulk tasks.  Subclasses of Node with a negative hash field
616      * are special, and contain null keys and values (but are never
617      * exported).  Otherwise, keys and vals are never null.
618      */
619     static class Node<K,V> implements Map.Entry<K,V> {
620         final int hash;
621         final K key;
622         volatile V val;
623         volatile Node<K,V> next;
624 
625         Node(int hash, K key, V val, Node<K,V> next) {
626             this.hash = hash;
627             this.key = key;
628             this.val = val;
629             this.next = next;
630         }
631 
632         public final K getKey()       { return key; }
633         public final V getValue()     { return val; }
634         public final int hashCode()   { return key.hashCode() ^ val.hashCode(); }
635         public final String toString(){ return key + "=" + val; }
636         public final V setValue(V value) {
637             throw new UnsupportedOperationException();
638         }
639 
640         public final boolean equals(Object o) {
641             Object k, v, u; Map.Entry<?,?> e;
642             return ((o instanceof Map.Entry) &&
643                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
644                     (v = e.getValue()) != null &&
645                     (k == key || k.equals(key)) &&
646                     (v == (u = val) || v.equals(u)));
647         }
648 
649         /**
650          * Virtualized support for map.get(); overridden in subclasses.
651          */
652         Node<K,V> find(int h, Object k) {
653             Node<K,V> e = this;
654             if (k != null) {
655                 do {
656                     K ek;
657                     if (e.hash == h &&
658                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
659                         return e;
660                 } while ((e = e.next) != null);
661             }
662             return null;
663         }
664     }
665 
666     /* ---------------- Static utilities -------------- */
667 
668     /**
669      * Spreads (XORs) higher bits of hash to lower and also forces top
670      * bit to 0. Because the table uses power-of-two masking, sets of
671      * hashes that vary only in bits above the current mask will
672      * always collide. (Among known examples are sets of Float keys
673      * holding consecutive whole numbers in small tables.)  So we
674      * apply a transform that spreads the impact of higher bits
675      * downward. There is a tradeoff between speed, utility, and
676      * quality of bit-spreading. Because many common sets of hashes
677      * are already reasonably distributed (so don't benefit from
678      * spreading), and because we use trees to handle large sets of
679      * collisions in bins, we just XOR some shifted bits in the
680      * cheapest possible way to reduce systematic lossage, as well as
681      * to incorporate impact of the highest bits that would otherwise
682      * never be used in index calculations because of table bounds.
683      */
684     static final int spread(int h) {
685         return (h ^ (h >>> 16)) & HASH_BITS;
686     }
687 
688     /**
689      * Returns a power of two table size for the given desired capacity.
690      * See Hackers Delight, sec 3.2
691      */
692     private static final int tableSizeFor(int c) {
693         int n = c - 1;
694         n |= n >>> 1;
695         n |= n >>> 2;
696         n |= n >>> 4;
697         n |= n >>> 8;
698         n |= n >>> 16;
699         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
700     }
701 
702     /**
703      * Returns x's Class if it is of the form "class C implements
704      * Comparable<C>", else null.
705      */
706     static Class<?> comparableClassFor(Object x) {
707         if (x instanceof Comparable) {
708             Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
709             if ((c = x.getClass()) == String.class) // bypass checks
710                 return c;
711             if ((ts = c.getGenericInterfaces()) != null) {
712                 for (int i = 0; i < ts.length; ++i) {
713                     if (((t = ts[i]) instanceof ParameterizedType) &&
714                         ((p = (ParameterizedType)t).getRawType() ==
715                          Comparable.class) &&
716                         (as = p.getActualTypeArguments()) != null &&
717                         as.length == 1 && as[0] == c) // type arg is c
718                         return c;
719                 }
720             }
721         }
722         return null;
723     }
724 
725     /**
726      * Returns k.compareTo(x) if x matches kc (k's screened comparable
727      * class), else 0.
728      */
729     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
730     static int compareComparables(Class<?> kc, Object k, Object x) {
731         return (x == null || x.getClass() != kc ? 0 :
732                 ((Comparable)k).compareTo(x));
733     }
734 
735     /* ---------------- Table element access -------------- */
736 
737     /*
738      * Volatile access methods are used for table elements as well as
739      * elements of in-progress next table while resizing.  All uses of
740      * the tab arguments must be null checked by callers.  All callers
741      * also paranoically precheck that tab's length is not zero (or an
742      * equivalent check), thus ensuring that any index argument taking
743      * the form of a hash value anded with (length - 1) is a valid
744      * index.  Note that, to be correct wrt arbitrary concurrency
745      * errors by users, these checks must operate on local variables,
746      * which accounts for some odd-looking inline assignments below.
747      * Note that calls to setTabAt always occur within locked regions,
748      * and so in principle require only release ordering, not
749      * full volatile semantics, but are currently coded as volatile
750      * writes to be conservative.
751      */
752 
    // Volatile read of tab[i]; sees elements published by concurrent
    // CAS/volatile writes (casTabAt/setTabAt).
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }
757 
    // CAS of tab[i] from expected c to v; used e.g. for lock-free
    // insertion of the first node into an empty bin (see putVal).
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
762 
    // Volatile write of tab[i]; per the note above, always called
    // within locked regions (full volatile write is conservative).
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
766 
767     /* ---------------- Fields -------------- */
768 
    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile CounterCell[] counterCells;

    // views
    // Lazily created by keySet()/values()/entrySet() without
    // synchronization; the fields are not volatile, so a race may
    // create redundant view objects, which appears tolerated there.
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;
816 
817 
818     /* ---------------- Public operations -------------- */
819 
    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
        // sizeCtl stays 0, which selects the default capacity when the
        // table is lazily created on first insertion.
    }
825 
826     /**
827      * Creates a new, empty map with an initial table size
828      * accommodating the specified number of elements without the need
829      * to dynamically resize.
830      *
831      * @param initialCapacity The implementation performs internal
832      * sizing to accommodate this many elements.
833      * @throws IllegalArgumentException if the initial capacity of
834      * elements is negative
835      */
836     public ConcurrentHashMap(int initialCapacity) {
837         if (initialCapacity < 0)
838             throw new IllegalArgumentException();
839         int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
840                    MAXIMUM_CAPACITY :
841                    tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
842         this.sizeCtl = cap;
843     }
844 
    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     * @throws NullPointerException if the given map is null, or if any
     *         of its keys or values is null (thrown from putVal)
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;   // baseline capacity hint
        putAll(m);                         // presizes via tryPresize(m.size())
    }
854 
    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        // concurrencyLevel 1: table sized purely from capacity/loadFactor.
        this(initialCapacity, loadFactor, 1);
    }
873 
874     /**
875      * Creates a new, empty map with an initial table size based on
876      * the given number of elements ({@code initialCapacity}), table
877      * density ({@code loadFactor}), and number of concurrently
878      * updating threads ({@code concurrencyLevel}).
879      *
880      * @param initialCapacity the initial capacity. The implementation
881      * performs internal sizing to accommodate this many elements,
882      * given the specified load factor.
883      * @param loadFactor the load factor (table density) for
884      * establishing the initial table size
885      * @param concurrencyLevel the estimated number of concurrently
886      * updating threads. The implementation may use this value as
887      * a sizing hint.
888      * @throws IllegalArgumentException if the initial capacity is
889      * negative or the load factor or concurrencyLevel are
890      * nonpositive
891      */
892     public ConcurrentHashMap(int initialCapacity,
893                              float loadFactor, int concurrencyLevel) {
894         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
895             throw new IllegalArgumentException();
896         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
897             initialCapacity = concurrencyLevel;   // as estimated threads
898         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
899         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
900             MAXIMUM_CAPACITY : tableSizeFor((int)size);
901         this.sizeCtl = cap;
902     }
903 
904     // Original (since JDK1.2) Map methods
905 
906     /**
907      * {@inheritDoc}
908      */
909     public int size() {
910         long n = sumCount();
911         return ((n < 0L) ? 0 :
912                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
913                 (int)n);
914     }
915 
916     /**
917      * {@inheritDoc}
918      */
919     public boolean isEmpty() {
920         return sumCount() <= 0L; // ignore transient negative values
921     }
922 
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
        int h = spread(key.hashCode());   // throws NPE here for null key
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (e = tabAt(tab, (n - 1) & h)) != null) {
            if ((eh = e.hash) == h) {
                // Bin head matches on hash: check key identity, then equals.
                if ((ek = e.key) == key || (ek != null && key.equals(ek)))
                    return e.val;
            }
            else if (eh < 0)
                // Negative hash marks a special node (forwarding node,
                // TreeBin, reservation): delegate to its find().
                return (p = e.find(h, key)) != null ? p.val : null;
            // Plain linked bin: scan the rest of the chain.
            while ((e = e.next) != null) {
                if (e.hash == h &&
                    ((ek = e.key) == key || (ek != null && key.equals(ek))))
                    return e.val;
            }
        }
        return null;
    }
953 
    /**
     * Tests if the specified object is a key in this table.
     *
     * @param  key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        // get() performs the null check (via key.hashCode()) and lookup.
        return get(key) != null;
    }
966 
967     /**
968      * Returns {@code true} if this map maps one or more keys to the
969      * specified value. Note: This method may require a full traversal
970      * of the map, and is much slower than method {@code containsKey}.
971      *
972      * @param value value whose presence in this map is to be tested
973      * @return {@code true} if this map maps one or more keys to the
974      *         specified value
975      * @throws NullPointerException if the specified value is null
976      */
977     public boolean containsValue(Object value) {
978         if (value == null)
979             throw new NullPointerException();
980         Node<K,V>[] t;
981         if ((t = table) != null) {
982             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
983             for (Node<K,V> p; (p = it.advance()) != null; ) {
984                 V v;
985                 if ((v = p.val) == value || (v != null && value.equals(v)))
986                     return true;
987             }
988         }
989         return false;
990     }
991 
    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
    public V put(K key, V value) {
        // onlyIfAbsent == false: always overwrite an existing mapping.
        return putVal(key, value, false);
    }
1008 
    /**
     * Implementation for put and putIfAbsent.
     *
     * @param key non-null key
     * @param value non-null value
     * @param onlyIfAbsent if true, keep an existing value unchanged
     * @return the previous value, or null if the key was absent
     */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;   // nodes seen in the bin; 0 means no lock was taken
        for (Node<K,V>[] tab = table;;) {   // retry loop for init/resize races
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();   // lazy table creation on first insert
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // table resizing: assist, then retry
            else {
                V oldVal = null;
                synchronized (f) {   // lock the bin via its first node
                    if (tabAt(tab, i) == f) {   // recheck: head may have changed
                        if (fh >= 0) {          // ordinary linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: append at tail of the chain.
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {   // red-black tree bin
                            Node<K,V> p;
                            binCount = 2;   // nonzero, below TREEIFY_THRESHOLD
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);   // long chain: convert bin to tree
                    if (oldVal != null)
                        return oldVal;        // replaced: element count unchanged
                    break;
                }
                // binCount == 0: head changed before locking; retry.
            }
        }
        addCount(1L, binCount);   // new mapping added; may trigger a resize
        return null;
    }
1073 
1074     /**
1075      * Copies all of the mappings from the specified map to this one.
1076      * These mappings replace any mappings that this map had for any of the
1077      * keys currently in the specified map.
1078      *
1079      * @param m mappings to be stored in this map
1080      */
1081     public void putAll(Map<? extends K, ? extends V> m) {
1082         tryPresize(m.size());
1083         for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1084             putVal(e.getKey(), e.getValue(), false);
1085     }
1086 
    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param  key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        // null value => delete; null cv => match any current value.
        return replaceNode(key, null, null);
    }
1099 
    /**
     * Implementation for the four public remove/replace methods:
     * Replaces node value with v, conditional upon match of cv if
     * non-null.  If resulting value is null, delete.
     *
     * @param key non-null key (hashCode is invoked)
     * @param value new value, or null to remove the mapping
     * @param cv expected current value, or null for unconditional
     * @return the previous value on a successful match, else null
     */
    final V replaceNode(Object key, V value, Object cv) {
        int hash = spread(key.hashCode());
        for (Node<K,V>[] tab = table;;) {   // retry loop for resize races
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0 ||
                (f = tabAt(tab, i = (n - 1) & hash)) == null)
                break;   // no table or empty bin: nothing to remove/replace
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);   // assist resize, then retry
            else {
                V oldVal = null;
                boolean validated = false;   // bin examined under a valid lock
                synchronized (f) {
                    if (tabAt(tab, i) == f) {   // recheck head under lock
                        if (fh >= 0) {          // linked-list bin
                            validated = true;
                            for (Node<K,V> e = f, pred = null;;) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    V ev = e.val;
                                    if (cv == null || cv == ev ||
                                        (ev != null && cv.equals(ev))) {
                                        oldVal = ev;
                                        if (value != null)
                                            e.val = value;            // replace in place
                                        else if (pred != null)
                                            pred.next = e.next;       // unlink interior node
                                        else
                                            setTabAt(tab, i, e.next); // unlink bin head
                                    }
                                    break;
                                }
                                pred = e;
                                if ((e = e.next) == null)
                                    break;   // key not present in this bin
                            }
                        }
                        else if (f instanceof TreeBin) {   // tree bin
                            validated = true;
                            TreeBin<K,V> t = (TreeBin<K,V>)f;
                            TreeNode<K,V> r, p;
                            if ((r = t.root) != null &&
                                (p = r.findTreeNode(hash, key, null)) != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv ||
                                    (pv != null && cv.equals(pv))) {
                                    oldVal = pv;
                                    if (value != null)
                                        p.val = value;
                                    else if (t.removeTreeNode(p))
                                        // Tree shrank enough: revert to a list bin.
                                        setTabAt(tab, i, untreeify(t.first));
                                }
                            }
                        }
                    }
                }
                if (validated) {
                    if (oldVal != null) {
                        if (value == null)
                            addCount(-1L, -1);   // removal: decrement count
                        return oldVal;
                    }
                    break;
                }
                // Not validated: head changed before locking; retry.
            }
        }
        return null;
    }
1175 
    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            int fh;
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;   // empty bin: advance
            else if ((fh = f.hash) == MOVED) {
                // Table is being resized: help finish, then rescan
                // from the beginning of the (new) table.
                tab = helpTransfer(tab, f);
                i = 0; // restart
            }
            else {
                synchronized (f) {   // lock the bin before emptying it
                    if (tabAt(tab, i) == f) {   // recheck head under lock
                        // First node of the chain: the head itself for
                        // list bins, TreeBin.first for tree bins.
                        Node<K,V> p = (fh >= 0 ? f :
                                       (f instanceof TreeBin) ?
                                       ((TreeBin<K,V>)f).first : null);
                        while (p != null) {
                            --delta;   // one decrement per removed mapping
                            p = p.next;
                        }
                        setTabAt(tab, i++, null);   // empty the bin, advance
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);   // apply the (negative) count adjustment
    }
1210 
1211     /**
1212      * Returns a {@link Set} view of the keys contained in this map.
1213      * The set is backed by the map, so changes to the map are
1214      * reflected in the set, and vice-versa. The set supports element
1215      * removal, which removes the corresponding mapping from this map,
1216      * via the {@code Iterator.remove}, {@code Set.remove},
1217      * {@code removeAll}, {@code retainAll}, and {@code clear}
1218      * operations.  It does not support the {@code add} or
1219      * {@code addAll} operations.
1220      *
1221      * <p>The view's iterators and spliterators are
1222      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1223      *
1224      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1225      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1226      *
1227      * @return the set view
1228      */
1229     public KeySetView<K,V> keySet() {
1230         KeySetView<K,V> ks;
1231         return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null));
1232     }
1233 
1234     /**
1235      * Returns a {@link Collection} view of the values contained in this map.
1236      * The collection is backed by the map, so changes to the map are
1237      * reflected in the collection, and vice-versa.  The collection
1238      * supports element removal, which removes the corresponding
1239      * mapping from this map, via the {@code Iterator.remove},
1240      * {@code Collection.remove}, {@code removeAll},
1241      * {@code retainAll}, and {@code clear} operations.  It does not
1242      * support the {@code add} or {@code addAll} operations.
1243      *
1244      * <p>The view's iterators and spliterators are
1245      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1246      *
1247      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1248      * and {@link Spliterator#NONNULL}.
1249      *
1250      * @return the collection view
1251      */
1252     public Collection<V> values() {
1253         ValuesView<K,V> vs;
1254         return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this));
1255     }
1256 
1257     /**
1258      * Returns a {@link Set} view of the mappings contained in this map.
1259      * The set is backed by the map, so changes to the map are
1260      * reflected in the set, and vice-versa.  The set supports element
1261      * removal, which removes the corresponding mapping from the map,
1262      * via the {@code Iterator.remove}, {@code Set.remove},
1263      * {@code removeAll}, {@code retainAll}, and {@code clear}
1264      * operations.
1265      *
1266      * <p>The view's iterators and spliterators are
1267      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1268      *
1269      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1270      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1271      *
1272      * @return the set view
1273      */
1274     public Set<Map.Entry<K,V>> entrySet() {
1275         EntrySetView<K,V> es;
1276         return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this));
1277     }
1278 
1279     /**
1280      * Returns the hash code value for this {@link Map}, i.e.,
1281      * the sum of, for each key-value pair in the map,
1282      * {@code key.hashCode() ^ value.hashCode()}.
1283      *
1284      * @return the hash code value for this map
1285      */
1286     public int hashCode() {
1287         int h = 0;
1288         Node<K,V>[] t;
1289         if ((t = table) != null) {
1290             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1291             for (Node<K,V> p; (p = it.advance()) != null; )
1292                 h += p.key.hashCode() ^ p.val.hashCode();
1293         }
1294         return h;
1295     }
1296 
1297     /**
1298      * Returns a string representation of this map.  The string
1299      * representation consists of a list of key-value mappings (in no
1300      * particular order) enclosed in braces ("{@code {}}").  Adjacent
1301      * mappings are separated by the characters {@code ", "} (comma
1302      * and space).  Each key-value mapping is rendered as the key
1303      * followed by an equals sign ("{@code =}") followed by the
1304      * associated value.
1305      *
1306      * @return a string representation of this map
1307      */
1308     public String toString() {
1309         Node<K,V>[] t;
1310         int f = (t = table) == null ? 0 : t.length;
1311         Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1312         StringBuilder sb = new StringBuilder();
1313         sb.append('{');
1314         Node<K,V> p;
1315         if ((p = it.advance()) != null) {
1316             for (;;) {
1317                 K k = p.key;
1318                 V v = p.val;
1319                 sb.append(k == this ? "(this Map)" : k);
1320                 sb.append('=');
1321                 sb.append(v == this ? "(this Map)" : v);
1322                 if ((p = it.advance()) == null)
1323                     break;
1324                 sb.append(',').append(' ');
1325             }
1326         }
1327         return sb.append('}').toString();
1328     }
1329 
1330     /**
1331      * Compares the specified object with this map for equality.
1332      * Returns {@code true} if the given object is a map with the same
1333      * mappings as this map.  This operation may return misleading
1334      * results if either map is concurrently modified during execution
1335      * of this method.
1336      *
1337      * @param o object to be compared for equality with this map
1338      * @return {@code true} if the specified object is equal to this map
1339      */
1340     public boolean equals(Object o) {
1341         if (o != this) {
1342             if (!(o instanceof Map))
1343                 return false;
1344             Map<?,?> m = (Map<?,?>) o;
1345             Node<K,V>[] t;
1346             int f = (t = table) == null ? 0 : t.length;
1347             Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1348             for (Node<K,V> p; (p = it.advance()) != null; ) {
1349                 V val = p.val;
1350                 Object v = m.get(p.key);
1351                 if (v == null || (v != val && !v.equals(val)))
1352                     return false;
1353             }
1354             for (Map.Entry<?,?> e : m.entrySet()) {
1355                 Object mk, mv, v;
1356                 if ((mk = e.getKey()) == null ||
1357                     (mv = e.getValue()) == null ||
1358                     (v = get(mk)) == null ||
1359                     (mv != v && !mv.equals(v)))
1360                     return false;
1361             }
1362         }
1363         return true;
1364     }
1365 
1366     /**
1367      * Stripped-down version of helper class used in previous version,
1368      * declared for the sake of serialization compatibility
1369      */
1370     static class Segment<K,V> extends ReentrantLock implements Serializable {
1371         private static final long serialVersionUID = 2249069246763182397L;
1372         final float loadFactor;
1373         Segment(float lf) { this.loadFactor = lf; }
1374     }
1375 
1376     /**
1377      * Saves the state of the {@code ConcurrentHashMap} instance to a
1378      * stream (i.e., serializes it).
1379      * @param s the stream
1380      * @throws java.io.IOException if an I/O error occurs
1381      * @serialData
1382      * the key (Object) and value (Object)
1383      * for each key-value mapping, followed by a null pair.
1384      * The key-value mappings are emitted in no particular order.
1385      */
1386     private void writeObject(java.io.ObjectOutputStream s)
1387         throws java.io.IOException {
1388         // For serialization compatibility
1389         // Emulate segment calculation from previous version of this class
1390         int sshift = 0;
1391         int ssize = 1;
1392         while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1393             ++sshift;
1394             ssize <<= 1;
1395         }
1396         int segmentShift = 32 - sshift;
1397         int segmentMask = ssize - 1;
1398         @SuppressWarnings("unchecked")
1399         Segment<K,V>[] segments = (Segment<K,V>[])
1400             new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1401         for (int i = 0; i < segments.length; ++i)
1402             segments[i] = new Segment<K,V>(LOAD_FACTOR);
1403         s.putFields().put("segments", segments);
1404         s.putFields().put("segmentShift", segmentShift);
1405         s.putFields().put("segmentMask", segmentMask);
1406         s.writeFields();
1407 
1408         Node<K,V>[] t;
1409         if ((t = table) != null) {
1410             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1411             for (Node<K,V> p; (p = it.advance()) != null; ) {
1412                 s.writeObject(p.key);
1413                 s.writeObject(p.val);
1414             }
1415         }
1416         s.writeObject(null);
1417         s.writeObject(null);
1418         segments = null; // throw away
1419     }
1420 
1421     /**
1422      * Reconstitutes the instance from a stream (that is, deserializes it).
1423      * @param s the stream
1424      * @throws ClassNotFoundException if the class of a serialized object
1425      *         could not be found
1426      * @throws java.io.IOException if an I/O error occurs
1427      */
1428     private void readObject(java.io.ObjectInputStream s)
1429         throws java.io.IOException, ClassNotFoundException {
1430         /*
1431          * To improve performance in typical cases, we create nodes
1432          * while reading, then place in table once size is known.
1433          * However, we must also validate uniqueness and deal with
1434          * overpopulated bins while doing so, which requires
1435          * specialized versions of putVal mechanics.
1436          */
1437         sizeCtl = -1; // force exclusion for table construction
1438         s.defaultReadObject();
1439         long size = 0L;
1440         Node<K,V> p = null;
1441         for (;;) {
1442             @SuppressWarnings("unchecked")
1443             K k = (K) s.readObject();
1444             @SuppressWarnings("unchecked")
1445             V v = (V) s.readObject();
1446             if (k != null && v != null) {
1447                 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1448                 ++size;
1449             }
1450             else
1451                 break;
1452         }
1453         if (size == 0L)
1454             sizeCtl = 0;
1455         else {
1456             int n;
1457             if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
1458                 n = MAXIMUM_CAPACITY;
1459             else {
1460                 int sz = (int)size;
1461                 n = tableSizeFor(sz + (sz >>> 1) + 1);
1462             }
1463             @SuppressWarnings("unchecked")
1464             Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1465             int mask = n - 1;
1466             long added = 0L;
1467             while (p != null) {
1468                 boolean insertAtFront;
1469                 Node<K,V> next = p.next, first;
1470                 int h = p.hash, j = h & mask;
1471                 if ((first = tabAt(tab, j)) == null)
1472                     insertAtFront = true;
1473                 else {
1474                     K k = p.key;
1475                     if (first.hash < 0) {
1476                         TreeBin<K,V> t = (TreeBin<K,V>)first;
1477                         if (t.putTreeVal(h, k, p.val) == null)
1478                             ++added;
1479                         insertAtFront = false;
1480                     }
1481                     else {
1482                         int binCount = 0;
1483                         insertAtFront = true;
1484                         Node<K,V> q; K qk;
1485                         for (q = first; q != null; q = q.next) {
1486                             if (q.hash == h &&
1487                                 ((qk = q.key) == k ||
1488                                  (qk != null && k.equals(qk)))) {
1489                                 insertAtFront = false;
1490                                 break;
1491                             }
1492                             ++binCount;
1493                         }
1494                         if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1495                             insertAtFront = false;
1496                             ++added;
1497                             p.next = first;
1498                             TreeNode<K,V> hd = null, tl = null;
1499                             for (q = p; q != null; q = q.next) {
1500                                 TreeNode<K,V> t = new TreeNode<K,V>
1501                                     (q.hash, q.key, q.val, null, null);
1502                                 if ((t.prev = tl) == null)
1503                                     hd = t;
1504                                 else
1505                                     tl.next = t;
1506                                 tl = t;
1507                             }
1508                             setTabAt(tab, j, new TreeBin<K,V>(hd));
1509                         }
1510                     }
1511                 }
1512                 if (insertAtFront) {
1513                     ++added;
1514                     p.next = first;
1515                     setTabAt(tab, j, p);
1516                 }
1517                 p = next;
1518             }
1519             table = tab;
1520             sizeCtl = n - (n >>> 2);
1521             baseCount = added;
1522         }
1523     }
1524 
1525     // ConcurrentMap methods
1526 
1527     /**
1528      * {@inheritDoc}
1529      *
1530      * @return the previous value associated with the specified key,
1531      *         or {@code null} if there was no mapping for the key
1532      * @throws NullPointerException if the specified key or value is null
1533      */
1534     public V putIfAbsent(K key, V value) {
1535         return putVal(key, value, true);
1536     }
1537 
1538     /**
1539      * {@inheritDoc}
1540      *
1541      * @throws NullPointerException if the specified key is null
1542      */
1543     public boolean remove(Object key, Object value) {
1544         if (key == null)
1545             throw new NullPointerException();
1546         return value != null && replaceNode(key, null, value) != null;
1547     }
1548 
1549     /**
1550      * {@inheritDoc}
1551      *
1552      * @throws NullPointerException if any of the arguments are null
1553      */
1554     public boolean replace(K key, V oldValue, V newValue) {
1555         if (key == null || oldValue == null || newValue == null)
1556             throw new NullPointerException();
1557         return replaceNode(key, newValue, oldValue) != null;
1558     }
1559 
1560     /**
1561      * {@inheritDoc}
1562      *
1563      * @return the previous value associated with the specified key,
1564      *         or {@code null} if there was no mapping for the key
1565      * @throws NullPointerException if the specified key or value is null
1566      */
1567     public V replace(K key, V value) {
1568         if (key == null || value == null)
1569             throw new NullPointerException();
1570         return replaceNode(key, value, null);
1571     }
1572 
1573     // Overrides of JDK8+ Map extension method defaults
1574 
1575     /**
1576      * Returns the value to which the specified key is mapped, or the
1577      * given default value if this map contains no mapping for the
1578      * key.
1579      *
1580      * @param key the key whose associated value is to be returned
1581      * @param defaultValue the value to return if this map contains
1582      * no mapping for the given key
1583      * @return the mapping for the key, if present; else the default value
1584      * @throws NullPointerException if the specified key is null
1585      */
1586     public V getOrDefault(Object key, V defaultValue) {
1587         V v;
1588         return (v = get(key)) == null ? defaultValue : v;
1589     }
1590 
1591     public void forEach(BiConsumer<? super K, ? super V> action) {
1592         if (action == null) throw new NullPointerException();
1593         Node<K,V>[] t;
1594         if ((t = table) != null) {
1595             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1596             for (Node<K,V> p; (p = it.advance()) != null; ) {
1597                 action.accept(p.key, p.val);
1598             }
1599         }
1600     }
1601 
    /**
     * Replaces each entry's value with the result of invoking the given
     * function on that entry, until all entries have been processed or
     * the function throws an exception.
     *
     * @param function the function to apply to each entry
     * @throws NullPointerException if the function is null or returns null
     */
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V oldValue = p.val;
                // Retry loop: replaceNode succeeds only if the value is
                // still oldValue; on contention re-read the current value,
                // giving up if the mapping was concurrently removed.
                for (K key = p.key;;) {
                    V newValue = function.apply(key, oldValue);
                    if (newValue == null)
                        throw new NullPointerException();
                    if (replaceNode(key, newValue, oldValue) != null ||
                        (oldValue = get(key)) == null)
                        break;
                }
            }
        }
    }
1620 
1621     /**
1622      * If the specified key is not already associated with a value,
1623      * attempts to compute its value using the given mapping function
1624      * and enters it into this map unless {@code null}.  The entire
1625      * method invocation is performed atomically, so the function is
1626      * applied at most once per key.  Some attempted update operations
1627      * on this map by other threads may be blocked while computation
1628      * is in progress, so the computation should be short and simple,
1629      * and must not attempt to update any other mappings of this map.
1630      *
1631      * @param key key with which the specified value is to be associated
1632      * @param mappingFunction the function to compute a value
1633      * @return the current (existing or computed) value associated with
1634      *         the specified key, or null if the computed value is null
1635      * @throws NullPointerException if the specified key or mappingFunction
1636      *         is null
1637      * @throws IllegalStateException if the computation detectably
1638      *         attempts a recursive update to this map that would
1639      *         otherwise never complete
1640      * @throws RuntimeException or Error if the mappingFunction does so,
1641      *         in which case the mapping is left unestablished
1642      */
1643     public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1644         if (key == null || mappingFunction == null)
1645             throw new NullPointerException();
1646         int h = spread(key.hashCode());
1647         V val = null;
1648         int binCount = 0;
1649         for (Node<K,V>[] tab = table;;) {
1650             Node<K,V> f; int n, i, fh;
1651             if (tab == null || (n = tab.length) == 0)
1652                 tab = initTable();
1653             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1654                 Node<K,V> r = new ReservationNode<K,V>();
1655                 synchronized (r) {
1656                     if (casTabAt(tab, i, null, r)) {
1657                         binCount = 1;
1658                         Node<K,V> node = null;
1659                         try {
1660                             if ((val = mappingFunction.apply(key)) != null)
1661                                 node = new Node<K,V>(h, key, val, null);
1662                         } finally {
1663                             setTabAt(tab, i, node);
1664                         }
1665                     }
1666                 }
1667                 if (binCount != 0)
1668                     break;
1669             }
1670             else if ((fh = f.hash) == MOVED)
1671                 tab = helpTransfer(tab, f);
1672             else {
1673                 boolean added = false;
1674                 synchronized (f) {
1675                     if (tabAt(tab, i) == f) {
1676                         if (fh >= 0) {
1677                             binCount = 1;
1678                             for (Node<K,V> e = f;; ++binCount) {
1679                                 K ek; V ev;
1680                                 if (e.hash == h &&
1681                                     ((ek = e.key) == key ||
1682                                      (ek != null && key.equals(ek)))) {
1683                                     val = e.val;
1684                                     break;
1685                                 }
1686                                 Node<K,V> pred = e;
1687                                 if ((e = e.next) == null) {
1688                                     if ((val = mappingFunction.apply(key)) != null) {
1689                                         added = true;
1690                                         pred.next = new Node<K,V>(h, key, val, null);
1691                                     }
1692                                     break;
1693                                 }
1694                             }
1695                         }
1696                         else if (f instanceof TreeBin) {
1697                             binCount = 2;
1698                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1699                             TreeNode<K,V> r, p;
1700                             if ((r = t.root) != null &&
1701                                 (p = r.findTreeNode(h, key, null)) != null)
1702                                 val = p.val;
1703                             else if ((val = mappingFunction.apply(key)) != null) {
1704                                 added = true;
1705                                 t.putTreeVal(h, key, val);
1706                             }
1707                         }
1708                     }
1709                 }
1710                 if (binCount != 0) {
1711                     if (binCount >= TREEIFY_THRESHOLD)
1712                         treeifyBin(tab, i);
1713                     if (!added)
1714                         return val;
1715                     break;
1716                 }
1717             }
1718         }
1719         if (val != null)
1720             addCount(1L, binCount);
1721         return val;
1722     }
1723 
1724     /**
1725      * If the value for the specified key is present, attempts to
1726      * compute a new mapping given the key and its current mapped
1727      * value.  The entire method invocation is performed atomically.
1728      * Some attempted update operations on this map by other threads
1729      * may be blocked while computation is in progress, so the
1730      * computation should be short and simple, and must not attempt to
1731      * update any other mappings of this map.
1732      *
1733      * @param key key with which a value may be associated
1734      * @param remappingFunction the function to compute a value
1735      * @return the new value associated with the specified key, or null if none
1736      * @throws NullPointerException if the specified key or remappingFunction
1737      *         is null
1738      * @throws IllegalStateException if the computation detectably
1739      *         attempts a recursive update to this map that would
1740      *         otherwise never complete
1741      * @throws RuntimeException or Error if the remappingFunction does so,
1742      *         in which case the mapping is unchanged
1743      */
1744     public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1745         if (key == null || remappingFunction == null)
1746             throw new NullPointerException();
1747         int h = spread(key.hashCode());
1748         V val = null;
1749         int delta = 0;
1750         int binCount = 0;
1751         for (Node<K,V>[] tab = table;;) {
1752             Node<K,V> f; int n, i, fh;
1753             if (tab == null || (n = tab.length) == 0)
1754                 tab = initTable();
1755             else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1756                 break;
1757             else if ((fh = f.hash) == MOVED)
1758                 tab = helpTransfer(tab, f);
1759             else {
1760                 synchronized (f) {
1761                     if (tabAt(tab, i) == f) {
1762                         if (fh >= 0) {
1763                             binCount = 1;
1764                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1765                                 K ek;
1766                                 if (e.hash == h &&
1767                                     ((ek = e.key) == key ||
1768                                      (ek != null && key.equals(ek)))) {
1769                                     val = remappingFunction.apply(key, e.val);
1770                                     if (val != null)
1771                                         e.val = val;
1772                                     else {
1773                                         delta = -1;
1774                                         Node<K,V> en = e.next;
1775                                         if (pred != null)
1776                                             pred.next = en;
1777                                         else
1778                                             setTabAt(tab, i, en);
1779                                     }
1780                                     break;
1781                                 }
1782                                 pred = e;
1783                                 if ((e = e.next) == null)
1784                                     break;
1785                             }
1786                         }
1787                         else if (f instanceof TreeBin) {
1788                             binCount = 2;
1789                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1790                             TreeNode<K,V> r, p;
1791                             if ((r = t.root) != null &&
1792                                 (p = r.findTreeNode(h, key, null)) != null) {
1793                                 val = remappingFunction.apply(key, p.val);
1794                                 if (val != null)
1795                                     p.val = val;
1796                                 else {
1797                                     delta = -1;
1798                                     if (t.removeTreeNode(p))
1799                                         setTabAt(tab, i, untreeify(t.first));
1800                                 }
1801                             }
1802                         }
1803                     }
1804                 }
1805                 if (binCount != 0)
1806                     break;
1807             }
1808         }
1809         if (delta != 0)
1810             addCount((long)delta, binCount);
1811         return val;
1812     }
1813 
1814     /**
1815      * Attempts to compute a mapping for the specified key and its
1816      * current mapped value (or {@code null} if there is no current
1817      * mapping). The entire method invocation is performed atomically.
1818      * Some attempted update operations on this map by other threads
1819      * may be blocked while computation is in progress, so the
1820      * computation should be short and simple, and must not attempt to
1821      * update any other mappings of this Map.
1822      *
1823      * @param key key with which the specified value is to be associated
1824      * @param remappingFunction the function to compute a value
1825      * @return the new value associated with the specified key, or null if none
1826      * @throws NullPointerException if the specified key or remappingFunction
1827      *         is null
1828      * @throws IllegalStateException if the computation detectably
1829      *         attempts a recursive update to this map that would
1830      *         otherwise never complete
1831      * @throws RuntimeException or Error if the remappingFunction does so,
1832      *         in which case the mapping is unchanged
1833      */
1834     public V compute(K key,
1835                      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1836         if (key == null || remappingFunction == null)
1837             throw new NullPointerException();
1838         int h = spread(key.hashCode());
1839         V val = null;
1840         int delta = 0;
1841         int binCount = 0;
1842         for (Node<K,V>[] tab = table;;) {
1843             Node<K,V> f; int n, i, fh;
1844             if (tab == null || (n = tab.length) == 0)
1845                 tab = initTable();
1846             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1847                 Node<K,V> r = new ReservationNode<K,V>();
1848                 synchronized (r) {
1849                     if (casTabAt(tab, i, null, r)) {
1850                         binCount = 1;
1851                         Node<K,V> node = null;
1852                         try {
1853                             if ((val = remappingFunction.apply(key, null)) != null) {
1854                                 delta = 1;
1855                                 node = new Node<K,V>(h, key, val, null);
1856                             }
1857                         } finally {
1858                             setTabAt(tab, i, node);
1859                         }
1860                     }
1861                 }
1862                 if (binCount != 0)
1863                     break;
1864             }
1865             else if ((fh = f.hash) == MOVED)
1866                 tab = helpTransfer(tab, f);
1867             else {
1868                 synchronized (f) {
1869                     if (tabAt(tab, i) == f) {
1870                         if (fh >= 0) {
1871                             binCount = 1;
1872                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1873                                 K ek;
1874                                 if (e.hash == h &&
1875                                     ((ek = e.key) == key ||
1876                                      (ek != null && key.equals(ek)))) {
1877                                     val = remappingFunction.apply(key, e.val);
1878                                     if (val != null)
1879                                         e.val = val;
1880                                     else {
1881                                         delta = -1;
1882                                         Node<K,V> en = e.next;
1883                                         if (pred != null)
1884                                             pred.next = en;
1885                                         else
1886                                             setTabAt(tab, i, en);
1887                                     }
1888                                     break;
1889                                 }
1890                                 pred = e;
1891                                 if ((e = e.next) == null) {
1892                                     val = remappingFunction.apply(key, null);
1893                                     if (val != null) {
1894                                         delta = 1;
1895                                         pred.next =
1896                                             new Node<K,V>(h, key, val, null);
1897                                     }
1898                                     break;
1899                                 }
1900                             }
1901                         }
1902                         else if (f instanceof TreeBin) {
1903                             binCount = 1;
1904                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1905                             TreeNode<K,V> r, p;
1906                             if ((r = t.root) != null)
1907                                 p = r.findTreeNode(h, key, null);
1908                             else
1909                                 p = null;
1910                             V pv = (p == null) ? null : p.val;
1911                             val = remappingFunction.apply(key, pv);
1912                             if (val != null) {
1913                                 if (p != null)
1914                                     p.val = val;
1915                                 else {
1916                                     delta = 1;
1917                                     t.putTreeVal(h, key, val);
1918                                 }
1919                             }
1920                             else if (p != null) {
1921                                 delta = -1;
1922                                 if (t.removeTreeNode(p))
1923                                     setTabAt(tab, i, untreeify(t.first));
1924                             }
1925                         }
1926                     }
1927                 }
1928                 if (binCount != 0) {
1929                     if (binCount >= TREEIFY_THRESHOLD)
1930                         treeifyBin(tab, i);
1931                     break;
1932                 }
1933             }
1934         }
1935         if (delta != 0)
1936             addCount((long)delta, binCount);
1937         return val;
1938     }
1939 
1940     /**
1941      * If the specified key is not already associated with a
1942      * (non-null) value, associates it with the given value.
1943      * Otherwise, replaces the value with the results of the given
1944      * remapping function, or removes if {@code null}. The entire
1945      * method invocation is performed atomically.  Some attempted
1946      * update operations on this map by other threads may be blocked
1947      * while computation is in progress, so the computation should be
1948      * short and simple, and must not attempt to update any other
1949      * mappings of this Map.
1950      *
1951      * @param key key with which the specified value is to be associated
1952      * @param value the value to use if absent
1953      * @param remappingFunction the function to recompute a value if present
1954      * @return the new value associated with the specified key, or null if none
1955      * @throws NullPointerException if the specified key or the
1956      *         remappingFunction is null
1957      * @throws RuntimeException or Error if the remappingFunction does so,
1958      *         in which case the mapping is unchanged
1959      */
1960     public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
1961         if (key == null || value == null || remappingFunction == null)
1962             throw new NullPointerException();
1963         int h = spread(key.hashCode());
1964         V val = null;
1965         int delta = 0;
1966         int binCount = 0;
1967         for (Node<K,V>[] tab = table;;) {
1968             Node<K,V> f; int n, i, fh;
1969             if (tab == null || (n = tab.length) == 0)
1970                 tab = initTable();
1971             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1972                 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) {
1973                     delta = 1;
1974                     val = value;
1975                     break;
1976                 }
1977             }
1978             else if ((fh = f.hash) == MOVED)
1979                 tab = helpTransfer(tab, f);
1980             else {
1981                 synchronized (f) {
1982                     if (tabAt(tab, i) == f) {
1983                         if (fh >= 0) {
1984                             binCount = 1;
1985                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1986                                 K ek;
1987                                 if (e.hash == h &&
1988                                     ((ek = e.key) == key ||
1989                                      (ek != null && key.equals(ek)))) {
1990                                     val = remappingFunction.apply(e.val, value);
1991                                     if (val != null)
1992                                         e.val = val;
1993                                     else {
1994                                         delta = -1;
1995                                         Node<K,V> en = e.next;
1996                                         if (pred != null)
1997                                             pred.next = en;
1998                                         else
1999                                             setTabAt(tab, i, en);
2000                                     }
2001                                     break;
2002                                 }
2003                                 pred = e;
2004                                 if ((e = e.next) == null) {
2005                                     delta = 1;
2006                                     val = value;
2007                                     pred.next =
2008                                         new Node<K,V>(h, key, val, null);
2009                                     break;
2010                                 }
2011                             }
2012                         }
2013                         else if (f instanceof TreeBin) {
2014                             binCount = 2;
2015                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2016                             TreeNode<K,V> r = t.root;
2017                             TreeNode<K,V> p = (r == null) ? null :
2018                                 r.findTreeNode(h, key, null);
2019                             val = (p == null) ? value :
2020                                 remappingFunction.apply(p.val, value);
2021                             if (val != null) {
2022                                 if (p != null)
2023                                     p.val = val;
2024                                 else {
2025                                     delta = 1;
2026                                     t.putTreeVal(h, key, val);
2027                                 }
2028                             }
2029                             else if (p != null) {
2030                                 delta = -1;
2031                                 if (t.removeTreeNode(p))
2032                                     setTabAt(tab, i, untreeify(t.first));
2033                             }
2034                         }
2035                     }
2036                 }
2037                 if (binCount != 0) {
2038                     if (binCount >= TREEIFY_THRESHOLD)
2039                         treeifyBin(tab, i);
2040                     break;
2041                 }
2042             }
2043         }
2044         if (delta != 0)
2045             addCount((long)delta, binCount);
2046         return val;
2047     }
2048 
2049     // Hashtable legacy methods
2050 
2051     /**
2052      * Legacy method testing if some key maps into the specified value
2053      * in this table.  This method is identical in functionality to
2054      * {@link #containsValue(Object)}, and exists solely to ensure
2055      * full compatibility with class {@link java.util.Hashtable},
2056      * which supported this method prior to introduction of the
2057      * Java Collections framework.
2058      *
2059      * @param  value a value to search for
2060      * @return {@code true} if and only if some key maps to the
2061      *         {@code value} argument in this table as
2062      *         determined by the {@code equals} method;
2063      *         {@code false} otherwise
2064      * @throws NullPointerException if the specified value is null
2065      */
2066     public boolean contains(Object value) {
2067         return containsValue(value);
2068     }
2069 
2070     /**
2071      * Returns an enumeration of the keys in this table.
2072      *
2073      * @return an enumeration of the keys in this table
2074      * @see #keySet()
2075      */
2076     public Enumeration<K> keys() {
2077         Node<K,V>[] t;
2078         int f = (t = table) == null ? 0 : t.length;
2079         return new KeyIterator<K,V>(t, f, 0, f, this);
2080     }
2081 
2082     /**
2083      * Returns an enumeration of the values in this table.
2084      *
2085      * @return an enumeration of the values in this table
2086      * @see #values()
2087      */
2088     public Enumeration<V> elements() {
2089         Node<K,V>[] t;
2090         int f = (t = table) == null ? 0 : t.length;
2091         return new ValueIterator<K,V>(t, f, 0, f, this);
2092     }
2093 
2094     // ConcurrentHashMap-only methods
2095 
2096     /**
2097      * Returns the number of mappings. This method should be used
2098      * instead of {@link #size} because a ConcurrentHashMap may
2099      * contain more mappings than can be represented as an int. The
2100      * value returned is an estimate; the actual count may differ if
2101      * there are concurrent insertions or removals.
2102      *
2103      * @return the number of mappings
2104      * @since 1.8
2105      */
2106     public long mappingCount() {
2107         long n = sumCount();
2108         return (n < 0L) ? 0L : n; // ignore transient negative values
2109     }
2110 
2111     /**
2112      * Creates a new {@link Set} backed by a ConcurrentHashMap
2113      * from the given type to {@code Boolean.TRUE}.
2114      *
2115      * @param <K> the element type of the returned set
2116      * @return the new set
2117      * @since 1.8
2118      */
2119     public static <K> KeySetView<K,Boolean> newKeySet() {
2120         return new KeySetView<K,Boolean>
2121             (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2122     }
2123 
2124     /**
2125      * Creates a new {@link Set} backed by a ConcurrentHashMap
2126      * from the given type to {@code Boolean.TRUE}.
2127      *
2128      * @param initialCapacity The implementation performs internal
2129      * sizing to accommodate this many elements.
2130      * @param <K> the element type of the returned set
2131      * @return the new set
2132      * @throws IllegalArgumentException if the initial capacity of
2133      * elements is negative
2134      * @since 1.8
2135      */
2136     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2137         return new KeySetView<K,Boolean>
2138             (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2139     }
2140 
2141     /**
2142      * Returns a {@link Set} view of the keys in this map, using the
2143      * given common mapped value for any additions (i.e., {@link
2144      * Collection#add} and {@link Collection#addAll(Collection)}).
2145      * This is of course only appropriate if it is acceptable to use
2146      * the same value for all additions from this view.
2147      *
2148      * @param mappedValue the mapped value to use for any additions
2149      * @return the set view
2150      * @throws NullPointerException if the mappedValue is null
2151      */
2152     public KeySetView<K,V> keySet(V mappedValue) {
2153         if (mappedValue == null)
2154             throw new NullPointerException();
2155         return new KeySetView<K,V>(this, mappedValue);
2156     }
2157 
2158     /* ---------------- Special Nodes -------------- */
2159 
2160     /**
2161      * A node inserted at head of bins during transfer operations.
2162      */
2163     static final class ForwardingNode<K,V> extends Node<K,V> {
2164         final Node<K,V>[] nextTable;
2165         ForwardingNode(Node<K,V>[] tab) {
2166             super(MOVED, null, null, null);
2167             this.nextTable = tab;
2168         }
2169 
2170         Node<K,V> find(int h, Object k) {
2171             // loop to avoid arbitrarily deep recursion on forwarding nodes
2172             outer: for (Node<K,V>[] tab = nextTable;;) {
2173                 Node<K,V> e; int n;
2174                 if (k == null || tab == null || (n = tab.length) == 0 ||
2175                     (e = tabAt(tab, (n - 1) & h)) == null)
2176                     return null;
2177                 for (;;) {
2178                     int eh; K ek;
2179                     if ((eh = e.hash) == h &&
2180                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
2181                         return e;
2182                     if (eh < 0) {
2183                         if (e instanceof ForwardingNode) {
2184                             tab = ((ForwardingNode<K,V>)e).nextTable;
2185                             continue outer;
2186                         }
2187                         else
2188                             return e.find(h, k);
2189                     }
2190                     if ((e = e.next) == null)
2191                         return null;
2192                 }
2193             }
2194         }
2195     }
2196 
2197     /**
2198      * A place-holder node used in computeIfAbsent and compute
2199      */
2200     static final class ReservationNode<K,V> extends Node<K,V> {
2201         ReservationNode() {
2202             super(RESERVED, null, null, null);
2203         }
2204 
2205         Node<K,V> find(int h, Object k) {
2206             return null;
2207         }
2208     }
2209 
2210     /* ---------------- Table Initialization and Resizing -------------- */
2211 
2212     /**
2213      * Returns the stamp bits for resizing a table of size n.
2214      * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2215      */
2216     static final int resizeStamp(int n) {
2217         return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2218     }
2219 
2220     /**
2221      * Initializes table, using the size recorded in sizeCtl.
2222      */
2223     private final Node<K,V>[] initTable() {
2224         Node<K,V>[] tab; int sc;
2225         while ((tab = table) == null || tab.length == 0) {
2226             if ((sc = sizeCtl) < 0)
2227                 Thread.yield(); // lost initialization race; just spin
2228             else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2229                 try {
2230                     if ((tab = table) == null || tab.length == 0) {
2231                         int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2232                         @SuppressWarnings("unchecked")
2233                         Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2234                         table = tab = nt;
2235                         sc = n - (n >>> 2);
2236                     }
2237                 } finally {
2238                     sizeCtl = sc;
2239                 }
2240                 break;
2241             }
2242         }
2243         return tab;
2244     }
2245 
2246     /**
2247      * Adds to count, and if table is too small and not already
2248      * resizing, initiates transfer. If already resizing, helps
2249      * perform transfer if work is available.  Rechecks occupancy
2250      * after a transfer to see if another resize is already needed
2251      * because resizings are lagging additions.
2252      *
2253      * @param x the count to add
2254      * @param check if <0, don't check resize, if <= 1 only check if uncontended
2255      */
2256     private final void addCount(long x, int check) {
2257         CounterCell[] as; long b, s;
2258         if ((as = counterCells) != null ||
2259             !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2260             CounterCell a; long v; int m;
2261             boolean uncontended = true;
2262             if (as == null || (m = as.length - 1) < 0 ||
2263                 (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
2264                 !(uncontended =
2265                   U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
2266                 fullAddCount(x, uncontended);
2267                 return;
2268             }
2269             if (check <= 1)
2270                 return;
2271             s = sumCount();
2272         }
2273         if (check >= 0) {
2274             Node<K,V>[] tab, nt; int n, sc;
2275             while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2276                    (n = tab.length) < MAXIMUM_CAPACITY) {
2277                 int rs = resizeStamp(n);
2278                 if (sc < 0) {
2279                     if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2280                         sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2281                         transferIndex <= 0)
2282                         break;
2283                     if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2284                         transfer(tab, nt);
2285                 }
2286                 else if (U.compareAndSwapInt(this, SIZECTL, sc,
2287                                              (rs << RESIZE_STAMP_SHIFT) + 2))
2288                     transfer(tab, null);
2289                 s = sumCount();
2290             }
2291         }
2292     }
2293 
2294     /**
2295      * Helps transfer if a resize is in progress.
2296      */
2297     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2298         Node<K,V>[] nextTab; int sc;
2299         if (tab != null && (f instanceof ForwardingNode) &&
2300             (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2301             int rs = resizeStamp(tab.length);
2302             while (nextTab == nextTable && table == tab &&
2303                    (sc = sizeCtl) < 0) {
2304                 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2305                     sc == rs + MAX_RESIZERS || transferIndex <= 0)
2306                     break;
2307                 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
2308                     transfer(tab, nextTab);
2309                     break;
2310                 }
2311             }
2312             return nextTab;
2313         }
2314         return table;
2315     }
2316 
2317     /**
2318      * Tries to presize table to accommodate the given number of elements.
2319      *
2320      * @param size number of elements (doesn't need to be perfectly accurate)
2321      */
2322     private final void tryPresize(int size) {
2323         int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2324             tableSizeFor(size + (size >>> 1) + 1);
2325         int sc;
2326         while ((sc = sizeCtl) >= 0) {
2327             Node<K,V>[] tab = table; int n;
2328             if (tab == null || (n = tab.length) == 0) {
2329                 n = (sc > c) ? sc : c;
2330                 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
2331                     try {
2332                         if (table == tab) {
2333                             @SuppressWarnings("unchecked")
2334                             Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2335                             table = nt;
2336                             sc = n - (n >>> 2);
2337                         }
2338                     } finally {
2339                         sizeCtl = sc;
2340                     }
2341                 }
2342             }
2343             else if (c <= sc || n >= MAXIMUM_CAPACITY)
2344                 break;
2345             else if (tab == table) {
2346                 int rs = resizeStamp(n);
2347                 if (sc < 0) {
2348                     Node<K,V>[] nt;
2349                     if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
2350                         sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
2351                         transferIndex <= 0)
2352                         break;
2353                     if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
2354                         transfer(tab, nt);
2355                 }
2356                 else if (U.compareAndSwapInt(this, SIZECTL, sc,
2357                                              (rs << RESIZE_STAMP_SHIFT) + 2))
2358                     transfer(tab, null);
2359             }
2360         }
2361     }
2362 
2363     /**
2364      * Moves and/or copies the nodes in each bin to new table. See
2365      * above for explanation.
2366      */
2367     private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2368         int n = tab.length, stride;
2369         if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2370             stride = MIN_TRANSFER_STRIDE; // subdivide range
2371         if (nextTab == null) {            // initiating
2372             try {
2373                 @SuppressWarnings("unchecked")
2374                 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2375                 nextTab = nt;
2376             } catch (Throwable ex) {      // try to cope with OOME
2377                 sizeCtl = Integer.MAX_VALUE;
2378                 return;
2379             }
2380             nextTable = nextTab;
2381             transferIndex = n;
2382         }
2383         int nextn = nextTab.length;
2384         ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2385         boolean advance = true;
2386         boolean finishing = false; // to ensure sweep before committing nextTab
2387         for (int i = 0, bound = 0;;) {
2388             Node<K,V> f; int fh;
2389             while (advance) {
2390                 int nextIndex, nextBound;
2391                 if (--i >= bound || finishing)
2392                     advance = false;
2393                 else if ((nextIndex = transferIndex) <= 0) {
2394                     i = -1;
2395                     advance = false;
2396                 }
2397                 else if (U.compareAndSwapInt
2398                          (this, TRANSFERINDEX, nextIndex,
2399                           nextBound = (nextIndex > stride ?
2400                                        nextIndex - stride : 0))) {
2401                     bound = nextBound;
2402                     i = nextIndex - 1;
2403                     advance = false;
2404                 }
2405             }
2406             if (i < 0 || i >= n || i + n >= nextn) {
2407                 int sc;
2408                 if (finishing) {
2409                     nextTable = null;
2410                     table = nextTab;
2411                     sizeCtl = (n << 1) - (n >>> 1);
2412                     return;
2413                 }
2414                 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2415                     if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2416                         return;
2417                     finishing = advance = true;
2418                     i = n; // recheck before commit
2419                 }
2420             }
2421             else if ((f = tabAt(tab, i)) == null)
2422                 advance = casTabAt(tab, i, null, fwd);
2423             else if ((fh = f.hash) == MOVED)
2424                 advance = true; // already processed
2425             else {
2426                 synchronized (f) {
2427                     if (tabAt(tab, i) == f) {
2428                         Node<K,V> ln, hn;
2429                         if (fh >= 0) {
2430                             int runBit = fh & n;
2431                             Node<K,V> lastRun = f;
2432                             for (Node<K,V> p = f.next; p != null; p = p.next) {
2433                                 int b = p.hash & n;
2434                                 if (b != runBit) {
2435                                     runBit = b;
2436                                     lastRun = p;
2437                                 }
2438                             }
2439                             if (runBit == 0) {
2440                                 ln = lastRun;
2441                                 hn = null;
2442                             }
2443                             else {
2444                                 hn = lastRun;
2445                                 ln = null;
2446                             }
2447                             for (Node<K,V> p = f; p != lastRun; p = p.next) {
2448                                 int ph = p.hash; K pk = p.key; V pv = p.val;
2449                                 if ((ph & n) == 0)
2450                                     ln = new Node<K,V>(ph, pk, pv, ln);
2451                                 else
2452                                     hn = new Node<K,V>(ph, pk, pv, hn);
2453                             }
2454                             setTabAt(nextTab, i, ln);
2455                             setTabAt(nextTab, i + n, hn);
2456                             setTabAt(tab, i, fwd);
2457                             advance = true;
2458                         }
2459                         else if (f instanceof TreeBin) {
2460                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2461                             TreeNode<K,V> lo = null, loTail = null;
2462                             TreeNode<K,V> hi = null, hiTail = null;
2463                             int lc = 0, hc = 0;
2464                             for (Node<K,V> e = t.first; e != null; e = e.next) {
2465                                 int h = e.hash;
2466                                 TreeNode<K,V> p = new TreeNode<K,V>
2467                                     (h, e.key, e.val, null, null);
2468                                 if ((h & n) == 0) {
2469                                     if ((p.prev = loTail) == null)
2470                                         lo = p;
2471                                     else
2472                                         loTail.next = p;
2473                                     loTail = p;
2474                                     ++lc;
2475                                 }
2476                                 else {
2477                                     if ((p.prev = hiTail) == null)
2478                                         hi = p;
2479                                     else
2480                                         hiTail.next = p;
2481                                     hiTail = p;
2482                                     ++hc;
2483                                 }
2484                             }
2485                             ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2486                                 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2487                             hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2488                                 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2489                             setTabAt(nextTab, i, ln);
2490                             setTabAt(nextTab, i + n, hn);
2491                             setTabAt(tab, i, fwd);
2492                             advance = true;
2493                         }
2494                     }
2495                 }
2496             }
2497         }
2498     }
2499 
2500     /* ---------------- Counter support -------------- */
2501 
2502     /**
2503      * A padded cell for distributing counts.  Adapted from LongAdder
2504      * and Striped64.  See their internal docs for explanation.
2505      */
2506     @sun.misc.Contended static final class CounterCell {
2507         volatile long value;
2508         CounterCell(long x) { value = x; }
2509     }
2510 
2511     final long sumCount() {
2512         CounterCell[] as = counterCells; CounterCell a;
2513         long sum = baseCount;
2514         if (as != null) {
2515             for (int i = 0; i < as.length; ++i) {
2516                 if ((a = as[i]) != null)
2517                     sum += a.value;
2518             }
2519         }
2520         return sum;
2521     }
2522 
    // See LongAdder version for explanation
    /**
     * Slow path of addCount: initializes the counter-cell table,
     * expands it under contention, or retries striped/base CASes until
     * the addend x has been recorded somewhere.  cellsBusy acts as a
     * spinlock (0 = free, 1 = held) guarding cell-table mutation.
     *
     * @param x the count to add
     * @param wasUncontended false if a cell CAS already failed before entry
     */
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] as; CounterCell a; int n; long v;
            // Case 1: the cell table exists; work with our probed slot.
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    // Repeated collisions: double the cell table.
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            CounterCell[] rs = new CounterCell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                // Rehash to a different slot before retrying.
                h = ThreadLocalRandom.advanceProbe(h);
            }
            // Case 2: no cell table yet; try to create a 2-slot one.
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            // Case 3: table busy/absent; try the base count directly.
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2604 
2605     /* ---------------- Conversion from/to TreeBins -------------- */
2606 
2607     /**
2608      * Replaces all linked nodes in bin at given index unless table is
2609      * too small, in which case resizes instead.
2610      */
2611     private final void treeifyBin(Node<K,V>[] tab, int index) {
2612         Node<K,V> b; int n, sc;
2613         if (tab != null) {
2614             if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2615                 tryPresize(n << 1);
2616             else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2617                 synchronized (b) {
2618                     if (tabAt(tab, index) == b) {
2619                         TreeNode<K,V> hd = null, tl = null;
2620                         for (Node<K,V> e = b; e != null; e = e.next) {
2621                             TreeNode<K,V> p =
2622                                 new TreeNode<K,V>(e.hash, e.key, e.val,
2623                                                   null, null);
2624                             if ((p.prev = tl) == null)
2625                                 hd = p;
2626                             else
2627                                 tl.next = p;
2628                             tl = p;
2629                         }
2630                         setTabAt(tab, index, new TreeBin<K,V>(hd));
2631                     }
2632                 }
2633             }
2634         }
2635     }
2636 
2637     /**
2638      * Returns a list on non-TreeNodes replacing those in given list.
2639      */
2640     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2641         Node<K,V> hd = null, tl = null;
2642         for (Node<K,V> q = b; q != null; q = q.next) {
2643             Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2644             if (tl == null)
2645                 hd = p;
2646             else
2647                 tl.next = p;
2648             tl = p;
2649         }
2650         return hd;
2651     }
2652 
2653     /* ---------------- TreeNodes -------------- */
2654 
2655     /**
2656      * Nodes for use in TreeBins
2657      */
2658     static final class TreeNode<K,V> extends Node<K,V> {
2659         TreeNode<K,V> parent;  // red-black tree links
2660         TreeNode<K,V> left;
2661         TreeNode<K,V> right;
2662         TreeNode<K,V> prev;    // needed to unlink next upon deletion
2663         boolean red;
2664 
2665         TreeNode(int hash, K key, V val, Node<K,V> next,
2666                  TreeNode<K,V> parent) {
2667             super(hash, key, val, next);
2668             this.parent = parent;
2669         }
2670 
2671         Node<K,V> find(int h, Object k) {
2672             return findTreeNode(h, k, null);
2673         }
2674 
2675         /**
2676          * Returns the TreeNode (or null if not found) for the given key
2677          * starting at given root.
2678          */
2679         final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2680             if (k != null) {
2681                 TreeNode<K,V> p = this;
2682                 do  {
2683                     int ph, dir; K pk; TreeNode<K,V> q;
2684                     TreeNode<K,V> pl = p.left, pr = p.right;
2685                     if ((ph = p.hash) > h)
2686                         p = pl;
2687                     else if (ph < h)
2688                         p = pr;
2689                     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2690                         return p;
2691                     else if (pl == null)
2692                         p = pr;
2693                     else if (pr == null)
2694                         p = pl;
2695                     else if ((kc != null ||
2696                               (kc = comparableClassFor(k)) != null) &&
2697                              (dir = compareComparables(kc, k, pk)) != 0)
2698                         p = (dir < 0) ? pl : pr;
2699                     else if ((q = pr.findTreeNode(h, k, kc)) != null)
2700                         return q;
2701                     else
2702                         p = pl;
2703                 } while (p != null);
2704             }
2705             return null;
2706         }
2707     }
2708 
2709     /* ---------------- TreeBins -------------- */
2710 
2711     /**
2712      * TreeNodes used at the heads of bins. TreeBins do not hold user
2713      * keys or values, but instead point to list of TreeNodes and
2714      * their root. They also maintain a parasitic read-write lock
2715      * forcing writers (who hold bin lock) to wait for readers (who do
2716      * not) to complete before tree restructuring operations.
2717      */
2718     static final class TreeBin<K,V> extends Node<K,V> {
2719         TreeNode<K,V> root;
2720         volatile TreeNode<K,V> first;
2721         volatile Thread waiter;
2722         volatile int lockState;
2723         // values for lockState
2724         static final int WRITER = 1; // set while holding write lock
2725         static final int WAITER = 2; // set when waiting for write lock
2726         static final int READER = 4; // increment value for setting read lock
2727 
2728         /**
2729          * Tie-breaking utility for ordering insertions when equal
2730          * hashCodes and non-comparable. We don't require a total
2731          * order, just a consistent insertion rule to maintain
2732          * equivalence across rebalancings. Tie-breaking further than
2733          * necessary simplifies testing a bit.
2734          */
2735         static int tieBreakOrder(Object a, Object b) {
2736             int d;
2737             if (a == null || b == null ||
2738                 (d = a.getClass().getName().
2739                  compareTo(b.getClass().getName())) == 0)
2740                 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2741                      -1 : 1);
2742             return d;
2743         }
2744 
2745         /**
2746          * Creates bin with initial set of nodes headed by b.
2747          */
2748         TreeBin(TreeNode<K,V> b) {
2749             super(TREEBIN, null, null, null);
2750             this.first = b;
2751             TreeNode<K,V> r = null;
2752             for (TreeNode<K,V> x = b, next; x != null; x = next) {
2753                 next = (TreeNode<K,V>)x.next;
2754                 x.left = x.right = null;
2755                 if (r == null) {
2756                     x.parent = null;
2757                     x.red = false;
2758                     r = x;
2759                 }
2760                 else {
2761                     K k = x.key;
2762                     int h = x.hash;
2763                     Class<?> kc = null;
2764                     for (TreeNode<K,V> p = r;;) {
2765                         int dir, ph;
2766                         K pk = p.key;
2767                         if ((ph = p.hash) > h)
2768                             dir = -1;
2769                         else if (ph < h)
2770                             dir = 1;
2771                         else if ((kc == null &&
2772                                   (kc = comparableClassFor(k)) == null) ||
2773                                  (dir = compareComparables(kc, k, pk)) == 0)
2774                             dir = tieBreakOrder(k, pk);
2775                             TreeNode<K,V> xp = p;
2776                         if ((p = (dir <= 0) ? p.left : p.right) == null) {
2777                             x.parent = xp;
2778                             if (dir <= 0)
2779                                 xp.left = x;
2780                             else
2781                                 xp.right = x;
2782                             r = balanceInsertion(r, x);
2783                             break;
2784                         }
2785                     }
2786                 }
2787             }
2788             this.root = r;
2789             assert checkInvariants(root);
2790         }
2791 
2792         /**
2793          * Acquires write lock for tree restructuring.
2794          */
2795         private final void lockRoot() {
2796             if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER))
2797                 contendedLock(); // offload to separate method
2798         }
2799 
2800         /**
2801          * Releases write lock for tree restructuring.
2802          */
2803         private final void unlockRoot() {
2804             lockState = 0;
2805         }
2806 
2807         /**
2808          * Possibly blocks awaiting root lock.
2809          */
2810         private final void contendedLock() {
2811             boolean waiting = false;
2812             for (int s;;) {
2813                 if (((s = lockState) & ~WAITER) == 0) {
2814                     if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) {
2815                         if (waiting)
2816                             waiter = null;
2817                         return;
2818                     }
2819                 }
2820                 else if ((s & WAITER) == 0) {
2821                     if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) {
2822                         waiting = true;
2823                         waiter = Thread.currentThread();
2824                     }
2825                 }
2826                 else if (waiting)
2827                     LockSupport.park(this);
2828             }
2829         }
2830 
2831         /**
2832          * Returns matching node or null if none. Tries to search
2833          * using tree comparisons from root, but continues linear
2834          * search when lock not available.
2835          */
2836         final Node<K,V> find(int h, Object k) {
2837             if (k != null) {
2838                 for (Node<K,V> e = first; e != null; ) {
2839                     int s; K ek;
2840                     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2841                         if (e.hash == h &&
2842                             ((ek = e.key) == k || (ek != null && k.equals(ek))))
2843                             return e;
2844                         e = e.next;
2845                     }
2846                     else if (U.compareAndSwapInt(this, LOCKSTATE, s,
2847                                                  s + READER)) {
2848                         TreeNode<K,V> r, p;
2849                         try {
2850                             p = ((r = root) == null ? null :
2851                                  r.findTreeNode(h, k, null));
2852                         } finally {
2853                             Thread w;
2854                             if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2855                                 (READER|WAITER) && (w = waiter) != null)
2856                                 LockSupport.unpark(w);
2857                         }
2858                         return p;
2859                     }
2860                 }
2861             }
2862             return null;
2863         }
2864 
2865         /**
2866          * Finds or adds a node.
2867          * @return null if added
2868          */
2869         final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2870             Class<?> kc = null;
2871             boolean searched = false;
2872             for (TreeNode<K,V> p = root;;) {
2873                 int dir, ph; K pk;
2874                 if (p == null) {
2875                     first = root = new TreeNode<K,V>(h, k, v, null, null);
2876                     break;
2877                 }
2878                 else if ((ph = p.hash) > h)
2879                     dir = -1;
2880                 else if (ph < h)
2881                     dir = 1;
2882                 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2883                     return p;
2884                 else if ((kc == null &&
2885                           (kc = comparableClassFor(k)) == null) ||
2886                          (dir = compareComparables(kc, k, pk)) == 0) {
2887                     if (!searched) {
2888                         TreeNode<K,V> q, ch;
2889                         searched = true;
2890                         if (((ch = p.left) != null &&
2891                              (q = ch.findTreeNode(h, k, kc)) != null) ||
2892                             ((ch = p.right) != null &&
2893                              (q = ch.findTreeNode(h, k, kc)) != null))
2894                             return q;
2895                     }
2896                     dir = tieBreakOrder(k, pk);
2897                 }
2898 
2899                 TreeNode<K,V> xp = p;
2900                 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2901                     TreeNode<K,V> x, f = first;
2902                     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2903                     if (f != null)
2904                         f.prev = x;
2905                     if (dir <= 0)
2906                         xp.left = x;
2907                     else
2908                         xp.right = x;
2909                     if (!xp.red)
2910                         x.red = true;
2911                     else {
2912                         lockRoot();
2913                         try {
2914                             root = balanceInsertion(root, x);
2915                         } finally {
2916                             unlockRoot();
2917                         }
2918                     }
2919                     break;
2920                 }
2921             }
2922             assert checkInvariants(root);
2923             return null;
2924         }
2925 
2926         /**
2927          * Removes the given node, that must be present before this
2928          * call.  This is messier than typical red-black deletion code
2929          * because we cannot swap the contents of an interior node
2930          * with a leaf successor that is pinned by "next" pointers
2931          * that are accessible independently of lock. So instead we
2932          * swap the tree linkages.
2933          *
2934          * @return true if now too small, so should be untreeified
2935          */
2936         final boolean removeTreeNode(TreeNode<K,V> p) {
2937             TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2938             TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
2939             TreeNode<K,V> r, rl;
2940             if (pred == null)
2941                 first = next;
2942             else
2943                 pred.next = next;
2944             if (next != null)
2945                 next.prev = pred;
2946             if (first == null) {
2947                 root = null;
2948                 return true;
2949             }
2950             if ((r = root) == null || r.right == null || // too small
2951                 (rl = r.left) == null || rl.left == null)
2952                 return true;
2953             lockRoot();
2954             try {
2955                 TreeNode<K,V> replacement;
2956                 TreeNode<K,V> pl = p.left;
2957                 TreeNode<K,V> pr = p.right;
2958                 if (pl != null && pr != null) {
2959                     TreeNode<K,V> s = pr, sl;
2960                     while ((sl = s.left) != null) // find successor
2961                         s = sl;
2962                     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
2963                     TreeNode<K,V> sr = s.right;
2964                     TreeNode<K,V> pp = p.parent;
2965                     if (s == pr) { // p was s's direct parent
2966                         p.parent = s;
2967                         s.right = p;
2968                     }
2969                     else {
2970                         TreeNode<K,V> sp = s.parent;
2971                         if ((p.parent = sp) != null) {
2972                             if (s == sp.left)
2973                                 sp.left = p;
2974                             else
2975                                 sp.right = p;
2976                         }
2977                         if ((s.right = pr) != null)
2978                             pr.parent = s;
2979                     }
2980                     p.left = null;
2981                     if ((p.right = sr) != null)
2982                         sr.parent = p;
2983                     if ((s.left = pl) != null)
2984                         pl.parent = s;
2985                     if ((s.parent = pp) == null)
2986                         r = s;
2987                     else if (p == pp.left)
2988                         pp.left = s;
2989                     else
2990                         pp.right = s;
2991                     if (sr != null)
2992                         replacement = sr;
2993                     else
2994                         replacement = p;
2995                 }
2996                 else if (pl != null)
2997                     replacement = pl;
2998                 else if (pr != null)
2999                     replacement = pr;
3000                 else
3001                     replacement = p;
3002                 if (replacement != p) {
3003                     TreeNode<K,V> pp = replacement.parent = p.parent;
3004                     if (pp == null)
3005                         r = replacement;
3006                     else if (p == pp.left)
3007                         pp.left = replacement;
3008                     else
3009                         pp.right = replacement;
3010                     p.left = p.right = p.parent = null;
3011                 }
3012 
3013                 root = (p.red) ? r : balanceDeletion(r, replacement);
3014 
3015                 if (p == replacement) {  // detach pointers
3016                     TreeNode<K,V> pp;
3017                     if ((pp = p.parent) != null) {
3018                         if (p == pp.left)
3019                             pp.left = null;
3020                         else if (p == pp.right)
3021                             pp.right = null;
3022                         p.parent = null;
3023                     }
3024                 }
3025             } finally {
3026                 unlockRoot();
3027             }
3028             assert checkInvariants(root);
3029             return false;
3030         }
3031 
3032         /* ------------------------------------------------------------ */
3033         // Red-black tree methods, all adapted from CLR
3034 
3035         static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3036                                               TreeNode<K,V> p) {
3037             TreeNode<K,V> r, pp, rl;
3038             if (p != null && (r = p.right) != null) {
3039                 if ((rl = p.right = r.left) != null)
3040                     rl.parent = p;
3041                 if ((pp = r.parent = p.parent) == null)
3042                     (root = r).red = false;
3043                 else if (pp.left == p)
3044                     pp.left = r;
3045                 else
3046                     pp.right = r;
3047                 r.left = p;
3048                 p.parent = r;
3049             }
3050             return root;
3051         }
3052 
3053         static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3054                                                TreeNode<K,V> p) {
3055             TreeNode<K,V> l, pp, lr;
3056             if (p != null && (l = p.left) != null) {
3057                 if ((lr = p.left = l.right) != null)
3058                     lr.parent = p;
3059                 if ((pp = l.parent = p.parent) == null)
3060                     (root = l).red = false;
3061                 else if (pp.right == p)
3062                     pp.right = l;
3063                 else
3064                     pp.left = l;
3065                 l.right = p;
3066                 p.parent = l;
3067             }
3068             return root;
3069         }
3070 
3071         static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3072                                                     TreeNode<K,V> x) {
3073             x.red = true;
3074             for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3075                 if ((xp = x.parent) == null) {
3076                     x.red = false;
3077                     return x;
3078                 }
3079                 else if (!xp.red || (xpp = xp.parent) == null)
3080                     return root;
3081                 if (xp == (xppl = xpp.left)) {
3082                     if ((xppr = xpp.right) != null && xppr.red) {
3083                         xppr.red = false;
3084                         xp.red = false;
3085                         xpp.red = true;
3086                         x = xpp;
3087                     }
3088                     else {
3089                         if (x == xp.right) {
3090                             root = rotateLeft(root, x = xp);
3091                             xpp = (xp = x.parent) == null ? null : xp.parent;
3092                         }
3093                         if (xp != null) {
3094                             xp.red = false;
3095                             if (xpp != null) {
3096                                 xpp.red = true;
3097                                 root = rotateRight(root, xpp);
3098                             }
3099                         }
3100                     }
3101                 }
3102                 else {
3103                     if (xppl != null && xppl.red) {
3104                         xppl.red = false;
3105                         xp.red = false;
3106                         xpp.red = true;
3107                         x = xpp;
3108                     }
3109                     else {
3110                         if (x == xp.left) {
3111                             root = rotateRight(root, x = xp);
3112                             xpp = (xp = x.parent) == null ? null : xp.parent;
3113                         }
3114                         if (xp != null) {
3115                             xp.red = false;
3116                             if (xpp != null) {
3117                                 xpp.red = true;
3118                                 root = rotateLeft(root, xpp);
3119                             }
3120                         }
3121                     }
3122                 }
3123             }
3124         }
3125 
3126         static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3127                                                    TreeNode<K,V> x) {
3128             for (TreeNode<K,V> xp, xpl, xpr;;)  {
3129                 if (x == null || x == root)
3130                     return root;
3131                 else if ((xp = x.parent) == null) {
3132                     x.red = false;
3133                     return x;
3134                 }
3135                 else if (x.red) {
3136                     x.red = false;
3137                     return root;
3138                 }
3139                 else if ((xpl = xp.left) == x) {
3140                     if ((xpr = xp.right) != null && xpr.red) {
3141                         xpr.red = false;
3142                         xp.red = true;
3143                         root = rotateLeft(root, xp);
3144                         xpr = (xp = x.parent) == null ? null : xp.right;
3145                     }
3146                     if (xpr == null)
3147                         x = xp;
3148                     else {
3149                         TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3150                         if ((sr == null || !sr.red) &&
3151                             (sl == null || !sl.red)) {
3152                             xpr.red = true;
3153                             x = xp;
3154                         }
3155                         else {
3156                             if (sr == null || !sr.red) {
3157                                 if (sl != null)
3158                                     sl.red = false;
3159                                 xpr.red = true;
3160                                 root = rotateRight(root, xpr);
3161                                 xpr = (xp = x.parent) == null ?
3162                                     null : xp.right;
3163                             }
3164                             if (xpr != null) {
3165                                 xpr.red = (xp == null) ? false : xp.red;
3166                                 if ((sr = xpr.right) != null)
3167                                     sr.red = false;
3168                             }
3169                             if (xp != null) {
3170                                 xp.red = false;
3171                                 root = rotateLeft(root, xp);
3172                             }
3173                             x = root;
3174                         }
3175                     }
3176                 }
3177                 else { // symmetric
3178                     if (xpl != null && xpl.red) {
3179                         xpl.red = false;
3180                         xp.red = true;
3181                         root = rotateRight(root, xp);
3182                         xpl = (xp = x.parent) == null ? null : xp.left;
3183                     }
3184                     if (xpl == null)
3185                         x = xp;
3186                     else {
3187                         TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3188                         if ((sl == null || !sl.red) &&
3189                             (sr == null || !sr.red)) {
3190                             xpl.red = true;
3191                             x = xp;
3192                         }
3193                         else {
3194                             if (sl == null || !sl.red) {
3195                                 if (sr != null)
3196                                     sr.red = false;
3197                                 xpl.red = true;
3198                                 root = rotateLeft(root, xpl);
3199                                 xpl = (xp = x.parent) == null ?
3200                                     null : xp.left;
3201                             }
3202                             if (xpl != null) {
3203                                 xpl.red = (xp == null) ? false : xp.red;
3204                                 if ((sl = xpl.left) != null)
3205                                     sl.red = false;
3206                             }
3207                             if (xp != null) {
3208                                 xp.red = false;
3209                                 root = rotateRight(root, xp);
3210                             }
3211                             x = root;
3212                         }
3213                     }
3214                 }
3215             }
3216         }
3217 
3218         /**
3219          * Recursive invariant check
3220          */
3221         static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3222             TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3223                 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3224             if (tb != null && tb.next != t)
3225                 return false;
3226             if (tn != null && tn.prev != t)
3227                 return false;
3228             if (tp != null && t != tp.left && t != tp.right)
3229                 return false;
3230             if (tl != null && (tl.parent != t || tl.hash > t.hash))
3231                 return false;
3232             if (tr != null && (tr.parent != t || tr.hash < t.hash))
3233                 return false;
3234             if (t.red && tl != null && tl.red && tr != null && tr.red)
3235                 return false;
3236             if (tl != null && !checkInvariants(tl))
3237                 return false;
3238             if (tr != null && !checkInvariants(tr))
3239                 return false;
3240             return true;
3241         }
3242 
3243         private static final sun.misc.Unsafe U;
3244         private static final long LOCKSTATE;
3245         static {
3246             try {
3247                 U = sun.misc.Unsafe.getUnsafe();
3248                 Class<?> k = TreeBin.class;
3249                 LOCKSTATE = U.objectFieldOffset
3250                     (k.getDeclaredField("lockState"));
3251             } catch (Exception e) {
3252                 throw new Error(e);
3253             }
3254         }
3255     }
3256 
3257     /* ----------------Table Traversal -------------- */
3258 
3259     /**
3260      * Records the table, its length, and current traversal index for a
3261      * traverser that must process a region of a forwarded table before
3262      * proceeding with current table.
3263      */
3264     static final class TableStack<K,V> {
3265         int length;
3266         int index;
3267         Node<K,V>[] tab;
3268         TableStack<K,V> next;
3269     }
3270 
3271     /**
3272      * Encapsulates traversal for methods such as containsValue; also
3273      * serves as a base class for other iterators and spliterators.
3274      *
3275      * Method advance visits once each still-valid node that was
3276      * reachable upon iterator construction. It might miss some that
3277      * were added to a bin after the bin was visited, which is OK wrt
3278      * consistency guarantees. Maintaining this property in the face
3279      * of possible ongoing resizes requires a fair amount of
3280      * bookkeeping state that is difficult to optimize away amidst
3281      * volatile accesses.  Even so, traversal maintains reasonable
3282      * throughput.
3283      *
3284      * Normally, iteration proceeds bin-by-bin traversing lists.
3285      * However, if the table has been resized, then all future steps
3286      * must traverse both the bin at the current index as well as at
3287      * (index + baseSize); and so on for further resizings. To
3288      * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds check fails
3290      * for a table read.
3291      */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;    // continue within the current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;
                // Terminate on exhaustion or any out-of-bounds table read
                // (defends against unsynchronized sharing of this traverser).
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node, not a data entry.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: descend into the new table,
                        // remembering where to resume in the old one.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;  // walk the node list, not the tree
                    else
                        e = null;  // ReservationNode: placeholder, nothing to visit
                }
                if (stack != null)
                    recoverState(n);      // possibly pop back to a prior table
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // Pop every frame whose remaining split in the current table
            // is exhausted, restoring the outer table and resume index.
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            // Fully unwound: advance within the base table as usual.
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
3379 
3380     /**
3381      * Base of key, value, and entry Iterators. Adds fields to
3382      * Traverser to support iterator.remove.
3383      */
3384     static class BaseIterator<K,V> extends Traverser<K,V> {
3385         final ConcurrentHashMap<K,V> map;
3386         Node<K,V> lastReturned;
3387         BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3388                     ConcurrentHashMap<K,V> map) {
3389             super(tab, size, index, limit);
3390             this.map = map;
3391             advance();
3392         }
3393 
3394         public final boolean hasNext() { return next != null; }
3395         public final boolean hasMoreElements() { return next != null; }
3396 
3397         public final void remove() {
3398             Node<K,V> p;
3399             if ((p = lastReturned) == null)
3400                 throw new IllegalStateException();
3401             lastReturned = null;
3402             map.replaceNode(p.key, null, null);
3403         }
3404     }
3405 
3406     static final class KeyIterator<K,V> extends BaseIterator<K,V>
3407         implements Iterator<K>, Enumeration<K> {
3408         KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
3409                     ConcurrentHashMap<K,V> map) {
3410             super(tab, index, size, limit, map);
3411         }
3412 
3413         public final K next() {
3414             Node<K,V> p;
3415             if ((p = next) == null)
3416                 throw new NoSuchElementException();
3417             K k = p.key;
3418             lastReturned = p;
3419             advance();
3420             return k;
3421         }
3422 
3423         public final K nextElement() { return next(); }
3424     }
3425 
3426     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3427         implements Iterator<V>, Enumeration<V> {
3428         ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
3429                       ConcurrentHashMap<K,V> map) {
3430             super(tab, index, size, limit, map);
3431         }
3432 
3433         public final V next() {
3434             Node<K,V> p;
3435             if ((p = next) == null)
3436                 throw new NoSuchElementException();
3437             V v = p.val;
3438             lastReturned = p;
3439             advance();
3440             return v;
3441         }
3442 
3443         public final V nextElement() { return next(); }
3444     }
3445 
3446     static final class EntryIterator<K,V> extends BaseIterator<K,V>
3447         implements Iterator<Map.Entry<K,V>> {
3448         EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
3449                       ConcurrentHashMap<K,V> map) {
3450             super(tab, index, size, limit, map);
3451         }
3452 
3453         public final Map.Entry<K,V> next() {
3454             Node<K,V> p;
3455             if ((p = next) == null)
3456                 throw new NoSuchElementException();
3457             K k = p.key;
3458             V v = p.val;
3459             lastReturned = p;
3460             advance();
3461             return new MapEntry<K,V>(k, v, map);
3462         }
3463     }
3464 
3465     /**
3466      * Exported Entry for EntryIterator
3467      */
3468     static final class MapEntry<K,V> implements Map.Entry<K,V> {
3469         final K key; // non-null
3470         V val;       // non-null
3471         final ConcurrentHashMap<K,V> map;
3472         MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3473             this.key = key;
3474             this.val = val;
3475             this.map = map;
3476         }
3477         public K getKey()        { return key; }
3478         public V getValue()      { return val; }
3479         public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3480         public String toString() { return key + "=" + val; }
3481 
3482         public boolean equals(Object o) {
3483             Object k, v; Map.Entry<?,?> e;
3484             return ((o instanceof Map.Entry) &&
3485                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3486                     (v = e.getValue()) != null &&
3487                     (k == key || k.equals(key)) &&
3488                     (v == val || v.equals(val)));
3489         }
3490 
3491         /**
3492          * Sets our entry's value and writes through to the map. The
3493          * value to return is somewhat arbitrary here. Since we do not
3494          * necessarily track asynchronous changes, the most recent
3495          * "previous" value could be different from what we return (or
3496          * could even have been removed, in which case the put will
3497          * re-establish). We do not and cannot guarantee more.
3498          */
3499         public V setValue(V value) {
3500             if (value == null) throw new NullPointerException();
3501             V v = val;
3502             val = value;
3503             map.put(key, value);
3504             return v;
3505         }
3506     }
3507 
3508     static final class KeySpliterator<K,V> extends Traverser<K,V>
3509         implements Spliterator<K> {
3510         long est;               // size estimate
3511         KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3512                        long est) {
3513             super(tab, size, index, limit);
3514             this.est = est;
3515         }
3516 
3517         public Spliterator<K> trySplit() {
3518             int i, f, h;
3519             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3520                 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3521                                         f, est >>>= 1);
3522         }
3523 
3524         public void forEachRemaining(Consumer<? super K> action) {
3525             if (action == null) throw new NullPointerException();
3526             for (Node<K,V> p; (p = advance()) != null;)
3527                 action.accept(p.key);
3528         }
3529 
3530         public boolean tryAdvance(Consumer<? super K> action) {
3531             if (action == null) throw new NullPointerException();
3532             Node<K,V> p;
3533             if ((p = advance()) == null)
3534                 return false;
3535             action.accept(p.key);
3536             return true;
3537         }
3538 
3539         public long estimateSize() { return est; }
3540 
3541         public int characteristics() {
3542             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3543                 Spliterator.NONNULL;
3544         }
3545     }
3546 
3547     static final class ValueSpliterator<K,V> extends Traverser<K,V>
3548         implements Spliterator<V> {
3549         long est;               // size estimate
3550         ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3551                          long est) {
3552             super(tab, size, index, limit);
3553             this.est = est;
3554         }
3555 
3556         public Spliterator<V> trySplit() {
3557             int i, f, h;
3558             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3559                 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3560                                           f, est >>>= 1);
3561         }
3562 
3563         public void forEachRemaining(Consumer<? super V> action) {
3564             if (action == null) throw new NullPointerException();
3565             for (Node<K,V> p; (p = advance()) != null;)
3566                 action.accept(p.val);
3567         }
3568 
3569         public boolean tryAdvance(Consumer<? super V> action) {
3570             if (action == null) throw new NullPointerException();
3571             Node<K,V> p;
3572             if ((p = advance()) == null)
3573                 return false;
3574             action.accept(p.val);
3575             return true;
3576         }
3577 
3578         public long estimateSize() { return est; }
3579 
3580         public int characteristics() {
3581             return Spliterator.CONCURRENT | Spliterator.NONNULL;
3582         }
3583     }
3584 
3585     static final class EntrySpliterator<K,V> extends Traverser<K,V>
3586         implements Spliterator<Map.Entry<K,V>> {
3587         final ConcurrentHashMap<K,V> map; // To export MapEntry
3588         long est;               // size estimate
3589         EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3590                          long est, ConcurrentHashMap<K,V> map) {
3591             super(tab, size, index, limit);
3592             this.map = map;
3593             this.est = est;
3594         }
3595 
3596         public Spliterator<Map.Entry<K,V>> trySplit() {
3597             int i, f, h;
3598             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3599                 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3600                                           f, est >>>= 1, map);
3601         }
3602 
3603         public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3604             if (action == null) throw new NullPointerException();
3605             for (Node<K,V> p; (p = advance()) != null; )
3606                 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3607         }
3608 
3609         public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3610             if (action == null) throw new NullPointerException();
3611             Node<K,V> p;
3612             if ((p = advance()) == null)
3613                 return false;
3614             action.accept(new MapEntry<K,V>(p.key, p.val, map));
3615             return true;
3616         }
3617 
3618         public long estimateSize() { return est; }
3619 
3620         public int characteristics() {
3621             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3622                 Spliterator.NONNULL;
3623         }
3624     }
3625 
3626     // Parallel bulk operations
3627 
3628     /**
3629      * Computes initial batch value for bulk tasks. The returned value
3630      * is approximately exp2 of the number of times (minus one) to
3631      * split task by two before executing leaf action. This value is
3632      * faster to compute and more convenient to use as a guide to
3633      * splitting than is the depth, since it is used while dividing by
3634      * two anyway.
3635      */
3636     final int batchFor(long b) {
3637         long n;
3638         if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3639             return 0;
3640         int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3641         return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3642     }
3643 
3644     /**
3645      * Performs the given action for each (key, value).
3646      *
3647      * @param parallelismThreshold the (estimated) number of elements
3648      * needed for this operation to be executed in parallel
3649      * @param action the action
3650      * @since 1.8
3651      */
3652     public void forEach(long parallelismThreshold,
3653                         BiConsumer<? super K,? super V> action) {
3654         if (action == null) throw new NullPointerException();
3655         new ForEachMappingTask<K,V>
3656             (null, batchFor(parallelismThreshold), 0, 0, table,
3657              action).invoke();
3658     }
3659 
3660     /**
3661      * Performs the given action for each non-null transformation
3662      * of each (key, value).
3663      *
3664      * @param parallelismThreshold the (estimated) number of elements
3665      * needed for this operation to be executed in parallel
3666      * @param transformer a function returning the transformation
3667      * for an element, or null if there is no transformation (in
3668      * which case the action is not applied)
3669      * @param action the action
3670      * @param <U> the return type of the transformer
3671      * @since 1.8
3672      */
3673     public <U> void forEach(long parallelismThreshold,
3674                             BiFunction<? super K, ? super V, ? extends U> transformer,
3675                             Consumer<? super U> action) {
3676         if (transformer == null || action == null)
3677             throw new NullPointerException();
3678         new ForEachTransformedMappingTask<K,V,U>
3679             (null, batchFor(parallelismThreshold), 0, 0, table,
3680              transformer, action).invoke();
3681     }
3682 
3683     /**
3684      * Returns a non-null result from applying the given search
3685      * function on each (key, value), or null if none.  Upon
3686      * success, further element processing is suppressed and the
3687      * results of any other parallel invocations of the search
3688      * function are ignored.
3689      *
3690      * @param parallelismThreshold the (estimated) number of elements
3691      * needed for this operation to be executed in parallel
3692      * @param searchFunction a function returning a non-null
3693      * result on success, else null
3694      * @param <U> the return type of the search function
3695      * @return a non-null result from applying the given search
3696      * function on each (key, value), or null if none
3697      * @since 1.8
3698      */
3699     public <U> U search(long parallelismThreshold,
3700                         BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3701         if (searchFunction == null) throw new NullPointerException();
3702         return new SearchMappingsTask<K,V,U>
3703             (null, batchFor(parallelismThreshold), 0, 0, table,
3704              searchFunction, new AtomicReference<U>()).invoke();
3705     }
3706 
3707     /**
3708      * Returns the result of accumulating the given transformation
3709      * of all (key, value) pairs using the given reducer to
3710      * combine values, or null if none.
3711      *
3712      * @param parallelismThreshold the (estimated) number of elements
3713      * needed for this operation to be executed in parallel
3714      * @param transformer a function returning the transformation
3715      * for an element, or null if there is no transformation (in
3716      * which case it is not combined)
3717      * @param reducer a commutative associative combining function
3718      * @param <U> the return type of the transformer
3719      * @return the result of accumulating the given transformation
3720      * of all (key, value) pairs
3721      * @since 1.8
3722      */
3723     public <U> U reduce(long parallelismThreshold,
3724                         BiFunction<? super K, ? super V, ? extends U> transformer,
3725                         BiFunction<? super U, ? super U, ? extends U> reducer) {
3726         if (transformer == null || reducer == null)
3727             throw new NullPointerException();
3728         return new MapReduceMappingsTask<K,V,U>
3729             (null, batchFor(parallelismThreshold), 0, 0, table,
3730              null, transformer, reducer).invoke();
3731     }
3732 
3733     /**
3734      * Returns the result of accumulating the given transformation
3735      * of all (key, value) pairs using the given reducer to
3736      * combine values, and the given basis as an identity value.
3737      *
3738      * @param parallelismThreshold the (estimated) number of elements
3739      * needed for this operation to be executed in parallel
3740      * @param transformer a function returning the transformation
3741      * for an element
3742      * @param basis the identity (initial default value) for the reduction
3743      * @param reducer a commutative associative combining function
3744      * @return the result of accumulating the given transformation
3745      * of all (key, value) pairs
3746      * @since 1.8
3747      */
3748     public double reduceToDouble(long parallelismThreshold,
3749                                  ToDoubleBiFunction<? super K, ? super V> transformer,
3750                                  double basis,
3751                                  DoubleBinaryOperator reducer) {
3752         if (transformer == null || reducer == null)
3753             throw new NullPointerException();
3754         return new MapReduceMappingsToDoubleTask<K,V>
3755             (null, batchFor(parallelismThreshold), 0, 0, table,
3756              null, transformer, basis, reducer).invoke();
3757     }
3758 
3759     /**
3760      * Returns the result of accumulating the given transformation
3761      * of all (key, value) pairs using the given reducer to
3762      * combine values, and the given basis as an identity value.
3763      *
3764      * @param parallelismThreshold the (estimated) number of elements
3765      * needed for this operation to be executed in parallel
3766      * @param transformer a function returning the transformation
3767      * for an element
3768      * @param basis the identity (initial default value) for the reduction
3769      * @param reducer a commutative associative combining function
3770      * @return the result of accumulating the given transformation
3771      * of all (key, value) pairs
3772      * @since 1.8
3773      */
3774     public long reduceToLong(long parallelismThreshold,
3775                              ToLongBiFunction<? super K, ? super V> transformer,
3776                              long basis,
3777                              LongBinaryOperator reducer) {
3778         if (transformer == null || reducer == null)
3779             throw new NullPointerException();
3780         return new MapReduceMappingsToLongTask<K,V>
3781             (null, batchFor(parallelismThreshold), 0, 0, table,
3782              null, transformer, basis, reducer).invoke();
3783     }
3784 
3785     /**
3786      * Returns the result of accumulating the given transformation
3787      * of all (key, value) pairs using the given reducer to
3788      * combine values, and the given basis as an identity value.
3789      *
3790      * @param parallelismThreshold the (estimated) number of elements
3791      * needed for this operation to be executed in parallel
3792      * @param transformer a function returning the transformation
3793      * for an element
3794      * @param basis the identity (initial default value) for the reduction
3795      * @param reducer a commutative associative combining function
3796      * @return the result of accumulating the given transformation
3797      * of all (key, value) pairs
3798      * @since 1.8
3799      */
3800     public int reduceToInt(long parallelismThreshold,
3801                            ToIntBiFunction<? super K, ? super V> transformer,
3802                            int basis,
3803                            IntBinaryOperator reducer) {
3804         if (transformer == null || reducer == null)
3805             throw new NullPointerException();
3806         return new MapReduceMappingsToIntTask<K,V>
3807             (null, batchFor(parallelismThreshold), 0, 0, table,
3808              null, transformer, basis, reducer).invoke();
3809     }
3810 
3811     /**
3812      * Performs the given action for each key.
3813      *
3814      * @param parallelismThreshold the (estimated) number of elements
3815      * needed for this operation to be executed in parallel
3816      * @param action the action
3817      * @since 1.8
3818      */
3819     public void forEachKey(long parallelismThreshold,
3820                            Consumer<? super K> action) {
3821         if (action == null) throw new NullPointerException();
3822         new ForEachKeyTask<K,V>
3823             (null, batchFor(parallelismThreshold), 0, 0, table,
3824              action).invoke();
3825     }
3826 
3827     /**
3828      * Performs the given action for each non-null transformation
3829      * of each key.
3830      *
3831      * @param parallelismThreshold the (estimated) number of elements
3832      * needed for this operation to be executed in parallel
3833      * @param transformer a function returning the transformation
3834      * for an element, or null if there is no transformation (in
3835      * which case the action is not applied)
3836      * @param action the action
3837      * @param <U> the return type of the transformer
3838      * @since 1.8
3839      */
3840     public <U> void forEachKey(long parallelismThreshold,
3841                                Function<? super K, ? extends U> transformer,
3842                                Consumer<? super U> action) {
3843         if (transformer == null || action == null)
3844             throw new NullPointerException();
3845         new ForEachTransformedKeyTask<K,V,U>
3846             (null, batchFor(parallelismThreshold), 0, 0, table,
3847              transformer, action).invoke();
3848     }
3849 
3850     /**
3851      * Returns a non-null result from applying the given search
3852      * function on each key, or null if none. Upon success,
3853      * further element processing is suppressed and the results of
3854      * any other parallel invocations of the search function are
3855      * ignored.
3856      *
3857      * @param parallelismThreshold the (estimated) number of elements
3858      * needed for this operation to be executed in parallel
3859      * @param searchFunction a function returning a non-null
3860      * result on success, else null
3861      * @param <U> the return type of the search function
3862      * @return a non-null result from applying the given search
3863      * function on each key, or null if none
3864      * @since 1.8
3865      */
3866     public <U> U searchKeys(long parallelismThreshold,
3867                             Function<? super K, ? extends U> searchFunction) {
3868         if (searchFunction == null) throw new NullPointerException();
3869         return new SearchKeysTask<K,V,U>
3870             (null, batchFor(parallelismThreshold), 0, 0, table,
3871              searchFunction, new AtomicReference<U>()).invoke();
3872     }
3873 
3874     /**
3875      * Returns the result of accumulating all keys using the given
3876      * reducer to combine values, or null if none.
3877      *
3878      * @param parallelismThreshold the (estimated) number of elements
3879      * needed for this operation to be executed in parallel
3880      * @param reducer a commutative associative combining function
3881      * @return the result of accumulating all keys using the given
3882      * reducer to combine values, or null if none
3883      * @since 1.8
3884      */
3885     public K reduceKeys(long parallelismThreshold,
3886                         BiFunction<? super K, ? super K, ? extends K> reducer) {
3887         if (reducer == null) throw new NullPointerException();
3888         return new ReduceKeysTask<K,V>
3889             (null, batchFor(parallelismThreshold), 0, 0, table,
3890              null, reducer).invoke();
3891     }
3892 
3893     /**
3894      * Returns the result of accumulating the given transformation
3895      * of all keys using the given reducer to combine values, or
3896      * null if none.
3897      *
3898      * @param parallelismThreshold the (estimated) number of elements
3899      * needed for this operation to be executed in parallel
3900      * @param transformer a function returning the transformation
3901      * for an element, or null if there is no transformation (in
3902      * which case it is not combined)
3903      * @param reducer a commutative associative combining function
3904      * @param <U> the return type of the transformer
3905      * @return the result of accumulating the given transformation
3906      * of all keys
3907      * @since 1.8
3908      */
3909     public <U> U reduceKeys(long parallelismThreshold,
3910                             Function<? super K, ? extends U> transformer,
3911          BiFunction<? super U, ? super U, ? extends U> reducer) {
3912         if (transformer == null || reducer == null)
3913             throw new NullPointerException();
3914         return new MapReduceKeysTask<K,V,U>
3915             (null, batchFor(parallelismThreshold), 0, 0, table,
3916              null, transformer, reducer).invoke();
3917     }
3918 
3919     /**
3920      * Returns the result of accumulating the given transformation
3921      * of all keys using the given reducer to combine values, and
3922      * the given basis as an identity value.
3923      *
3924      * @param parallelismThreshold the (estimated) number of elements
3925      * needed for this operation to be executed in parallel
3926      * @param transformer a function returning the transformation
3927      * for an element
3928      * @param basis the identity (initial default value) for the reduction
3929      * @param reducer a commutative associative combining function
3930      * @return the result of accumulating the given transformation
3931      * of all keys
3932      * @since 1.8
3933      */
3934     public double reduceKeysToDouble(long parallelismThreshold,
3935                                      ToDoubleFunction<? super K> transformer,
3936                                      double basis,
3937                                      DoubleBinaryOperator reducer) {
3938         if (transformer == null || reducer == null)
3939             throw new NullPointerException();
3940         return new MapReduceKeysToDoubleTask<K,V>
3941             (null, batchFor(parallelismThreshold), 0, 0, table,
3942              null, transformer, basis, reducer).invoke();
3943     }
3944 
3945     /**
3946      * Returns the result of accumulating the given transformation
3947      * of all keys using the given reducer to combine values, and
3948      * the given basis as an identity value.
3949      *
3950      * @param parallelismThreshold the (estimated) number of elements
3951      * needed for this operation to be executed in parallel
3952      * @param transformer a function returning the transformation
3953      * for an element
3954      * @param basis the identity (initial default value) for the reduction
3955      * @param reducer a commutative associative combining function
3956      * @return the result of accumulating the given transformation
3957      * of all keys
3958      * @since 1.8
3959      */
3960     public long reduceKeysToLong(long parallelismThreshold,
3961                                  ToLongFunction<? super K> transformer,
3962                                  long basis,
3963                                  LongBinaryOperator reducer) {
3964         if (transformer == null || reducer == null)
3965             throw new NullPointerException();
3966         return new MapReduceKeysToLongTask<K,V>
3967             (null, batchFor(parallelismThreshold), 0, 0, table,
3968              null, transformer, basis, reducer).invoke();
3969     }
3970 
3971     /**
3972      * Returns the result of accumulating the given transformation
3973      * of all keys using the given reducer to combine values, and
3974      * the given basis as an identity value.
3975      *
3976      * @param parallelismThreshold the (estimated) number of elements
3977      * needed for this operation to be executed in parallel
3978      * @param transformer a function returning the transformation
3979      * for an element
3980      * @param basis the identity (initial default value) for the reduction
3981      * @param reducer a commutative associative combining function
3982      * @return the result of accumulating the given transformation
3983      * of all keys
3984      * @since 1.8
3985      */
3986     public int reduceKeysToInt(long parallelismThreshold,
3987                                ToIntFunction<? super K> transformer,
3988                                int basis,
3989                                IntBinaryOperator reducer) {
3990         if (transformer == null || reducer == null)
3991             throw new NullPointerException();
3992         return new MapReduceKeysToIntTask<K,V>
3993             (null, batchFor(parallelismThreshold), 0, 0, table,
3994              null, transformer, basis, reducer).invoke();
3995     }
3996 
3997     /**
3998      * Performs the given action for each value.
3999      *
4000      * @param parallelismThreshold the (estimated) number of elements
4001      * needed for this operation to be executed in parallel
4002      * @param action the action
4003      * @since 1.8
4004      */
4005     public void forEachValue(long parallelismThreshold,
4006                              Consumer<? super V> action) {
4007         if (action == null)
4008             throw new NullPointerException();
4009         new ForEachValueTask<K,V>
4010             (null, batchFor(parallelismThreshold), 0, 0, table,
4011              action).invoke();
4012     }
4013 
4014     /**
4015      * Performs the given action for each non-null transformation
4016      * of each value.
4017      *
4018      * @param parallelismThreshold the (estimated) number of elements
4019      * needed for this operation to be executed in parallel
4020      * @param transformer a function returning the transformation
4021      * for an element, or null if there is no transformation (in
4022      * which case the action is not applied)
4023      * @param action the action
4024      * @param <U> the return type of the transformer
4025      * @since 1.8
4026      */
4027     public <U> void forEachValue(long parallelismThreshold,
4028                                  Function<? super V, ? extends U> transformer,
4029                                  Consumer<? super U> action) {
4030         if (transformer == null || action == null)
4031             throw new NullPointerException();
4032         new ForEachTransformedValueTask<K,V,U>
4033             (null, batchFor(parallelismThreshold), 0, 0, table,
4034              transformer, action).invoke();
4035     }
4036 
4037     /**
4038      * Returns a non-null result from applying the given search
4039      * function on each value, or null if none.  Upon success,
4040      * further element processing is suppressed and the results of
4041      * any other parallel invocations of the search function are
4042      * ignored.
4043      *
4044      * @param parallelismThreshold the (estimated) number of elements
4045      * needed for this operation to be executed in parallel
4046      * @param searchFunction a function returning a non-null
4047      * result on success, else null
4048      * @param <U> the return type of the search function
4049      * @return a non-null result from applying the given search
4050      * function on each value, or null if none
4051      * @since 1.8
4052      */
4053     public <U> U searchValues(long parallelismThreshold,
4054                               Function<? super V, ? extends U> searchFunction) {
4055         if (searchFunction == null) throw new NullPointerException();
4056         return new SearchValuesTask<K,V,U>
4057             (null, batchFor(parallelismThreshold), 0, 0, table,
4058              searchFunction, new AtomicReference<U>()).invoke();
4059     }
4060 
4061     /**
4062      * Returns the result of accumulating all values using the
4063      * given reducer to combine values, or null if none.
4064      *
4065      * @param parallelismThreshold the (estimated) number of elements
4066      * needed for this operation to be executed in parallel
4067      * @param reducer a commutative associative combining function
4068      * @return the result of accumulating all values
4069      * @since 1.8
4070      */
4071     public V reduceValues(long parallelismThreshold,
4072                           BiFunction<? super V, ? super V, ? extends V> reducer) {
4073         if (reducer == null) throw new NullPointerException();
4074         return new ReduceValuesTask<K,V>
4075             (null, batchFor(parallelismThreshold), 0, 0, table,
4076              null, reducer).invoke();
4077     }
4078 
4079     /**
4080      * Returns the result of accumulating the given transformation
4081      * of all values using the given reducer to combine values, or
4082      * null if none.
4083      *
4084      * @param parallelismThreshold the (estimated) number of elements
4085      * needed for this operation to be executed in parallel
4086      * @param transformer a function returning the transformation
4087      * for an element, or null if there is no transformation (in
4088      * which case it is not combined)
4089      * @param reducer a commutative associative combining function
4090      * @param <U> the return type of the transformer
4091      * @return the result of accumulating the given transformation
4092      * of all values
4093      * @since 1.8
4094      */
4095     public <U> U reduceValues(long parallelismThreshold,
4096                               Function<? super V, ? extends U> transformer,
4097                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4098         if (transformer == null || reducer == null)
4099             throw new NullPointerException();
4100         return new MapReduceValuesTask<K,V,U>
4101             (null, batchFor(parallelismThreshold), 0, 0, table,
4102              null, transformer, reducer).invoke();
4103     }
4104 
4105     /**
4106      * Returns the result of accumulating the given transformation
4107      * of all values using the given reducer to combine values,
4108      * and the given basis as an identity value.
4109      *
4110      * @param parallelismThreshold the (estimated) number of elements
4111      * needed for this operation to be executed in parallel
4112      * @param transformer a function returning the transformation
4113      * for an element
4114      * @param basis the identity (initial default value) for the reduction
4115      * @param reducer a commutative associative combining function
4116      * @return the result of accumulating the given transformation
4117      * of all values
4118      * @since 1.8
4119      */
4120     public double reduceValuesToDouble(long parallelismThreshold,
4121                                        ToDoubleFunction<? super V> transformer,
4122                                        double basis,
4123                                        DoubleBinaryOperator reducer) {
4124         if (transformer == null || reducer == null)
4125             throw new NullPointerException();
4126         return new MapReduceValuesToDoubleTask<K,V>
4127             (null, batchFor(parallelismThreshold), 0, 0, table,
4128              null, transformer, basis, reducer).invoke();
4129     }
4130 
4131     /**
4132      * Returns the result of accumulating the given transformation
4133      * of all values using the given reducer to combine values,
4134      * and the given basis as an identity value.
4135      *
4136      * @param parallelismThreshold the (estimated) number of elements
4137      * needed for this operation to be executed in parallel
4138      * @param transformer a function returning the transformation
4139      * for an element
4140      * @param basis the identity (initial default value) for the reduction
4141      * @param reducer a commutative associative combining function
4142      * @return the result of accumulating the given transformation
4143      * of all values
4144      * @since 1.8
4145      */
4146     public long reduceValuesToLong(long parallelismThreshold,
4147                                    ToLongFunction<? super V> transformer,
4148                                    long basis,
4149                                    LongBinaryOperator reducer) {
4150         if (transformer == null || reducer == null)
4151             throw new NullPointerException();
4152         return new MapReduceValuesToLongTask<K,V>
4153             (null, batchFor(parallelismThreshold), 0, 0, table,
4154              null, transformer, basis, reducer).invoke();
4155     }
4156 
4157     /**
4158      * Returns the result of accumulating the given transformation
4159      * of all values using the given reducer to combine values,
4160      * and the given basis as an identity value.
4161      *
4162      * @param parallelismThreshold the (estimated) number of elements
4163      * needed for this operation to be executed in parallel
4164      * @param transformer a function returning the transformation
4165      * for an element
4166      * @param basis the identity (initial default value) for the reduction
4167      * @param reducer a commutative associative combining function
4168      * @return the result of accumulating the given transformation
4169      * of all values
4170      * @since 1.8
4171      */
4172     public int reduceValuesToInt(long parallelismThreshold,
4173                                  ToIntFunction<? super V> transformer,
4174                                  int basis,
4175                                  IntBinaryOperator reducer) {
4176         if (transformer == null || reducer == null)
4177             throw new NullPointerException();
4178         return new MapReduceValuesToIntTask<K,V>
4179             (null, batchFor(parallelismThreshold), 0, 0, table,
4180              null, transformer, basis, reducer).invoke();
4181     }
4182 
4183     /**
4184      * Performs the given action for each entry.
4185      *
4186      * @param parallelismThreshold the (estimated) number of elements
4187      * needed for this operation to be executed in parallel
4188      * @param action the action
4189      * @since 1.8
4190      */
4191     public void forEachEntry(long parallelismThreshold,
4192                              Consumer<? super Map.Entry<K,V>> action) {
4193         if (action == null) throw new NullPointerException();
4194         new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4195                                   action).invoke();
4196     }
4197 
4198     /**
4199      * Performs the given action for each non-null transformation
4200      * of each entry.
4201      *
4202      * @param parallelismThreshold the (estimated) number of elements
4203      * needed for this operation to be executed in parallel
4204      * @param transformer a function returning the transformation
4205      * for an element, or null if there is no transformation (in
4206      * which case the action is not applied)
4207      * @param action the action
4208      * @param <U> the return type of the transformer
4209      * @since 1.8
4210      */
4211     public <U> void forEachEntry(long parallelismThreshold,
4212                                  Function<Map.Entry<K,V>, ? extends U> transformer,
4213                                  Consumer<? super U> action) {
4214         if (transformer == null || action == null)
4215             throw new NullPointerException();
4216         new ForEachTransformedEntryTask<K,V,U>
4217             (null, batchFor(parallelismThreshold), 0, 0, table,
4218              transformer, action).invoke();
4219     }
4220 
4221     /**
4222      * Returns a non-null result from applying the given search
4223      * function on each entry, or null if none.  Upon success,
4224      * further element processing is suppressed and the results of
4225      * any other parallel invocations of the search function are
4226      * ignored.
4227      *
4228      * @param parallelismThreshold the (estimated) number of elements
4229      * needed for this operation to be executed in parallel
4230      * @param searchFunction a function returning a non-null
4231      * result on success, else null
4232      * @param <U> the return type of the search function
4233      * @return a non-null result from applying the given search
4234      * function on each entry, or null if none
4235      * @since 1.8
4236      */
4237     public <U> U searchEntries(long parallelismThreshold,
4238                                Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4239         if (searchFunction == null) throw new NullPointerException();
4240         return new SearchEntriesTask<K,V,U>
4241             (null, batchFor(parallelismThreshold), 0, 0, table,
4242              searchFunction, new AtomicReference<U>()).invoke();
4243     }
4244 
4245     /**
4246      * Returns the result of accumulating all entries using the
4247      * given reducer to combine values, or null if none.
4248      *
4249      * @param parallelismThreshold the (estimated) number of elements
4250      * needed for this operation to be executed in parallel
4251      * @param reducer a commutative associative combining function
4252      * @return the result of accumulating all entries
4253      * @since 1.8
4254      */
4255     public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4256                                         BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4257         if (reducer == null) throw new NullPointerException();
4258         return new ReduceEntriesTask<K,V>
4259             (null, batchFor(parallelismThreshold), 0, 0, table,
4260              null, reducer).invoke();
4261     }
4262 
4263     /**
4264      * Returns the result of accumulating the given transformation
4265      * of all entries using the given reducer to combine values,
4266      * or null if none.
4267      *
4268      * @param parallelismThreshold the (estimated) number of elements
4269      * needed for this operation to be executed in parallel
4270      * @param transformer a function returning the transformation
4271      * for an element, or null if there is no transformation (in
4272      * which case it is not combined)
4273      * @param reducer a commutative associative combining function
4274      * @param <U> the return type of the transformer
4275      * @return the result of accumulating the given transformation
4276      * of all entries
4277      * @since 1.8
4278      */
4279     public <U> U reduceEntries(long parallelismThreshold,
4280                                Function<Map.Entry<K,V>, ? extends U> transformer,
4281                                BiFunction<? super U, ? super U, ? extends U> reducer) {
4282         if (transformer == null || reducer == null)
4283             throw new NullPointerException();
4284         return new MapReduceEntriesTask<K,V,U>
4285             (null, batchFor(parallelismThreshold), 0, 0, table,
4286              null, transformer, reducer).invoke();
4287     }
4288 
4289     /**
4290      * Returns the result of accumulating the given transformation
4291      * of all entries using the given reducer to combine values,
4292      * and the given basis as an identity value.
4293      *
4294      * @param parallelismThreshold the (estimated) number of elements
4295      * needed for this operation to be executed in parallel
4296      * @param transformer a function returning the transformation
4297      * for an element
4298      * @param basis the identity (initial default value) for the reduction
4299      * @param reducer a commutative associative combining function
4300      * @return the result of accumulating the given transformation
4301      * of all entries
4302      * @since 1.8
4303      */
4304     public double reduceEntriesToDouble(long parallelismThreshold,
4305                                         ToDoubleFunction<Map.Entry<K,V>> transformer,
4306                                         double basis,
4307                                         DoubleBinaryOperator reducer) {
4308         if (transformer == null || reducer == null)
4309             throw new NullPointerException();
4310         return new MapReduceEntriesToDoubleTask<K,V>
4311             (null, batchFor(parallelismThreshold), 0, 0, table,
4312              null, transformer, basis, reducer).invoke();
4313     }
4314 
4315     /**
4316      * Returns the result of accumulating the given transformation
4317      * of all entries using the given reducer to combine values,
4318      * and the given basis as an identity value.
4319      *
4320      * @param parallelismThreshold the (estimated) number of elements
4321      * needed for this operation to be executed in parallel
4322      * @param transformer a function returning the transformation
4323      * for an element
4324      * @param basis the identity (initial default value) for the reduction
4325      * @param reducer a commutative associative combining function
4326      * @return the result of accumulating the given transformation
4327      * of all entries
4328      * @since 1.8
4329      */
4330     public long reduceEntriesToLong(long parallelismThreshold,
4331                                     ToLongFunction<Map.Entry<K,V>> transformer,
4332                                     long basis,
4333                                     LongBinaryOperator reducer) {
4334         if (transformer == null || reducer == null)
4335             throw new NullPointerException();
4336         return new MapReduceEntriesToLongTask<K,V>
4337             (null, batchFor(parallelismThreshold), 0, 0, table,
4338              null, transformer, basis, reducer).invoke();
4339     }
4340 
4341     /**
4342      * Returns the result of accumulating the given transformation
4343      * of all entries using the given reducer to combine values,
4344      * and the given basis as an identity value.
4345      *
4346      * @param parallelismThreshold the (estimated) number of elements
4347      * needed for this operation to be executed in parallel
4348      * @param transformer a function returning the transformation
4349      * for an element
4350      * @param basis the identity (initial default value) for the reduction
4351      * @param reducer a commutative associative combining function
4352      * @return the result of accumulating the given transformation
4353      * of all entries
4354      * @since 1.8
4355      */
4356     public int reduceEntriesToInt(long parallelismThreshold,
4357                                   ToIntFunction<Map.Entry<K,V>> transformer,
4358                                   int basis,
4359                                   IntBinaryOperator reducer) {
4360         if (transformer == null || reducer == null)
4361             throw new NullPointerException();
4362         return new MapReduceEntriesToIntTask<K,V>
4363             (null, batchFor(parallelismThreshold), 0, 0, table,
4364              null, transformer, basis, reducer).invoke();
4365     }
4366 
4367 
4368     /* ----------------Views -------------- */
4369 
4370     /**
4371      * Base class for views.
4372      */
4373     abstract static class CollectionView<K,V,E>
4374         implements Collection<E>, java.io.Serializable {
4375         private static final long serialVersionUID = 7249069246763182397L;
4376         final ConcurrentHashMap<K,V> map;
4377         CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4378 
4379         /**
4380          * Returns the map backing this view.
4381          *
4382          * @return the map backing this view
4383          */
4384         public ConcurrentHashMap<K,V> getMap() { return map; }
4385 
4386         /**
4387          * Removes all of the elements from this view, by removing all
4388          * the mappings from the map backing this view.
4389          */
4390         public final void clear()      { map.clear(); }
4391         public final int size()        { return map.size(); }
4392         public final boolean isEmpty() { return map.isEmpty(); }
4393 
4394         // implementations below rely on concrete classes supplying these
4395         // abstract methods
4396         /**
4397          * Returns an iterator over the elements in this collection.
4398          *
4399          * <p>The returned iterator is
4400          * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4401          *
4402          * @return an iterator over the elements in this collection
4403          */
4404         public abstract Iterator<E> iterator();
4405         public abstract boolean contains(Object o);
4406         public abstract boolean remove(Object o);
4407 
4408         private static final String oomeMsg = "Required array size too large";
4409 
4410         public final Object[] toArray() {
4411             long sz = map.mappingCount();
4412             if (sz > MAX_ARRAY_SIZE)
4413                 throw new OutOfMemoryError(oomeMsg);
4414             int n = (int)sz;
4415             Object[] r = new Object[n];
4416             int i = 0;
4417             for (E e : this) {
4418                 if (i == n) {
4419                     if (n >= MAX_ARRAY_SIZE)
4420                         throw new OutOfMemoryError(oomeMsg);
4421                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4422                         n = MAX_ARRAY_SIZE;
4423                     else
4424                         n += (n >>> 1) + 1;
4425                     r = Arrays.copyOf(r, n);
4426                 }
4427                 r[i++] = e;
4428             }
4429             return (i == n) ? r : Arrays.copyOf(r, i);
4430         }
4431 
4432         @SuppressWarnings("unchecked")
4433         public final <T> T[] toArray(T[] a) {
4434             long sz = map.mappingCount();
4435             if (sz > MAX_ARRAY_SIZE)
4436                 throw new OutOfMemoryError(oomeMsg);
4437             int m = (int)sz;
4438             T[] r = (a.length >= m) ? a :
4439                 (T[])java.lang.reflect.Array
4440                 .newInstance(a.getClass().getComponentType(), m);
4441             int n = r.length;
4442             int i = 0;
4443             for (E e : this) {
4444                 if (i == n) {
4445                     if (n >= MAX_ARRAY_SIZE)
4446                         throw new OutOfMemoryError(oomeMsg);
4447                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4448                         n = MAX_ARRAY_SIZE;
4449                     else
4450                         n += (n >>> 1) + 1;
4451                     r = Arrays.copyOf(r, n);
4452                 }
4453                 r[i++] = (T)e;
4454             }
4455             if (a == r && i < n) {
4456                 r[i] = null; // null-terminate
4457                 return r;
4458             }
4459             return (i == n) ? r : Arrays.copyOf(r, i);
4460         }
4461 
4462         /**
4463          * Returns a string representation of this collection.
4464          * The string representation consists of the string representations
4465          * of the collection's elements in the order they are returned by
4466          * its iterator, enclosed in square brackets ({@code "[]"}).
4467          * Adjacent elements are separated by the characters {@code ", "}
4468          * (comma and space).  Elements are converted to strings as by
4469          * {@link String#valueOf(Object)}.
4470          *
4471          * @return a string representation of this collection
4472          */
4473         public final String toString() {
4474             StringBuilder sb = new StringBuilder();
4475             sb.append('[');
4476             Iterator<E> it = iterator();
4477             if (it.hasNext()) {
4478                 for (;;) {
4479                     Object e = it.next();
4480                     sb.append(e == this ? "(this Collection)" : e);
4481                     if (!it.hasNext())
4482                         break;
4483                     sb.append(',').append(' ');
4484                 }
4485             }
4486             return sb.append(']').toString();
4487         }
4488 
4489         public final boolean containsAll(Collection<?> c) {
4490             if (c != this) {
4491                 for (Object e : c) {
4492                     if (e == null || !contains(e))
4493                         return false;
4494                 }
4495             }
4496             return true;
4497         }
4498 
4499         public final boolean removeAll(Collection<?> c) {
4500             if (c == null) throw new NullPointerException();
4501             boolean modified = false;
4502             for (Iterator<E> it = iterator(); it.hasNext();) {
4503                 if (c.contains(it.next())) {
4504                     it.remove();
4505                     modified = true;
4506                 }
4507             }
4508             return modified;
4509         }
4510 
4511         public final boolean retainAll(Collection<?> c) {
4512             if (c == null) throw new NullPointerException();
4513             boolean modified = false;
4514             for (Iterator<E> it = iterator(); it.hasNext();) {
4515                 if (!c.contains(it.next())) {
4516                     it.remove();
4517                     modified = true;
4518                 }
4519             }
4520             return modified;
4521         }
4522 
4523     }
4524 
4525     /**
4526      * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4527      * which additions may optionally be enabled by mapping to a
4528      * common value.  This class cannot be directly instantiated.
4529      * See {@link #keySet() keySet()},
4530      * {@link #keySet(Object) keySet(V)},
4531      * {@link #newKeySet() newKeySet()},
4532      * {@link #newKeySet(int) newKeySet(int)}.
4533      *
4534      * @since 1.8
4535      */
4536     public static class KeySetView<K,V> extends CollectionView<K,V,K>
4537         implements Set<K>, java.io.Serializable {
4538         private static final long serialVersionUID = 7249069246763182397L;
4539         private final V value;
4540         KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4541             super(map);
4542             this.value = value;
4543         }
4544 
4545         /**
4546          * Returns the default mapped value for additions,
4547          * or {@code null} if additions are not supported.
4548          *
4549          * @return the default mapped value for additions, or {@code null}
4550          * if not supported
4551          */
4552         public V getMappedValue() { return value; }
4553 
4554         /**
4555          * {@inheritDoc}
4556          * @throws NullPointerException if the specified key is null
4557          */
4558         public boolean contains(Object o) { return map.containsKey(o); }
4559 
4560         /**
4561          * Removes the key from this map view, by removing the key (and its
4562          * corresponding value) from the backing map.  This method does
4563          * nothing if the key is not in the map.
4564          *
4565          * @param  o the key to be removed from the backing map
4566          * @return {@code true} if the backing map contained the specified key
4567          * @throws NullPointerException if the specified key is null
4568          */
4569         public boolean remove(Object o) { return map.remove(o) != null; }
4570 
4571         /**
4572          * @return an iterator over the keys of the backing map
4573          */
4574         public Iterator<K> iterator() {
4575             Node<K,V>[] t;
4576             ConcurrentHashMap<K,V> m = map;
4577             int f = (t = m.table) == null ? 0 : t.length;
4578             return new KeyIterator<K,V>(t, f, 0, f, m);
4579         }
4580 
4581         /**
4582          * Adds the specified key to this set view by mapping the key to
4583          * the default mapped value in the backing map, if defined.
4584          *
4585          * @param e key to be added
4586          * @return {@code true} if this set changed as a result of the call
4587          * @throws NullPointerException if the specified key is null
4588          * @throws UnsupportedOperationException if no default mapped value
4589          * for additions was provided
4590          */
4591         public boolean add(K e) {
4592             V v;
4593             if ((v = value) == null)
4594                 throw new UnsupportedOperationException();
4595             return map.putVal(e, v, true) == null;
4596         }
4597 
4598         /**
4599          * Adds all of the elements in the specified collection to this set,
4600          * as if by calling {@link #add} on each one.
4601          *
4602          * @param c the elements to be inserted into this set
4603          * @return {@code true} if this set changed as a result of the call
4604          * @throws NullPointerException if the collection or any of its
4605          * elements are {@code null}
4606          * @throws UnsupportedOperationException if no default mapped value
4607          * for additions was provided
4608          */
4609         public boolean addAll(Collection<? extends K> c) {
4610             boolean added = false;
4611             V v;
4612             if ((v = value) == null)
4613                 throw new UnsupportedOperationException();
4614             for (K e : c) {
4615                 if (map.putVal(e, v, true) == null)
4616                     added = true;
4617             }
4618             return added;
4619         }
4620 
4621         public int hashCode() {
4622             int h = 0;
4623             for (K e : this)
4624                 h += e.hashCode();
4625             return h;
4626         }
4627 
4628         public boolean equals(Object o) {
4629             Set<?> c;
4630             return ((o instanceof Set) &&
4631                     ((c = (Set<?>)o) == this ||
4632                      (containsAll(c) && c.containsAll(this))));
4633         }
4634 
4635         public Spliterator<K> spliterator() {
4636             Node<K,V>[] t;
4637             ConcurrentHashMap<K,V> m = map;
4638             long n = m.sumCount();
4639             int f = (t = m.table) == null ? 0 : t.length;
4640             return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4641         }
4642 
4643         public void forEach(Consumer<? super K> action) {
4644             if (action == null) throw new NullPointerException();
4645             Node<K,V>[] t;
4646             if ((t = map.table) != null) {
4647                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4648                 for (Node<K,V> p; (p = it.advance()) != null; )
4649                     action.accept(p.key);
4650             }
4651         }
4652     }
4653 
4654     /**
4655      * A view of a ConcurrentHashMap as a {@link Collection} of
4656      * values, in which additions are disabled. This class cannot be
4657      * directly instantiated. See {@link #values()}.
4658      */
4659     static final class ValuesView<K,V> extends CollectionView<K,V,V>
4660         implements Collection<V>, java.io.Serializable {
4661         private static final long serialVersionUID = 2249069246763182397L;
4662         ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4663         public final boolean contains(Object o) {
4664             return map.containsValue(o);
4665         }
4666 
4667         public final boolean remove(Object o) {
4668             if (o != null) {
4669                 for (Iterator<V> it = iterator(); it.hasNext();) {
4670                     if (o.equals(it.next())) {
4671                         it.remove();
4672                         return true;
4673                     }
4674                 }
4675             }
4676             return false;
4677         }
4678 
4679         public final Iterator<V> iterator() {
4680             ConcurrentHashMap<K,V> m = map;
4681             Node<K,V>[] t;
4682             int f = (t = m.table) == null ? 0 : t.length;
4683             return new ValueIterator<K,V>(t, f, 0, f, m);
4684         }
4685 
4686         public final boolean add(V e) {
4687             throw new UnsupportedOperationException();
4688         }
4689         public final boolean addAll(Collection<? extends V> c) {
4690             throw new UnsupportedOperationException();
4691         }
4692 
4693         public Spliterator<V> spliterator() {
4694             Node<K,V>[] t;
4695             ConcurrentHashMap<K,V> m = map;
4696             long n = m.sumCount();
4697             int f = (t = m.table) == null ? 0 : t.length;
4698             return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4699         }
4700 
4701         public void forEach(Consumer<? super V> action) {
4702             if (action == null) throw new NullPointerException();
4703             Node<K,V>[] t;
4704             if ((t = map.table) != null) {
4705                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4706                 for (Node<K,V> p; (p = it.advance()) != null; )
4707                     action.accept(p.val);
4708             }
4709         }
4710     }
4711 
4712     /**
4713      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4714      * entries.  This class cannot be directly instantiated. See
4715      * {@link #entrySet()}.
4716      */
4717     static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4718         implements Set<Map.Entry<K,V>>, java.io.Serializable {
4719         private static final long serialVersionUID = 2249069246763182397L;
4720         EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4721 
4722         public boolean contains(Object o) {
4723             Object k, v, r; Map.Entry<?,?> e;
4724             return ((o instanceof Map.Entry) &&
4725                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4726                     (r = map.get(k)) != null &&
4727                     (v = e.getValue()) != null &&
4728                     (v == r || v.equals(r)));
4729         }
4730 
4731         public boolean remove(Object o) {
4732             Object k, v; Map.Entry<?,?> e;
4733             return ((o instanceof Map.Entry) &&
4734                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4735                     (v = e.getValue()) != null &&
4736                     map.remove(k, v));
4737         }
4738 
4739         /**
4740          * @return an iterator over the entries of the backing map
4741          */
4742         public Iterator<Map.Entry<K,V>> iterator() {
4743             ConcurrentHashMap<K,V> m = map;
4744             Node<K,V>[] t;
4745             int f = (t = m.table) == null ? 0 : t.length;
4746             return new EntryIterator<K,V>(t, f, 0, f, m);
4747         }
4748 
4749         public boolean add(Entry<K,V> e) {
4750             return map.putVal(e.getKey(), e.getValue(), false) == null;
4751         }
4752 
4753         public boolean addAll(Collection<? extends Entry<K,V>> c) {
4754             boolean added = false;
4755             for (Entry<K,V> e : c) {
4756                 if (add(e))
4757                     added = true;
4758             }
4759             return added;
4760         }
4761 
4762         public final int hashCode() {
4763             int h = 0;
4764             Node<K,V>[] t;
4765             if ((t = map.table) != null) {
4766                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4767                 for (Node<K,V> p; (p = it.advance()) != null; ) {
4768                     h += p.hashCode();
4769                 }
4770             }
4771             return h;
4772         }
4773 
4774         public final boolean equals(Object o) {
4775             Set<?> c;
4776             return ((o instanceof Set) &&
4777                     ((c = (Set<?>)o) == this ||
4778                      (containsAll(c) && c.containsAll(this))));
4779         }
4780 
4781         public Spliterator<Map.Entry<K,V>> spliterator() {
4782             Node<K,V>[] t;
4783             ConcurrentHashMap<K,V> m = map;
4784             long n = m.sumCount();
4785             int f = (t = m.table) == null ? 0 : t.length;
4786             return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4787         }
4788 
4789         public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4790             if (action == null) throw new NullPointerException();
4791             Node<K,V>[] t;
4792             if ((t = map.table) != null) {
4793                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4794                 for (Node<K,V> p; (p = it.advance()) != null; )
4795                     action.accept(new MapEntry<K,V>(p.key, p.val, map));
4796             }
4797         }
4798 
4799     }
4800 
4801     // -------------------------------------------------------
4802 
4803     /**
4804      * Base class for bulk tasks. Repeats some fields and code from
4805      * class Traverser, because we need to subclass CountedCompleter.
4806      */
4807     @SuppressWarnings("serial")
4808     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4809         Node<K,V>[] tab;        // same as Traverser
4810         Node<K,V> next;
4811         TableStack<K,V> stack, spare;
4812         int index;
4813         int baseIndex;
4814         int baseLimit;
4815         final int baseSize;
4816         int batch;              // split control
4817 
4818         BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4819             super(par);
4820             this.batch = b;
4821             this.index = this.baseIndex = i;
4822             if ((this.tab = t) == null)
4823                 this.baseSize = this.baseLimit = 0;
4824             else if (par == null)
4825                 this.baseSize = this.baseLimit = t.length;
4826             else {
4827                 this.baseLimit = f;
4828                 this.baseSize = par.baseSize;
4829             }
4830         }
4831 
4832         /**
4833          * Same as Traverser version
4834          */
4835         final Node<K,V> advance() {
4836             Node<K,V> e;
4837             if ((e = next) != null)
4838                 e = e.next;
4839             for (;;) {
4840                 Node<K,V>[] t; int i, n;
4841                 if (e != null)
4842                     return next = e;
4843                 if (baseIndex >= baseLimit || (t = tab) == null ||
4844                     (n = t.length) <= (i = index) || i < 0)
4845                     return next = null;
4846                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
4847                     if (e instanceof ForwardingNode) {
4848                         tab = ((ForwardingNode<K,V>)e).nextTable;
4849                         e = null;
4850                         pushState(t, i, n);
4851                         continue;
4852                     }
4853                     else if (e instanceof TreeBin)
4854                         e = ((TreeBin<K,V>)e).first;
4855                     else
4856                         e = null;
4857                 }
4858                 if (stack != null)
4859                     recoverState(n);
4860                 else if ((index = i + baseSize) >= n)
4861                     index = ++baseIndex;
4862             }
4863         }
4864 
        /**
         * Saves traversal state upon encountering a forwarding node, so
         * the position in table t can be resumed later.  Reuses a frame
         * from the spare list when available to avoid allocation.
         *
         * @param t the table being left
         * @param i current slot index in t
         * @param n length of t
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse a freed frame if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;             // push onto the state stack
            stack = s;
        }
4877 
        /**
         * Possibly pops saved traversal state: while the advanced index
         * passes the end of the current table, restores the outer table's
         * position, recycling popped frames onto the spare list.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                // Finished forwarded slots at this level: pop back out.
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;    // fully unwound: move to next base slot
        }
4893     }
4894 
4895     /*
4896      * Task classes. Coded in a regular but ugly format/style to
4897      * simplify checks that each variant differs in the right way from
4898      * others. The null screenings exist because compilers cannot tell
4899      * that we've already null-checked task arguments, so we force
4900      * simplest hoisted bypass to help avoid convoluted traps.
4901      */
    /**
     * ForkJoin task applying the given action to every key in its
     * region, forking subtasks for upper halves while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process remaining region sequentially.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }
4928 
    /**
     * ForkJoin task applying the given action to every value in its
     * region, forking subtasks for upper halves while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process remaining region sequentially.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }
4955 
    /**
     * ForkJoin task applying the given action to every entry in its
     * region, forking subtasks for upper halves while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Node implements Map.Entry, so nodes are passed directly.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }
4982 
    /**
     * ForkJoin task applying the given (key, value) action to every
     * mapping in its region, forking subtasks while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process remaining region sequentially.
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
5009 
    /**
     * ForkJoin task applying a transformer to each key in its region
     * and the action to each non-null transformed result (null results
     * are skipped), forking subtasks while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values are acted upon.
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5042 
    /**
     * ForkJoin task applying a transformer to each value in its region
     * and the action to each non-null transformed result (null results
     * are skipped), forking subtasks while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values are acted upon.
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5075 
    /**
     * ForkJoin task applying a transformer to each entry in its region
     * and the action to each non-null transformed result (null results
     * are skipped), forking subtasks while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values are acted upon.
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5108 
    /**
     * ForkJoin task applying a (key, value) transformer to each mapping
     * in its region and the action to each non-null transformed result
     * (null results are skipped), forking subtasks while batch permits.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {  // null screen
                // Fork a subtask for the upper half [h, f) of the
                // region; this task keeps [i, h) (baseLimit cut to h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    // Only non-null transformed values are acted upon.
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5142 
    /**
     * ForkJoin task searching keys for a non-null result of the search
     * function.  The first non-null result (from any subtask) is
     * published to the shared AtomicReference and completes the whole
     * computation early.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {  // null screen
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;                 // someone already found it
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until a result appears (from any task) or the
                // region is exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // no match in this region
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // Publish first result; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5186 
    /**
     * ForkJoin task searching values for a non-null result of the
     * search function.  The first non-null result (from any subtask) is
     * published to the shared AtomicReference and completes the whole
     * computation early.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {  // null screen
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;                 // someone already found it
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until a result appears (from any task) or the
                // region is exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // no match in this region
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // Publish first result; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5230 
    /**
     * ForkJoin task searching entries for a non-null result of the
     * search function.  The first non-null result (from any subtask) is
     * published to the shared AtomicReference and completes the whole
     * computation early.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {  // null screen
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;                 // someone already found it
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until a result appears (from any task) or the
                // region is exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // no match in this region
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;                 // found: done with this task
                    }
                }
            }
        }
    }
5274 
    /**
     * ForkJoin task searching (key, value) mappings for a non-null
     * result of the search function.  The first non-null result (from
     * any subtask) is published to the shared AtomicReference and
     * completes the whole computation early.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;   // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {  // null screen
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;                 // someone already found it
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                // Scan until a result appears (from any task) or the
                // region is exhausted.
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();  // no match in this region
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // Publish first result; complete the whole tree.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5318 
    /**
     * ForkJoin task reducing all keys in its region with the given
     * reducer.  Forked subtasks are linked through the rights list, and
     * their results are merged bottom-up along the CountedCompleter
     * completion chain.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;                               // this task's merged result
        ReduceKeysTask<K,V> rights, nextRight;  // forked-subtask linkage
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {  // null screen
                // Fork subtasks for upper halves, chained on rights so
                // their results can be merged after completion.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own region.
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    // Defensive null screen on u before reducing.
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // For each completed ancestor, fold in the results of
                // its finished right subtasks.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5366 
    /**
     * ForkJoin task reducing all values in its region with the given
     * reducer.  Forked subtasks are linked through the rights list, and
     * their results are merged bottom-up along the CountedCompleter
     * completion chain.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;                                 // this task's merged result
        ReduceValuesTask<K,V> rights, nextRight;  // forked-subtask linkage
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {  // null screen
                // Fork subtasks for upper halves, chained on rights so
                // their results can be merged after completion.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own region.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                CountedCompleter<?> c;
                // For each completed ancestor, fold in the results of
                // its finished right subtasks.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5414 
    /**
     * ForkJoin task reducing all entries in its region with the given
     * reducer.  Forked subtasks are linked through the rights list, and
     * their results are merged bottom-up along the CountedCompleter
     * completion chain.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;                     // this task's merged result
        ReduceEntriesTask<K,V> rights, nextRight;  // forked-subtask linkage
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {  // null screen
                // Fork subtasks for upper halves, chained on rights so
                // their results can be merged after completion.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own region (nodes act as entries).
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                CountedCompleter<?> c;
                // For each completed ancestor, fold in the results of
                // its finished right subtasks.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5460 
    /**
     * ForkJoin task transforming each key in its region and reducing
     * the non-null transformed results (null results are skipped).
     * Forked subtasks are linked through the rights list, and their
     * results are merged bottom-up along the CountedCompleter
     * completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                  // this task's merged result
        MapReduceKeysTask<K,V,U> rights, nextRight;  // forked-subtask linkage
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {  // null screen
                // Fork subtasks for upper halves, chained on rights so
                // their results can be merged after completion.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform and reduce this task's own region.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // For each completed ancestor, fold in the results of
                // its finished right subtasks.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5514 
    /**
     * ForkJoin task transforming each value in its region and reducing
     * the non-null transformed results (null results are skipped).
     * Forked subtasks are linked through the rights list, and their
     * results are merged bottom-up along the CountedCompleter
     * completion chain.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                                    // this task's merged result
        MapReduceValuesTask<K,V,U> rights, nextRight;  // forked-subtask linkage
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {  // null screen
                // Fork subtasks for upper halves, chained on rights so
                // their results can be merged after completion.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Transform and reduce this task's own region.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                // For each completed ancestor, fold in the results of
                // its finished right subtasks.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5568 
    /**
     * ForkJoin task that reduces the non-null results of applying
     * {@code transformer} to every entry (node) in this task's slice of
     * the table, combining them with {@code reducer}; result is null if
     * no transformation yields a non-null value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer; // entry -> U; null results are skipped
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result; // reduction over this task's subrange (null if none)
        MapReduceEntriesTask<K,V,U> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Leaf work: reduce over this task's own subrange; the node
                // itself is passed as the Map.Entry.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5622 
    /**
     * ForkJoin task that reduces the non-null results of applying
     * {@code transformer} to every (key, value) pair in this task's
     * slice of the table, combining them with {@code reducer}; result
     * is null if no transformation yields a non-null value.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer; // (K,V) -> U; null results are skipped
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result; // reduction over this task's subrange (null if none)
        MapReduceMappingsTask<K,V,U> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Leaf work: reduce this task's own subrange, applying the
                // transformer to each node's key and value.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5676 
    /**
     * ForkJoin task that reduces the double-valued transformation of
     * every key in this task's slice of the table with {@code reducer},
     * starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer; // K -> double
        final DoubleBinaryOperator reducer;
        final double basis; // identity element for the reduction
        double result; // reduction over this task's subrange
        MapReduceKeysToDoubleTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce transformed keys over this task's subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5726 
    /**
     * ForkJoin task that reduces the double-valued transformation of
     * every value in this task's slice of the table with {@code reducer},
     * starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer; // V -> double
        final DoubleBinaryOperator reducer;
        final double basis; // identity element for the reduction
        double result; // reduction over this task's subrange
        MapReduceValuesToDoubleTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce transformed values over this task's subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5776 
    /**
     * ForkJoin task that reduces the double-valued transformation of
     * every entry (node) in this task's slice of the table with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer; // entry -> double
        final DoubleBinaryOperator reducer;
        final double basis; // identity element for the reduction
        double result; // reduction over this task's subrange
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce over this task's subrange; the node
                // itself is passed as the Map.Entry.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5826 
    /**
     * ForkJoin task that reduces the double-valued transformation of
     * every (key, value) pair in this task's slice of the table with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer; // (K,V) -> double
        final DoubleBinaryOperator reducer;
        final double basis; // identity element for the reduction
        double result; // reduction over this task's subrange
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce over this task's subrange, applying the
                // transformer to each node's key and value.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5876 
    /**
     * ForkJoin task that reduces the long-valued transformation of
     * every key in this task's slice of the table with {@code reducer},
     * starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer; // K -> long
        final LongBinaryOperator reducer;
        final long basis; // identity element for the reduction
        long result; // reduction over this task's subrange
        MapReduceKeysToLongTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce transformed keys over this task's subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5926 
    /**
     * ForkJoin task that reduces the long-valued transformation of
     * every value in this task's slice of the table with {@code reducer},
     * starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer; // V -> long
        final LongBinaryOperator reducer;
        final long basis; // identity element for the reduction
        long result; // reduction over this task's subrange
        MapReduceValuesToLongTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce transformed values over this task's subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5976 
    /**
     * ForkJoin task that reduces the long-valued transformation of
     * every entry (node) in this task's slice of the table with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer; // entry -> long
        final LongBinaryOperator reducer;
        final long basis; // identity element for the reduction
        long result; // reduction over this task's subrange
        MapReduceEntriesToLongTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce over this task's subrange; the node
                // itself is passed as the Map.Entry.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6026 
    /**
     * ForkJoin task that reduces the long-valued transformation of
     * every (key, value) pair in this task's slice of the table with
     * {@code reducer}, starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer; // (K,V) -> long
        final LongBinaryOperator reducer;
        final long basis; // identity element for the reduction
        long result; // reduction over this task's subrange
        MapReduceMappingsToLongTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce over this task's subrange, applying the
                // transformer to each node's key and value.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6076 
    /**
     * ForkJoin task that reduces the int-valued transformation of
     * every key in this task's slice of the table with {@code reducer},
     * starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer; // K -> int
        final IntBinaryOperator reducer;
        final int basis; // identity element for the reduction
        int result; // reduction over this task's subrange
        MapReduceKeysToIntTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce transformed keys over this task's subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6126 
    /**
     * ForkJoin task that reduces the int-valued transformation of
     * every value in this task's slice of the table with {@code reducer},
     * starting from the identity value {@code basis}.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer; // V -> int
        final IntBinaryOperator reducer;
        final int basis; // identity element for the reduction
        int result; // reduction over this task's subrange
        MapReduceValuesToIntTask<K,V> rights, nextRight; // linked stack of forked right-hand subtasks
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis; // each subtask also starts from basis
                // Split: fork right halves of the remaining range until the
                // batch budget is exhausted; >>> 1 midpoint avoids overflow.
                // batch and baseLimit are mutated inside the constructor call.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Leaf work: reduce transformed values over this task's subrange.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                // Completion: fold each finished right subtask's result into
                // its parent while walking up the CountedCompleter chain.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6176 
    /**
     * ForkJoin task that maps each map entry to an {@code int} with a
     * {@link ToIntFunction} and reduces the results with an
     * {@link IntBinaryOperator}, starting from {@code basis}.
     * Subtasks are linked through {@code rights}/{@code nextRight} and
     * merged via the CountedCompleter completion protocol.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer; // entry -> int
        final IntBinaryOperator reducer;                 // combines two ints
        final int basis;                                 // identity for the reduction
        int result;                                      // this task's reduced value
        MapReduceEntriesToIntTask<K,V> rights, nextRight; // chain of forked right subtasks
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        // Boxed view of the reduction result; valid once the task completes.
        public final Integer getRawResult() { return result; }
        /**
         * Forks right-half subtasks while splitting is profitable, reduces
         * the entries in this task's residual range, then absorbs the
         * results of completed subtasks.
         */
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            // Defensive: proceed only if both functions were supplied.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis; // accumulator seeded with the basis
                // Split loop; the condition halves batch and shrinks baseLimit
                // to the midpoint h as side effects of each fork.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce entries (nodes are themselves Map.Entry) in this range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                // Completion pass: fold in finished right subtasks, unlinking
                // each from the rights chain as it is absorbed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6226 
    /**
     * ForkJoin task that maps each (key, value) pair to an {@code int} with
     * a {@link ToIntBiFunction} and reduces the results with an
     * {@link IntBinaryOperator}, starting from {@code basis}.
     * Subtasks are linked through {@code rights}/{@code nextRight} and
     * merged via the CountedCompleter completion protocol.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer; // (key, value) -> int
        final IntBinaryOperator reducer;                         // combines two ints
        final int basis;                                         // identity for the reduction
        int result;                                              // this task's reduced value
        MapReduceMappingsToIntTask<K,V> rights, nextRight;       // chain of forked right subtasks
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        // Boxed view of the reduction result; valid once the task completes.
        public final Integer getRawResult() { return result; }
        /**
         * Forks right-half subtasks while splitting is profitable, reduces
         * the mappings in this task's residual range, then absorbs the
         * results of completed subtasks.
         */
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            // Defensive: proceed only if both functions were supplied.
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis; // accumulator seeded with the basis
                // Split loop; the condition halves batch and shrinks baseLimit
                // to the midpoint h as side effects of each fork.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce the (key, value) pairs in this task's range.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                // Completion pass: fold in finished right subtasks, unlinking
                // each from the rights chain as it is absorbed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6276 
6277     // Unsafe mechanics
6278     private static final sun.misc.Unsafe U;
6279     private static final long SIZECTL;
6280     private static final long TRANSFERINDEX;
6281     private static final long BASECOUNT;
6282     private static final long CELLSBUSY;
6283     private static final long CELLVALUE;
6284     private static final long ABASE;
6285     private static final int ASHIFT;
6286 
6287     static {
6288         try {
6289             U = sun.misc.Unsafe.getUnsafe();
6290             Class<?> k = ConcurrentHashMap.class;
6291             SIZECTL = U.objectFieldOffset
6292                 (k.getDeclaredField("sizeCtl"));
6293             TRANSFERINDEX = U.objectFieldOffset
6294                 (k.getDeclaredField("transferIndex"));
6295             BASECOUNT = U.objectFieldOffset
6296                 (k.getDeclaredField("baseCount"));
6297             CELLSBUSY = U.objectFieldOffset
6298                 (k.getDeclaredField("cellsBusy"));
6299             Class<?> ck = CounterCell.class;
6300             CELLVALUE = U.objectFieldOffset
6301                 (ck.getDeclaredField("value"));
6302             Class<?> ak = Node[].class;
6303             ABASE = U.arrayBaseOffset(ak);
6304             int scale = U.arrayIndexScale(ak);
6305             if ((scale & (scale - 1)) != 0)
6306                 throw new Error("data type scale not a power of two");
6307             ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6308         } catch (Exception e) {
6309             throw new Error(e);
6310         }
6311     }
6312 }