Commit e57821b6 authored by psandoz

8023463: Improvements to HashMap/LinkedHashMap use of bins/buckets and trees (red/black)

8012913: LinkedHashMap key/value/entry spliterators should report ORDERED
Reviewed-by: mduigou, forax, bchristi, alanb
Contributed-by: Doug Lea <dl@cs.oswego.edu>, Paul Sandoz <paul.sandoz@oracle.com>
Parent a576565e
@@ -25,13 +25,14 @@
package java.util;

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
/**
@@ -63,20 +64,25 @@ import java.util.function.Function;
 * structures are rebuilt) so that the hash table has approximately twice the
 * number of buckets.
 *
 * <p>As a general rule, the default load factor (.75) offers a good
 * tradeoff between time and space costs.  Higher values decrease the
 * space overhead but increase the lookup cost (reflected in most of
 * the operations of the <tt>HashMap</tt> class, including
 * <tt>get</tt> and <tt>put</tt>).  The expected number of entries in
 * the map and its load factor should be taken into account when
 * setting its initial capacity, so as to minimize the number of
 * rehash operations.  If the initial capacity is greater than the
 * maximum number of entries divided by the load factor, no rehash
 * operations will ever occur.
 *
 * <p>If many mappings are to be stored in a <tt>HashMap</tt>
 * instance, creating it with a sufficiently large capacity will allow
 * the mappings to be stored more efficiently than letting it perform
 * automatic rehashing as needed to grow the table.  Note that using
 * many keys with the same {@code hashCode()} is a sure way to slow
 * down performance of any hash table. To ameliorate impact, when keys
 * are {@link Comparable}, this class may use comparison order among
 * keys to help break ties.
 *
 * <p><strong>Note that this implementation is not synchronized.</strong>
 * If multiple threads access a hash map concurrently, and at least one of
@@ -128,11 +134,100 @@ import java.util.function.Function;
 * @see Hashtable
 * @since 1.2
 */
public class HashMap<K,V> extends AbstractMap<K,V>
    implements Map<K,V>, Cloneable, Serializable {

    private static final long serialVersionUID = 362498820763181265L;

    /*
     * Implementation notes.
     *
     * This map usually acts as a binned (bucketed) hash table, but
     * when bins get too large, they are transformed into bins of
     * TreeNodes, each structured similarly to those in
     * java.util.TreeMap. Most methods try to use normal bins, but
     * relay to TreeNode methods when applicable (simply by checking
     * instanceof a node).  Bins of TreeNodes may be traversed and
     * used like any others, but additionally support faster lookup
     * when overpopulated. However, since the vast majority of bins in
     * normal use are not overpopulated, checking for existence of
     * tree bins may be delayed in the course of table methods.
     *
     * Tree bins (i.e., bins whose elements are all TreeNodes) are
     * ordered primarily by hashCode, but in the case of ties, if two
     * elements are of the same "class C implements Comparable<C>"
     * type, then their compareTo method is used for ordering. (We
     * conservatively check generic types via reflection to validate
     * this -- see method comparableClassFor.)  The added complexity
     * of tree bins is worthwhile in providing worst-case O(log n)
     * operations when keys either have distinct hashes or are
     * orderable.  Thus, performance degrades gracefully under
     * accidental or malicious usages in which hashCode() methods
     * return values that are poorly distributed, as well as those in
     * which many keys share a hashCode, so long as they are also
     * Comparable. (If neither of these apply, we may waste about a
     * factor of two in time and space compared to taking no
     * precautions. But the only known cases stem from poor user
     * programming practices that are already so slow that this makes
     * little difference.)
     *
     * Because TreeNodes are about twice the size of regular nodes, we
     * use them only when bins contain enough nodes to warrant use
     * (see TREEIFY_THRESHOLD). And when they become too small (due to
     * removal or resizing) they are converted back to plain bins.  In
     * usages with well-distributed user hashCodes, tree bins are
     * rarely used.  Ideally, under random hashCodes, the frequency of
     * nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average for the default resizing
     * threshold of 0.75, although with a large variance because of
     * resizing granularity. Ignoring variance, the expected
     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
     * factorial(k)). The first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
     *
     * The root of a tree bin is normally its first node.  However,
     * sometimes (currently only upon Iterator.remove), the root might
     * be elsewhere, but can be recovered following parent links
     * (method TreeNode.root()).
     *
     * All applicable internal methods accept a hash code as an
     * argument (as normally supplied from a public method), allowing
     * them to call each other without recomputing user hashCodes.
     * Most internal methods also accept a "tab" argument, that is
     * normally the current table, but may be a new or old one when
     * resizing or converting.
     *
     * When bin lists are treeified, split, or untreeified, we keep
     * them in the same relative access/traversal order (i.e., field
     * Node.next) to better preserve locality, and to slightly
     * simplify handling of splits and traversals that invoke
     * iterator.remove. When using comparators on insertion, to keep a
     * total ordering (or as close as is required here) across
     * rebalancings, we compare classes and identityHashCodes as
     * tie-breakers.
     *
     * The use of, and transitions among, plain vs tree modes are
     * complicated by the existence of subclass LinkedHashMap. See
     * below for hook methods defined to be invoked upon insertion,
     * removal and access that allow LinkedHashMap internals to
     * otherwise remain independent of these mechanics. (This also
     * requires that a map instance be passed to some utility methods
     * that may create new nodes.)
     *
     * The concurrent-programming-like SSA-based coding style helps
     * avoid aliasing errors amid all of the twisty pointer operations.
     */
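    // Editor's sketch (not part of the patch): a quick numerical check of
    // the Poisson frequencies quoted above, P(k) = exp(-0.5) * 0.5^k / k!.
    // The helper name is hypothetical, shown for illustration only.
    static void printExpectedBinSizeFrequencies() {
        double p = Math.exp(-0.5);              // P(0) = 0.60653066
        for (int k = 0; k <= 8; k++) {
            System.out.printf("%d: %.8f%n", k, p);
            p *= 0.5 / (k + 1);                 // P(k+1) = P(k) * 0.5 / (k + 1)
        }
    }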
    /**
     * The default initial capacity - MUST be a power of two.
@@ -152,140 +247,111 @@ public class HashMap<K,V>
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  Bins are converted to trees when adding an element to a
     * bin with at least this many nodes. The value must be greater
     * than 2 and should be at least 8 to mesh with assumptions in
     * tree removal about conversion back to plain bins upon
     * shrinkage.
     */
    static final int TREEIFY_THRESHOLD = 8;

    /**
     * The bin count threshold for untreeifying a (split) bin during a
     * resize operation. Should be less than TREEIFY_THRESHOLD, and at
     * most 6 to mesh with shrinkage detection under removal.
     */
    static final int UNTREEIFY_THRESHOLD = 6;

    /**
     * The smallest table capacity for which bins may be treeified.
     * (Otherwise the table is resized if too many nodes in a bin.)
     * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
     * between resizing and treeification thresholds.
     */
    static final int MIN_TREEIFY_CAPACITY = 64;

    /**
     * Basic hash bin node, used for most entries.  (See below for
     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;
        final K key;
        V value;
        Node<K,V> next;

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        public final K getKey()        { return key; }
        public final V getValue()      { return value; }
        public final String toString() { return key + "=" + value; }

        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        public final V setValue(V newValue) {
            V oldValue = value;
            value = newValue;
            return oldValue;
        }

        public final boolean equals(Object o) {
            if (o == this)
                return true;
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }
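    // Editor's illustration (not part of the patch): keys that all share a
    // hash code pile into one bin; once the bin holds TREEIFY_THRESHOLD (8)
    // nodes and the table has reached MIN_TREEIFY_CAPACITY (64), the bin is
    // treeified, and Comparable keys keep lookups at O(log n) rather than
    // O(n). The demo class and Key type are hypothetical.
    static final class CollidingKeyDemo {
        static final class Key implements Comparable<Key> {
            final int id;
            Key(int id) { this.id = id; }
            public int hashCode() { return 42; }  // force every key into one bin
            public boolean equals(Object o) {
                return o instanceof Key && ((Key) o).id == id;
            }
            public int compareTo(Key k) { return Integer.compare(id, k.id); }
        }
        public static void main(String[] args) {
            Map<Key, Integer> m = new HashMap<>();
            for (int i = 0; i < 1000; i++)
                m.put(new Key(i), i);
            System.out.println(m.get(new Key(999))); // 999, found via tree lookup
        }
    }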
    /* ---------------- Static utilities -------------- */

    /**
     * Computes key.hashCode() and spreads (XORs) higher bits of hash
     * to lower.  Because the table uses power-of-two masking, sets of
     * hashes that vary only in bits above the current mask will
     * always collide. (Among known examples are sets of Float keys
     * holding consecutive whole numbers in small tables.)  So we
     * apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading. Because many common sets of hashes
     * are already reasonably distributed (so don't benefit from
     * spreading), and because we use trees to handle large sets of
     * collisions in bins, we just XOR some shifted bits in the
     * cheapest possible way to reduce systematic lossage, as well as
     * to incorporate impact of the highest bits that would otherwise
     * never be used in index calculations because of table bounds.
     */
    static final int hash(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }
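    // Editor's sketch (not part of the patch): why the high-bit spread
    // matters. Hash codes that differ only above the index mask would all
    // land in the same bucket; XORing in the upper half separates them.
    // The helper name is hypothetical, shown for illustration only.
    static void demoHashSpread() {
        int mask = 16 - 1;                      // a small power-of-two table
        for (int v = 1; v <= 4; v++) {
            int raw = v << 16;                  // differs only in upper bits
            System.out.printf("raw index %d, spread index %d%n",
                              raw & mask,       // 0 for every value: collision
                              hash(raw) & mask); // 1, 2, 3, 4: spread apart
        }
    }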
    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
@@ -295,1899 +361,931 @@ public class HashMap<K,V>
        return null;
    }
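    // Editor's illustration (not part of the patch): comparableClassFor only
    // accepts classes that implement Comparable<themselves>. A subclass that
    // merely inherits Comparable<Base> is rejected (its type argument is
    // Base, not the subclass), so such keys fall back to the identity-based
    // tie-breaking described in the implementation notes. Hypothetical demo.
    static final class ComparableScreeningDemo {
        static class Base implements Comparable<Base> {
            public int compareTo(Base o) { return 0; }
        }
        static class Derived extends Base { }   // Comparable<Base>, not <Derived>

        public static void main(String[] args) {
            System.out.println(comparableClassFor("x"));           // class java.lang.String
            System.out.println(comparableClassFor(0));             // class java.lang.Integer
            System.out.println(comparableClassFor(new Derived())); // null
        }
    }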
    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }
    /**
     * Returns a power of two size for the given target capacity.
     */
    static final int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }
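    // Editor's sketch (not part of the patch): tableSizeFor rounds a
    // requested capacity up to the next power of two by smearing the
    // highest set bit of (cap - 1) into every lower position, then adding
    // one. The helper name is hypothetical, shown for illustration only.
    static void demoTableSizeFor() {
        for (int cap : new int[] {1, 2, 3, 17, 64, 100, 1000})
            System.out.println(cap + " -> " + tableSizeFor(cap));
        // prints 1->1, 2->2, 3->4, 17->32, 64->64, 100->128, 1000->1024
    }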
    /* ---------------- Fields -------------- */

    /**
     * The table, initialized on first use, and resized as
     * necessary. When allocated, length is always a power of two.
     * (We also tolerate length zero in some operations to allow
     * bootstrapping mechanics that are currently not needed.)
     */
    transient Node<K,V>[] table;

    /**
     * Holds cached entrySet(). Note that AbstractMap fields are used
     * for keySet() and values().
     */
    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The number of times this HashMap has been structurally modified.
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * The next size value at which to resize (capacity * load factor).
     *
     * @serial
     */
    // (The javadoc description is true upon serialization.
    // Additionally, if the table array has not been allocated, this
    // field holds the initial array capacity, or zero signifying
    // DEFAULT_INITIAL_CAPACITY.)
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;
    /* ---------------- Public operations -------------- */

    /**
     * Constructs an empty <tt>HashMap</tt> with the specified initial
     * capacity and load factor.
     *
     * @param  initialCapacity the initial capacity
     * @param  loadFactor      the load factor
     * @throws IllegalArgumentException if the initial capacity is negative
     *         or the load factor is nonpositive
     */
    public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " +
                                               initialCapacity);
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        this.threshold = tableSizeFor(initialCapacity);
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the specified initial
     * capacity and the default load factor (0.75).
     *
     * @param  initialCapacity the initial capacity.
     * @throws IllegalArgumentException if the initial capacity is negative.
     */
    public HashMap(int initialCapacity) {
        this(initialCapacity, DEFAULT_LOAD_FACTOR);
    }

    /**
     * Constructs an empty <tt>HashMap</tt> with the default initial capacity
     * (16) and the default load factor (0.75).
     */
    public HashMap() {
        this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
    }
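    // Editor's usage sketch (not part of the patch), following the class
    // javadoc advice: pre-size so that expected / loadFactor stays below the
    // resize threshold and no rehashing occurs while filling. Here the
    // requested capacity 1334 rounds up to 2048, whose threshold
    // 2048 * 0.75 = 1536 exceeds 1000. Hypothetical demo class.
    static final class PresizingDemo {
        public static void main(String[] args) {
            int expected = 1000;
            Map<Integer, String> m =
                new HashMap<>((int) (expected / 0.75f) + 1);  // capacity 1334 -> 2048
            for (int i = 0; i < expected; i++)
                m.put(i, "v" + i);                            // no resize while filling
        }
    }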
    /**
     * Constructs a new <tt>HashMap</tt> with the same mappings as the
     * specified <tt>Map</tt>.  The <tt>HashMap</tt> is created with
     * default load factor (0.75) and an initial capacity sufficient to
     * hold the mappings in the specified <tt>Map</tt>.
     *
     * @param   m the map whose mappings are to be placed in this map
     * @throws  NullPointerException if the specified map is null
     */
    public HashMap(Map<? extends K, ? extends V> m) {
        this.loadFactor = DEFAULT_LOAD_FACTOR;
        putMapEntries(m, false);
    }
    /**
     * Implements Map.putAll and Map constructor
     *
     * @param m the map
     * @param evict false when initially constructing this map, else
     * true (relayed to method afterNodeInsertion).
     */
    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            }
            else if (s > threshold)
                resize();
            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }
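    // Editor's check of the pre-sizing arithmetic above (not part of the
    // patch), with assumed values: copying a 100-entry map at the default
    // load factor. tableSizeFor(134) is 256, whose threshold 256 * 0.75 = 192
    // exceeds 100, so the copy never resizes. Hypothetical helper.
    static void demoCopyPreSizing() {
        float ft = (100 / 0.75f) + 1.0F;        // 134.33...
        System.out.println((int) ft);           // 134; tableSizeFor(134) == 256
    }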
    /**
     * Returns the number of key-value mappings in this map.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        return size;
    }

    /**
     * Returns <tt>true</tt> if this map contains no key-value mappings.
     *
     * @return <tt>true</tt> if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        return size == 0;
    }
    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
     * key.equals(k))}, then this method returns {@code v}; otherwise
     * it returns {@code null}.  (There can be at most one such mapping.)
     *
     * <p>A return value of {@code null} does not <i>necessarily</i>
     * indicate that the map contains no mapping for the key; it's also
     * possible that the map explicitly maps the key to {@code null}.
     * The {@link #containsKey containsKey} operation may be used to
     * distinguish these two cases.
     *
     * @see #put(Object, Object)
     */
    public V get(Object key) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? null : e.value;
    }

    /**
     * Implements Map.get and related methods
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or null if none
     */
    final Node<K,V> getNode(int hash, Object key) {
        Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (first = tab[(n - 1) & hash]) != null) {
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
            if ((e = first.next) != null) {
                if (first instanceof TreeNode)
                    return ((TreeNode<K,V>)first).getTreeNode(hash, key);
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }
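    // Editor's usage sketch (not part of the patch): hash(null) is 0, so a
    // null key lives in bin 0 like any other mapping, and get cannot
    // distinguish "absent" from "mapped to null" without containsKey.
    // Hypothetical demo class.
    static final class NullKeyDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<>();
            m.put(null, 1);
            System.out.println(m.get(null));              // 1
            System.out.println(m.get("missing"));         // null: absent or null-valued
            System.out.println(m.containsKey("missing")); // false: it was absent
        }
    }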
    /**
     * Returns <tt>true</tt> if this map contains a mapping for the
     * specified key.
     *
     * @param   key   The key whose presence in this map is to be tested
     * @return <tt>true</tt> if this map contains a mapping for the specified
     * key.
     */
    public boolean containsKey(Object key) {
        return getNode(hash(key), key) != null;
    }
    /**
     * Associates the specified value with the specified key in this map.
     * If the map previously contained a mapping for the key, the old
     * value is replaced.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with <tt>key</tt>, or
     *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
     *         (A <tt>null</tt> return can also indicate that the map
     *         previously associated <tt>null</tt> with <tt>key</tt>.)
     */
    public V put(K key, V value) {
        return putVal(hash(key), key, value, false, true);
    }
    /**
     * Implements Map.put and related methods
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to put
     * @param onlyIfAbsent if true, don't change existing value
     * @param evict if false, the table is in creation mode.
     * @return previous value, or null if none
     */
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((p = tab[i = (n - 1) & hash]) == null)
            tab[i] = newNode(hash, key, value, null);
        else {
            Node<K,V> e; K k;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            else {
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e);
                return oldValue;
            }
        }
        ++modCount;
        ++size;
        afterNodeInsertion(evict);
        return null;
    }
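    // Editor's usage sketch (not part of the patch): putVal backs both put
    // (onlyIfAbsent = false) and putIfAbsent (onlyIfAbsent = true), and
    // returns the previous value if one existed. Hypothetical demo class.
    static final class PutContractDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new HashMap<>();
            System.out.println(m.put("k", 1));         // null: no previous mapping
            System.out.println(m.put("k", 2));         // 1: old value replaced
            System.out.println(m.putIfAbsent("k", 3)); // 2: existing value kept
        }
    }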
    /**
     * Initializes or doubles table size.  If null, allocates in
     * accord with initial capacity target held in field threshold.
     * Otherwise, because we are using power-of-two expansion, the
     * elements from each bin must either stay at same index, or move
     * with a power of two offset in the new table.
     *
     * @return the table
     */
    final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;
                    if (e.next == null)
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }
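    /*
     * Illustrative sketch (not part of this changeset): the
     * (e.hash & oldCap) test above splits a bin without rehashing.
     * With oldCap = 16, a node with hash 5 (0b00101) has (5 & 16) == 0
     * and stays at index 5, while hash 21 (0b10101) has (21 & 16) != 0
     * and moves to index 5 + 16 = 21:
     *
     *   int oldCap = 16;
     *   assert (5  & (2 * oldCap - 1)) == (5  & (oldCap - 1));          // lo
     *   assert (21 & (2 * oldCap - 1)) == (21 & (oldCap - 1)) + oldCap; // hi
     */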
    /**
     * Replaces all linked nodes in bin at index for given hash unless
     * table is too small, in which case resizes instead.
     */
    final void treeifyBin(Node<K,V>[] tab, int hash) {
        int n, index; Node<K,V> e;
        if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
            resize();
        else if ((e = tab[index = (n - 1) & hash]) != null) {
            TreeNode<K,V> hd = null, tl = null;
            do {
                TreeNode<K,V> p = replacementTreeNode(e, null);
                if (tl == null)
                    hd = p;
                else {
                    p.prev = tl;
                    tl.next = p;
                }
                tl = p;
            } while ((e = e.next) != null);
            if ((tab[index] = hd) != null)
                hd.treeify(tab);
        }
    }
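    /*
     * Illustrative sketch (not part of this changeset): a hypothetical key
     * class whose hashCode() is constant sends every entry to one bin; once
     * a chain reaches TREEIFY_THRESHOLD and the table has at least
     * MIN_TREEIFY_CAPACITY slots, treeifyBin() turns it into a red-black
     * tree, so worst-case lookups are O(log n) rather than O(n):
     *
     *   final class Colliding implements Comparable<Colliding> {
     *       final int id;
     *       Colliding(int id) { this.id = id; }
     *       public int hashCode() { return 42; }          // all collide
     *       public boolean equals(Object o) {
     *           return o instanceof Colliding && ((Colliding) o).id == id;
     *       }
     *       public int compareTo(Colliding c) { return Integer.compare(id, c.id); }
     *   }
     */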
    /**
     * Copies all of the mappings from the specified map to this map.
     * These mappings will replace any mappings that this map had for
     * any of the keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     * @throws NullPointerException if the specified map is null
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        putMapEntries(m, true);
    }

    /**
     * Removes the mapping for the specified key from this map if present.
     *
     * @param  key key whose mapping is to be removed from the map
     * @return the previous value associated with <tt>key</tt>, or
     *         <tt>null</tt> if there was no mapping for <tt>key</tt>.
     *         (A <tt>null</tt> return can also indicate that the map
     *         previously associated <tt>null</tt> with <tt>key</tt>.)
     */
    public V remove(Object key) {
        Node<K,V> e;
        return (e = removeNode(hash(key), key, null, false, true)) == null ?
            null : e.value;
    }
    /**
     * Implements Map.remove and related methods
     *
     * @param hash hash for key
     * @param key the key
     * @param value the value to match if matchValue, else ignored
     * @param matchValue if true only remove if value is equal
     * @param movable if false do not move other nodes while removing
     * @return the node, or null if none
     */
    final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    tab[index] = node.next;
                else
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);
                return node;
            }
        }
        return null;
    }
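    /*
     * Illustrative sketch (not part of this changeset): matchValue backs
     * the two-argument remove, which deletes only on an exact match:
     *
     *   Map<String,String> m = new HashMap<>();
     *   m.put("k", "v1");
     *   m.remove("k", "v2");   // false: value differs, mapping kept
     *   m.remove("k", "v1");   // true:  mapping removed
     */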
    /**
     * Removes all of the mappings from this map.
     * The map will be empty after this call returns.
     */
    public void clear() {
        Node<K,V>[] tab;
        modCount++;
        if ((tab = table) != null && size > 0) {
            size = 0;
            for (int i = 0; i < tab.length; ++i)
                tab[i] = null;
        }
    }

    /**
     * Returns <tt>true</tt> if this map maps one or more keys to the
     * specified value.
     *
     * @param value value whose presence in this map is to be tested
     * @return <tt>true</tt> if this map maps one or more keys to the
     *         specified value
     */
    public boolean containsValue(Object value) {
        Node<K,V>[] tab; V v;
        if ((tab = table) != null && size > 0) {
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next) {
                    if ((v = e.value) == value ||
                        (value != null && value.equals(v)))
                        return true;
                }
            }
        }
        return false;
    }
    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation), the results of
     * the iteration are undefined.  The set supports element removal,
     * which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
     * operations.
     *
     * @return a set view of the keys contained in this map
     */
    public Set<K> keySet() {
        Set<K> ks;
        return (ks = keySet) == null ? (keySet = new KeySet()) : ks;
    }

    final class KeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<K> iterator()     { return new KeyIterator(); }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super K> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.key);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  If the map is
     * modified while an iteration over the collection is in progress
     * (except through the iterator's own <tt>remove</tt> operation),
     * the results of the iteration are undefined.  The collection
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
     * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
     * support the <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a view of the values contained in this map
     */
    public Collection<V> values() {
        Collection<V> vs;
        return (vs = values) == null ? (values = new Values()) : vs;
    }
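    /*
     * Illustrative sketch (not part of this changeset): both views are
     * live, so removal through a view removes the mapping itself:
     *
     *   Map<String,Integer> m = new HashMap<>();
     *   m.put("a", 1); m.put("b", 2);
     *   m.keySet().remove("a");          // backed by the map
     *   assert !m.containsKey("a") && m.values().size() == 1;
     */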
    final class Values extends AbstractCollection<V> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<V> iterator()     { return new ValueIterator(); }
        public final boolean contains(Object o) { return containsValue(o); }
        public final Spliterator<V> spliterator() {
            return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super V> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.value);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation, or through the
     * <tt>setValue</tt> operation on a map entry returned by the
     * iterator) the results of the iteration are undefined.  The set
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
     * <tt>clear</tt> operations.  It does not support the
     * <tt>add</tt> or <tt>addAll</tt> operations.
     *
     * @return a set view of the mappings contained in this map
     */
    public Set<Map.Entry<K,V>> entrySet() {
        Set<Map.Entry<K,V>> es;
        return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
    }

    final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new EntryIterator();
        }
        public final boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Node<K,V> candidate = getNode(hash(key), key);
            return candidate != null && candidate.equals(e);
        }
        public final boolean remove(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
                Object key = e.getKey();
                Object value = e.getValue();
                return removeNode(hash(key), key, value, true, true) != null;
            }
            return false;
        }
        public final Spliterator<Map.Entry<K,V>> spliterator() {
            return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
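    /*
     * Illustrative sketch (not part of this changeset): the only safe way
     * to remove while iterating a view is through the iterator itself:
     *
     *   Map<String,Integer> m = new HashMap<>();
     *   m.put("a", 1); m.put("b", 2);
     *   for (Iterator<Map.Entry<String,Integer>> it = m.entrySet().iterator();
     *        it.hasNext(); ) {
     *       if (it.next().getValue() == 1)
     *           it.remove();             // no ConcurrentModificationException
     *   }
     */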
    // Overrides of JDK8 Map extension methods

    public V getOrDefault(Object key, V defaultValue) {
        Node<K,V> e;
        return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
    }

    public V putIfAbsent(K key, V value) {
        return putVal(hash(key), key, value, true, true);
    }
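    /*
     * Illustrative sketch (not part of this changeset):
     *
     *   Map<String,Integer> m = new HashMap<>();
     *   assert m.getOrDefault("missing", 0) == 0;   // no mapping inserted
     *   m.putIfAbsent("k", 1);                      // inserts 1
     *   m.putIfAbsent("k", 2);                      // no-op, 1 kept
     */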
    public boolean remove(Object key, Object value) {
        return removeNode(hash(key), key, value, true, true) != null;
    }

    public boolean replace(K key, V oldValue, V newValue) {
        Node<K,V> e; V v;
        if ((e = getNode(hash(key), key)) != null &&
            ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
            e.value = newValue;
            afterNodeAccess(e);
            return true;
        }
        return false;
    }

    public V replace(K key, V value) {
        Node<K,V> e;
        if ((e = getNode(hash(key), key)) != null) {
            V oldValue = e.value;
            e.value = value;
            afterNodeAccess(e);
            return oldValue;
        }
        return null;
    }
    public V computeIfAbsent(K key,
                             Function<? super K, ? extends V> mappingFunction) {
        if (mappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
            V oldValue;
            if (old != null && (oldValue = old.value) != null) {
                afterNodeAccess(old);
                return oldValue;
            }
        }
        V v = mappingFunction.apply(key);
        if (old != null) {
            old.value = v;
            afterNodeAccess(old);
            return v;
        }
        else if (v == null)
            return null;
        else if (t != null)
            t.putTreeVal(this, tab, hash, key, v);
        else {
            tab[i] = newNode(hash, key, v, first);
            if (binCount >= TREEIFY_THRESHOLD - 1)
                treeifyBin(tab, hash);
        }
        ++modCount;
        ++size;
        afterNodeInsertion(true);
        return v;
    }
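    /*
     * Illustrative sketch (not part of this changeset): computeIfAbsent
     * is the idiomatic way to build a multimap-style structure:
     *
     *   Map<String,List<Integer>> byKey = new HashMap<>();
     *   byKey.computeIfAbsent("evens", k -> new ArrayList<>()).add(2);
     *   byKey.computeIfAbsent("evens", k -> new ArrayList<>()).add(4);
     *   assert byKey.get("evens").equals(Arrays.asList(2, 4));
     */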
    public V computeIfPresent(K key,
                              BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        Node<K,V> e; V oldValue;
        int hash = hash(key);
        if ((e = getNode(hash, key)) != null &&
            (oldValue = e.value) != null) {
            V v = remappingFunction.apply(key, oldValue);
            if (v != null) {
                e.value = v;
                afterNodeAccess(e);
                return v;
            }
            else
                removeNode(hash, key, null, false, true);
        }
        return null;
    }
    public V compute(K key,
                     BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        V oldValue = (old == null) ? null : old.value;
        V v = remappingFunction.apply(key, oldValue);
        if (old != null) {
            if (v != null) {
                old.value = v;
                afterNodeAccess(old);
            }
            else
                removeNode(hash, key, null, false, true);
        }
        else if (v != null) {
            if (t != null)
                t.putTreeVal(this, tab, hash, key, v);
            else {
                tab[i] = newNode(hash, key, v, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true);
        }
        return v;
    }
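    /*
     * Illustrative sketch (not part of this changeset): compute handles
     * both the absent and present cases, and a null result removes:
     *
     *   Map<String,Integer> hits = new HashMap<>();
     *   hits.compute("page", (k, v) -> (v == null) ? 1 : v + 1);  // 1
     *   hits.compute("page", (k, v) -> (v == null) ? 1 : v + 1);  // 2
     *   hits.compute("page", (k, v) -> null);                     // removed
     */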
    public V merge(K key, V value,
                   BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        if (remappingFunction == null)
            throw new NullPointerException();
        int hash = hash(key);
        Node<K,V>[] tab; Node<K,V> first; int n, i;
        int binCount = 0;
        TreeNode<K,V> t = null;
        Node<K,V> old = null;
        if (size > threshold || (tab = table) == null ||
            (n = tab.length) == 0)
            n = (tab = resize()).length;
        if ((first = tab[i = (n - 1) & hash]) != null) {
            if (first instanceof TreeNode)
                old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
            else {
                Node<K,V> e = first; K k;
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k)))) {
                        old = e;
                        break;
                    }
                    ++binCount;
                } while ((e = e.next) != null);
            }
        }
        if (old != null) {
            V v = remappingFunction.apply(old.value, value);
            if (v != null) {
                old.value = v;
                afterNodeAccess(old);
            }
            else
                removeNode(hash, key, null, false, true);
            return v;
        }
        if (value != null) {
            if (t != null)
                t.putTreeVal(this, tab, hash, key, value);
            else {
                tab[i] = newNode(hash, key, value, first);
                if (binCount >= TREEIFY_THRESHOLD - 1)
                    treeifyBin(tab, hash);
            }
            ++modCount;
            ++size;
            afterNodeInsertion(true);
        }
        return value;
    }
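    /*
     * Illustrative sketch (not part of this changeset): merge covers the
     * counting pattern without a conditional:
     *
     *   Map<String,Integer> counts = new HashMap<>();
     *   for (String w : Arrays.asList("a", "b", "a"))
     *       counts.merge(w, 1, Integer::sum);   // {a=2, b=1}
     */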
    public void forEach(BiConsumer<? super K, ? super V> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next)
                    action.accept(e.key, e.value);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        Node<K,V>[] tab;
        if (function == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next) {
                    e.value = function.apply(e.key, e.value);
                }
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
    /* ------------------------------------------------------------ */
    // Cloning and serialization

    /**
     * Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and
     * values themselves are not cloned.
     *
     * @return a shallow copy of this map
     */
    @SuppressWarnings("unchecked")
    public Object clone() {
        HashMap<K,V> result;
        try {
            result = (HashMap<K,V>)super.clone();
        } catch (CloneNotSupportedException e) {
            // this shouldn't happen, since we are Cloneable
            throw new InternalError(e);
        }
        result.reinitialize();
        result.putMapEntries(this, false);
        return result;
    }
    // These methods are also used when serializing HashSets
    final float loadFactor() { return loadFactor; }
    final int capacity() {
        return (table != null) ? table.length :
            (threshold > 0) ? threshold :
            DEFAULT_INITIAL_CAPACITY;
    }

    /**
     * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
     * serialize it).
     *
     * @serialData The <i>capacity</i> of the HashMap (the length of the
     *             bucket array) is emitted (int), followed by the
     *             <i>size</i> (an int, the number of key-value
     *             mappings), followed by the key (Object) and value (Object)
     *             for each key-value mapping.  The key-value mappings are
     *             emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        int buckets = capacity();
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();
        s.writeInt(buckets);
        s.writeInt(size);
        internalWriteEntries(s);
    }

    /**
     * Reconstitute the {@code HashMap} instance from a stream (i.e.,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);
            @SuppressWarnings({"rawtypes","unchecked"})
            Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                putVal(hash(key), key, value, false, false);
            }
        }
    }
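    /*
     * Illustrative sketch (not part of this changeset): a roundtrip through
     * the methods above; only keys, values, and sizing hints are written,
     * never the Node/TreeNode structure:
     *
     *   HashMap<String,Integer> m = new HashMap<>();
     *   m.put("k", 1);
     *   ByteArrayOutputStream bos = new ByteArrayOutputStream();
     *   ObjectOutputStream out = new ObjectOutputStream(bos);
     *   out.writeObject(m);
     *   out.flush();
     *   ObjectInputStream in = new ObjectInputStream(
     *       new ByteArrayInputStream(bos.toByteArray()));
     *   @SuppressWarnings("unchecked")
     *   Map<String,Integer> copy = (Map<String,Integer>) in.readObject();
     *   assert copy.equals(m);
     */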
    /* ------------------------------------------------------------ */
    // iterators

    abstract class HashIterator {
        Node<K,V> next;        // next entry to return
        Node<K,V> current;     // current entry
        int expectedModCount;  // for fast-fail
        int index;             // current slot

        HashIterator() {
            expectedModCount = modCount;
            Node<K,V>[] t = table;
            current = next = null;
            index = 0;
            if (t != null && size > 0) { // advance to first entry
                do {} while (index < t.length && (next = t[index++]) == null);
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        final Node<K,V> nextNode() {
            Node<K,V>[] t;
            Node<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            if ((next = (current = e).next) == null && (t = table) != null) {
                do {} while (index < t.length && (next = t[index++]) == null);
            }
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            K key = p.key;
            removeNode(hash(key), key, null, false, false);
            expectedModCount = modCount;
        }
    }

    final class KeyIterator extends HashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().key; }
    }

    final class ValueIterator extends HashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().value; }
    }

    final class EntryIterator extends HashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }
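    /*
     * Illustrative sketch (not part of this changeset): expectedModCount
     * above is what makes the iterators fail fast:
     *
     *   Map<String,Integer> m = new HashMap<>();
     *   m.put("a", 1); m.put("b", 2);
     *   Iterator<String> it = m.keySet().iterator();
     *   m.put("c", 3);     // structural modification behind the iterator
     *   it.next();         // throws ConcurrentModificationException
     */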
    /* ------------------------------------------------------------ */
    // spliterators

    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Node<K,V> current;          // current node
        int index;                  // current index, modified on advance/split
        int fence;                  // one past last index
        int est;                    // size estimate
        int expectedModCount;       // for comodification checks

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                Node<K,V>[] tab = m.table;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }
    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
                                        expectedModCount);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.key);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        K k = current.key;
                        current = current.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
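    /*
     * Illustrative sketch (not part of this changeset): trySplit() above
     * halves the index range, which is what lets parallel streams fan out
     * over the table:
     *
     *   Map<String,Integer> m = new HashMap<>();
     *   for (int i = 0; i < 1000; i++) m.put("k" + i, i);
     *   long n = m.keySet().parallelStream()
     *             .filter(k -> k.endsWith("7")).count();
     *   assert n == 100;
     */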
    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public ValueSpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1,
                                          expectedModCount);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p.value);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        V v = current.value;
                        current = current.next;
                        action.accept(v);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }
    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public EntrySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            return (lo >= mid || current != null) ? null :
                new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
                                          expectedModCount);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Node<K,V>[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = (tab == null) ? 0 : tab.length;
            }
            else
                mc = expectedModCount;
            if (tab != null && tab.length >= hi &&
                (i = index) >= 0 && (i < (index = hi) || current != null)) {
                Node<K,V> p = current;
                current = null;
                do {
                    if (p == null)
                        p = tab[i++];
                    else {
                        action.accept(p);
                        p = p.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Node<K,V>[] tab = map.table;
            if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null)
                        current = tab[index++];
                    else {
                        Node<K,V> e = current;
                        current = current.next;
                        action.accept(e);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }
    /* ------------------------------------------------------------ */
    // LinkedHashMap support

    /*
     * The following package-protected methods are designed to be
     * overridden by LinkedHashMap, but not by any other subclass.
     * Nearly all other internal methods are also package-protected
     * but are declared final, so can be used by LinkedHashMap, view
     * classes, and HashSet.
     */

    // Create a regular (non-tree) node
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<K,V>(hash, key, value, next);
    }

    // For conversion from TreeNodes to plain nodes
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<K,V>(p.hash, p.key, p.value, next);
    }

    // Create a tree bin node
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<K,V>(hash, key, value, next);
    }

    // For treeifyBin
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<K,V>(p.hash, p.key, p.value, next);
    }

    /**
     * Reset to initial default state.  Called by clone and readObject.
     */
    void reinitialize() {
        table = null;
        entrySet = null;
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }

    // Callbacks to allow LinkedHashMap post-actions
    void afterNodeAccess(Node<K,V> p) { }
    void afterNodeInsertion(boolean evict) { }
    void afterNodeRemoval(Node<K,V> p) { }

    // Called only from writeObject, to ensure compatible ordering.
    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        Node<K,V>[] tab;
        if (size > 0 && (tab = table) != null) {
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next) {
                    s.writeObject(e.key);
                    s.writeObject(e.value);
                }
            }
        }
    }
    /* ------------------------------------------------------------ */
    // Tree bins

    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
         */
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }
    private static final long serialVersionUID = 362498820763181265L;

    /**
     * Reconstitute the {@code HashMap} instance from a stream (i.e.,
     * deserialize it).
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException
    {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        }

        // set other fields that need values
        if (Holder.USE_HASHSEED) {
            int seed = ThreadLocalRandom.current().nextInt();
            Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET,
                                         (seed != 0) ? seed : 1);
        }
        table = EMPTY_TABLE;

        // Read in number of buckets
        s.readInt(); // ignored.

        // Read number of mappings
        int mappings = s.readInt();
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);

        // capacity chosen by number of mappings and desired load (if >= 0.25)
        int capacity = (int) Math.min(
                mappings * Math.min(1 / loadFactor, 4.0f),
                // we have limits...
                HashMap.MAXIMUM_CAPACITY);

        // allocate the bucket array;
        if (mappings > 0) {
            inflateTable(capacity);
        } else {
            threshold = capacity;
        }

        init();  // Give subclass a chance to do its thing.

        // Read the keys and values, and put the mappings in the HashMap
        for (int i=0; i<mappings; i++) {
            @SuppressWarnings("unchecked")
            K key = (K) s.readObject();
            @SuppressWarnings("unchecked")
            V value = (V) s.readObject();
            putForCreate(key, value);
        }
    }

    // ===== added by this commit =====

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    return q;
                else
                    p = pl;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }
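/*
 * Illustrative sketch, not part of this patch: tieBreakOrder never returns 0,
 * so two distinct, non-comparable keys with equal hashes still get a stable
 * left/right slot. It first compares class names; only on a tie does it fall
 * back to identity hash codes:
 */
class TieBreakOrderDemo {
    public static void main(String[] args) {
        // Different classes: ordered by class name
        int byName = Integer.valueOf(1).getClass().getName()
                .compareTo("x".getClass().getName());
        System.out.println(byName < 0);   // true: "java.lang.Integer" < "java.lang.String"

        // Same class: fall back to identity hash comparison (never 0 by construction)
        Object a = new Object(), b = new Object();
        int d = (System.identityHashCode(a) <= System.identityHashCode(b)) ? -1 : 1;
        System.out.println(d);            // -1 or 1, stable for this pair within a run
    }
}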
        /**
         * Forms tree of the nodes linked from this node.
         */
        final void treeify(Node<K,V>[] tab) {
            TreeNode<K,V> root = null;
            for (TreeNode<K,V> x = this, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (root == null) {
                    x.parent = null;
                    x.red = false;
                    root = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    for (TreeNode<K,V> p = root;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);

                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            root = balanceInsertion(root, x);
                            break;
                        }
                    }
                }
            }
            moveRootToFront(tab, root);
        }

    // ===== removed by this commit =====

    // These methods are used when serializing HashSets
    int   capacity()     { return table.length; }
    float loadFactor()   { return loadFactor;   }

    /**
     * Standin until HM overhaul; based loosely on Weak and Identity HM.
     */
    static class HashMapSpliterator<K,V> {
        final HashMap<K,V> map;
        Object current;             // current node, can be Entry or TreeNode
        int index;                  // current index, modified on advance/split
        int fence;                  // one past last index
        int est;                    // size estimate
        int expectedModCount;       // for comodification checks
        boolean acceptedNull;       // Have we accepted the null key?
                                    // Without this, we can't distinguish
                                    // between being at the very beginning (and
                                    // needing to accept null), or being at the
                                    // end of the list in bin 0.  In both cases,
                                    // current == null && index == 0.

        HashMapSpliterator(HashMap<K,V> m, int origin,
                           int fence, int est,
                           int expectedModCount) {
            this.map = m;
            this.index = origin;
            this.fence = fence;
            this.est = est;
            this.expectedModCount = expectedModCount;
            this.acceptedNull = false;
        }

        final int getFence() { // initialize fence and size on first use
            int hi;
            if ((hi = fence) < 0) {
                HashMap<K,V> m = map;
                est = m.size;
                expectedModCount = m.modCount;
                hi = fence = m.table.length;
            }
            return hi;
        }

        public final long estimateSize() {
            getFence(); // force init
            return (long) est;
        }
    }
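/*
 * Illustrative sketch, not part of this patch: getFence() above binds the
 * spliterator to the map lazily -- size and modCount are snapshotted on first
 * use, which is why estimateSize() "forces init" before answering. A mutation
 * made after creating the spliterator but before first use is therefore seen:
 */
class LateBindingDemo {
    public static void main(String[] args) {
        java.util.HashMap<String, Integer> m = new java.util.HashMap<>();
        m.put("a", 1);
        java.util.Spliterator<String> sp = m.keySet().spliterator();
        m.put("b", 2);                           // before first use: no exception
        System.out.println(sp.estimateSize());   // 2: bound at first use, not creation
    }
}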
    // ===== added by this commit =====

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(HashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }
        /**
         * Tree version of putVal.
         */
        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
                                       int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false;
            TreeNode<K,V> root = (parent != null) ? root() : this;
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
                    return p;
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    if (!searched) {
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.find(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.find(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    Node<K,V> xpn = xp.next;
                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    xp.next = x;
                    x.parent = x.prev = xp;
                    if (xpn != null)
                        ((TreeNode<K,V>)xpn).prev = x;
                    moveRootToFront(tab, balanceInsertion(root, x));
                    return null;
                }
            }
        }

    // ===== removed by this commit =====

    static final class KeySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<K> {
        KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                       int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        public KeySpliterator<K,V> trySplit() {
            int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
            if (lo >= mid || current != null) {
                return null;
            } else {
                KeySpliterator<K,V> retVal = new KeySpliterator<K,V>(map, lo,
                                   index = mid, est >>>= 1, expectedModCount);
                // Only 'this' Spliterator should check for null.
                retVal.acceptedNull = true;
                return retVal;
            }
        }
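/*
 * Illustrative sketch, not part of this patch: trySplit() above halves the
 * bucket range -- the returned spliterator covers the lower half and the
 * receiver keeps the upper half, so the two halves traverse disjoint entries:
 */
class TrySplitDemo {
    static long count(java.util.Spliterator<Integer> sp) {
        long[] c = {0};
        sp.forEachRemaining(x -> c[0]++);
        return c[0];
    }
    public static void main(String[] args) {
        java.util.HashMap<Integer, Integer> m = new java.util.HashMap<>();
        for (int i = 0; i < 16; i++)
            m.put(i, i);
        java.util.Spliterator<Integer> upper = m.keySet().spliterator();
        java.util.Spliterator<Integer> lower = upper.trySplit();  // may be null for tiny maps
        long n = (lower != null ? count(lower) : 0) + count(upper);
        System.out.println(n);                                    // 16: no overlap, no loss
    }
}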
        @SuppressWarnings("unchecked")
        public void forEachRemaining(Consumer<? super K> action) {
            int i, hi, mc;
            if (action == null)
                throw new NullPointerException();
            HashMap<K,V> m = map;
            Object[] tab = m.table;
            if ((hi = fence) < 0) {
                mc = expectedModCount = m.modCount;
                hi = fence = tab.length;
            }
            else
                mc = expectedModCount;

            if (!acceptedNull) {
                acceptedNull = true;
                if (m.nullKeyEntry != null) {
                    action.accept(m.nullKeyEntry.key);
                }
            }
            if (tab.length >= hi && (i = index) >= 0 &&
                (i < (index = hi) || current != null)) {
                Object p = current;
                current = null;
                do {
                    if (p == null) {
                        p = tab[i++];
                        if (p instanceof HashMap.TreeBin) {
                            p = ((HashMap.TreeBin)p).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (p instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)p;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
                        }
                        action.accept(entry.key);
                        p = entry.next;
                    }
                } while (p != null || i < hi);
                if (m.modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }

        @SuppressWarnings("unchecked")
        public boolean tryAdvance(Consumer<? super K> action) {
            int hi;
            if (action == null)
                throw new NullPointerException();
            Object[] tab = map.table;
            hi = getFence();

            if (!acceptedNull) {
                acceptedNull = true;
                if (map.nullKeyEntry != null) {
                    action.accept(map.nullKeyEntry.key);
                    if (map.modCount != expectedModCount)
                        throw new ConcurrentModificationException();
                    return true;
                }
            }
            if (tab.length >= hi && index >= 0) {
                while (current != null || index < hi) {
                    if (current == null) {
                        current = tab[index++];
                        if (current instanceof HashMap.TreeBin) {
                            current = ((HashMap.TreeBin)current).first;
                        }
                    } else {
                        HashMap.Entry<K,V> entry;
                        if (current instanceof HashMap.Entry) {
                            entry = (HashMap.Entry<K,V>)current;
                        } else {
                            entry = (HashMap.Entry<K,V>)((TreeNode)current).entry;
                        }
                        K k = entry.key;
                        current = entry.next;
                        action.accept(k);
                        if (map.modCount != expectedModCount)
                            throw new ConcurrentModificationException();
                        return true;
                    }
                }
            }
            return false;
        }

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

    // ===== added by this commit =====

        /**
         * Removes the given node, that must be present before this call.
         * This is messier than typical red-black deletion code because we
         * cannot swap the contents of an interior node with a leaf
         * successor that is pinned by "next" pointers that are accessible
         * independently during traversal. So instead we swap the tree
         * linkages. If the current tree appears to have too few nodes,
         * the bin is converted back to a plain bin. (The test triggers
         * somewhere between 2 and 6 nodes, depending on tree structure).
         */
        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
                                  boolean movable) {
            int n;
            if (tab == null || (n = tab.length) == 0)
                return;
            int index = (n - 1) & hash;
            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
            if (pred == null)
                tab[index] = first = succ;
            else
                pred.next = succ;
            if (succ != null)
                succ.prev = pred;
            if (first == null)
                return;
            if (root.parent != null)
                root = root.root();
            if (root == null || root.right == null ||
                (rl = root.left) == null || rl.left == null) {
                tab[index] = first.untreeify(map);  // too small
                return;
            }
            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
            if (pl != null && pr != null) {
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }

            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);

            if (replacement == p) {  // detach
                TreeNode<K,V> pp = p.parent;
                p.parent = null;
                if (pp != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                }
            }
            if (movable)
                moveRootToFront(tab, r);
        }
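/*
 * Illustrative sketch, not part of this patch: the modCount comparisons in the
 * spliterator traversal methods above are what make structural modification
 * during traversal fail fast (on a best-effort basis):
 */
class FailFastDemo {
    public static void main(String[] args) {
        java.util.HashMap<String, Integer> m = new java.util.HashMap<>();
        m.put("a", 1);
        m.put("b", 2);
        try {
            m.keySet().spliterator()
                .forEachRemaining(k -> m.remove("b"));  // structural change mid-traversal
            System.out.println("no CME (fail-fast is best-effort)");
        } catch (java.util.ConcurrentModificationException e) {
            System.out.println("ConcurrentModificationException, as expected");
        }
    }
}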
    // ===== removed by this commit =====

    static final class ValueSpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<V> {
        ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        // trySplit, forEachRemaining and tryAdvance mirror KeySpliterator
        // above, applying the action to entry.value instead of entry.key.

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
        }
    }

    // ===== added by this commit =====

        /**
         * Splits nodes in a tree bin into lower and upper tree bins,
         * or untreeifies if now too small. Called only from resize;
         * see above discussion about split bits and indices.
         *
         * @param map the map
         * @param tab the table for recording bin heads
         * @param index the index of the table being split
         * @param bit the bit of hash to split on
         */
        final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
            TreeNode<K,V> b = this;
            // Relink into lo and hi lists, preserving order
            TreeNode<K,V> loHead = null, loTail = null;
            TreeNode<K,V> hiHead = null, hiTail = null;
            int lc = 0, hc = 0;
            for (TreeNode<K,V> e = b, next; e != null; e = next) {
                next = (TreeNode<K,V>)e.next;
                e.next = null;
                if ((e.hash & bit) == 0) {
                    if ((e.prev = loTail) == null)
                        loHead = e;
                    else
                        loTail.next = e;
                    loTail = e;
                    ++lc;
                }
                else {
                    if ((e.prev = hiTail) == null)
                        hiHead = e;
                    else
                        hiTail.next = e;
                    hiTail = e;
                    ++hc;
                }
            }

            if (loHead != null) {
                if (lc <= UNTREEIFY_THRESHOLD)
                    tab[index] = loHead.untreeify(map);
                else {
                    tab[index] = loHead;
                    if (hiHead != null) // (else is already treeified)
                        loHead.treeify(tab);
                }
            }
            if (hiHead != null) {
                if (hc <= UNTREEIFY_THRESHOLD)
                    tab[index + bit] = hiHead.untreeify(map);
                else {
                    tab[index + bit] = hiHead;
                    if (loHead != null)
                        hiHead.treeify(tab);
                }
            }
        }
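/*
 * Illustrative sketch, not part of this patch: on resize, every entry in bin
 * `index` lands either at `index` or at `index + bit`, where `bit` is the old
 * capacity -- decided by a single hash bit. This is the rule split() applies
 * to tree bins:
 */
class SplitBitDemo {
    public static void main(String[] args) {
        int oldCap = 16, bit = oldCap;
        int h1 = 0b00101;                       // bit 4 clear
        int h2 = 0b10101;                       // bit 4 set
        int index = (oldCap - 1) & h1;          // 5 for both hashes
        System.out.println((h1 & bit) == 0 ? index : index + bit);  // 5  (lo list)
        System.out.println((h2 & bit) == 0 ? index : index + bit);  // 21 (hi list)
    }
}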
        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }
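/*
 * Illustrative sketch, not part of this patch: the pointer surgery performed
 * by rotateLeft above, shown on a minimal node type (parent links and colors
 * omitted for brevity):
 */
class RotateLeftDemo {
    static final class N {
        final String name;
        N left, right;
        N(String name, N left, N right) { this.name = name; this.left = left; this.right = right; }
    }
    static N rotateLeft(N p) {
        N r = p.right;
        p.right = r.left;   // r's left subtree becomes p's right subtree
        r.left = p;         // p becomes r's left child
        return r;           // r is the new subtree root
    }
    public static void main(String[] args) {
        N p = new N("p", new N("a", null, null),
                    new N("r", new N("b", null, null), new N("c", null, null)));
        N root = rotateLeft(p);
        System.out.println(root.name);              // r
        System.out.println(root.left.name);         // p
        System.out.println(root.left.right.name);   // b (moved across)
    }
}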
    // ===== removed by this commit =====

    static final class EntrySpliterator<K,V>
        extends HashMapSpliterator<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
                         int expectedModCount) {
            super(m, origin, fence, est, expectedModCount);
        }

        // trySplit, forEachRemaining and tryAdvance mirror KeySpliterator
        // above, passing the whole entry to the action.

        public int characteristics() {
            return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
                Spliterator.DISTINCT;
        }
    }

    // ===== added by this commit =====

        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;)  {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Recursive invariant check
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }
    }
}
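/*
 * Illustrative sketch, not part of this patch: whether colliding keys are
 * Comparable (tree ordered by compareTo) or not (ordered by tieBreakOrder),
 * the map's observable behavior is the same; removal below also exercises
 * removeTreeNode and the shrink back to a plain bin:
 */
class TreeBinLifecycleDemo {
    static final class K {                            // deliberately NOT Comparable
        final int id;
        K(int id) { this.id = id; }
        @Override public int hashCode() { return 7; } // force a single bin
        @Override public boolean equals(Object o) {
            return (o instanceof K) && ((K) o).id == id;
        }
    }
    public static void main(String[] args) {
        java.util.HashMap<K, Integer> m = new java.util.HashMap<>();
        for (int i = 0; i < 32; i++)
            m.put(new K(i), i);                       // grows a tree bin
        for (int i = 0; i < 32; i++)
            if (m.get(new K(i)) != i)
                throw new AssertionError(i);
        for (int i = 0; i < 32; i++)
            m.remove(new K(i));                       // shrinks and untreeifies
        System.out.println(m.isEmpty());              // true
    }
}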
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,12 @@
 */
package java.util;

import java.util.function.Consumer;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.io.Serializable;
import java.io.IOException;
/**
 * <p>Hash table and linked list implementation of the <tt>Map</tt> interface,
@@ -57,9 +60,9 @@ import java.util.function.BiFunction;
 * order they were presented.)
 *
 * <p>A special {@link #LinkedHashMap(int,float,boolean) constructor} is
 * provided to create a linked hash map whose order of iteration is the order
 * in which its entries were last accessed, from least-recently accessed to
 * most-recently (<i>access-order</i>).  This kind of map is well-suited to
 * building LRU caches.  Invoking the <tt>put</tt> or <tt>get</tt> method
 * results in an access to the corresponding entry (assuming it exists after
 * the invocation completes).  The <tt>putAll</tt> method generates one entry
@@ -155,18 +158,53 @@ import java.util.function.BiFunction;
 * @see     Hashtable
 * @since   1.4
 */
public class LinkedHashMap<K,V>
    extends HashMap<K,V>
    implements Map<K,V>
{
/*
* Implementation note. A previous version of this class was
* internally structured a little differently. Because superclass
* HashMap now uses trees for some of its nodes, class
* LinkedHashMap.Entry is now treated as intermediary node class
* that can also be converted to tree form. The name of this
* class, LinkedHashMap.Entry, is confusing in several ways in its
* current context, but cannot be changed. Otherwise, even though
* it is not exported outside this package, some existing source
* code is known to have relied on a symbol resolution corner case
* rule in calls to removeEldestEntry that suppressed compilation
* errors due to ambiguous usages. So, we keep the name to
* preserve unmodified compilability.
*
* The changes in node classes also require using two fields
* (head, tail) rather than a pointer to a header node to maintain
* the doubly-linked before/after list. This class also
* previously used a different style of callback methods upon
* access, insertion, and removal.
*/
/**
* HashMap.Node subclass for normal LinkedHashMap entries.
*/
static class Entry<K,V> extends HashMap.Node<K,V> {
Entry<K,V> before, after;
Entry(int hash, K key, V value, Node<K,V> next) {
super(hash, key, value, next);
}
}
    private static final long serialVersionUID = 3801124242820219131L;

    /**
     * The head (eldest) of the doubly linked list.
     */
    transient LinkedHashMap.Entry<K,V> head;

    /**
     * The tail (youngest) of the doubly linked list.
     */
    transient LinkedHashMap.Entry<K,V> tail;

    /**
     * The iteration ordering method for this linked hash map: <tt>true</tt>
@@ -174,7 +212,125 @@ public class LinkedHashMap<K,V>
     *
     * @serial
     */
    final boolean accessOrder;
// internal utilities
// link at the end of list
private void linkNodeLast(LinkedHashMap.Entry<K,V> p) {
LinkedHashMap.Entry<K,V> last = tail;
tail = p;
if (last == null)
head = p;
else {
p.before = last;
last.after = p;
}
}
// apply src's links to dst
private void transferLinks(LinkedHashMap.Entry<K,V> src,
LinkedHashMap.Entry<K,V> dst) {
LinkedHashMap.Entry<K,V> b = dst.before = src.before;
LinkedHashMap.Entry<K,V> a = dst.after = src.after;
if (b == null)
head = dst;
else
b.after = dst;
if (a == null)
tail = dst;
else
a.before = dst;
}
// overrides of HashMap hook methods
void reinitialize() {
super.reinitialize();
head = tail = null;
}
Node<K,V> newNode(int hash, K key, V value, Node<K,V> e) {
LinkedHashMap.Entry<K,V> p =
new LinkedHashMap.Entry<K,V>(hash, key, value, e);
linkNodeLast(p);
return p;
}
Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
LinkedHashMap.Entry<K,V> t =
new LinkedHashMap.Entry<K,V>(q.hash, q.key, q.value, next);
transferLinks(q, t);
return t;
}
TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
TreeNode<K,V> p = new TreeNode<K,V>(hash, key, value, next);
linkNodeLast(p);
return p;
}
TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
TreeNode<K,V> t = new TreeNode<K,V>(q.hash, q.key, q.value, next);
transferLinks(q, t);
return t;
}
void afterNodeRemoval(Node<K,V> e) { // unlink
LinkedHashMap.Entry<K,V> p =
(LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
p.before = p.after = null;
if (b == null)
head = a;
else
b.after = a;
if (a == null)
tail = b;
else
a.before = b;
}
void afterNodeInsertion(boolean evict) { // possibly remove eldest
LinkedHashMap.Entry<K,V> first;
if (evict && (first = head) != null && removeEldestEntry(first)) {
K key = first.key;
removeNode(hash(key), key, null, false, true);
}
}
void afterNodeAccess(Node<K,V> e) { // move node to last
LinkedHashMap.Entry<K,V> last;
if (accessOrder && (last = tail) != e) {
LinkedHashMap.Entry<K,V> p =
(LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
p.after = null;
if (b == null)
head = a;
else
b.after = a;
if (a != null)
a.before = b;
else
last = b;
if (last == null)
head = p;
else {
p.before = last;
last.after = p;
}
tail = p;
++modCount;
}
}
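/*
 * Illustrative sketch, not part of this patch: afterNodeAccess above is what
 * makes an access-ordered LinkedHashMap move an entry to the tail on get():
 */
class AccessOrderDemo {
    public static void main(String[] args) {
        java.util.LinkedHashMap<String, Integer> m =
            new java.util.LinkedHashMap<>(16, 0.75f, true);  // accessOrder = true
        m.put("a", 1);
        m.put("b", 2);
        m.put("c", 3);
        m.get("a");                      // "a" becomes youngest (moves to tail)
        System.out.println(m.keySet());  // [b, c, a]
    }
}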
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
s.writeObject(e.key);
s.writeObject(e.value);
}
}
    /**
     * Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
@@ -221,8 +377,9 @@ public class LinkedHashMap<K,V>
     * @throws NullPointerException if the specified map is null
     */
    public LinkedHashMap(Map<? extends K, ? extends V> m) {
        super();
        accessOrder = false;
        putMapEntries(m, false);
    }

    /**
@@ -243,16 +400,6 @@ public class LinkedHashMap<K,V>
        this.accessOrder = accessOrder;
    }
    // ===== removed by this commit =====

    /**
     * Called by superclass constructors and pseudoconstructors (clone,
     * readObject) before any entries are inserted into the map.  Initializes
     * the chain.
     */
    @Override
    void init() {
        header = new Entry<>(-1, null, null, null);
        header.before = header.after = header;
    }
    /**
     * Returns <tt>true</tt> if this map maps one or more keys to the
@@ -263,14 +410,9 @@ public class LinkedHashMap<K,V>
     *         specified value
     */
    public boolean containsValue(Object value) {
        for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
            V v = e.value;
            if (v == value || (value != null && value.equals(v)))
                return true;
        }
        return false;
    }
@@ -292,10 +434,11 @@ public class LinkedHashMap<K,V>
     * distinguish these two cases.
     */
    public V get(Object key) {
        Node<K,V> e;
        if ((e = getNode(hash(key), key)) == null)
            return null;
        if (accessOrder)
            afterNodeAccess(e);
        return e.value;
    }
@@ -305,207 +448,288 @@ public class LinkedHashMap<K,V>
     */
    public void clear() {
        super.clear();
        head = tail = null;
    }

    // ===== removed by this commit (previous implementations) =====

    public boolean containsValue(Object value) {
        // Overridden to take advantage of faster iterator
        if (value==null) {
            for (Entry<?,?> e = header.after; e != header; e = e.after)
                if (e.value==null)
                    return true;
        } else {
            for (Entry<?,?> e = header.after; e != header; e = e.after)
                if (value.equals(e.value))
                    return true;
        }
        return false;
    }

    public V get(Object key) {
        Entry<K,V> e = (Entry<K,V>)getEntry(key);
        if (e == null)
            return null;
        e.recordAccess(this);
        return e.value;
    }

    public void clear() {
        super.clear();
        header.before = header.after = header;
    }
    // ===== removed by this commit (reimplemented further down) =====

    @Override
    public void forEach(BiConsumer<? super K, ? super V> action) {
        Objects.requireNonNull(action);
        int expectedModCount = modCount;
        for (Entry<K, V> entry = header.after; entry != header; entry = entry.after) {
            action.accept(entry.key, entry.value);

            if (expectedModCount != modCount) {
                throw new ConcurrentModificationException();
            }
        }
    }

    // ===== added by this commit =====

    /**
     * Returns <tt>true</tt> if this map should remove its eldest entry.
     * This method is invoked by <tt>put</tt> and <tt>putAll</tt> after
     * inserting a new entry into the map.  It provides the implementor
     * with the opportunity to remove the eldest entry each time a new one
     * is added.  This is useful if the map represents a cache: it allows
     * the map to reduce memory consumption by deleting stale entries.
     *
     * <p>Sample use: this override will allow the map to grow up to 100
     * entries and then delete the eldest entry each time a new entry is
     * added, maintaining a steady state of 100 entries.
     * <pre>
     *     private static final int MAX_ENTRIES = 100;
     *
     *     protected boolean removeEldestEntry(Map.Entry eldest) {
     *        return size() &gt; MAX_ENTRIES;
     *     }
     * </pre>
     *
     * <p>This method typically does not modify the map in any way,
     * instead allowing the map to modify itself as directed by its
     * return value.  It <i>is</i> permitted for this method to modify
     * the map directly, but if it does so, it <i>must</i> return
     * <tt>false</tt> (indicating that the map should not attempt any
     * further modification).  The effects of returning <tt>true</tt>
     * after modifying the map from within this method are unspecified.
     *
     * <p>This implementation merely returns <tt>false</tt> (so that this
     * map acts like a normal map - the eldest element is never removed).
     *
     * @param    eldest The least recently inserted entry in the map, or if
     *           this is an access-ordered map, the least recently accessed
     *           entry.  This is the entry that will be removed if this
     *           method returns <tt>true</tt>.  If the map was empty prior
     *           to the <tt>put</tt> or <tt>putAll</tt> invocation resulting
     *           in this invocation, this will be the entry that was just
     *           inserted; in other words, if the map contains a single
     *           entry, the eldest entry is also the newest.
     * @return   <tt>true</tt> if the eldest entry should be removed
     *           from the map; <tt>false</tt> if it should be retained.
     */
    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
        return false;
    }
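/*
 * Illustrative sketch, not part of this patch: combining the access-order
 * constructor with a removeEldestEntry override yields the bounded LRU cache
 * the javadoc above describes. The class name and capacity are invented:
 */
class LruCache<K, V> extends java.util.LinkedHashMap<K, V> {
    private final int maxEntries;
    LruCache(int maxEntries) {
        super(16, 0.75f, true);          // access order: get() refreshes an entry
        this.maxEntries = maxEntries;
    }
    @Override protected boolean removeEldestEntry(java.util.Map.Entry<K, V> eldest) {
        return size() > maxEntries;      // evict the least-recently-used entry
    }
    public static void main(String[] args) {
        LruCache<String, Integer> cache = new LruCache<>(2);
        cache.put("a", 1);
        cache.put("b", 2);
        cache.get("a");                  // "a" is now most recently used
        cache.put("c", 3);               // evicts "b", the eldest
        System.out.println(cache.keySet());  // [a, c]
    }
}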
    // ===== removed by this commit (reimplemented further down) =====

    @Override
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        Objects.requireNonNull(function);
        int expectedModCount = modCount;
        for (Entry<K, V> entry = header.after; entry != header; entry = entry.after) {
            entry.value = function.apply(entry.key, entry.value);

            if (expectedModCount != modCount) {
                throw new ConcurrentModificationException();
            }
        }
    }

    // ===== added by this commit =====

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation), the results of
     * the iteration are undefined.  The set supports element removal,
     * which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
     * operations.
     * Its {@link Spliterator} typically provides faster sequential
     * performance but much poorer parallel performance than that of
     * {@code HashMap}.
     *
     * @return a set view of the keys contained in this map
     */
    public Set<K> keySet() {
        Set<K> ks;
        return (ks = keySet) == null ? (keySet = new LinkedKeySet()) : ks;
    }

    final class LinkedKeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { LinkedHashMap.this.clear(); }
        public final Iterator<K> iterator() {
            return new LinkedKeyIterator();
        }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator()  {
            return Spliterators.spliterator(this, Spliterator.SIZED |
                                            Spliterator.ORDERED |
                                            Spliterator.DISTINCT);
        }
        public final void forEach(Consumer<? super K> action) {
            if (action == null)
                throw new NullPointerException();
            int mc = modCount;
            for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
                action.accept(e.key);
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
    // ===== removed by this commit =====

    /**
     * LinkedHashMap entry.
     */
    private static class Entry<K,V> extends HashMap.Entry<K,V> {
        // These fields comprise the doubly linked list used for iteration.
        Entry<K,V> before, after;

        Entry(int hash, K key, V value, Object next) {
            super(hash, key, value, next);
        }

        /**
         * Removes this entry from the linked list.
         */
        private void remove() {
            before.after = after;
            after.before = before;
        }

        /**
         * Inserts this entry before the specified existing entry in the list.
         */
        private void addBefore(Entry<K,V> existingEntry) {
            after  = existingEntry;
            before = existingEntry.before;
            before.after = this;
            after.before = this;
        }

        /**
         * This method is invoked by the superclass whenever the value
         * of a pre-existing entry is read by Map.get or modified by Map.put.
         * If the enclosing Map is access-ordered, it moves the entry
         * to the end of the list; otherwise, it does nothing.
         */
        void recordAccess(HashMap<K,V> m) {
            LinkedHashMap<K,V> lm = (LinkedHashMap<K,V>)m;
            if (lm.accessOrder) {
                lm.modCount++;
                remove();
                addBefore(lm.header);
            }
        }

        void recordRemoval(HashMap<K,V> m) {
            remove();
        }
    }

    // ===== added by this commit =====

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  If the map is
     * modified while an iteration over the collection is in progress
     * (except through the iterator's own <tt>remove</tt> operation),
     * the results of the iteration are undefined.  The collection
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
     * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
     * support the <tt>add</tt> or <tt>addAll</tt> operations.
     * Its {@link Spliterator} typically provides faster sequential
     * performance but much poorer parallel performance than that of
     * {@code HashMap}.
     *
     * @return a view of the values contained in this map
     */
    public Collection<V> values() {
        Collection<V> vs;
        return (vs = values) == null ? (values = new LinkedValues()) : vs;
    }

    final class LinkedValues extends AbstractCollection<V> {
        public final int size()                 { return size; }
        public final void clear()               { LinkedHashMap.this.clear(); }
        public final Iterator<V> iterator() {
            return new LinkedValueIterator();
        }
        public final boolean contains(Object o) { return containsValue(o); }
        public final Spliterator<V> spliterator() {
            return Spliterators.spliterator(this, Spliterator.SIZED |
                                            Spliterator.ORDERED);
        }
        public final void forEach(Consumer<? super V> action) {
            if (action == null)
                throw new NullPointerException();
            int mc = modCount;
            for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
                action.accept(e.value);
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation, or through the
     * <tt>setValue</tt> operation on a map entry returned by the
     * iterator) the results of the iteration are undefined.  The set
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
     * <tt>clear</tt> operations.  It does not support the
     * <tt>add</tt> or <tt>addAll</tt> operations.
     * Its {@link Spliterator} typically provides faster sequential
     * performance but much poorer parallel performance than that of
     * {@code HashMap}.
     *
     * @return a set view of the mappings contained in this map
     */
    public Set<Map.Entry<K,V>> entrySet() {
        Set<Map.Entry<K,V>> es;
        return (es = entrySet) == null ? (entrySet = new LinkedEntrySet()) : es;
    }

    final class LinkedEntrySet extends AbstractSet<Map.Entry<K,V>> {
        public final int size()                 { return size; }
        public final void clear()               { LinkedHashMap.this.clear(); }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new LinkedEntryIterator();
        }
        public final boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Node<K,V> candidate = getNode(hash(key), key);
            return candidate != null && candidate.equals(e);
        }
        public final boolean remove(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
                Object key = e.getKey();
                Object value = e.getValue();
                return removeNode(hash(key), key, value, true, true) != null;
            }
            return false;
        }
        public final Spliterator<Map.Entry<K,V>> spliterator() {
            return Spliterators.spliterator(this, Spliterator.SIZED |
                                            Spliterator.ORDERED |
                                            Spliterator.DISTINCT);
        }
        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null)
                throw new NullPointerException();
            int mc = modCount;
            for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
                action.accept(e);
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
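/*
 * Illustrative sketch, not part of this patch: per 8012913 in this change,
 * the LinkedHashMap view spliterators above report ORDERED (a defined
 * encounter order), which HashMap's spliterators do not:
 */
class OrderedSpliteratorDemo {
    public static void main(String[] args) {
        java.util.LinkedHashMap<String, Integer> lhm = new java.util.LinkedHashMap<>();
        lhm.put("x", 1);
        System.out.println(lhm.entrySet().spliterator()
            .hasCharacteristics(java.util.Spliterator.ORDERED));  // true
        java.util.HashMap<String, Integer> hm = new java.util.HashMap<>(lhm);
        System.out.println(hm.entrySet().spliterator()
            .hasCharacteristics(java.util.Spliterator.ORDERED));  // false
    }
}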
    // ===== removed by this commit =====

    private abstract class LinkedHashIterator<T> implements Iterator<T> {
        Entry<K,V> nextEntry    = header.after;
        Entry<K,V> lastReturned = null;

        /**
         * The modCount value that the iterator believes that the backing
         * List should have.  If this expectation is violated, the iterator
         * has detected concurrent modification.
         */
        int expectedModCount = modCount;

        public boolean hasNext() {
            return nextEntry != header;
        }

        public void remove() {
            if (lastReturned == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();

            LinkedHashMap.this.remove(lastReturned.key);
            lastReturned = null;
            expectedModCount = modCount;
        }

        Entry<K,V> nextEntry() {
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (nextEntry == header)
                throw new NoSuchElementException();

            Entry<K,V> e = lastReturned = nextEntry;
            nextEntry = e.after;
            return e;
        }
    }

    private class KeyIterator extends LinkedHashIterator<K> {
        public K next() { return nextEntry().getKey(); }
    }

    private class ValueIterator extends LinkedHashIterator<V> {
        public V next() { return nextEntry().value; }
    }

    private class EntryIterator extends LinkedHashIterator<Map.Entry<K,V>> {
        public Map.Entry<K,V> next() { return nextEntry(); }
    }

    // These Overrides alter the behavior of superclass view iterator() methods
    Iterator<K> newKeyIterator()   { return new KeyIterator();   }
    Iterator<V> newValueIterator() { return new ValueIterator(); }
    Iterator<Map.Entry<K,V>> newEntryIterator() { return new EntryIterator(); }

    /**
     * This override alters behavior of superclass put method. It causes newly
     * allocated entry to get inserted at the end of the linked list and
     * removes the eldest entry if appropriate.
     */
    @Override
    void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
        super.addEntry(hash, key, value, bucketIndex, checkIfNeedTree);

        // Remove eldest entry if instructed
        Entry<K,V> eldest = header.after;
        if (removeEldestEntry(eldest)) {
            removeEntryForKey(eldest.key);
        }
    }

    /*
     * Create a new LinkedHashMap.Entry and setup the before/after pointers
     */
    @Override
    HashMap.Entry<K,V> newEntry(int hash, K key, V value, Object next) {
        Entry<K,V> newEntry = new Entry<>(hash, key, value, next);
        newEntry.addBefore(header);
        return newEntry;
    }

    // (removeEldestEntry and its javadoc, formerly located here, now appear
    // earlier in the file.)

    // ===== added by this commit =====

    // Map overrides

    public void forEach(BiConsumer<? super K, ? super V> action) {
        if (action == null)
            throw new NullPointerException();
        int mc = modCount;
        for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
            action.accept(e.key, e.value);
        if (modCount != mc)
            throw new ConcurrentModificationException();
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null)
            throw new NullPointerException();
        int mc = modCount;
        for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
            e.value = function.apply(e.key, e.value);
        if (modCount != mc)
            throw new ConcurrentModificationException();
    }

    // Iterators

    abstract class LinkedHashIterator {
        LinkedHashMap.Entry<K,V> next;
        LinkedHashMap.Entry<K,V> current;
        int expectedModCount;

        LinkedHashIterator() {
            next = head;
            expectedModCount = modCount;
            current = null;
        }

        public final boolean hasNext() {
            return next != null;
        }

        final LinkedHashMap.Entry<K,V> nextNode() {
            LinkedHashMap.Entry<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            current = e;
            next = e.after;
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            K key = p.key;
            removeNode(hash(key), key, null, false, false);
            expectedModCount = modCount;
        }
    }

    final class LinkedKeyIterator extends LinkedHashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().getKey(); }
    }

    final class LinkedValueIterator extends LinkedHashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().value; }
    }

    final class LinkedEntryIterator extends LinkedHashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }

}
@@ -50,9 +50,9 @@ import static java.util.Arrays.*;
                   "java.util.HashMap$EntryIterator",
                   "java.util.HashMap$KeyIterator",
                   "java.util.HashMap$ValueIterator",
                   // renamed by this commit from LinkedHashMap$EntryIterator etc.:
                   "java.util.LinkedHashMap$LinkedEntryIterator",
                   "java.util.LinkedHashMap$LinkedKeyIterator",
                   "java.util.LinkedHashMap$LinkedValueIterator"})
public class Probe {
    public static void main (String... args) throws Throwable {
        Classes classesAnnotation = (Probe.class).getAnnotation(Classes.class);
......
@@ -53,8 +53,6 @@ public class CheckRandomHashSeed {
            throw new Error("Error in test setup: " + (expectRandom ? "" : "not " ) + "expecting random hashSeed, but " + PROP_NAME + " is " + (propSet ? "" : "not ") + "enabled");
        }

        // removed by this commit (HashMap and LinkedHashMap no longer use a hash seed):
        //     testMap(new HashMap());
        //     testMap(new LinkedHashMap());
        testMap(new WeakHashMap());
        testMap(new Hashtable());
    }
......
@@ -25,7 +25,6 @@
 * @test
 * @bug 8005698
 * @run main InPlaceOpsCollisions -shortrun
 *   (removed by this commit: @run main/othervm -Djdk.map.randomseed=true InPlaceOpsCollisions -shortrun)
 * @summary Ensure overrides of in-place operations in Maps behave well with lots of collisions.
 * @author Brent Christian
 */
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.testng.Assert.assertEquals;
/*
* @test
* @bug 8023463
 * @summary Test the case where a bin is treeified and vice versa
* @run testng MapBinToFromTreeTest
*/
@Test
public class MapBinToFromTreeTest {
// Initial capacity of map
    // Should be >= the map capacity for treeifying, see HashMap/ConcurrentMap.MIN_TREEIFY_CAPACITY
static final int INITIAL_CAPACITY = 64;
// Maximum size of map
// Should be > the treeify threshold, see HashMap/ConcurrentMap.TREEIFY_THRESHOLD
// Should be > INITIAL_CAPACITY to ensure resize occurs
static final int SIZE = 256;
// Load factor of map
// A value 1.0 will ensure that a new threshold == capacity
static final float LOAD_FACTOR = 1.0f;
@DataProvider(name = "maps")
static Object[][] mapProvider() {
return new Object[][] {
// Pass in the class name as a description for test reporting
// purposes
{ HashMap.class.getName(), new HashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
{ LinkedHashMap.class.getName(), new LinkedHashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
{ ConcurrentHashMap.class.getName(), new ConcurrentHashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
};
}
@Test(dataProvider = "maps")
public void testPutThenGet(String d, Map<HashCodeInteger, Integer> m) {
put(SIZE, m, (i, s) -> {
for (int j = 0; j < s; j++) {
assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
String.format("Map.get(%d)", j));
}
});
}
@Test(dataProvider = "maps")
public void testPutThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
put(SIZE, m, (i, s) -> {
// Note that it is OK to collect to a Set (HashSet) as long as
// integer values are used since these tests only check for
// collisions and other tests will verify more general functionality
Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
Collection<Integer> expected = IntStream.range(0, s).boxed().collect(c);
assertEquals(actual, expected, "Map.keySet()");
});
}
@Test(dataProvider = "maps")
public void testRemoveThenGet(String d, Map<HashCodeInteger, Integer> m) {
put(SIZE, m, (i, s) -> { });
remove(m, (i, s) -> {
for (int j = i + 1; j < SIZE; j++) {
assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
String.format("Map.get(%d)", j));
}
});
}
@Test(dataProvider = "maps")
public void testRemoveThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
put(SIZE, m, (i, s) -> { });
Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
remove(m, (i, s) -> {
Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
Collection<Integer> expected = IntStream.range(i + 1, SIZE).boxed().collect(c);
assertEquals(actual, expected, "Map.keySet()");
});
}
@Test(dataProvider = "maps")
public void testUntreeifyOnResizeWithGet(String d, Map<HashCodeInteger, Integer> m) {
// Fill the map with 64 entries grouped into 4 buckets
put(INITIAL_CAPACITY, m, (i, s) -> { });
for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
// Add further entries in the 0'th bucket so as not to disturb
// other buckets, entries of which may be distributed and/or
// the bucket untreeified on resize
m.put(new HashCodeInteger(i, 0), i);
for (int j = 0; j < INITIAL_CAPACITY; j++) {
assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
String.format("Map.get(%d) < INITIAL_CAPACITY", j));
}
for (int j = INITIAL_CAPACITY; j <= i; j++) {
assertEquals(m.get(new HashCodeInteger(j, 0)).intValue(), j,
String.format("Map.get(%d) >= INITIAL_CAPACITY", j));
}
}
}
@Test(dataProvider = "maps")
public void testUntreeifyOnResizeWithTraverse(String d, Map<HashCodeInteger, Integer> m) {
// Fill the map with 64 entries grouped into 4 buckets
put(INITIAL_CAPACITY, m, (i, s) -> { });
Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
// Add further entries in the 0'th bucket so as not to disturb
// other buckets, entries of which may be distributed and/or
// the bucket untreeified on resize
m.put(new HashCodeInteger(i, 0), i);
Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
Collection<Integer> expected = IntStream.rangeClosed(0, i).boxed().collect(c);
assertEquals(actual, expected, "Key set");
}
}
Collector<Integer, ?, ? extends Collection<Integer>> getCollector(Map<?, ?> m) {
Collector<Integer, ?, ? extends Collection<Integer>> collector = m instanceof LinkedHashMap
? Collectors.toList()
: Collectors.toSet();
return collector;
}
void put(int size, Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
for (int i = 0; i < size; i++) {
m.put(new HashCodeInteger(i), i);
c.accept(i, m.size());
}
}
void remove(Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
int size = m.size();
        // Remove all elements, thus ensuring that at some point trees will be
        // converted back into bins
for (int i = 0; i < size; i++) {
m.remove(new HashCodeInteger(i));
c.accept(i, m.size());
}
}
final static class HashCodeInteger implements Comparable<HashCodeInteger> {
final int value;
final int hashcode;
HashCodeInteger(int value) {
this(value, hash(value));
}
HashCodeInteger(int value, int hashcode) {
this.value = value;
this.hashcode = hashcode;
}
static int hash(int i) {
// Assuming 64 entries with keys from 0 to 63 then a map:
// - of capacity 64 will have 4 buckets with 16 entries per-bucket
// - of capacity 128 will have 8 buckets with 8 entries per-bucket
// - of capacity 256 will have 16 buckets with 4 entries per-bucket
//
// Re-sizing will result in re-distribution, doubling the buckets
// and reducing the entries by half. This will result in
// untreeifying when the number of entries is less than untreeify
// threshold (see HashMap/ConcurrentMap.UNTREEIFY_THRESHOLD)
return (i % 4) + (i / 4) * INITIAL_CAPACITY;
}
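/*
 * Illustrative sketch, not part of this test: spelling out the arithmetic in
 * hash(int) above for a few keys at INITIAL_CAPACITY = 64:
 */
class HashLayoutDemo {
    public static void main(String[] args) {
        int capacity = 64;
        for (int i : new int[] {0, 1, 4, 5, 63}) {
            int h = (i % 4) + (i / 4) * capacity;
            System.out.println(i + " -> hash " + h + ", bucket " + (h & (capacity - 1)));
        }
        // 0 -> hash 0, bucket 0      1 -> hash 1, bucket 1
        // 4 -> hash 64, bucket 0     5 -> hash 65, bucket 1
        // 63 -> hash 963, bucket 3   (only buckets 0..3 are ever used)
    }
}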
@Override
public boolean equals(Object obj) {
if (obj instanceof HashCodeInteger) {
HashCodeInteger other = (HashCodeInteger) obj;
return other.value == value;
}
return false;
}
@Override
public int hashCode() {
return hashcode;
}
@Override
public int compareTo(HashCodeInteger o) {
return value - o.value;
}
@Override
public String toString() {
return Integer.toString(value);
}
}
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.util.*;
import java.lang.reflect.Field;
/*
* @test
* @bug 8005698
* @summary Test the case where TreeBin.splitTreeBin() converts a bin back to an Entry list
* @run main TreeBinSplitBackToEntries unused
* @author Brent Christian
*/
public class TreeBinSplitBackToEntries {
private static final int EXPECTED_TREE_THRESHOLD = 16;
// Easiest if this covers one bit higher than 'bit' in splitTreeBin() on the
// call where the TreeBin is converted back to an Entry list
private static final int HASHMASK = 0x7F;
private static boolean verbose = false;
private static boolean fastFail = false;
private static boolean failed = false;
static void printlnIfVerbose(String msg) {
if (verbose) { System.out.println(msg); }
}
public static void main(String[] args) {
for (String arg : args) {
switch(arg) {
case "-verbose":
verbose = true;
break;
case "-fastfail":
fastFail = true;
break;
}
}
checkTreeThreshold();
testMapHiTree();
testMapLoTree();
if (failed) {
System.out.println("Test Failed");
System.exit(1);
} else {
System.out.println("Test Passed");
}
}
public static void checkTreeThreshold() {
int threshold = -1;
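// TREE_THRESHOLD is a private constant of java.util.HashMap$TreeBin, so it
// can only be read reflectively.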
try {
Class<?> treeBinClass = Class.forName("java.util.HashMap$TreeBin");
Field treeThreshold = treeBinClass.getDeclaredField("TREE_THRESHOLD");
treeThreshold.setAccessible(true);
threshold = treeThreshold.getInt(treeBinClass);
} catch (ClassNotFoundException|NoSuchFieldException|IllegalAccessException e) {
e.printStackTrace();
throw new Error("Problem accessing TreeBin.TREE_THRESHOLD", e);
}
check("Expected TREE_THRESHOLD: " + EXPECTED_TREE_THRESHOLD +", found: " + threshold,
threshold == EXPECTED_TREE_THRESHOLD);
printlnIfVerbose("TREE_THRESHOLD: " + threshold);
}
public static void testMapHiTree() {
Object[][] mapKeys = makeHiTreeTestData();
testMapsForKeys(mapKeys, "hiTree");
}
public static void testMapLoTree() {
Object[][] mapKeys = makeLoTreeTestData();
testMapsForKeys(mapKeys, "loTree");
}
public static void testMapsForKeys(Object[][] mapKeys, String desc) {
// loop through data sets
for (Object[] keys_desc : mapKeys) {
Map<Object, Object>[] maps = (Map<Object, Object>[]) new Map[]{
new HashMap<>(4, 0.8f),
new LinkedHashMap<>(4, 0.8f),
};
// for each map type.
for (Map<Object, Object> map : maps) {
Object[] keys = (Object[]) keys_desc[1];
System.out.println(desc + ": testPutThenGet() for " + map.getClass());
testPutThenGet(map, keys);
}
}
}
private static <T> void testPutThenGet(Map<T, T> map, T[] keys) {
for (T key : keys) {
printlnIfVerbose("put()ing 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + ", hashCode=" + Integer.toHexString(key.hashCode()));
map.put(key, key);
}
for (T key : keys) {
check("key: 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + " not found in resulting " + map.getClass().getSimpleName(), map.get(key) != null);
}
}
/* Data to force a non-empty loTree in TreeBin.splitTreeBin() to be converted back
* into an Entry list
*/
private static Object[][] makeLoTreeTestData() {
HashableInteger[] COLLIDING_OBJECTS = new HashableInteger[] {
new HashableInteger( 0x23, HASHMASK),
new HashableInteger( 0x123, HASHMASK),
new HashableInteger( 0x323, HASHMASK),
new HashableInteger( 0x523, HASHMASK),
new HashableInteger( 0x723, HASHMASK),
new HashableInteger( 0x923, HASHMASK),
new HashableInteger( 0xB23, HASHMASK),
new HashableInteger( 0xD23, HASHMASK),
new HashableInteger( 0xF23, HASHMASK),
new HashableInteger( 0xF123, HASHMASK),
new HashableInteger( 0x1023, HASHMASK),
new HashableInteger( 0x1123, HASHMASK),
new HashableInteger( 0x1323, HASHMASK),
new HashableInteger( 0x1523, HASHMASK),
new HashableInteger( 0x1723, HASHMASK),
new HashableInteger( 0x1923, HASHMASK),
new HashableInteger( 0x1B23, HASHMASK),
new HashableInteger( 0x1D23, HASHMASK),
new HashableInteger( 0x3123, HASHMASK),
new HashableInteger( 0x3323, HASHMASK),
new HashableInteger( 0x3523, HASHMASK),
new HashableInteger( 0x3723, HASHMASK),
new HashableInteger( 0x1001, HASHMASK),
new HashableInteger( 0x4001, HASHMASK),
new HashableInteger( 0x1, HASHMASK),
};
return new Object[][] {
new Object[]{"Colliding Objects", COLLIDING_OBJECTS},
};
}
/* Data to force the hiTree in TreeBin.splitTreeBin() to be converted back
* into an Entry list
*/
private static Object[][] makeHiTreeTestData() {
HashableInteger[] COLLIDING_OBJECTS = new HashableInteger[] {
new HashableInteger( 0x1, HASHMASK),
new HashableInteger( 0x101, HASHMASK),
new HashableInteger( 0x301, HASHMASK),
new HashableInteger( 0x501, HASHMASK),
new HashableInteger( 0x701, HASHMASK),
new HashableInteger( 0x1001, HASHMASK),
new HashableInteger( 0x1101, HASHMASK),
new HashableInteger( 0x1301, HASHMASK),
new HashableInteger( 0x1501, HASHMASK),
new HashableInteger( 0x1701, HASHMASK),
new HashableInteger( 0x4001, HASHMASK),
new HashableInteger( 0x4101, HASHMASK),
new HashableInteger( 0x4301, HASHMASK),
new HashableInteger( 0x4501, HASHMASK),
new HashableInteger( 0x4701, HASHMASK),
new HashableInteger( 0x8001, HASHMASK),
new HashableInteger( 0x8101, HASHMASK),
new HashableInteger( 0x8301, HASHMASK),
new HashableInteger( 0x8501, HASHMASK),
new HashableInteger( 0x8701, HASHMASK),
new HashableInteger( 0x9001, HASHMASK),
new HashableInteger( 0x23, HASHMASK),
new HashableInteger( 0x123, HASHMASK),
new HashableInteger( 0x323, HASHMASK),
new HashableInteger( 0x523, HASHMASK),
};
return new Object[][] {
new Object[]{"Colliding Objects", COLLIDING_OBJECTS},
};
}
static void check(String desc, boolean cond) {
if (!cond) {
fail(desc);
}
}
static void fail(String msg) {
failed = true;
(new Error("Failure: " + msg)).printStackTrace(System.err);
if (fastFail) {
System.exit(1);
}
}
static final class HashableInteger implements Comparable<HashableInteger> {
final int value;
final int hashmask; // yes, deliberately duplicated in every instance
HashableInteger(int value, int hashmask) {
this.value = value;
this.hashmask = hashmask;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof HashableInteger) {
HashableInteger other = (HashableInteger) obj;
return other.value == value;
}
return false;
}
@Override
public int hashCode() {
// This version ANDs the mask
return value & hashmask;
}
@Override
public int compareTo(HashableInteger o) {
return value - o.value;
}
@Override
public String toString() {
return Integer.toString(value);
}
}
}
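Why these key sets force one half of the split back to an Entry list can be
worked out from the mask: HashableInteger.hashCode() is value & 0x7F, so
every key in the loTree data hashes to either 0x23 or 0x01. When
splitTreeBin() partitions the bin on a bit that separates those two hash
values, the 0x01 group contributes only 3 entries, well under
TREE_THRESHOLD (16). The sketch below is illustrative only; the split bit
0x20 is an assumption about which resize performs the conversion, not
something the test itself asserts.

public class SplitCountSketch {
    public static void main(String[] args) {
        int[] loTreeValues = {
            0x23, 0x123, 0x323, 0x523, 0x723, 0x923, 0xB23, 0xD23, 0xF23,
            0xF123, 0x1023, 0x1123, 0x1323, 0x1523, 0x1723, 0x1923, 0x1B23,
            0x1D23, 0x3123, 0x3323, 0x3523, 0x3723, 0x1001, 0x4001, 0x1
        };
        int lo = 0, hi = 0;
        for (int v : loTreeValues) {
            int hash = v & 0x7F;            // HashableInteger.hashCode()
            if ((hash & 0x20) == 0) lo++;   // assumed split bit
            else hi++;
        }
        // Prints lo = 3, hi = 22: the three keys hashing to 0x01 form a
        // lo list far smaller than TREE_THRESHOLD, so it is untreeified.
        System.out.println("lo = " + lo + ", hi = " + hi);
    }
}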
@@ -23,7 +23,7 @@
 /**
  * @test
- * @bug 8020156 8020009 8022326
+ * @bug 8020156 8020009 8022326 8012913
  * @run testng SpliteratorCharacteristics
  */

@@ -32,6 +32,10 @@ import org.testng.annotations.Test;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.SortedMap;

@@ -47,7 +51,27 @@ import static org.testng.Assert.*;
 @Test
 public class SpliteratorCharacteristics {

-    // TreeMap
+    public void testHashMap() {
+        assertMapCharacteristics(new HashMap<>(),
+                                 Spliterator.SIZED | Spliterator.DISTINCT);
+    }
+
+    public void testHashSet() {
+        assertSetCharacteristics(new HashSet<>(),
+                                 Spliterator.SIZED | Spliterator.DISTINCT);
+    }
+
+    public void testLinkedHashMap() {
+        assertMapCharacteristics(new LinkedHashMap<>(),
+                                 Spliterator.SIZED | Spliterator.DISTINCT |
+                                 Spliterator.ORDERED);
+    }
+
+    public void testLinkedHashSet() {
+        assertSetCharacteristics(new LinkedHashSet<>(),
+                                 Spliterator.SIZED | Spliterator.DISTINCT |
+                                 Spliterator.ORDERED);
+    }

     public void testTreeMap() {
         assertSortedMapCharacteristics(new TreeMap<>(),

@@ -61,9 +85,6 @@ public class SpliteratorCharacteristics {
                                        Spliterator.SORTED | Spliterator.ORDERED);
     }

-    // TreeSet
-
     public void testTreeSet() {
         assertSortedSetCharacteristics(new TreeSet<>(),
                                        Spliterator.SIZED | Spliterator.DISTINCT |

@@ -76,9 +97,6 @@ public class SpliteratorCharacteristics {
                                        Spliterator.SORTED | Spliterator.ORDERED);
     }

-    // ConcurrentSkipListMap
-
     public void testConcurrentSkipListMap() {
         assertSortedMapCharacteristics(new ConcurrentSkipListMap<>(),
                                        Spliterator.CONCURRENT | Spliterator.NONNULL |

@@ -93,9 +111,6 @@ public class SpliteratorCharacteristics {
                                        Spliterator.ORDERED);
     }

-    // ConcurrentSkipListSet
-
     public void testConcurrentSkipListSet() {
         assertSortedSetCharacteristics(new ConcurrentSkipListSet<>(),
                                        Spliterator.CONCURRENT | Spliterator.NONNULL |

@@ -113,35 +128,58 @@ public class SpliteratorCharacteristics {
     //

-    void assertSortedMapCharacteristics(SortedMap<Integer, String> m, int keyCharacteristics) {
+    void assertMapCharacteristics(Map<Integer, String> m, int keyCharacteristics) {
+        assertMapCharacteristics(m, keyCharacteristics, 0);
+    }
+
+    void assertMapCharacteristics(Map<Integer, String> m, int keyCharacteristics, int notValueCharacteristics) {
         initMap(m);
-        boolean hasComparator = m.comparator() != null;
+        assertCharacteristics(m.keySet(), keyCharacteristics);
+        assertCharacteristics(m.values(),
+                              keyCharacteristics & ~(Spliterator.DISTINCT | notValueCharacteristics));
+        assertCharacteristics(m.entrySet(), keyCharacteristics);
+        if ((keyCharacteristics & Spliterator.SORTED) == 0) {
+            assertISEComparator(m.keySet());
+            assertISEComparator(m.values());
+            assertISEComparator(m.entrySet());
+        }
+    }
+
+    void assertSetCharacteristics(Set<Integer> s, int keyCharacteristics) {
+        initSet(s);
+        assertCharacteristics(s, keyCharacteristics);
+        if ((keyCharacteristics & Spliterator.SORTED) == 0) {
+            assertISEComparator(s);
+        }
+    }
+
+    void assertSortedMapCharacteristics(SortedMap<Integer, String> m, int keyCharacteristics) {
+        assertMapCharacteristics(m, keyCharacteristics, Spliterator.SORTED);

         Set<Integer> keys = m.keySet();
-        assertCharacteristics(keys, keyCharacteristics);
-        if (hasComparator) {
+        if (m.comparator() != null) {
             assertNotNullComparator(keys);
         }
         else {
             assertNullComparator(keys);
         }

-        assertCharacteristics(m.values(),
-                              keyCharacteristics & ~(Spliterator.DISTINCT | Spliterator.SORTED));
         assertISEComparator(m.values());

-        assertCharacteristics(m.entrySet(), keyCharacteristics);
         assertNotNullComparator(m.entrySet());
     }

     void assertSortedSetCharacteristics(SortedSet<Integer> s, int keyCharacteristics) {
-        initSet(s);
-        boolean hasComparator = s.comparator() != null;
-
-        assertCharacteristics(s, keyCharacteristics);
-        if (hasComparator) {
+        assertSetCharacteristics(s, keyCharacteristics);
+
+        if (s.comparator() != null) {
             assertNotNullComparator(s);
         }
         else {

@@ -161,27 +199,18 @@ public class SpliteratorCharacteristics {
     }

     void assertCharacteristics(Collection<?> c, int expectedCharacteristics) {
-        assertCharacteristics(c.spliterator(), expectedCharacteristics);
-    }
-
-    void assertCharacteristics(Spliterator<?> s, int expectedCharacteristics) {
-        assertTrue(s.hasCharacteristics(expectedCharacteristics));
+        assertTrue(c.spliterator().hasCharacteristics(expectedCharacteristics),
+                   "Spliterator characteristics");
     }

     void assertNullComparator(Collection<?> c) {
-        assertNullComparator(c.spliterator());
-    }
-
-    void assertNullComparator(Spliterator<?> s) {
-        assertNull(s.getComparator());
+        assertNull(c.spliterator().getComparator(),
+                   "Comparator of Spliterator of Collection");
     }

     void assertNotNullComparator(Collection<?> c) {
-        assertNotNullComparator(c.spliterator());
-    }
-
-    void assertNotNullComparator(Spliterator<?> s) {
-        assertNotNull(s.getComparator());
+        assertNotNull(c.spliterator().getComparator(),
+                      "Comparator of Spliterator of Collection");
     }

     void assertISEComparator(Collection<?> c) {

@@ -196,6 +225,6 @@ public class SpliteratorCharacteristics {
         catch (IllegalStateException e) {
             caught = true;
         }
-        assertTrue(caught);
+        assertTrue(caught, "Throwing IllegalStateException");
     }
 }
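What the new ORDERED assertions (8012913) mean for client code can be seen
with a minimal standalone sketch, not part of the commit (the class name is
invented): a LinkedHashMap key-set spliterator reports ORDERED in addition
to SIZED and DISTINCT, while a HashMap one does not.

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Spliterator;

public class OrderedCharacteristicDemo {
    public static void main(String[] args) {
        Map<Integer, String> hashMap = new HashMap<>();
        Map<Integer, String> linkedHashMap = new LinkedHashMap<>();
        hashMap.put(1, "a");
        linkedHashMap.put(1, "a");

        Spliterator<Integer> hs = hashMap.keySet().spliterator();
        Spliterator<Integer> ls = linkedHashMap.keySet().spliterator();

        // HashMap: sized and distinct, but no encounter order
        System.out.println(hs.hasCharacteristics(
                Spliterator.SIZED | Spliterator.DISTINCT));              // true
        System.out.println(hs.hasCharacteristics(Spliterator.ORDERED)); // false

        // LinkedHashMap: additionally reports ORDERED
        System.out.println(ls.hasCharacteristics(
                Spliterator.SIZED | Spliterator.DISTINCT
                | Spliterator.ORDERED));                                 // true
    }
}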