Commit e57821b6 authored by psandoz

8023463: Improvements to HashMap/LinkedHashMap use of bins/buckets and trees (red/black)

8012913: LinkedHashMap key/value/entry spliterators should report ORDERED
Reviewed-by: mduigou, forax, bchristi, alanb
Contributed-by: Doug Lea <dl@cs.oswego.edu>, Paul Sandoz <paul.sandoz@oracle.com>
Parent a576565e
@@ -25,13 +25,14 @@
package java.util;
import java.io.*;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.concurrent.ThreadLocalRandom;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.Function;
/**
@@ -63,20 +64,25 @@ import java.util.function.Function;
* structures are rebuilt) so that the hash table has approximately twice the
* number of buckets.
*
* <p>As a general rule, the default load factor (.75) offers a good tradeoff
* between time and space costs. Higher values decrease the space overhead
* but increase the lookup cost (reflected in most of the operations of the
* <tt>HashMap</tt> class, including <tt>get</tt> and <tt>put</tt>). The
* expected number of entries in the map and its load factor should be taken
* into account when setting its initial capacity, so as to minimize the
* number of rehash operations. If the initial capacity is greater
* than the maximum number of entries divided by the load factor, no
* rehash operations will ever occur.
* <p>As a general rule, the default load factor (.75) offers a good
* tradeoff between time and space costs. Higher values decrease the
* space overhead but increase the lookup cost (reflected in most of
* the operations of the <tt>HashMap</tt> class, including
* <tt>get</tt> and <tt>put</tt>). The expected number of entries in
* the map and its load factor should be taken into account when
* setting its initial capacity, so as to minimize the number of
* rehash operations. If the initial capacity is greater than the
* maximum number of entries divided by the load factor, no rehash
* operations will ever occur.
*
* <p>If many mappings are to be stored in a <tt>HashMap</tt> instance,
* creating it with a sufficiently large capacity will allow the mappings to
* be stored more efficiently than letting it perform automatic rehashing as
* needed to grow the table.
* <p>If many mappings are to be stored in a <tt>HashMap</tt>
* instance, creating it with a sufficiently large capacity will allow
* the mappings to be stored more efficiently than letting it perform
* automatic rehashing as needed to grow the table. Note that using
* many keys with the same {@code hashCode()} is a sure way to slow
* down performance of any hash table. To ameliorate impact, when keys
* are {@link Comparable}, this class may use comparison order among
* keys to help break ties.
*
* <p><strong>Note that this implementation is not synchronized.</strong>
* If multiple threads access a hash map concurrently, and at least one of
@@ -128,11 +134,100 @@ import java.util.function.Function;
* @see Hashtable
* @since 1.2
*/
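To make the sizing guidance in the class javadoc above concrete: a minimal, hypothetical sizing helper (not part of this patch; names are invented for illustration) showing how an expected entry count translates into an initial capacity that avoids rehashing under the default load factor.

import java.util.HashMap;
import java.util.Map;

public class PresizedMapExample {
    // Smallest capacity such that `expected` insertions never trigger a resize,
    // per the invariant above: no rehash occurs while size <= capacity * loadFactor.
    static int capacityFor(int expected, float loadFactor) {
        return (int) Math.ceil(expected / (double) loadFactor);
    }

    public static void main(String[] args) {
        int expected = 1000;
        // 1000 / 0.75 = 1334; HashMap rounds this up internally to the
        // next power of two (2048), whose threshold 1536 >= 1000.
        Map<String, Integer> map = new HashMap<>(capacityFor(expected, 0.75f));
        for (int i = 0; i < expected; i++)
            map.put("key" + i, i);        // no rehashing during these puts
        System.out.println(map.size());   // 1000
    }
}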
public class HashMap<K,V> extends AbstractMap<K,V>
implements Map<K,V>, Cloneable, Serializable {
private static final long serialVersionUID = 362498820763181265L;
public class HashMap<K,V>
extends AbstractMap<K,V>
implements Map<K,V>, Cloneable, Serializable
{
/*
* Implementation notes.
*
* This map usually acts as a binned (bucketed) hash table, but
* when bins get too large, they are transformed into bins of
* TreeNodes, each structured similarly to those in
* java.util.TreeMap. Most methods try to use normal bins, but
* relay to TreeNode methods when applicable (simply by checking
* instanceof a node). Bins of TreeNodes may be traversed and
* used like any others, but additionally support faster lookup
* when overpopulated. However, since the vast majority of bins in
* normal use are not overpopulated, checking for existence of
* tree bins may be delayed in the course of table methods.
*
* Tree bins (i.e., bins whose elements are all TreeNodes) are
* ordered primarily by hashCode, but in the case of ties, if two
* elements are of the same type "class C implements Comparable<C>",
* then their compareTo method is used for ordering. (We
* conservatively check generic types via reflection to validate
* this -- see method comparableClassFor). The added complexity
* of tree bins is worthwhile in providing worst-case O(log n)
* operations when keys either have distinct hashes or are
* orderable. Thus, performance degrades gracefully under
* accidental or malicious usages in which hashCode() methods
* return values that are poorly distributed, as well as those in
* which many keys share a hashCode, so long as they are also
* Comparable. (If neither of these apply, we may waste about a
* factor of two in time and space compared to taking no
* precautions. But the only known cases stem from poor user
* programming practices that are already so slow that this makes
* little difference.)
*
* Because TreeNodes are about twice the size of regular nodes, we
* use them only when bins contain enough nodes to warrant use
* (see TREEIFY_THRESHOLD). And when they become too small (due to
* removal or resizing) they are converted back to plain bins. In
* usages with well-distributed user hashCodes, tree bins are
* rarely used. Ideally, under random hashCodes, the frequency of
* nodes in bins follows a Poisson distribution
* (http://en.wikipedia.org/wiki/Poisson_distribution) with a
* parameter of about 0.5 on average for the default resizing
* threshold of 0.75, although with a large variance because of
* resizing granularity. Ignoring variance, the expected
* occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
* factorial(k)). The first values are:
*
* 0: 0.60653066
* 1: 0.30326533
* 2: 0.07581633
* 3: 0.01263606
* 4: 0.00157952
* 5: 0.00015795
* 6: 0.00001316
* 7: 0.00000094
* 8: 0.00000006
* more: less than 1 in ten million
*
* The root of a tree bin is normally its first node. However,
* sometimes (currently only upon Iterator.remove), the root might
* be elsewhere, but can be recovered following parent links
* (method TreeNode.root()).
*
* All applicable internal methods accept a hash code as an
* argument (as normally supplied from a public method), allowing
* them to call each other without recomputing user hashCodes.
* Most internal methods also accept a "tab" argument, that is
* normally the current table, but may be a new or old one when
* resizing or converting.
*
* When bin lists are treeified, split, or untreeified, we keep
* them in the same relative access/traversal order (i.e., field
* Node.next) to better preserve locality, and to slightly
* simplify handling of splits and traversals that invoke
* iterator.remove. When using comparators on insertion, to keep a
* total ordering (or as close as is required here) across
* rebalancings, we compare classes and identityHashCodes as
* tie-breakers.
*
* The use and transitions among plain vs tree modes is
* complicated by the existence of subclass LinkedHashMap. See
* below for hook methods defined to be invoked upon insertion,
* removal and access that allow LinkedHashMap internals to
* otherwise remain independent of these mechanics. (This also
* requires that a map instance be passed to some utility methods
* that may create new nodes.)
*
* The concurrent-programming-like SSA-based coding style helps
* avoid aliasing errors amid all of the twisty pointer operations.
*/
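As a cross-check of the Poisson figures quoted in the implementation notes above, a minimal standalone sketch (not part of the patch) that reproduces the expected bin-size frequencies for the parameter 0.5:

public class PoissonBinSizes {
    public static void main(String[] args) {
        double lambda = 0.5;              // average nodes per bin at the default threshold
        double p = Math.exp(-lambda);     // P(k = 0) = exp(-0.5) = 0.60653066
        for (int k = 0; k <= 8; k++) {
            System.out.printf("%d: %.8f%n", k, p);
            p = p * lambda / (k + 1);     // P(k+1) = P(k) * lambda / (k+1)
        }
    }
}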
/**
* The default initial capacity - MUST be a power of two.
@@ -152,140 +247,111 @@ public class HashMap<K,V>
static final float DEFAULT_LOAD_FACTOR = 0.75f;
/**
* An empty table instance to share when the table is not inflated.
*/
static final Object[] EMPTY_TABLE = {};
/**
* The table, resized as necessary. Length MUST Always be a power of two.
*/
transient Object[] table = EMPTY_TABLE;
/**
* The number of key-value mappings contained in this map.
*/
transient int size;
/**
* The next size value at which to resize (capacity * load factor).
* @serial
* The bin count threshold for using a tree rather than list for a
* bin. Bins are converted to trees when adding an element to a
* bin with at least this many nodes. The value must be greater
* than 2 and should be at least 8 to mesh with assumptions in
* tree removal about conversion back to plain bins upon
* shrinkage.
*/
// If table == EMPTY_TABLE then this is the initial capacity at which the
// table will be created when inflated.
int threshold;
static final int TREEIFY_THRESHOLD = 8;
/**
* The load factor for the hash table.
*
* @serial
* The bin count threshold for untreeifying a (split) bin during a
* resize operation. Should be less than TREEIFY_THRESHOLD, and at
* most 6 to mesh with shrinkage detection under removal.
*/
final float loadFactor;
static final int UNTREEIFY_THRESHOLD = 6;
/**
* The number of times this HashMap has been structurally modified.
* Structural modifications are those that change the number of mappings in
* the HashMap or otherwise modify its internal structure (e.g.,
* rehash). This field is used to make iterators on Collection-views of
* the HashMap fail-fast. (See ConcurrentModificationException).
* The smallest table capacity for which bins may be treeified.
* (Otherwise the table is resized if too many nodes in a bin.)
* Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts
* between resizing and treeification thresholds.
*/
transient int modCount;
static final int MIN_TREEIFY_CAPACITY = 64;
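The interplay of TREEIFY_THRESHOLD and MIN_TREEIFY_CAPACITY can be summarized in a small decision sketch. This is illustrative only, mirroring the checks that putVal and treeifyBin perform, not code from the patch (untreeifying on shrink, governed by UNTREEIFY_THRESHOLD, is the reverse step):

public class TreeifyDecision {
    static final int TREEIFY_THRESHOLD = 8;
    static final int MIN_TREEIFY_CAPACITY = 64;

    // What happens when a bin in a table of the given capacity reaches binCount nodes.
    static String onInsert(int tableCapacity, int binCount) {
        if (binCount < TREEIFY_THRESHOLD)
            return "stay a linked list";
        // Small tables resize instead of treeifying: collisions there are
        // likely an artifact of the tiny table, not of bad hash codes.
        if (tableCapacity < MIN_TREEIFY_CAPACITY)
            return "resize the table";
        return "convert the bin to a red-black tree";
    }

    public static void main(String[] args) {
        System.out.println(onInsert(16, 8));   // resize the table
        System.out.println(onInsert(64, 8));   // convert the bin to a red-black tree
        System.out.println(onInsert(64, 5));   // stay a linked list
    }
}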
/**
* Holds values which can't be initialized until after VM is booted.
* Basic hash bin node, used for most entries. (See below for
* TreeNode subclass, and in LinkedHashMap for its Entry subclass.)
*/
private static class Holder {
static final sun.misc.Unsafe UNSAFE;
static class Node<K,V> implements Map.Entry<K,V> {
final int hash;
final K key;
V value;
Node<K,V> next;
/**
* Offset of "final" hashSeed field we must set in
* readObject() method.
*/
static final long HASHSEED_OFFSET;
Node(int hash, K key, V value, Node<K,V> next) {
this.hash = hash;
this.key = key;
this.value = value;
this.next = next;
}
static final boolean USE_HASHSEED;
public final K getKey() { return key; }
public final V getValue() { return value; }
public final String toString() { return key + "=" + value; }
static {
String hashSeedProp = java.security.AccessController.doPrivileged(
new sun.security.action.GetPropertyAction(
"jdk.map.useRandomSeed"));
boolean localBool = (null != hashSeedProp)
? Boolean.parseBoolean(hashSeedProp) : false;
USE_HASHSEED = localBool;
public final int hashCode() {
return Objects.hashCode(key) ^ Objects.hashCode(value);
}
if (USE_HASHSEED) {
try {
UNSAFE = sun.misc.Unsafe.getUnsafe();
HASHSEED_OFFSET = UNSAFE.objectFieldOffset(
HashMap.class.getDeclaredField("hashSeed"));
} catch (NoSuchFieldException | SecurityException e) {
throw new InternalError("Failed to record hashSeed offset", e);
public final V setValue(V newValue) {
V oldValue = value;
value = newValue;
return oldValue;
}
} else {
UNSAFE = null;
HASHSEED_OFFSET = 0;
public final boolean equals(Object o) {
if (o == this)
return true;
if (o instanceof Map.Entry) {
Map.Entry<?,?> e = (Map.Entry<?,?>)o;
if (Objects.equals(key, e.getKey()) &&
Objects.equals(value, e.getValue()))
return true;
}
return false;
}
}
/*
* A randomizing value associated with this instance that is applied to
* hash code of keys to make hash collisions harder to find.
*
* Non-final so it can be set lazily, but be sure not to set more than once.
*/
transient final int hashSeed;
/*
* TreeBin/TreeNode code from CHM doesn't handle the null key. Store the
* null key entry here.
*/
transient Entry<K,V> nullKeyEntry = null;
/*
* In order to improve performance under high hash-collision conditions,
* HashMap will switch to storing a bin's entries in a balanced tree
* (TreeBin) instead of a linked-list once the number of entries in the bin
* passes a certain threshold (TreeBin.TREE_THRESHOLD), if at least one of
* the keys in the bin implements Comparable. This technique is borrowed
* from ConcurrentHashMap.
*/
/*
* Code based on CHMv8
*
* Node type for TreeBin
*/
final static class TreeNode<K,V> {
TreeNode parent; // red-black tree links
TreeNode left;
TreeNode right;
TreeNode prev; // needed to unlink next upon deletion
boolean red;
final HashMap.Entry<K,V> entry;
/* ---------------- Static utilities -------------- */
TreeNode(HashMap.Entry<K,V> entry, Object next, TreeNode parent) {
this.entry = entry;
this.entry.next = next;
this.parent = parent;
}
/**
* Computes key.hashCode() and spreads (XORs) higher bits of hash
* to lower. Because the table uses power-of-two masking, sets of
* hashes that vary only in bits above the current mask will
* always collide. (Among known examples are sets of Float keys
* holding consecutive whole numbers in small tables.) So we
* apply a transform that spreads the impact of higher bits
* downward. There is a tradeoff between speed, utility, and
* quality of bit-spreading. Because many common sets of hashes
* are already reasonably distributed (so don't benefit from
* spreading), and because we use trees to handle large sets of
* collisions in bins, we just XOR some shifted bits in the
* cheapest possible way to reduce systematic lossage, as well as
* to incorporate impact of the highest bits that would otherwise
* never be used in index calculations because of table bounds.
*/
static final int hash(Object key) {
int h;
return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
}
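A quick illustration of why the shift-XOR above matters: with a power-of-two table only the low bits of the hash select a bucket, so hashes that differ only in high bits would otherwise always collide. A minimal sketch (not part of the patch) contrasting raw and spread hashes:

public class HashSpreadDemo {
    static int spread(int h) { return h ^ (h >>> 16); }  // same transform as hash() above

    public static void main(String[] args) {
        int mask = 15;                   // index mask for a 16-bucket table
        int h1 = 0x10000, h2 = 0x20000;  // differ only above bit 15
        System.out.println((h1 & mask) + " " + (h2 & mask));                  // 0 0: collision
        System.out.println((spread(h1) & mask) + " " + (spread(h2) & mask));  // 1 2: spread apart
    }
}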
/**
* Returns a Class for the given object of the form "class C
* implements Comparable<C>", if one exists, else null. See the TreeBin
* docs, below, for explanation.
* Returns x's Class if it is of the form "class C implements
* Comparable<C>", else null.
*/
static Class<?> comparableClassFor(Object x) {
Class<?> c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p;
if (x instanceof Comparable) {
Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
if ((c = x.getClass()) == String.class) // bypass checks
return c;
if ((cmpc = Comparable.class).isAssignableFrom(c)) {
while (cmpc.isAssignableFrom(s = c.getSuperclass()))
c = s; // find topmost comparable class
if ((ts = c.getGenericInterfaces()) != null) {
for (int i = 0; i < ts.length; ++i) {
if (((t = ts[i]) instanceof ParameterizedType) &&
((p = (ParameterizedType)t).getRawType() == cmpc) &&
((p = (ParameterizedType)t).getRawType() ==
Comparable.class) &&
(as = p.getActualTypeArguments()) != null &&
as.length == 1 && as[0] == c) // type arg is c
return c;
@@ -295,1899 +361,931 @@ public class HashMap<K,V>
return null;
}
/*
* Code based on CHMv8
*
* A specialized form of red-black tree for use in bins
* whose size exceeds a threshold.
*
* TreeBins use a special form of comparison for search and
* related operations (which is the main reason we cannot use
* existing collections such as TreeMaps). TreeBins contain
* Comparable elements, but may contain others, as well as
* elements that are Comparable but not necessarily Comparable<T>
* for the same T, so we cannot invoke compareTo among them. To
* handle this, the tree is ordered primarily by hash value, then
* by Comparable.compareTo order if applicable. On lookup at a
* node, if elements are not comparable or compare as 0 then both
* left and right children may need to be searched in the case of
* tied hash values. (This corresponds to the full list search
* that would be necessary if all elements were non-Comparable and
* had tied hashes.) The red-black balancing code is updated from
* pre-jdk-collections
* (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
* based in turn on Cormen, Leiserson, and Rivest "Introduction to
* Algorithms" (CLR).
/**
* Returns k.compareTo(x) if x matches kc (k's screened comparable
* class), else 0.
*/
final class TreeBin {
/*
* The bin count threshold for using a tree rather than list for a bin. The
* value reflects the approximate break-even point for using tree-based
* operations.
@SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
static int compareComparables(Class<?> kc, Object k, Object x) {
return (x == null || x.getClass() != kc ? 0 :
((Comparable)k).compareTo(x));
}
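Together, comparableClassFor and compareComparables let tree bins order keys that share a hash code. A hedged sketch of the effect, using a hypothetical key class (invented for illustration): keys that always collide on hashCode but implement Comparable for exactly their own class can still be ordered, so lookups in the treeified bin stay O(log n) rather than degrading to a linear scan.

import java.util.HashMap;
import java.util.Map;

public class ComparableTieBreak {
    // Hypothetical worst-case key: every instance has the same hash,
    // but instances are mutually comparable.
    static final class Key implements Comparable<Key> {
        final int id;
        Key(int id) { this.id = id; }
        @Override public int hashCode() { return 42; }
        @Override public boolean equals(Object o) {
            return o instanceof Key && ((Key) o).id == id;
        }
        @Override public int compareTo(Key other) { return Integer.compare(id, other.id); }
    }

    public static void main(String[] args) {
        Map<Key, Integer> map = new HashMap<>();
        for (int i = 0; i < 10_000; i++)
            map.put(new Key(i), i);
        // All entries land in one bin, but because Key is Comparable<Key>
        // the bin is a tree ordered by compareTo, so this lookup is logarithmic.
        System.out.println(map.get(new Key(1234)));  // 1234
    }
}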
/**
* Returns a power of two size for the given target capacity.
*/
static final int TREE_THRESHOLD = 16;
static final int tableSizeFor(int cap) {
int n = cap - 1;
n |= n >>> 1;
n |= n >>> 2;
n |= n >>> 4;
n |= n >>> 8;
n |= n >>> 16;
return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
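The bit-smearing in tableSizeFor rounds any requested capacity up to the next power of two. A few sample values, via a standalone copy of the method (sketch, not part of the patch):

public class TableSizeForDemo {
    static final int MAXIMUM_CAPACITY = 1 << 30;

    // Copy of tableSizeFor above: smear the highest set bit of (cap - 1)
    // into every lower position, then add one.
    static int tableSizeFor(int cap) {
        int n = cap - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    public static void main(String[] args) {
        System.out.println(tableSizeFor(1));    // 1
        System.out.println(tableSizeFor(16));   // 16 (already a power of two)
        System.out.println(tableSizeFor(17));   // 32
        System.out.println(tableSizeFor(1000)); // 1024
    }
}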
TreeNode<K,V> root; // root of tree
TreeNode<K,V> first; // head of next-pointer list
/* ---------------- Fields -------------- */
/*
* Split a TreeBin into lo and hi parts and install in given table.
/**
* The table, initialized on first use, and resized as
* necessary. When allocated, length is always a power of two.
* (We also tolerate length zero in some operations to allow
* bootstrapping mechanics that are currently not needed.)
*/
transient Node<K,V>[] table;
/**
* Holds cached entrySet(). Note that AbstractMap fields are used
* for keySet() and values().
*/
transient Set<Map.Entry<K,V>> entrySet;
/**
* The number of key-value mappings contained in this map.
*/
transient int size;
/**
* The number of times this HashMap has been structurally modified.
* Structural modifications are those that change the number of mappings in
* the HashMap or otherwise modify its internal structure (e.g.,
* rehash). This field is used to make iterators on Collection-views of
* the HashMap fail-fast. (See ConcurrentModificationException).
*/
transient int modCount;
/**
* The next size value at which to resize (capacity * load factor).
*
* @serial
*/
// (The javadoc description is true upon serialization.
// Additionally, if the table array has not been allocated, this
// field holds the initial array capacity, or zero signifying
// DEFAULT_INITIAL_CAPACITY.)
int threshold;
/**
* The load factor for the hash table.
*
* Existing Entrys are re-used, which maintains the before/after links for
* LinkedHashMap.Entry.
* @serial
*/
final float loadFactor;
/* ---------------- Public operations -------------- */
/**
* Constructs an empty <tt>HashMap</tt> with the specified initial
* capacity and load factor.
*
* No check for Comparable, though this is the same as CHM.
* @param initialCapacity the initial capacity
* @param loadFactor the load factor
* @throws IllegalArgumentException if the initial capacity is negative
* or the load factor is nonpositive
*/
final void splitTreeBin(Object[] newTable, int i, TreeBin loTree, TreeBin hiTree) {
TreeBin oldTree = this;
int bit = newTable.length >>> 1;
int loCount = 0, hiCount = 0;
TreeNode<K,V> e = oldTree.first;
TreeNode<K,V> next;
// This method is called when the table has just increased capacity,
// so indexFor() is now taking one additional bit of hash into
// account ("bit"). Entries in this TreeBin now belong in one of
// two bins, "i" or "i+bit", depending on if the new top bit of the
// hash is set. The trees for the two bins are loTree and hiTree.
// If either tree ends up containing fewer than TREE_THRESHOLD
// entries, it is converted back to a linked list.
while (e != null) {
// Save entry.next - it will get overwritten in putTreeNode()
next = (TreeNode<K,V>)e.entry.next;
int h = e.entry.hash;
K k = (K) e.entry.key;
V v = e.entry.value;
if ((h & bit) == 0) {
++loCount;
// Re-using e.entry
loTree.putTreeNode(h, k, v, e.entry);
} else {
++hiCount;
hiTree.putTreeNode(h, k, v, e.entry);
}
// Iterate using the saved 'next'
e = next;
}
if (loCount < TREE_THRESHOLD) { // too small, convert back to list
HashMap.Entry loEntry = null;
TreeNode<K,V> p = loTree.first;
while (p != null) {
@SuppressWarnings("unchecked")
TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next;
p.entry.next = loEntry;
loEntry = p.entry;
p = savedNext;
}
// assert newTable[i] == null;
newTable[i] = loEntry;
} else {
// assert newTable[i] == null;
newTable[i] = loTree;
}
if (hiCount < TREE_THRESHOLD) { // too small, convert back to list
HashMap.Entry hiEntry = null;
TreeNode<K,V> p = hiTree.first;
while (p != null) {
@SuppressWarnings("unchecked")
TreeNode<K,V> savedNext = (TreeNode<K,V>) p.entry.next;
p.entry.next = hiEntry;
hiEntry = p.entry;
p = savedNext;
public HashMap(int initialCapacity, float loadFactor) {
if (initialCapacity < 0)
throw new IllegalArgumentException("Illegal initial capacity: " +
initialCapacity);
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new IllegalArgumentException("Illegal load factor: " +
loadFactor);
this.loadFactor = loadFactor;
this.threshold = tableSizeFor(initialCapacity);
}
// assert newTable[i + bit] == null;
newTable[i + bit] = hiEntry;
} else {
// assert newTable[i + bit] == null;
newTable[i + bit] = hiTree;
/**
* Constructs an empty <tt>HashMap</tt> with the specified initial
* capacity and the default load factor (0.75).
*
* @param initialCapacity the initial capacity.
* @throws IllegalArgumentException if the initial capacity is negative.
*/
public HashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
/**
* Constructs an empty <tt>HashMap</tt> with the default initial capacity
* (16) and the default load factor (0.75).
*/
public HashMap() {
this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted
}
/*
* Populate the TreeBin with entries from the linked list e
*
* Assumes 'this' is a new/empty TreeBin
/**
* Constructs a new <tt>HashMap</tt> with the same mappings as the
* specified <tt>Map</tt>. The <tt>HashMap</tt> is created with
* default load factor (0.75) and an initial capacity sufficient to
* hold the mappings in the specified <tt>Map</tt>.
*
* Note: no check for Comparable
* Note: I believe this changes iteration order
* @param m the map whose mappings are to be placed in this map
* @throws NullPointerException if the specified map is null
*/
@SuppressWarnings("unchecked")
void populate(HashMap.Entry e) {
// assert root == null;
// assert first == null;
HashMap.Entry next;
while (e != null) {
// Save entry.next - it will get overwritten in putTreeNode()
next = (HashMap.Entry)e.next;
// Re-using Entry e will maintain before/after in LinkedHM
putTreeNode(e.hash, (K)e.key, (V)e.value, e);
// Iterate using the saved 'next'
e = next;
}
public HashMap(Map<? extends K, ? extends V> m) {
this.loadFactor = DEFAULT_LOAD_FACTOR;
putMapEntries(m, false);
}
/**
* Copied from CHMv8
* From CLR
* Implements Map.putAll and Map constructor
*
* @param m the map
* @param evict false when initially constructing this map, else
* true (relayed to method afterNodeInsertion).
*/
private void rotateLeft(TreeNode p) {
if (p != null) {
TreeNode r = p.right, pp, rl;
if ((rl = p.right = r.left) != null) {
rl.parent = p;
final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
int s = m.size();
if (s > 0) {
if (table == null) { // pre-size
float ft = ((float)s / loadFactor) + 1.0F;
int t = ((ft < (float)MAXIMUM_CAPACITY) ?
(int)ft : MAXIMUM_CAPACITY);
if (t > threshold)
threshold = tableSizeFor(t);
}
if ((pp = r.parent = p.parent) == null) {
root = r;
} else if (pp.left == p) {
pp.left = r;
} else {
pp.right = r;
else if (s > threshold)
resize();
for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
K key = e.getKey();
V value = e.getValue();
putVal(hash(key), key, value, false, evict);
}
r.left = p;
p.parent = r;
}
}
/**
* Copied from CHMv8
* From CLR
* Returns the number of key-value mappings in this map.
*
* @return the number of key-value mappings in this map
*/
private void rotateRight(TreeNode p) {
if (p != null) {
TreeNode l = p.left, pp, lr;
if ((lr = p.left = l.right) != null) {
lr.parent = p;
}
if ((pp = l.parent = p.parent) == null) {
root = l;
} else if (pp.right == p) {
pp.right = l;
} else {
pp.left = l;
}
l.right = p;
p.parent = l;
}
public int size() {
return size;
}
/**
* Returns the TreeNode (or null if not found) for the given
* key. A front-end for recursive version.
* Returns <tt>true</tt> if this map contains no key-value mappings.
*
* @return <tt>true</tt> if this map contains no key-value mappings
*/
final TreeNode getTreeNode(int h, K k) {
return getTreeNode(h, k, root, comparableClassFor(k));
public boolean isEmpty() {
return size == 0;
}
/**
* Returns the TreeNode (or null if not found) for the given key
* starting at given root.
* Returns the value to which the specified key is mapped,
* or {@code null} if this map contains no mapping for the key.
*
* <p>More formally, if this map contains a mapping from a key
* {@code k} to a value {@code v} such that {@code (key==null ? k==null :
* key.equals(k))}, then this method returns {@code v}; otherwise
* it returns {@code null}. (There can be at most one such mapping.)
*
* <p>A return value of {@code null} does not <i>necessarily</i>
* indicate that the map contains no mapping for the key; it's also
* possible that the map explicitly maps the key to {@code null}.
* The {@link #containsKey containsKey} operation may be used to
* distinguish these two cases.
*
* @see #put(Object, Object)
*/
@SuppressWarnings("unchecked")
final TreeNode getTreeNode (int h, K k, TreeNode p, Class<?> cc) {
// assert k != null;
while (p != null) {
int dir, ph; Object pk;
if ((ph = p.entry.hash) != h)
dir = (h < ph) ? -1 : 1;
else if ((pk = p.entry.key) == k || k.equals(pk))
return p;
else if (cc == null || comparableClassFor(pk) != cc ||
(dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
// assert pk != null;
TreeNode r, pl, pr; // check both sides
if ((pr = p.right) != null &&
(r = getTreeNode(h, k, pr, cc)) != null)
return r;
else if ((pl = p.left) != null)
dir = -1;
else // nothing there
break;
public V get(Object key) {
Node<K,V> e;
return (e = getNode(hash(key), key)) == null ? null : e.value;
}
/**
* Implements Map.get and related methods
*
* @param hash hash for key
* @param key the key
* @return the node, or null if none
*/
final Node<K,V> getNode(int hash, Object key) {
Node<K,V>[] tab; Node<K,V> first, e; int n; K k;
if ((tab = table) != null && (n = tab.length) > 0 &&
(first = tab[(n - 1) & hash]) != null) {
if (first.hash == hash && // always check first node
((k = first.key) == key || (key != null && key.equals(k))))
return first;
if ((e = first.next) != null) {
if (first instanceof TreeNode)
return ((TreeNode<K,V>)first).getTreeNode(hash, key);
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))
return e;
} while ((e = e.next) != null);
}
p = (dir > 0) ? p.right : p.left;
}
return null;
}
/*
* Finds or adds a node.
*
* 'entry' should be used to recycle an existing Entry (e.g. in the case
* of converting a linked-list bin to a TreeBin).
* If entry is null, a new Entry will be created for the new TreeNode
/**
* Returns <tt>true</tt> if this map contains a mapping for the
* specified key.
*
* @return the TreeNode containing the mapping, or null if a new
* TreeNode was added
* @param key The key whose presence in this map is to be tested
* @return <tt>true</tt> if this map contains a mapping for the specified
* key.
*/
@SuppressWarnings("unchecked")
TreeNode putTreeNode(int h, K k, V v, HashMap.Entry<K,V> entry) {
// assert k != null;
//if (entry != null) {
// assert h == entry.hash;
// assert k == entry.key;
// assert v == entry.value;
// }
Class<?> cc = comparableClassFor(k);
TreeNode pp = root, p = null;
int dir = 0;
while (pp != null) { // find existing node or leaf to insert at
int ph; Object pk;
p = pp;
if ((ph = p.entry.hash) != h)
dir = (h < ph) ? -1 : 1;
else if ((pk = p.entry.key) == k || k.equals(pk))
return p;
else if (cc == null || comparableClassFor(pk) != cc ||
(dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
TreeNode r, pr;
if ((pr = p.right) != null &&
(r = getTreeNode(h, k, pr, cc)) != null)
return r;
else // continue left
dir = -1;
}
pp = (dir > 0) ? p.right : p.left;
public boolean containsKey(Object key) {
return getNode(hash(key), key) != null;
}
// Didn't find the mapping in the tree, so add it
TreeNode f = first;
TreeNode x;
if (entry != null) {
x = new TreeNode(entry, f, p);
} else {
x = new TreeNode(newEntry(h, k, v, null), f, p);
/**
* Associates the specified value with the specified key in this map.
* If the map previously contained a mapping for the key, the old
* value is replaced.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with <tt>key</tt>, or
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
* (A <tt>null</tt> return can also indicate that the map
* previously associated <tt>null</tt> with <tt>key</tt>.)
*/
public V put(K key, V value) {
return putVal(hash(key), key, value, false, true);
}
first = x;
if (p == null) {
root = x;
} else { // attach and rebalance; adapted from CLR
TreeNode xp, xpp;
if (f != null) {
f.prev = x;
/**
* Implements Map.put and related methods
*
* @param hash hash for key
* @param key the key
* @param value the value to put
* @param onlyIfAbsent if true, don't change existing value
* @param evict if false, the table is in creation mode.
* @return previous value, or null if none
*/
final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
boolean evict) {
Node<K,V>[] tab; Node<K,V> p; int n, i;
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
n = (tab = resize()).length;
if ((p = tab[i = (n - 1) & hash]) == null)
tab[i] = newNode(hash, key, value, null);
else {
Node<K,V> e; K k;
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
e = p;
else if (p instanceof TreeNode)
e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
else {
for (int binCount = 0; ; ++binCount) {
if ((e = p.next) == null) {
p.next = newNode(hash, key, value, null);
if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
treeifyBin(tab, hash);
break;
}
if (dir <= 0) {
p.left = x;
} else {
p.right = x;
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k))))
break;
p = e;
}
x.red = true;
while (x != null && (xp = x.parent) != null && xp.red
&& (xpp = xp.parent) != null) {
TreeNode xppl = xpp.left;
if (xp == xppl) {
TreeNode y = xpp.right;
if (y != null && y.red) {
y.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
} else {
if (x == xp.right) {
rotateLeft(x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
rotateRight(xpp);
}
}
}
} else {
TreeNode y = xppl;
if (y != null && y.red) {
y.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
} else {
if (x == xp.left) {
rotateRight(x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
rotateLeft(xpp);
}
}
}
}
}
TreeNode r = root;
if (r != null && r.red) {
r.red = false;
}
}
return null;
}
/*
* From CHMv8
*
* Removes the given node, that must be present before this
* call. This is messier than typical red-black deletion code
* because we cannot swap the contents of an interior node
* with a leaf successor that is pinned by "next" pointers
* that are accessible independently of lock. So instead we
* swap the tree linkages.
*/
final void deleteTreeNode(TreeNode p) {
TreeNode next = (TreeNode) p.entry.next; // unlink traversal pointers
TreeNode pred = p.prev;
if (pred == null) {
first = next;
} else {
pred.entry.next = next;
}
if (next != null) {
next.prev = pred;
}
TreeNode replacement;
TreeNode pl = p.left;
TreeNode pr = p.right;
if (pl != null && pr != null) {
TreeNode s = pr, sl;
while ((sl = s.left) != null) // find successor
{
s = sl;
}
boolean c = s.red;
s.red = p.red;
p.red = c; // swap colors
TreeNode sr = s.right;
TreeNode pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
} else {
TreeNode sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left) {
sp.left = p;
} else {
sp.right = p;
}
}
if ((s.right = pr) != null) {
pr.parent = s;
}
}
p.left = null;
if ((p.right = sr) != null) {
sr.parent = p;
}
if ((s.left = pl) != null) {
pl.parent = s;
}
if ((s.parent = pp) == null) {
root = s;
} else if (p == pp.left) {
pp.left = s;
} else {
pp.right = s;
}
replacement = sr;
} else {
replacement = (pl != null) ? pl : pr;
}
TreeNode pp = p.parent;
if (replacement == null) {
if (pp == null) {
root = null;
return;
}
replacement = p;
} else {
replacement.parent = pp;
if (pp == null) {
root = replacement;
} else if (p == pp.left) {
pp.left = replacement;
} else {
pp.right = replacement;
}
p.left = p.right = p.parent = null;
}
if (!p.red) { // rebalance, from CLR
TreeNode x = replacement;
while (x != null) {
TreeNode xp, xpl;
if (x.red || (xp = x.parent) == null) {
x.red = false;
break;
}
if (x == (xpl = xp.left)) {
TreeNode sib = xp.right;
if (sib != null && sib.red) {
sib.red = false;
xp.red = true;
rotateLeft(xp);
sib = (xp = x.parent) == null ? null : xp.right;
}
if (sib == null) {
x = xp;
} else {
TreeNode sl = sib.left, sr = sib.right;
if ((sr == null || !sr.red)
&& (sl == null || !sl.red)) {
sib.red = true;
x = xp;
} else {
if (sr == null || !sr.red) {
if (sl != null) {
sl.red = false;
}
sib.red = true;
rotateRight(sib);
sib = (xp = x.parent) == null ?
null : xp.right;
}
if (sib != null) {
sib.red = (xp == null) ? false : xp.red;
if ((sr = sib.right) != null) {
sr.red = false;
}
}
if (xp != null) {
xp.red = false;
rotateLeft(xp);
}
x = root;
}
}
} else { // symmetric
TreeNode sib = xpl;
if (sib != null && sib.red) {
sib.red = false;
xp.red = true;
rotateRight(xp);
sib = (xp = x.parent) == null ? null : xp.left;
}
if (sib == null) {
x = xp;
} else {
TreeNode sl = sib.left, sr = sib.right;
if ((sl == null || !sl.red)
&& (sr == null || !sr.red)) {
sib.red = true;
x = xp;
} else {
if (sl == null || !sl.red) {
if (sr != null) {
sr.red = false;
}
sib.red = true;
rotateLeft(sib);
sib = (xp = x.parent) == null ?
null : xp.left;
}
if (sib != null) {
sib.red = (xp == null) ? false : xp.red;
if ((sl = sib.left) != null) {
sl.red = false;
}
}
if (xp != null) {
xp.red = false;
rotateRight(xp);
}
x = root;
}
}
}
}
}
if (p == replacement && (pp = p.parent) != null) {
if (p == pp.left) // detach pointers
{
pp.left = null;
} else if (p == pp.right) {
pp.right = null;
}
p.parent = null;
}
}
}
/**
* Constructs an empty <tt>HashMap</tt> with the specified initial
* capacity and load factor.
*
* @param initialCapacity the initial capacity
* @param loadFactor the load factor
* @throws IllegalArgumentException if the initial capacity is negative
* or the load factor is nonpositive
*/
public HashMap(int initialCapacity, float loadFactor) {
if (initialCapacity < 0)
throw new IllegalArgumentException("Illegal initial capacity: " +
initialCapacity);
if (initialCapacity > MAXIMUM_CAPACITY)
initialCapacity = MAXIMUM_CAPACITY;
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new IllegalArgumentException("Illegal load factor: " +
loadFactor);
this.loadFactor = loadFactor;
threshold = initialCapacity;
hashSeed = initHashSeed();
init();
}
/**
* Constructs an empty <tt>HashMap</tt> with the specified initial
* capacity and the default load factor (0.75).
*
* @param initialCapacity the initial capacity.
* @throws IllegalArgumentException if the initial capacity is negative.
*/
public HashMap(int initialCapacity) {
this(initialCapacity, DEFAULT_LOAD_FACTOR);
}
/**
* Constructs an empty <tt>HashMap</tt> with the default initial capacity
* (16) and the default load factor (0.75).
*/
public HashMap() {
this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
}
/**
* Constructs a new <tt>HashMap</tt> with the same mappings as the
* specified <tt>Map</tt>. The <tt>HashMap</tt> is created with
* default load factor (0.75) and an initial capacity sufficient to
* hold the mappings in the specified <tt>Map</tt>.
*
* @param m the map whose mappings are to be placed in this map
* @throws NullPointerException if the specified map is null
*/
public HashMap(Map<? extends K, ? extends V> m) {
this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR);
inflateTable(threshold);
putAllForCreate(m);
// assert size == m.size();
}
private static int roundUpToPowerOf2(int number) {
// assert number >= 0 : "number must be non-negative";
return number >= MAXIMUM_CAPACITY
? MAXIMUM_CAPACITY
: (number > 1) ? Integer.highestOneBit((number - 1) << 1) : 1;
}
/**
* Inflates the table.
*/
private void inflateTable(int toSize) {
// Find a power of 2 >= toSize
int capacity = roundUpToPowerOf2(toSize);
threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1);
table = new Object[capacity];
}
// internal utilities
/**
* Initialization hook for subclasses. This method is called
* in all constructors and pseudo-constructors (clone, readObject)
* after HashMap has been initialized but before any entries have
* been inserted. (In the absence of this method, readObject would
* require explicit knowledge of subclasses.)
*/
void init() {
}
/**
* Return an initial value for the hashSeed, or 0 if the random seed is not
* enabled.
*/
final int initHashSeed() {
if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) {
int seed = ThreadLocalRandom.current().nextInt();
return (seed != 0) ? seed : 1;
}
return 0;
}
/**
* Retrieves the object's hash code and applies a supplemental hash function
* to the result, which defends against poor-quality hash functions. This is
* critical because HashMap uses power-of-two length hash tables that
* otherwise encounter collisions for hashCodes that do not differ
* in the lower bits.
*/
final int hash(Object k) {
int h = hashSeed ^ k.hashCode();
// This function ensures that hashCodes that differ only by
// constant multiples at each bit position have a bounded
// number of collisions (approximately 8 at default load factor).
h ^= (h >>> 20) ^ (h >>> 12);
return h ^ (h >>> 7) ^ (h >>> 4);
}
/**
* Returns index for hash code h.
*/
static int indexFor(int h, int length) {
// assert Integer.bitCount(length) == 1 : "length must be a non-zero power of 2";
return h & (length-1);
}
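Because length is a power of two, the mask h & (length-1) selects exactly the low bits of h and agrees with an unsigned modulo, while being much cheaper. A one-line check (illustrative only):

public class IndexForDemo {
    public static void main(String[] args) {
        int length = 16, h = 12345;
        // For power-of-two lengths, masking and unsigned remainder agree (both print 9).
        System.out.println((h & (length - 1)) + " == " + Integer.remainderUnsigned(h, length));
    }
}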
/**
* Returns the number of key-value mappings in this map.
*
* @return the number of key-value mappings in this map
*/
public int size() {
return size;
}
/**
* Returns <tt>true</tt> if this map contains no key-value mappings.
*
* @return <tt>true</tt> if this map contains no key-value mappings
*/
public boolean isEmpty() {
return size == 0;
}
/**
* Returns the value to which the specified key is mapped,
* or {@code null} if this map contains no mapping for the key.
*
* <p>More formally, if this map contains a mapping from a key
* {@code k} to a value {@code v} such that {@code (key==null ? k==null :
* key.equals(k))}, then this method returns {@code v}; otherwise
* it returns {@code null}. (There can be at most one such mapping.)
*
* <p>A return value of {@code null} does not <i>necessarily</i>
* indicate that the map contains no mapping for the key; it's also
* possible that the map explicitly maps the key to {@code null}.
* The {@link #containsKey containsKey} operation may be used to
* distinguish these two cases.
*
* @see #put(Object, Object)
*/
@SuppressWarnings("unchecked")
public V get(Object key) {
Entry<K,V> entry = getEntry(key);
return null == entry ? null : entry.getValue();
}
@Override
public V getOrDefault(Object key, V defaultValue) {
Entry<K,V> entry = getEntry(key);
return (entry == null) ? defaultValue : entry.getValue();
}
/**
* Returns <tt>true</tt> if this map contains a mapping for the
* specified key.
*
* @param key The key whose presence in this map is to be tested
* @return <tt>true</tt> if this map contains a mapping for the specified
* key.
*/
public boolean containsKey(Object key) {
return getEntry(key) != null;
}
/**
* Returns the entry associated with the specified key in the
* HashMap. Returns null if the HashMap contains no mapping
* for the key.
*/
@SuppressWarnings("unchecked")
final Entry<K,V> getEntry(Object key) {
if (size == 0) {
return null;
}
if (key == null) {
return nullKeyEntry;
}
int hash = hash(key);
int bin = indexFor(hash, table.length);
if (table[bin] instanceof Entry) {
Entry<K,V> e = (Entry<K,V>) table[bin];
for (; e != null; e = (Entry<K,V>)e.next) {
Object k;
if (e.hash == hash &&
((k = e.key) == key || key.equals(k))) {
return e;
}
}
} else if (table[bin] != null) {
TreeBin e = (TreeBin)table[bin];
TreeNode p = e.getTreeNode(hash, (K)key);
if (p != null) {
// assert p.entry.hash == hash && p.entry.key.equals(key);
return (Entry<K,V>)p.entry;
} else {
return null;
}
}
return null;
}
/**
* Associates the specified value with the specified key in this map.
* If the map previously contained a mapping for the key, the old
* value is replaced.
*
* @param key key with which the specified value is to be associated
* @param value value to be associated with the specified key
* @return the previous value associated with <tt>key</tt>, or
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
* (A <tt>null</tt> return can also indicate that the map
* previously associated <tt>null</tt> with <tt>key</tt>.)
*/
@SuppressWarnings("unchecked")
public V put(K key, V value) {
if (table == EMPTY_TABLE) {
inflateTable(threshold);
}
if (key == null)
return putForNullKey(value);
int hash = hash(key);
int i = indexFor(hash, table.length);
boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?
if (table[i] instanceof Entry) {
// Bin contains ordinary Entries. Search for key in the linked list
// of entries, counting the number of entries. Only check for
// TreeBin conversion if the list size is >= TREE_THRESHOLD.
// (The conversion still may not happen if the table gets resized.)
int listSize = 0;
Entry<K,V> e = (Entry<K,V>) table[i];
for (; e != null; e = (Entry<K,V>)e.next) {
Object k;
if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
V oldValue = e.value;
e.value = value;
e.recordAccess(this);
return oldValue;
}
listSize++;
}
// Didn't find, so fall through and call addEntry() to add the
// Entry and check for TreeBin conversion.
checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
} else if (table[i] != null) {
TreeBin e = (TreeBin)table[i];
TreeNode p = e.putTreeNode(hash, key, value, null);
if (p == null) { // putTreeNode() added a new node
modCount++;
size++;
if (size >= threshold) {
resize(2 * table.length);
}
return null;
} else { // putTreeNode() found an existing node
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
V oldVal = pEntry.value;
pEntry.value = value;
pEntry.recordAccess(this);
return oldVal;
}
}
modCount++;
addEntry(hash, key, value, i, checkIfNeedTree);
return null;
}
/**
* Offloaded version of put for null keys
*/
private V putForNullKey(V value) {
if (nullKeyEntry != null) {
V oldValue = nullKeyEntry.value;
nullKeyEntry.value = value;
nullKeyEntry.recordAccess(this);
return oldValue;
}
modCount++;
size++; // newEntry() skips size++
nullKeyEntry = newEntry(0, null, value, null);
return null;
}
private void putForCreateNullKey(V value) {
// Look for preexisting entry for key. This will never happen for
// clone or deserialize. It will only happen for construction if the
// input Map is a sorted map whose ordering is inconsistent w/ equals.
if (nullKeyEntry != null) {
nullKeyEntry.value = value;
} else {
nullKeyEntry = newEntry(0, null, value, null);
size++;
}
}
/**
* This method is used instead of put by constructors and
* pseudoconstructors (clone, readObject). It does not resize the table,
* check for comodification, etc, though it will convert bins to TreeBins
* as needed. It calls createEntry rather than addEntry.
*/
@SuppressWarnings("unchecked")
private void putForCreate(K key, V value) {
if (null == key) {
putForCreateNullKey(value);
return;
}
int hash = hash(key);
int i = indexFor(hash, table.length);
boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?
/**
* Look for preexisting entry for key. This will never happen for
* clone or deserialize. It will only happen for construction if the
* input Map is a sorted map whose ordering is inconsistent w/ equals.
*/
if (table[i] instanceof Entry) {
int listSize = 0;
Entry<K,V> e = (Entry<K,V>) table[i];
for (; e != null; e = (Entry<K,V>)e.next) {
Object k;
if (e.hash == hash && ((k = e.key) == key || key.equals(k))) {
e.value = value;
return;
}
listSize++;
}
// Didn't find, fall through to createEntry().
// Check for conversion to TreeBin done via createEntry().
checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
} else if (table[i] != null) {
TreeBin e = (TreeBin)table[i];
TreeNode p = e.putTreeNode(hash, key, value, null);
if (p != null) {
p.entry.setValue(value); // Found an existing node, set value
} else {
size++; // Added a new TreeNode, so update size
}
// don't need modCount++/check for resize - just return
return;
}
createEntry(hash, key, value, i, checkIfNeedTree);
}
private void putAllForCreate(Map<? extends K, ? extends V> m) {
for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
putForCreate(e.getKey(), e.getValue());
}
/**
* Rehashes the contents of this map into a new array with a
* larger capacity. This method is called automatically when the
* number of keys in this map reaches its threshold.
*
* If current capacity is MAXIMUM_CAPACITY, this method does not
* resize the map, but sets threshold to Integer.MAX_VALUE.
* This has the effect of preventing future calls.
*
* @param newCapacity the new capacity, MUST be a power of two;
* must be greater than current capacity unless current
* capacity is MAXIMUM_CAPACITY (in which case value
* is irrelevant).
*/
void resize(int newCapacity) {
Object[] oldTable = table;
int oldCapacity = oldTable.length;
if (oldCapacity == MAXIMUM_CAPACITY) {
threshold = Integer.MAX_VALUE;
return;
}
Object[] newTable = new Object[newCapacity];
transfer(newTable);
table = newTable;
threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1);
}
/**
* Transfers all entries from current table to newTable.
*
* Assumes newTable is larger than table
*/
@SuppressWarnings("unchecked")
void transfer(Object[] newTable) {
Object[] src = table;
// assert newTable.length > src.length : "newTable.length(" +
// newTable.length + ") expected to be > src.length("+src.length+")";
int newCapacity = newTable.length;
for (int j = 0; j < src.length; j++) {
if (src[j] instanceof Entry) {
// Assume: since wasn't TreeBin before, won't need TreeBin now
Entry<K,V> e = (Entry<K,V>) src[j];
while (null != e) {
Entry<K,V> next = (Entry<K,V>)e.next;
int i = indexFor(e.hash, newCapacity);
e.next = (Entry<K,V>) newTable[i];
newTable[i] = e;
e = next;
}
} else if (src[j] != null) {
TreeBin e = (TreeBin) src[j];
TreeBin loTree = new TreeBin();
TreeBin hiTree = new TreeBin();
e.splitTreeBin(newTable, j, loTree, hiTree);
}
}
Arrays.fill(table, null);
}
/**
* Copies all of the mappings from the specified map to this map.
* These mappings will replace any mappings that this map had for
* any of the keys currently in the specified map.
*
* @param m mappings to be stored in this map
* @throws NullPointerException if the specified map is null
*/
public void putAll(Map<? extends K, ? extends V> m) {
int numKeysToBeAdded = m.size();
if (numKeysToBeAdded == 0)
return;
if (table == EMPTY_TABLE) {
inflateTable((int) Math.max(numKeysToBeAdded * loadFactor, threshold));
}
/*
* Expand the map if the number of mappings to be added
* is greater than or equal to threshold. This is conservative; the
* obvious condition is (m.size() + size) >= threshold, but this
* condition could result in a map with twice the appropriate capacity,
* if the keys to be added overlap with the keys already in this map.
* By using the conservative calculation, we subject ourself
* to at most one extra resize.
*/
if (numKeysToBeAdded > threshold && table.length < MAXIMUM_CAPACITY) {
resize(table.length * 2);
}
for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
put(e.getKey(), e.getValue());
}
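The conservative pre-sizing comment above is easiest to see with numbers. A hypothetical scenario (all values invented for illustration) where the conservative check avoids an unnecessary doubling when the added keys overlap keys already present:

public class PutAllSizingExample {
    public static void main(String[] args) {
        int size = 60, toAdd = 50, capacity = 128;
        float loadFactor = 0.75f;
        int threshold = (int) (capacity * loadFactor);  // 96
        // Conservative test used by putAll: count only the keys being added.
        System.out.println("conservative test fires? " + (toAdd > threshold));          // false
        // The "obvious" test would double capacity even if most keys overlap.
        System.out.println("obvious test fires? " + (size + toAdd >= threshold));       // true
    }
}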
/**
* Removes the mapping for the specified key from this map if present.
*
* @param key key whose mapping is to be removed from the map
* @return the previous value associated with <tt>key</tt>, or
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
* (A <tt>null</tt> return can also indicate that the map
* previously associated <tt>null</tt> with <tt>key</tt>.)
*/
public V remove(Object key) {
Entry<K,V> e = removeEntryForKey(key);
return (e == null ? null : e.value);
}
// optimized implementations of default methods in Map
@Override
public void forEach(BiConsumer<? super K, ? super V> action) {
Objects.requireNonNull(action);
final int expectedModCount = modCount;
if (nullKeyEntry != null) {
forEachNullKey(expectedModCount, action);
}
Object[] tab = this.table;
for (int index = 0; index < tab.length; index++) {
Object item = tab[index];
if (item == null) {
continue;
}
if (item instanceof HashMap.TreeBin) {
eachTreeNode(expectedModCount, ((TreeBin)item).first, action);
continue;
}
@SuppressWarnings("unchecked")
Entry<K, V> entry = (Entry<K, V>)item;
while (entry != null) {
action.accept(entry.key, entry.value);
entry = (Entry<K, V>)entry.next;
if (expectedModCount != modCount) {
throw new ConcurrentModificationException();
}
}
}
}
private void eachTreeNode(int expectedModCount, TreeNode<K, V> node, BiConsumer<? super K, ? super V> action) {
while (node != null) {
@SuppressWarnings("unchecked")
Entry<K, V> entry = (Entry<K, V>)node.entry;
action.accept(entry.key, entry.value);
node = (TreeNode<K, V>)entry.next;
if (expectedModCount != modCount) {
throw new ConcurrentModificationException();
}
}
}
private void forEachNullKey(int expectedModCount, BiConsumer<? super K, ? super V> action) {
action.accept(null, nullKeyEntry.value);
if (expectedModCount != modCount) {
throw new ConcurrentModificationException();
}
}
@Override
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
Objects.requireNonNull(function);
final int expectedModCount = modCount;
if (nullKeyEntry != null) {
replaceforNullKey(expectedModCount, function);
}
Object[] tab = this.table;
for (int index = 0; index < tab.length; index++) {
Object item = tab[index];
if (item == null) {
continue;
}
if (item instanceof HashMap.TreeBin) {
replaceEachTreeNode(expectedModCount, ((TreeBin)item).first, function);
continue;
}
@SuppressWarnings("unchecked")
Entry<K, V> entry = (Entry<K, V>)item;
while (entry != null) {
entry.value = function.apply(entry.key, entry.value);
entry = (Entry<K, V>)entry.next;
if (expectedModCount != modCount) {
throw new ConcurrentModificationException();
}
}
}
}
private void replaceEachTreeNode(int expectedModCount, TreeNode<K, V> node, BiFunction<? super K, ? super V, ? extends V> function) {
while (node != null) {
@SuppressWarnings("unchecked")
Entry<K, V> entry = (Entry<K, V>)node.entry;
entry.value = function.apply(entry.key, entry.value);
node = (TreeNode<K, V>)entry.next;
if (expectedModCount != modCount) {
throw new ConcurrentModificationException();
}
}
}
private void replaceforNullKey(int expectedModCount, BiFunction<? super K, ? super V, ? extends V> function) {
nullKeyEntry.value = function.apply(null, nullKeyEntry.value);
if (expectedModCount != modCount) {
throw new ConcurrentModificationException();
}
}
@Override
public V putIfAbsent(K key, V value) {
if (table == EMPTY_TABLE) {
inflateTable(threshold);
}
if (key == null) {
if (nullKeyEntry == null || nullKeyEntry.value == null) {
putForNullKey(value);
return null;
} else {
return nullKeyEntry.value;
}
}
int hash = hash(key);
int i = indexFor(hash, table.length);
boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?
if (table[i] instanceof Entry) {
int listSize = 0;
Entry<K,V> e = (Entry<K,V>) table[i];
for (; e != null; e = (Entry<K,V>)e.next) {
if (e.hash == hash && Objects.equals(e.key, key)) {
if (e.value != null) {
return e.value;
}
e.value = value;
e.recordAccess(this);
return null;
}
listSize++;
}
// Didn't find, so fall through and call addEntry() to add the
// Entry and check for TreeBin conversion.
checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
} else if (table[i] != null) {
TreeBin e = (TreeBin)table[i];
TreeNode p = e.putTreeNode(hash, key, value, null);
if (p == null) { // not found, putTreeNode() added a new node
modCount++;
size++;
if (size >= threshold) {
resize(2 * table.length);
}
return null;
} else { // putTreeNode() found an existing node
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
V oldVal = pEntry.value;
if (oldVal == null) { // only replace if maps to null
pEntry.value = value;
pEntry.recordAccess(this);
}
return oldVal;
if (e != null) { // existing mapping for key
V oldValue = e.value;
if (!onlyIfAbsent || oldValue == null)
e.value = value;
afterNodeAccess(e);
return oldValue;
}
}
modCount++;
addEntry(hash, key, value, i, checkIfNeedTree);
++modCount;
++size;
afterNodeInsertion(evict);
return null;
}
@Override
public boolean remove(Object key, Object value) {
if (size == 0) {
return false;
}
if (key == null) {
if (nullKeyEntry != null &&
Objects.equals(nullKeyEntry.value, value)) {
removeNullKey();
return true;
}
return false;
}
int hash = hash(key);
int i = indexFor(hash, table.length);
if (table[i] instanceof Entry) {
@SuppressWarnings("unchecked")
Entry<K,V> prev = (Entry<K,V>) table[i];
Entry<K,V> e = prev;
while (e != null) {
@SuppressWarnings("unchecked")
Entry<K,V> next = (Entry<K,V>) e.next;
if (e.hash == hash && Objects.equals(e.key, key)) {
if (!Objects.equals(e.value, value)) {
return false;
}
modCount++;
size--;
if (prev == e)
table[i] = next;
/**
* Initializes or doubles table size. If null, allocates in
* accord with initial capacity target held in field threshold.
* Otherwise, because we are using power-of-two expansion, the
* elements from each bin must either stay at same index, or move
* with a power of two offset in the new table.
*
* @return the table
*/
final Node<K,V>[] resize() {
Node<K,V>[] oldTab = table;
int oldCap = (oldTab == null) ? 0 : oldTab.length;
int oldThr = threshold;
int newCap, newThr = 0;
if (oldCap > 0) {
if (oldCap >= MAXIMUM_CAPACITY) {
threshold = Integer.MAX_VALUE;
return oldTab;
}
else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
oldCap >= DEFAULT_INITIAL_CAPACITY)
newThr = oldThr << 1; // double threshold
}
else if (oldThr > 0) // initial capacity was placed in threshold
newCap = oldThr;
else { // zero initial threshold signifies using defaults
newCap = DEFAULT_INITIAL_CAPACITY;
newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
}
if (newThr == 0) {
float ft = (float)newCap * loadFactor;
newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
(int)ft : Integer.MAX_VALUE);
}
threshold = newThr;
@SuppressWarnings({"rawtypes","unchecked"})
Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
table = newTab;
if (oldTab != null) {
for (int j = 0; j < oldCap; ++j) {
Node<K,V> e;
if ((e = oldTab[j]) != null) {
oldTab[j] = null;
if (e.next == null)
newTab[e.hash & (newCap - 1)] = e;
else if (e instanceof TreeNode)
((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
else { // preserve order
Node<K,V> loHead = null, loTail = null;
Node<K,V> hiHead = null, hiTail = null;
Node<K,V> next;
do {
next = e.next;
if ((e.hash & oldCap) == 0) {
if (loTail == null)
loHead = e;
else
prev.next = next;
e.recordRemoval(this);
return true;
loTail.next = e;
loTail = e;
}
prev = e;
e = next;
}
} else if (table[i] != null) {
TreeBin tb = ((TreeBin) table[i]);
TreeNode p = tb.getTreeNode(hash, (K)key);
if (p != null) {
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
// assert pEntry.key.equals(key);
if (Objects.equals(pEntry.value, value)) {
modCount++;
size--;
tb.deleteTreeNode(p);
pEntry.recordRemoval(this);
if (tb.root == null || tb.first == null) {
// assert tb.root == null && tb.first == null :
// "TreeBin.first and root should both be null";
// TreeBin is now empty, we should blank this bin
table[i] = null;
else {
if (hiTail == null)
hiHead = e;
else
hiTail.next = e;
hiTail = e;
}
return true;
} while ((e = next) != null);
if (loTail != null) {
loTail.next = null;
newTab[j] = loHead;
}
if (hiTail != null) {
hiTail.next = null;
newTab[j + oldCap] = hiHead;
}
}
return false;
}
@Override
public boolean replace(K key, V oldValue, V newValue) {
if (size == 0) {
return false;
}
if (key == null) {
if (nullKeyEntry != null &&
Objects.equals(nullKeyEntry.value, oldValue)) {
putForNullKey(newValue);
return true;
}
return false;
return newTab;
}
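The order-preserving split in resize() relies on a simple bit fact: after the table doubles, each node's new index is either its old index (the "lo" list) or its old index plus oldCap (the "hi" list), decided by the single hash bit (hash & oldCap). A standalone sketch (not part of the patch):

public class ResizeSplitDemo {
    public static void main(String[] args) {
        int oldCap = 16, newCap = 32;
        for (int hash : new int[] {5, 21, 37}) {
            int oldIndex = hash & (oldCap - 1);
            int newIndex = hash & (newCap - 1);
            // (hash & oldCap) is the one extra bit the larger mask exposes.
            String half = (hash & oldCap) == 0 ? "lo (stays)" : "hi (moves by oldCap)";
            System.out.printf("hash=%d old=%d new=%d %s%n", hash, oldIndex, newIndex, half);
        }
    }
}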
int hash = hash(key);
int i = indexFor(hash, table.length);
if (table[i] instanceof Entry) {
@SuppressWarnings("unchecked")
Entry<K,V> e = (Entry<K,V>) table[i];
for (; e != null; e = (Entry<K,V>)e.next) {
if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) {
e.value = newValue;
e.recordAccess(this);
return true;
}
}
return false;
} else if (table[i] != null) {
TreeBin tb = ((TreeBin) table[i]);
TreeNode p = tb.getTreeNode(hash, key);
if (p != null) {
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
// assert pEntry.key.equals(key);
if (Objects.equals(pEntry.value, oldValue)) {
pEntry.value = newValue;
pEntry.recordAccess(this);
return true;
}
/**
* Replaces all linked nodes in bin at index for given hash unless
* table is too small, in which case resizes instead.
*/
final void treeifyBin(Node<K,V>[] tab, int hash) {
int n, index; Node<K,V> e;
if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY)
resize();
else if ((e = tab[index = (n - 1) & hash]) != null) {
TreeNode<K,V> hd = null, tl = null;
do {
TreeNode<K,V> p = replacementTreeNode(e, null);
if (tl == null)
hd = p;
else {
p.prev = tl;
tl.next = p;
}
tl = p;
} while ((e = e.next) != null);
if ((tab[index] = hd) != null)
hd.treeify(tab);
}
    }
            }
        }
        return false;
    }
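    /*
     * Illustrative sketch of when treeifyBin kicks in, under the
     * thresholds used in this file (TREEIFY_THRESHOLD and
     * MIN_TREEIFY_CAPACITY); FixedHash is a hypothetical key type, not
     * part of the JDK. Many keys sharing one hashCode pile into a single
     * bin, which is converted to a tree (or the table resized first),
     * so lookups stay O(log n) instead of O(n).
     */
    static final class FixedHash implements Comparable<FixedHash> {
        final int id;
        FixedHash(int id) { this.id = id; }
        public int hashCode() { return 42; }          // force one bin
        public boolean equals(Object o) {
            return (o instanceof FixedHash) && ((FixedHash) o).id == id;
        }
        public int compareTo(FixedHash other) {       // lets the tree order keys
            return Integer.compare(id, other.id);
        }
    }
    static void demoTreeify() {
        java.util.Map<FixedHash, Integer> m = new java.util.HashMap<>();
        for (int i = 0; i < 100; i++)
            m.put(new FixedHash(i), i);               // all collide by hash
        System.out.println(m.get(new FixedHash(7)));  // prints 7
    }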
@Override
public V replace(K key, V value) {
if (size == 0) {
return null;
/**
* Copies all of the mappings from the specified map to this map.
* These mappings will replace any mappings that this map had for
* any of the keys currently in the specified map.
*
* @param m mappings to be stored in this map
* @throws NullPointerException if the specified map is null
*/
public void putAll(Map<? extends K, ? extends V> m) {
putMapEntries(m, true);
}
if (key == null) {
if (nullKeyEntry != null) {
return putForNullKey(value);
/**
* Removes the mapping for the specified key from this map if present.
*
* @param key key whose mapping is to be removed from the map
* @return the previous value associated with <tt>key</tt>, or
* <tt>null</tt> if there was no mapping for <tt>key</tt>.
* (A <tt>null</tt> return can also indicate that the map
* previously associated <tt>null</tt> with <tt>key</tt>.)
*/
public V remove(Object key) {
Node<K,V> e;
return (e = removeNode(hash(key), key, null, false, true)) == null ?
null : e.value;
}
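    /*
     * Usage sketch for the null-return caveat documented above, using
     * only the public java.util.Map API: a null from remove() may mean
     * "absent" or "was mapped to null", so check containsKey first when
     * that distinction matters.
     */
    static void demoRemove() {
        java.util.Map<String, Integer> m = new java.util.HashMap<>();
        m.put("a", null);
        boolean present = m.containsKey("a");   // true
        Integer old = m.remove("a");            // null, yet the key was present
        System.out.println(present + " " + old);
    }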
return null;
/**
* Implements Map.remove and related methods
*
* @param hash hash for key
* @param key the key
* @param value the value to match if matchValue, else ignored
* @param matchValue if true only remove if value is equal
* @param movable if false do not move other nodes while removing
* @return the node, or null if none
*/
final Node<K,V> removeNode(int hash, Object key, Object value,
boolean matchValue, boolean movable) {
Node<K,V>[] tab; Node<K,V> p; int n, index;
if ((tab = table) != null && (n = tab.length) > 0 &&
(p = tab[index = (n - 1) & hash]) != null) {
Node<K,V> node = null, e; K k; V v;
if (p.hash == hash &&
((k = p.key) == key || (key != null && key.equals(k))))
node = p;
else if ((e = p.next) != null) {
if (p instanceof TreeNode)
node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
else {
do {
if (e.hash == hash &&
((k = e.key) == key ||
(key != null && key.equals(k)))) {
node = e;
break;
}
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    tab[index] = node.next;
                else
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);
                return node;
            }
        }
        return null;
    }
        int hash = hash(key);
        int i = indexFor(hash, table.length);
        if (table[i] instanceof Entry) {
            @SuppressWarnings("unchecked")
            Entry<K,V> e = (Entry<K,V>)table[i];
            for (; e != null; e = (Entry<K,V>)e.next) {
                if (e.hash == hash && Objects.equals(e.key, key)) {
                    V oldValue = e.value;
                    e.value = value;
                    e.recordAccess(this);
                    return oldValue;
                }
            }
            return null;
        } else if (table[i] != null) {
            TreeBin tb = ((TreeBin) table[i]);
            TreeNode p = tb.getTreeNode(hash, key);
            if (p != null) {
                Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                // assert pEntry.key.equals(key);
                V oldValue = pEntry.value;
                pEntry.value = value;
                pEntry.recordAccess(this);
                return oldValue;
            }
        }
        return null;
    }
@Override
public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
if (table == EMPTY_TABLE) {
inflateTable(threshold);
}
if (key == null) {
if (nullKeyEntry == null || nullKeyEntry.value == null) {
V newValue = mappingFunction.apply(key);
if (newValue != null) {
putForNullKey(newValue);
}
return newValue;
/**
* Removes all of the mappings from this map.
* The map will be empty after this call returns.
*/
public void clear() {
Node<K,V>[] tab;
modCount++;
if ((tab = table) != null && size > 0) {
size = 0;
for (int i = 0; i < tab.length; ++i)
tab[i] = null;
}
return nullKeyEntry.value;
}
int hash = hash(key);
int i = indexFor(hash, table.length);
boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?
if (table[i] instanceof Entry) {
int listSize = 0;
@SuppressWarnings("unchecked")
Entry<K,V> e = (Entry<K,V>)table[i];
for (; e != null; e = (Entry<K,V>)e.next) {
if (e.hash == hash && Objects.equals(e.key, key)) {
V oldValue = e.value;
if (oldValue == null) {
V newValue = mappingFunction.apply(key);
if (newValue != null) {
e.value = newValue;
e.recordAccess(this);
/**
* Returns <tt>true</tt> if this map maps one or more keys to the
* specified value.
*
* @param value value whose presence in this map is to be tested
* @return <tt>true</tt> if this map maps one or more keys to the
* specified value
*/
public boolean containsValue(Object value) {
Node<K,V>[] tab; V v;
if ((tab = table) != null && size > 0) {
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
if ((v = e.value) == value ||
(value != null && value.equals(v)))
                        return true;
                }
            }
        }
        return false;
    }
                        }
                        return newValue;
                    }
                    return oldValue;
                }
                listSize++;
            }
// Didn't find, fall through to call the mapping function
checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
} else if (table[i] != null) {
TreeBin e = (TreeBin)table[i];
V value = mappingFunction.apply(key);
if (value == null) { // Return the existing value, if any
TreeNode p = e.getTreeNode(hash, key);
if (p != null) {
return (V) p.entry.value;
/**
* Returns a {@link Set} view of the keys contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. If the map is modified
* while an iteration over the set is in progress (except through
* the iterator's own <tt>remove</tt> operation), the results of
* the iteration are undefined. The set supports element removal,
* which removes the corresponding mapping from the map, via the
* <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
* operations. It does not support the <tt>add</tt> or <tt>addAll</tt>
* operations.
*
* @return a set view of the keys contained in this map
*/
public Set<K> keySet() {
Set<K> ks;
return (ks = keySet) == null ? (keySet = new KeySet()) : ks;
}
return null;
            } else { // Put the new value into the Tree, if absent
                TreeNode p = e.putTreeNode(hash, key, value, null);
                if (p == null) { // not found, new node was added
                    modCount++;
                    size++;
                    if (size >= threshold) {
                        resize(2 * table.length);
                    }
                    return value;
                } else { // putTreeNode() found an existing node
                    Entry<K,V> pEntry = (Entry<K,V>)p.entry;
                    V oldVal = pEntry.value;
                    if (oldVal == null) { // only replace if maps to null
                        pEntry.value = value;
                        pEntry.recordAccess(this);
                        return value;
                    }
                    return oldVal;
                }
            }
    final class KeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<K> iterator()     { return new KeyIterator(); }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
        }
public final void forEach(Consumer<? super K> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e.key);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
    }
            V newValue = mappingFunction.apply(key);
            if (newValue != null) { // add Entry and check for TreeBin conversion
                modCount++;
                addEntry(hash, key, newValue, i, checkIfNeedTree);
            }
            return newValue;
        }
/**
* Returns a {@link Collection} view of the values contained in this map.
* The collection is backed by the map, so changes to the map are
* reflected in the collection, and vice-versa. If the map is
* modified while an iteration over the collection is in progress
* (except through the iterator's own <tt>remove</tt> operation),
* the results of the iteration are undefined. The collection
* supports element removal, which removes the corresponding
* mapping from the map, via the <tt>Iterator.remove</tt>,
* <tt>Collection.remove</tt>, <tt>removeAll</tt>,
* <tt>retainAll</tt> and <tt>clear</tt> operations. It does not
* support the <tt>add</tt> or <tt>addAll</tt> operations.
*
* @return a view of the values contained in this map
*/
public Collection<V> values() {
Collection<V> vs;
return (vs = values) == null ? (values = new Values()) : vs;
}
@Override
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
if (size == 0) {
return null;
}
if (key == null) {
V oldValue;
if (nullKeyEntry != null && (oldValue = nullKeyEntry.value) != null) {
V newValue = remappingFunction.apply(key, oldValue);
if (newValue != null ) {
putForNullKey(newValue);
return newValue;
} else {
removeNullKey();
}
}
return null;
}
int hash = hash(key);
int i = indexFor(hash, table.length);
if (table[i] instanceof Entry) {
@SuppressWarnings("unchecked")
Entry<K,V> prev = (Entry<K,V>)table[i];
Entry<K,V> e = prev;
while (e != null) {
Entry<K,V> next = (Entry<K,V>)e.next;
if (e.hash == hash && Objects.equals(e.key, key)) {
V oldValue = e.value;
if (oldValue == null)
break;
V newValue = remappingFunction.apply(key, oldValue);
if (newValue == null) {
modCount++;
size--;
if (prev == e)
table[i] = next;
else
prev.next = next;
e.recordRemoval(this);
} else {
e.value = newValue;
e.recordAccess(this);
}
return newValue;
}
prev = e;
e = next;
}
} else if (table[i] != null) {
TreeBin tb = (TreeBin)table[i];
TreeNode p = tb.getTreeNode(hash, key);
if (p != null) {
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
// assert pEntry.key.equals(key);
V oldValue = pEntry.value;
if (oldValue != null) {
V newValue = remappingFunction.apply(key, oldValue);
if (newValue == null) { // remove mapping
modCount++;
size--;
tb.deleteTreeNode(p);
pEntry.recordRemoval(this);
if (tb.root == null || tb.first == null) {
// assert tb.root == null && tb.first == null :
// "TreeBin.first and root should both be null";
// TreeBin is now empty, we should blank this bin
table[i] = null;
}
} else {
pEntry.value = newValue;
pEntry.recordAccess(this);
final class Values extends AbstractCollection<V> {
public final int size() { return size; }
public final void clear() { HashMap.this.clear(); }
public final Iterator<V> iterator() { return new ValueIterator(); }
public final boolean contains(Object o) { return containsValue(o); }
public final Spliterator<V> spliterator() {
return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
}
return newValue;
public final void forEach(Consumer<? super V> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e.value);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
return null;
}
@Override
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
if (table == EMPTY_TABLE) {
inflateTable(threshold);
}
if (key == null) {
V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value;
V newValue = remappingFunction.apply(key, oldValue);
if (newValue != oldValue || (oldValue == null && nullKeyEntry != null)) {
if (newValue == null) {
removeNullKey();
} else {
putForNullKey(newValue);
}
}
return newValue;
/**
* Returns a {@link Set} view of the mappings contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. If the map is modified
* while an iteration over the set is in progress (except through
* the iterator's own <tt>remove</tt> operation, or through the
* <tt>setValue</tt> operation on a map entry returned by the
* iterator) the results of the iteration are undefined. The set
* supports element removal, which removes the corresponding
* mapping from the map, via the <tt>Iterator.remove</tt>,
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
* <tt>clear</tt> operations. It does not support the
* <tt>add</tt> or <tt>addAll</tt> operations.
*
* @return a set view of the mappings contained in this map
*/
public Set<Map.Entry<K,V>> entrySet() {
Set<Map.Entry<K,V>> es;
return (es = entrySet) == null ? (entrySet = new EntrySet()) : es;
}
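    /*
     * Sketch of iterating the entrySet() view, an editor's example with
     * the standard java.util API: setValue writes through to the map,
     * and Iterator.remove is the only safe structural modification
     * during iteration.
     */
    static void demoEntrySet() {
        java.util.Map<String, Integer> m = new java.util.HashMap<>();
        m.put("a", 1);
        m.put("b", 2);
        for (java.util.Map.Entry<String, Integer> e : m.entrySet())
            e.setValue(e.getValue() * 10);   // writes through to the map
        System.out.println(m);               // values are now 10 and 20
    }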
int hash = hash(key);
int i = indexFor(hash, table.length);
boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?
if (table[i] instanceof Entry) {
int listSize = 0;
@SuppressWarnings("unchecked")
Entry<K,V> prev = (Entry<K,V>)table[i];
Entry<K,V> e = prev;
while (e != null) {
Entry<K,V> next = (Entry<K,V>)e.next;
if (e.hash == hash && Objects.equals(e.key, key)) {
V oldValue = e.value;
V newValue = remappingFunction.apply(key, oldValue);
if (newValue != oldValue || oldValue == null) {
if (newValue == null) {
modCount++;
size--;
if (prev == e)
table[i] = next;
else
prev.next = next;
e.recordRemoval(this);
} else {
e.value = newValue;
e.recordAccess(this);
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
public final int size() { return size; }
public final void clear() { HashMap.this.clear(); }
public final Iterator<Map.Entry<K,V>> iterator() {
return new EntryIterator();
}
public final boolean contains(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Object key = e.getKey();
Node<K,V> candidate = getNode(hash(key), key);
return candidate != null && candidate.equals(e);
}
return newValue;
public final boolean remove(Object o) {
if (o instanceof Map.Entry) {
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Object key = e.getKey();
Object value = e.getValue();
return removeNode(hash(key), key, value, true, true) != null;
}
prev = e;
e = next;
listSize++;
return false;
}
checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
} else if (table[i] != null) {
TreeBin tb = (TreeBin)table[i];
TreeNode p = tb.getTreeNode(hash, key);
V oldValue = p == null ? null : (V)p.entry.value;
V newValue = remappingFunction.apply(key, oldValue);
if (newValue != oldValue || (oldValue == null && p != null)) {
if (newValue == null) {
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
modCount++;
size--;
tb.deleteTreeNode(p);
pEntry.recordRemoval(this);
if (tb.root == null || tb.first == null) {
// assert tb.root == null && tb.first == null :
// "TreeBin.first and root should both be null";
// TreeBin is now empty, we should blank this bin
table[i] = null;
}
} else {
if (p != null) { // just update the value
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
pEntry.value = newValue;
pEntry.recordAccess(this);
} else { // need to put new node
p = tb.putTreeNode(hash, key, newValue, null);
// assert p == null; // should have added a new node
modCount++;
size++;
if (size >= threshold) {
resize(2 * table.length);
public final Spliterator<Map.Entry<K,V>> spliterator() {
return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
}
public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
}
return newValue;
}
V newValue = remappingFunction.apply(key, null);
if (newValue != null) {
modCount++;
addEntry(hash, key, newValue, i, checkIfNeedTree);
}
        return newValue;
    }

    // Overrides of JDK8 Map extension methods
public V getOrDefault(Object key, V defaultValue) {
Node<K,V> e;
return (e = getNode(hash(key), key)) == null ? defaultValue : e.value;
}
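    /*
     * Sketch for getOrDefault(), an editor's example: the default is
     * returned only when the key is absent; an explicit null mapping is
     * still returned as null, since getNode() finds the entry.
     */
    static void demoGetOrDefault() {
        java.util.Map<String, Integer> m = new java.util.HashMap<>();
        m.put("a", null);
        System.out.println(m.getOrDefault("a", 7));  // null: key is present
        System.out.println(m.getOrDefault("b", 7));  // 7: key is absent
    }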
@Override
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
if (table == EMPTY_TABLE) {
inflateTable(threshold);
}
if (key == null) {
V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value;
V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value);
if (newValue != null) {
putForNullKey(newValue);
} else if (nullKeyEntry != null) {
removeNullKey();
}
return newValue;
public V putIfAbsent(K key, V value) {
return putVal(hash(key), key, value, true, true);
}
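    /*
     * Sketch for putIfAbsent(), an editor's example: an existing value
     * wins and is returned; null means the new value was installed.
     * Because putVal is called with onlyIfAbsent = true, a key mapped
     * to null is treated as absent and gets overwritten.
     */
    static void demoPutIfAbsent() {
        java.util.Map<String, Integer> m = new java.util.HashMap<>();
        System.out.println(m.putIfAbsent("a", 1));   // null, value installed
        System.out.println(m.putIfAbsent("a", 2));   // 1, existing value kept
    }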
int hash = hash(key);
int i = indexFor(hash, table.length);
boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin?
if (table[i] instanceof Entry) {
int listSize = 0;
@SuppressWarnings("unchecked")
Entry<K,V> prev = (Entry<K,V>)table[i];
Entry<K,V> e = prev;
public boolean remove(Object key, Object value) {
return removeNode(hash(key), key, value, true, true) != null;
}
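    /*
     * Sketch for the two-argument remove(), an editor's example: the
     * mapping goes away only when the current value matches, which is
     * what the matchValue flag passed to removeNode above implements.
     */
    static void demoConditionalRemove() {
        java.util.Map<String, Integer> m = new java.util.HashMap<>();
        m.put("a", 1);
        System.out.println(m.remove("a", 2));  // false, value mismatch
        System.out.println(m.remove("a", 1));  // true, entry removed
    }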
while (e != null) {
Entry<K,V> next = (Entry<K,V>)e.next;
if (e.hash == hash && Objects.equals(e.key, key)) {
V oldValue = e.value;
V newValue = (oldValue == null) ? value :
remappingFunction.apply(oldValue, value);
if (newValue == null) {
modCount++;
size--;
if (prev == e)
table[i] = next;
else
prev.next = next;
e.recordRemoval(this);
} else {
                        e.value = newValue;
                        e.recordAccess(this);
                    }
                    return newValue;
                }
prev = e;
e = next;
listSize++;
}
// Didn't find, so fall through and (maybe) call addEntry() to add
// the Entry and check for TreeBin conversion.
checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD;
} else if (table[i] != null) {
TreeBin tb = (TreeBin)table[i];
TreeNode p = tb.getTreeNode(hash, key);
V oldValue = p == null ? null : (V)p.entry.value;
V newValue = (oldValue == null) ? value :
remappingFunction.apply(oldValue, value);
if (newValue == null) {
if (p != null) {
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
modCount++;
size--;
tb.deleteTreeNode(p);
pEntry.recordRemoval(this);
if (tb.root == null || tb.first == null) {
// assert tb.root == null && tb.first == null :
// "TreeBin.first and root should both be null";
// TreeBin is now empty, we should blank this bin
table[i] = null;
}
}
return null;
} else if (newValue != oldValue) {
if (p != null) { // just update the value
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
pEntry.value = newValue;
pEntry.recordAccess(this);
} else { // need to put new node
p = tb.putTreeNode(hash, key, newValue, null);
// assert p == null; // should have added a new node
modCount++;
size++;
if (size >= threshold) {
resize(2 * table.length);
}
}
}
return newValue;
}
        if (value != null) {
            modCount++;
            addEntry(hash, key, value, i, checkIfNeedTree);
        }
        return value;
    }
    @Override
    public boolean replace(K key, V oldValue, V newValue) {
        Node<K,V> e; V v;
        if ((e = getNode(hash(key), key)) != null &&
            ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) {
            e.value = newValue;
            afterNodeAccess(e);
            return true;
        }
        return false;
    }
// end of optimized implementations of default methods in Map
/**
* Removes and returns the entry associated with the specified key
* in the HashMap. Returns null if the HashMap contains no mapping
* for this key.
*
* We don't bother converting TreeBins back to Entry lists if the bin falls
* back below TREE_THRESHOLD, but we do clear bins when removing the last
* TreeNode in a TreeBin.
*/
final Entry<K,V> removeEntryForKey(Object key) {
if (size == 0) {
return null;
}
if (key == null) {
if (nullKeyEntry != null) {
return removeNullKey();
public V replace(K key, V value) {
Node<K,V> e;
if ((e = getNode(hash(key), key)) != null) {
V oldValue = e.value;
e.value = value;
afterNodeAccess(e);
return oldValue;
}
return null;
}
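    /*
     * Sketch contrasting the two replace() overloads above, an editor's
     * example: both update only keys already present; the three-argument
     * form additionally requires the current value to match.
     */
    static void demoReplace() {
        java.util.Map<String, Integer> m = new java.util.HashMap<>();
        m.put("a", 1);
        m.replace("a", 10);                          // "a" -> 10
        m.replace("b", 20);                          // no-op, "b" is absent
        System.out.println(m.replace("a", 1, 99));   // false, 10 != 1
        System.out.println(m.replace("a", 10, 99));  // true, "a" -> 99
    }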
int hash = hash(key);
int i = indexFor(hash, table.length);
if (table[i] instanceof Entry) {
@SuppressWarnings("unchecked")
Entry<K,V> prev = (Entry<K,V>)table[i];
Entry<K,V> e = prev;
while (e != null) {
@SuppressWarnings("unchecked")
Entry<K,V> next = (Entry<K,V>) e.next;
if (e.hash == hash && Objects.equals(e.key, key)) {
modCount++;
size--;
if (prev == e)
table[i] = next;
else
prev.next = next;
e.recordRemoval(this);
return e;
public V computeIfAbsent(K key,
Function<? super K, ? extends V> mappingFunction) {
if (mappingFunction == null)
throw new NullPointerException();
int hash = hash(key);
Node<K,V>[] tab; Node<K,V> first; int n, i;
int binCount = 0;
TreeNode<K,V> t = null;
Node<K,V> old = null;
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
n = (tab = resize()).length;
if ((first = tab[i = (n - 1) & hash]) != null) {
if (first instanceof TreeNode)
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
else {
Node<K,V> e = first; K k;
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k)))) {
old = e;
break;
}
prev = e;
e = next;
++binCount;
} while ((e = e.next) != null);
}
} else if (table[i] != null) {
TreeBin tb = ((TreeBin) table[i]);
TreeNode p = tb.getTreeNode(hash, (K)key);
if (p != null) {
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
// assert pEntry.key.equals(key);
modCount++;
size--;
tb.deleteTreeNode(p);
pEntry.recordRemoval(this);
if (tb.root == null || tb.first == null) {
// assert tb.root == null && tb.first == null :
// "TreeBin.first and root should both be null";
// TreeBin is now empty, we should blank this bin
table[i] = null;
V oldValue;
if (old != null && (oldValue = old.value) != null) {
afterNodeAccess(old);
return oldValue;
}
return pEntry;
}
V v = mappingFunction.apply(key);
if (old != null) {
old.value = v;
afterNodeAccess(old);
return v;
}
else if (v == null)
return null;
else if (t != null)
t.putTreeVal(this, tab, hash, key, v);
else {
tab[i] = newNode(hash, key, v, first);
if (binCount >= TREEIFY_THRESHOLD - 1)
treeifyBin(tab, hash);
}
++modCount;
++size;
afterNodeInsertion(true);
return v;
}
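    /*
     * Usage sketch for computeIfAbsent() as implemented above, an
     * editor's example: the mapping function runs only on a miss (or a
     * null mapping), and a null result installs nothing. The classic
     * multimap idiom:
     */
    static void demoComputeIfAbsent() {
        java.util.Map<String, java.util.List<Integer>> m = new java.util.HashMap<>();
        m.computeIfAbsent("evens", k -> new java.util.ArrayList<>()).add(2);
        m.computeIfAbsent("evens", k -> new java.util.ArrayList<>()).add(4);
        System.out.println(m.get("evens"));    // prints [2, 4]
    }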
public V computeIfPresent(K key,
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
Node<K,V> e; V oldValue;
int hash = hash(key);
if ((e = getNode(hash, key)) != null &&
(oldValue = e.value) != null) {
V v = remappingFunction.apply(key, oldValue);
if (v != null) {
e.value = v;
afterNodeAccess(e);
return v;
}
            else
                removeNode(hash, key, null, false, true);
        }
        return null;
    }

    /**
     * Special version of remove for EntrySet using {@code Map.Entry.equals()}
     * for matching.
     */
    final Entry<K,V> removeMapping(Object o) {
        if (size == 0 || !(o instanceof Map.Entry))
            return null;
        Map.Entry<?,?> entry = (Map.Entry<?,?>) o;
        Object key = entry.getKey();
        if (key == null) {
            if (entry.equals(nullKeyEntry)) {
                return removeNullKey();
            }
            return null;
        }
public V compute(K key,
BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
if (remappingFunction == null)
throw new NullPointerException();
int hash = hash(key);
int i = indexFor(hash, table.length);
if (table[i] instanceof Entry) {
@SuppressWarnings("unchecked")
Entry<K,V> prev = (Entry<K,V>)table[i];
Entry<K,V> e = prev;
while (e != null) {
@SuppressWarnings("unchecked")
Entry<K,V> next = (Entry<K,V>)e.next;
if (e.hash == hash && e.equals(entry)) {
modCount++;
size--;
if (prev == e)
table[i] = next;
else
prev.next = next;
e.recordRemoval(this);
return e;
}
prev = e;
e = next;
Node<K,V>[] tab; Node<K,V> first; int n, i;
int binCount = 0;
TreeNode<K,V> t = null;
Node<K,V> old = null;
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
n = (tab = resize()).length;
if ((first = tab[i = (n - 1) & hash]) != null) {
if (first instanceof TreeNode)
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
else {
Node<K,V> e = first; K k;
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k)))) {
old = e;
break;
}
} else if (table[i] != null) {
TreeBin tb = ((TreeBin) table[i]);
TreeNode p = tb.getTreeNode(hash, (K)key);
if (p != null && p.entry.equals(entry)) {
@SuppressWarnings("unchecked")
Entry<K,V> pEntry = (Entry<K,V>)p.entry;
// assert pEntry.key.equals(key);
modCount++;
size--;
tb.deleteTreeNode(p);
pEntry.recordRemoval(this);
if (tb.root == null || tb.first == null) {
// assert tb.root == null && tb.first == null :
// "TreeBin.first and root should both be null";
// TreeBin is now empty, we should blank this bin
table[i] = null;
++binCount;
} while ((e = e.next) != null);
}
return pEntry;
}
V oldValue = (old == null) ? null : old.value;
V v = remappingFunction.apply(key, oldValue);
if (old != null) {
if (v != null) {
old.value = v;
afterNodeAccess(old);
}
return null;
else
removeNode(hash, key, null, false, true);
}
/*
* Remove the mapping for the null key, and update internal accounting
* (size, modcount, recordRemoval, etc).
*
* Assumes nullKeyEntry is non-null.
*/
private Entry<K,V> removeNullKey() {
// assert nullKeyEntry != null;
Entry<K,V> retVal = nullKeyEntry;
modCount++;
size--;
retVal.recordRemoval(this);
nullKeyEntry = null;
return retVal;
else if (v != null) {
if (t != null)
t.putTreeVal(this, tab, hash, key, v);
else {
tab[i] = newNode(hash, key, v, first);
if (binCount >= TREEIFY_THRESHOLD - 1)
treeifyBin(tab, hash);
}
/**
* Removes all of the mappings from this map.
* The map will be empty after this call returns.
*/
public void clear() {
modCount++;
if (nullKeyEntry != null) {
nullKeyEntry = null;
++modCount;
++size;
afterNodeInsertion(true);
}
Arrays.fill(table, null);
size = 0;
return v;
}
/**
* Returns <tt>true</tt> if this map maps one or more keys to the
* specified value.
*
* @param value value whose presence in this map is to be tested
* @return <tt>true</tt> if this map maps one or more keys to the
* specified value
*/
public boolean containsValue(Object value) {
if (value == null) {
return containsNullValue();
}
Object[] tab = table;
for (int i = 0; i < tab.length; i++) {
if (tab[i] instanceof Entry) {
Entry<?,?> e = (Entry<?,?>)tab[i];
for (; e != null; e = (Entry<?,?>)e.next) {
if (value.equals(e.value)) {
return true;
public V merge(K key, V value,
BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
if (remappingFunction == null)
throw new NullPointerException();
int hash = hash(key);
Node<K,V>[] tab; Node<K,V> first; int n, i;
int binCount = 0;
TreeNode<K,V> t = null;
Node<K,V> old = null;
if (size > threshold || (tab = table) == null ||
(n = tab.length) == 0)
n = (tab = resize()).length;
if ((first = tab[i = (n - 1) & hash]) != null) {
if (first instanceof TreeNode)
old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key);
else {
Node<K,V> e = first; K k;
do {
if (e.hash == hash &&
((k = e.key) == key || (key != null && key.equals(k)))) {
old = e;
break;
}
++binCount;
} while ((e = e.next) != null);
}
} else if (tab[i] != null) {
TreeBin e = (TreeBin)tab[i];
TreeNode p = e.first;
for (; p != null; p = (TreeNode) p.entry.next) {
if (value == p.entry.value || value.equals(p.entry.value)) {
return true;
}
if (old != null) {
V v = remappingFunction.apply(old.value, value);
if (v != null) {
old.value = v;
afterNodeAccess(old);
}
else
removeNode(hash, key, null, false, true);
return v;
}
if (value != null) {
if (t != null)
t.putTreeVal(this, tab, hash, key, value);
else {
tab[i] = newNode(hash, key, value, first);
if (binCount >= TREEIFY_THRESHOLD - 1)
treeifyBin(tab, hash);
}
++modCount;
++size;
afterNodeInsertion(true);
}
        return value;
    }
            // Didn't find value in table - could be in nullKeyEntry
            return (nullKeyEntry != null && (value == nullKeyEntry.value ||
                    value.equals(nullKeyEntry.value)));
        }
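    /*
     * Usage sketch for merge() as implemented above, an editor's
     * example: on a miss the raw value is stored; on a hit the
     * remapping function combines old and new, and a null result
     * removes the entry. Word counting is the canonical use:
     */
    static void demoMerge() {
        java.util.Map<String, Integer> counts = new java.util.HashMap<>();
        for (String w : new String[] { "a", "b", "a" })
            counts.merge(w, 1, Integer::sum);
        System.out.println(counts);            // prints {a=2, b=1}
    }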
/**
* Special-case code for containsValue with null argument
*/
private boolean containsNullValue() {
Object[] tab = table;
for (int i = 0; i < tab.length; i++) {
if (tab[i] instanceof Entry) {
Entry<K,V> e = (Entry<K,V>)tab[i];
for (; e != null; e = (Entry<K,V>)e.next) {
if (e.value == null) {
return true;
public void forEach(BiConsumer<? super K, ? super V> action) {
Node<K,V>[] tab;
if (action == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next)
action.accept(e.key, e.value);
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
} else if (tab[i] != null) {
TreeBin e = (TreeBin)tab[i];
TreeNode p = e.first;
for (; p != null; p = (TreeNode) p.entry.next) {
if (p.entry.value == null) {
return true;
}
public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
Node<K,V>[] tab;
if (function == null)
throw new NullPointerException();
if (size > 0 && (tab = table) != null) {
int mc = modCount;
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
e.value = function.apply(e.key, e.value);
}
}
if (modCount != mc)
throw new ConcurrentModificationException();
}
// Didn't find value in table - could be in nullKeyEntry
return (nullKeyEntry != null && nullKeyEntry.value == null);
}
/* ------------------------------------------------------------ */
// Cloning and serialization
/**
* Returns a shallow copy of this <tt>HashMap</tt> instance: the keys and
* values themselves are not cloned.
......@@ -2196,916 +1294,1067 @@ public class HashMap<K,V>
*/
@SuppressWarnings("unchecked")
public Object clone() {
        HashMap<K,V> result = null;
        try {
            result = (HashMap<K,V>)super.clone();
        } catch (CloneNotSupportedException e) {
            // assert false;
        }
        if (result.table != EMPTY_TABLE) {
            result.inflateTable(Math.min(
                (int) Math.min(
                    size * Math.min(1 / loadFactor, 4.0f),
                    // we have limits...
                    HashMap.MAXIMUM_CAPACITY),
                table.length));
        }
        result.entrySet = null;
        result.modCount = 0;
        result.size = 0;
        result.nullKeyEntry = null;
        result.init();
        result.putAllForCreate(this);
        HashMap<K,V> result;
        try {
            result = (HashMap<K,V>)super.clone();
        } catch (CloneNotSupportedException e) {
            // this shouldn't happen, since we are Cloneable
            throw new InternalError(e);
        }
        result.reinitialize();
        result.putMapEntries(this, false);
        return result;
    }
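    /*
     * Sketch of the shallow-copy contract documented above, an editor's
     * example: clone() copies the table structure but shares key and
     * value objects with the original map.
     */
    @SuppressWarnings("unchecked")
    static void demoClone() {
        java.util.HashMap<String, StringBuilder> m = new java.util.HashMap<>();
        m.put("a", new StringBuilder("x"));
        java.util.HashMap<String, StringBuilder> copy =
            (java.util.HashMap<String, StringBuilder>) m.clone();
        copy.get("a").append("y");             // mutates the shared value
        System.out.println(m.get("a"));        // prints xy
    }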
static class Entry<K,V> implements Map.Entry<K,V> {
final K key;
V value;
Object next; // an Entry, or a TreeNode
final int hash;
// These methods are also used when serializing HashSets
final float loadFactor() { return loadFactor; }
final int capacity() {
return (table != null) ? table.length :
(threshold > 0) ? threshold :
DEFAULT_INITIAL_CAPACITY;
}
/**
* Creates new entry.
* Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
* serialize it).
*
* @serialData The <i>capacity</i> of the HashMap (the length of the
* bucket array) is emitted (int), followed by the
* <i>size</i> (an int, the number of key-value
* mappings), followed by the key (Object) and value (Object)
* for each key-value mapping. The key-value mappings are
* emitted in no particular order.
*/
Entry(int h, K k, V v, Object n) {
value = v;
next = n;
key = k;
hash = h;
private void writeObject(java.io.ObjectOutputStream s)
throws IOException {
int buckets = capacity();
// Write out the threshold, loadfactor, and any hidden stuff
s.defaultWriteObject();
s.writeInt(buckets);
s.writeInt(size);
internalWriteEntries(s);
}
public final K getKey() {
return key;
}
/**
* Reconstitute the {@code HashMap} instance from a stream (i.e.,
* deserialize it).
*/
private void readObject(java.io.ObjectInputStream s)
throws IOException, ClassNotFoundException {
// Read in the threshold (ignored), loadfactor, and any hidden stuff
s.defaultReadObject();
reinitialize();
if (loadFactor <= 0 || Float.isNaN(loadFactor))
throw new InvalidObjectException("Illegal load factor: " +
loadFactor);
s.readInt(); // Read and ignore number of buckets
int mappings = s.readInt(); // Read number of mappings (size)
if (mappings < 0)
throw new InvalidObjectException("Illegal mappings count: " +
mappings);
else if (mappings > 0) { // (if zero, use defaults)
// Size the table using given load factor only if within
// range of 0.25...4.0
float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
float fc = (float)mappings / lf + 1.0f;
int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
DEFAULT_INITIAL_CAPACITY :
(fc >= MAXIMUM_CAPACITY) ?
MAXIMUM_CAPACITY :
tableSizeFor((int)fc));
float ft = (float)cap * lf;
threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
(int)ft : Integer.MAX_VALUE);
@SuppressWarnings({"rawtypes","unchecked"})
Node<K,V>[] tab = (Node<K,V>[])new Node[cap];
table = tab;
public final V getValue() {
return value;
// Read the keys and values, and put the mappings in the HashMap
for (int i = 0; i < mappings; i++) {
@SuppressWarnings("unchecked")
K key = (K) s.readObject();
@SuppressWarnings("unchecked")
V value = (V) s.readObject();
putVal(hash(key), key, value, false, false);
}
}
public final V setValue(V newValue) {
V oldValue = value;
value = newValue;
return oldValue;
}
public final boolean equals(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>)o;
Object k1 = getKey();
Object k2 = e.getKey();
if (k1 == k2 || (k1 != null && k1.equals(k2))) {
Object v1 = getValue();
Object v2 = e.getValue();
if (v1 == v2 || (v1 != null && v1.equals(v2)))
return true;
/* ------------------------------------------------------------ */
// iterators
abstract class HashIterator {
Node<K,V> next; // next entry to return
Node<K,V> current; // current entry
int expectedModCount; // for fast-fail
int index; // current slot
HashIterator() {
expectedModCount = modCount;
Node<K,V>[] t = table;
current = next = null;
index = 0;
if (t != null && size > 0) { // advance to first entry
do {} while (index < t.length && (next = t[index++]) == null);
}
return false;
}
public final int hashCode() {
return Objects.hashCode(getKey()) ^ Objects.hashCode(getValue());
public final boolean hasNext() {
return next != null;
}
public final String toString() {
return getKey() + "=" + getValue();
final Node<K,V> nextNode() {
Node<K,V>[] t;
Node<K,V> e = next;
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
if (e == null)
throw new NoSuchElementException();
if ((next = (current = e).next) == null && (t = table) != null) {
do {} while (index < t.length && (next = t[index++]) == null);
}
/**
* This method is invoked whenever the value in an entry is
* overwritten for a key that's already in the HashMap.
*/
void recordAccess(HashMap<K,V> m) {
return e;
}
/**
* This method is invoked whenever the entry is
* removed from the table.
*/
void recordRemoval(HashMap<K,V> m) {
public final void remove() {
Node<K,V> p = current;
if (p == null)
throw new IllegalStateException();
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
current = null;
K key = p.key;
removeNode(hash(key), key, null, false, false);
expectedModCount = modCount;
}
}
void addEntry(int hash, K key, V value, int bucketIndex) {
addEntry(hash, key, value, bucketIndex, true);
final class KeyIterator extends HashIterator
implements Iterator<K> {
public final K next() { return nextNode().key; }
}
/**
* Adds a new entry with the specified key, value and hash code to
* the specified bucket. It is the responsibility of this
* method to resize the table if appropriate. The new entry is then
* created by calling createEntry().
*
* Subclass overrides this to alter the behavior of put method.
*
* If checkIfNeedTree is false, it is known that this bucket will not need
     * to be converted to a TreeBin, so don't bother checking.
*
* Assumes key is not null.
*/
void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
// assert key != null;
if ((size >= threshold) && (null != table[bucketIndex])) {
resize(2 * table.length);
hash = hash(key);
bucketIndex = indexFor(hash, table.length);
final class ValueIterator extends HashIterator
implements Iterator<V> {
public final V next() { return nextNode().value; }
}
createEntry(hash, key, value, bucketIndex, checkIfNeedTree);
final class EntryIterator extends HashIterator
implements Iterator<Map.Entry<K,V>> {
public final Map.Entry<K,V> next() { return nextNode(); }
}
/**
* Called by addEntry(), and also used when creating entries
* as part of Map construction or "pseudo-construction" (cloning,
* deserialization). This version does not check for resizing of the table.
*
* This method is responsible for converting a bucket to a TreeBin once
* TREE_THRESHOLD is reached. However if checkIfNeedTree is false, it is known
* that this bucket will not need to be converted to a TreeBin, so don't
* bother checking. The new entry is constructed by calling newEntry().
*
* Assumes key is not null.
*
* Note: buckets already converted to a TreeBin don't call this method, but
* instead call TreeBin.putTreeNode() to create new entries.
*/
void createEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
// assert key != null;
@SuppressWarnings("unchecked")
Entry<K,V> e = (Entry<K,V>)table[bucketIndex];
table[bucketIndex] = newEntry(hash, key, value, e);
size++;
if (checkIfNeedTree) {
int listSize = 0;
for (e = (Entry<K,V>) table[bucketIndex]; e != null; e = (Entry<K,V>)e.next) {
listSize++;
if (listSize >= TreeBin.TREE_THRESHOLD) { // Convert to TreeBin
if (comparableClassFor(key) != null) {
TreeBin t = new TreeBin();
t.populate((Entry)table[bucketIndex]);
table[bucketIndex] = t;
/* ------------------------------------------------------------ */
// spliterators
static class HashMapSpliterator<K,V> {
final HashMap<K,V> map;
Node<K,V> current; // current node
int index; // current index, modified on advance/split
int fence; // one past last index
int est; // size estimate
int expectedModCount; // for comodification checks
HashMapSpliterator(HashMap<K,V> m, int origin,
int fence, int est,
int expectedModCount) {
this.map = m;
this.index = origin;
this.fence = fence;
this.est = est;
this.expectedModCount = expectedModCount;
}
break;
final int getFence() { // initialize fence and size on first use
int hi;
if ((hi = fence) < 0) {
HashMap<K,V> m = map;
est = m.size;
expectedModCount = m.modCount;
Node<K,V>[] tab = m.table;
hi = fence = (tab == null) ? 0 : tab.length;
}
return hi;
}
public final long estimateSize() {
getFence(); // force init
return (long) est;
}
}
/*
* Factory method to create a new Entry object.
*/
Entry<K,V> newEntry(int hash, K key, V value, Object next) {
return new HashMap.Entry<>(hash, key, value, next);
static final class KeySpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<K> {
KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
}
public KeySpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
return (lo >= mid || current != null) ? null :
new KeySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
expectedModCount);
}
private abstract class HashIterator<E> implements Iterator<E> {
Object next; // next entry to return, an Entry or a TreeNode
int expectedModCount; // For fast-fail
int index; // current slot
Object current; // current entry, an Entry or a TreeNode
public void forEachRemaining(Consumer<? super K> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Node<K,V>[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = (tab == null) ? 0 : tab.length;
}
else
mc = expectedModCount;
if (tab != null && tab.length >= hi &&
(i = index) >= 0 && (i < (index = hi) || current != null)) {
Node<K,V> p = current;
current = null;
do {
if (p == null)
p = tab[i++];
else {
action.accept(p.key);
p = p.next;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
}
HashIterator() {
expectedModCount = modCount;
if (size > 0) { // advance to first entry
if (nullKeyEntry != null) {
// assert nullKeyEntry.next == null;
                // This works with nextEntry(): nullKeyEntry is an Entry, and
// e.next will be null, so we'll hit the findNextBin() call.
next = nullKeyEntry;
} else {
findNextBin();
public boolean tryAdvance(Consumer<? super K> action) {
int hi;
if (action == null)
throw new NullPointerException();
Node<K,V>[] tab = map.table;
if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
while (current != null || index < hi) {
if (current == null)
current = tab[index++];
else {
K k = current.key;
current = current.next;
action.accept(k);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
}
}
return false;
}
public final boolean hasNext() {
return next != null;
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
Spliterator.DISTINCT;
}
}
@SuppressWarnings("unchecked")
final Entry<K,V> nextEntry() {
if (modCount != expectedModCount) {
throw new ConcurrentModificationException();
static final class ValueSpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<V> {
ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
}
Object e = next;
Entry<K,V> retVal;
if (e == null)
throw new NoSuchElementException();
if (e instanceof TreeNode) { // TreeBin
retVal = (Entry<K,V>)((TreeNode)e).entry;
next = retVal.next;
} else {
retVal = (Entry<K,V>)e;
next = ((Entry<K,V>)e).next;
public ValueSpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
return (lo >= mid || current != null) ? null :
new ValueSpliterator<K,V>(map, lo, index = mid, est >>>= 1,
expectedModCount);
}
if (next == null) { // Move to next bin
findNextBin();
public void forEachRemaining(Consumer<? super V> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Node<K,V>[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = (tab == null) ? 0 : tab.length;
}
current = e;
return retVal;
else
mc = expectedModCount;
if (tab != null && tab.length >= hi &&
(i = index) >= 0 && (i < (index = hi) || current != null)) {
Node<K,V> p = current;
current = null;
do {
if (p == null)
p = tab[i++];
else {
action.accept(p.value);
p = p.next;
}
public void remove() {
if (current == null)
throw new IllegalStateException();
if (modCount != expectedModCount)
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
K k;
if (current instanceof Entry) {
k = ((Entry<K,V>)current).key;
} else {
k = ((Entry<K,V>)((TreeNode)current).entry).key;
}
current = null;
HashMap.this.removeEntryForKey(k);
expectedModCount = modCount;
}
/*
* Set 'next' to the first entry of the next non-empty bin in the table
*/
private void findNextBin() {
// assert next == null;
Object[] t = table;
while (index < t.length && (next = t[index++]) == null)
;
if (next instanceof HashMap.TreeBin) { // Point to the first TreeNode
next = ((TreeBin) next).first;
// assert next != null; // There should be no empty TreeBins
}
public boolean tryAdvance(Consumer<? super V> action) {
int hi;
if (action == null)
throw new NullPointerException();
Node<K,V>[] tab = map.table;
if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
while (current != null || index < hi) {
if (current == null)
current = tab[index++];
else {
V v = current.value;
current = current.next;
action.accept(v);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
}
private final class ValueIterator extends HashIterator<V> {
public V next() {
return nextEntry().value;
}
return false;
}
private final class KeyIterator extends HashIterator<K> {
public K next() {
return nextEntry().getKey();
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
}
}
private final class EntryIterator extends HashIterator<Map.Entry<K,V>> {
public Map.Entry<K,V> next() {
return nextEntry();
static final class EntrySpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<Map.Entry<K,V>> {
EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
}
public EntrySpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
return (lo >= mid || current != null) ? null :
new EntrySpliterator<K,V>(map, lo, index = mid, est >>>= 1,
expectedModCount);
}
// Subclass overrides these to alter behavior of views' iterator() method
Iterator<K> newKeyIterator() {
return new KeyIterator();
public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Node<K,V>[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = (tab == null) ? 0 : tab.length;
}
Iterator<V> newValueIterator() {
return new ValueIterator();
else
mc = expectedModCount;
if (tab != null && tab.length >= hi &&
(i = index) >= 0 && (i < (index = hi) || current != null)) {
Node<K,V> p = current;
current = null;
do {
if (p == null)
p = tab[i++];
else {
action.accept(p);
p = p.next;
}
Iterator<Map.Entry<K,V>> newEntryIterator() {
return new EntryIterator();
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
// Views
private transient Set<Map.Entry<K,V>> entrySet = null;
/**
* Returns a {@link Set} view of the keys contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. If the map is modified
* while an iteration over the set is in progress (except through
* the iterator's own <tt>remove</tt> operation), the results of
* the iteration are undefined. The set supports element removal,
* which removes the corresponding mapping from the map, via the
* <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
* operations. It does not support the <tt>add</tt> or <tt>addAll</tt>
* operations.
*/
public Set<K> keySet() {
Set<K> ks = keySet;
return (ks != null ? ks : (keySet = new KeySet()));
}
private final class KeySet extends AbstractSet<K> {
public Iterator<K> iterator() {
return newKeyIterator();
}
public int size() {
return size;
public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
int hi;
if (action == null)
throw new NullPointerException();
Node<K,V>[] tab = map.table;
if (tab != null && tab.length >= (hi = getFence()) && index >= 0) {
while (current != null || index < hi) {
if (current == null)
current = tab[index++];
else {
Node<K,V> e = current;
current = current.next;
action.accept(e);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
public boolean contains(Object o) {
return containsKey(o);
}
public boolean remove(Object o) {
return HashMap.this.removeEntryForKey(o) != null;
}
public void clear() {
HashMap.this.clear();
return false;
}
public Spliterator<K> spliterator() {
if (HashMap.this.getClass() == HashMap.class)
return new KeySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
else
return Spliterators.spliterator
(this, Spliterator.SIZED | Spliterator.DISTINCT);
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
Spliterator.DISTINCT;
}
}
/**
* Returns a {@link Collection} view of the values contained in this map.
* The collection is backed by the map, so changes to the map are
* reflected in the collection, and vice-versa. If the map is
* modified while an iteration over the collection is in progress
* (except through the iterator's own <tt>remove</tt> operation),
* the results of the iteration are undefined. The collection
* supports element removal, which removes the corresponding
* mapping from the map, via the <tt>Iterator.remove</tt>,
* <tt>Collection.remove</tt>, <tt>removeAll</tt>,
* <tt>retainAll</tt> and <tt>clear</tt> operations. It does not
* support the <tt>add</tt> or <tt>addAll</tt> operations.
/* ------------------------------------------------------------ */
// LinkedHashMap support
/*
* The following package-protected methods are designed to be
* overridden by LinkedHashMap, but not by any other subclass.
* Nearly all other internal methods are also package-protected
* but are declared final, so can be used by LinkedHashMap, view
* classes, and HashSet.
*/
public Collection<V> values() {
Collection<V> vs = values;
return (vs != null ? vs : (values = new Values()));
}
private final class Values extends AbstractCollection<V> {
public Iterator<V> iterator() {
return newValueIterator();
// Create a regular (non-tree) node
Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
return new Node<K,V>(hash, key, value, next);
}
public int size() {
return size;
}
public boolean contains(Object o) {
return containsValue(o);
}
public void clear() {
HashMap.this.clear();
// For conversion from TreeNodes to plain nodes
Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
return new Node<K,V>(p.hash, p.key, p.value, next);
}
public Spliterator<V> spliterator() {
if (HashMap.this.getClass() == HashMap.class)
return new ValueSpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
else
return Spliterators.spliterator
(this, Spliterator.SIZED);
// Create a tree bin node
TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
return new TreeNode<K,V>(hash, key, value, next);
}
// For treeifyBin
TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
return new TreeNode<K,V>(p.hash, p.key, p.value, next);
}
/**
* Returns a {@link Set} view of the mappings contained in this map.
* The set is backed by the map, so changes to the map are
* reflected in the set, and vice-versa. If the map is modified
* while an iteration over the set is in progress (except through
* the iterator's own <tt>remove</tt> operation, or through the
* <tt>setValue</tt> operation on a map entry returned by the
* iterator) the results of the iteration are undefined. The set
* supports element removal, which removes the corresponding
* mapping from the map, via the <tt>Iterator.remove</tt>,
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
* <tt>clear</tt> operations. It does not support the
* <tt>add</tt> or <tt>addAll</tt> operations.
*
* @return a set view of the mappings contained in this map
*/
public Set<Map.Entry<K,V>> entrySet() {
return entrySet0();
* Reset to initial default state. Called by clone and readObject.
*/
void reinitialize() {
table = null;
entrySet = null;
keySet = null;
values = null;
modCount = 0;
threshold = 0;
size = 0;
}
private Set<Map.Entry<K,V>> entrySet0() {
Set<Map.Entry<K,V>> es = entrySet;
return es != null ? es : (entrySet = new EntrySet());
}
// Callbacks to allow LinkedHashMap post-actions
void afterNodeAccess(Node<K,V> p) { }
void afterNodeInsertion(boolean evict) { }
void afterNodeRemoval(Node<K,V> p) { }
private final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
public Iterator<Map.Entry<K,V>> iterator() {
return newEntryIterator();
}
public boolean contains(Object o) {
if (!(o instanceof Map.Entry))
return false;
Map.Entry<?,?> e = (Map.Entry<?,?>) o;
Entry<K,V> candidate = getEntry(e.getKey());
return candidate != null && candidate.equals(e);
// Called only from writeObject, to ensure compatible ordering.
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
Node<K,V>[] tab;
if (size > 0 && (tab = table) != null) {
for (int i = 0; i < tab.length; ++i) {
for (Node<K,V> e = tab[i]; e != null; e = e.next) {
s.writeObject(e.key);
s.writeObject(e.value);
}
public boolean remove(Object o) {
return removeMapping(o) != null;
}
public int size() {
return size;
}
public void clear() {
HashMap.this.clear();
}
public Spliterator<Map.Entry<K,V>> spliterator() {
if (HashMap.this.getClass() == HashMap.class)
return new EntrySpliterator<K,V>(HashMap.this, 0, -1, 0, 0);
else
return Spliterators.spliterator
(this, Spliterator.SIZED | Spliterator.DISTINCT);
}
/* ------------------------------------------------------------ */
// Tree bins
/**
* Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
* extends Node) so can be used as extension of either regular or
* linked node.
*/
static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> {
TreeNode<K,V> parent; // red-black tree links
TreeNode<K,V> left;
TreeNode<K,V> right;
TreeNode<K,V> prev; // needed to unlink next upon deletion
boolean red;
TreeNode(int hash, K key, V val, Node<K,V> next) {
super(hash, key, val, next);
}
/**
* Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
* serialize it).
*
* @serialData The <i>capacity</i> of the HashMap (the length of the
* bucket array) is emitted (int), followed by the
* <i>size</i> (an int, the number of key-value
* mappings), followed by the key (Object) and value (Object)
* for each key-value mapping. The key-value mappings are
* emitted in no particular order.
* Returns root of tree containing this node.
*/
private void writeObject(java.io.ObjectOutputStream s)
throws IOException
{
// Write out the threshold, loadfactor, and any hidden stuff
s.defaultWriteObject();
// Write out number of buckets
if (table==EMPTY_TABLE) {
s.writeInt(roundUpToPowerOf2(threshold));
} else {
s.writeInt(table.length);
final TreeNode<K,V> root() {
for (TreeNode<K,V> r = this, p;;) {
if ((p = r.parent) == null)
return r;
r = p;
}
// Write out size (number of Mappings)
s.writeInt(size);
// Write out keys and values (alternating)
if (size > 0) {
for(Map.Entry<K,V> e : entrySet0()) {
s.writeObject(e.getKey());
s.writeObject(e.getValue());
}
/**
* Ensures that the given root is the first node of its bin.
*/
static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
int n;
if (root != null && tab != null && (n = tab.length) > 0) {
int index = (n - 1) & root.hash;
TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
if (root != first) {
Node<K,V> rn;
tab[index] = root;
TreeNode<K,V> rp = root.prev;
if ((rn = root.next) != null)
((TreeNode<K,V>)rn).prev = rp;
if (rp != null)
rp.next = rn;
if (first != null)
first.prev = root;
root.next = first;
root.prev = null;
}
assert checkInvariants(root);
}
}
private static final long serialVersionUID = 362498820763181265L;
/**
* Reconstitute the {@code HashMap} instance from a stream (i.e.,
* deserialize it).
* Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * when comparing keys.
*/
private void readObject(java.io.ObjectInputStream s)
throws IOException, ClassNotFoundException
{
// Read in the threshold (ignored), loadfactor, and any hidden stuff
s.defaultReadObject();
if (loadFactor <= 0 || Float.isNaN(loadFactor)) {
throw new InvalidObjectException("Illegal load factor: " +
loadFactor);
final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
TreeNode<K,V> p = this;
do {
int ph, dir; K pk;
TreeNode<K,V> pl = p.left, pr = p.right, q;
if ((ph = p.hash) > h)
p = pl;
else if (ph < h)
p = pr;
else if ((pk = p.key) == k || (k != null && k.equals(pk)))
return p;
else if (pl == null)
p = pr;
else if (pr == null)
p = pl;
else if ((kc != null ||
(kc = comparableClassFor(k)) != null) &&
(dir = compareComparables(kc, k, pk)) != 0)
p = (dir < 0) ? pl : pr;
else if ((q = pr.find(h, k, kc)) != null)
return q;
else
p = pl;
} while (p != null);
return null;
}
// set other fields that need values
if (Holder.USE_HASHSEED) {
int seed = ThreadLocalRandom.current().nextInt();
Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET,
(seed != 0) ? seed : 1);
/**
* Calls find for root node.
*/
final TreeNode<K,V> getTreeNode(int h, Object k) {
return ((parent != null) ? root() : this).find(h, k, null);
}
table = EMPTY_TABLE;
// Read in number of buckets
s.readInt(); // ignored.
// Read number of mappings
int mappings = s.readInt();
if (mappings < 0)
throw new InvalidObjectException("Illegal mappings count: " +
mappings);
// capacity chosen by number of mappings and desired load (if >= 0.25)
int capacity = (int) Math.min(
mappings * Math.min(1 / loadFactor, 4.0f),
// we have limits...
HashMap.MAXIMUM_CAPACITY);
// allocate the bucket array;
if (mappings > 0) {
inflateTable(capacity);
} else {
threshold = capacity;
/**
         * Tie-breaking utility for ordering insertions when hashCodes are
         * equal and the keys are not mutually comparable. We don't require a total
* order, just a consistent insertion rule to maintain
* equivalence across rebalancings. Tie-breaking further than
* necessary simplifies testing a bit.
*/
static int tieBreakOrder(Object a, Object b) {
int d;
if (a == null || b == null ||
(d = a.getClass().getName().
compareTo(b.getClass().getName())) == 0)
d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
-1 : 1);
return d;
}
init(); // Give subclass a chance to do its thing.
// Read the keys and values, and put the mappings in the HashMap
for (int i=0; i<mappings; i++) {
@SuppressWarnings("unchecked")
K key = (K) s.readObject();
@SuppressWarnings("unchecked")
V value = (V) s.readObject();
putForCreate(key, value);
/**
* Forms tree of the nodes linked from this node.
*/
final void treeify(Node<K,V>[] tab) {
TreeNode<K,V> root = null;
for (TreeNode<K,V> x = this, next; x != null; x = next) {
next = (TreeNode<K,V>)x.next;
x.left = x.right = null;
if (root == null) {
x.parent = null;
x.red = false;
root = x;
}
else {
K k = x.key;
int h = x.hash;
Class<?> kc = null;
for (TreeNode<K,V> p = root;;) {
int dir, ph;
K pk = p.key;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0)
dir = tieBreakOrder(k, pk);
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
x.parent = xp;
if (dir <= 0)
xp.left = x;
else
xp.right = x;
root = balanceInsertion(root, x);
break;
}
// These methods are used when serializing HashSets
int capacity() { return table.length; }
float loadFactor() { return loadFactor; }
/**
* Standin until HM overhaul; based loosely on Weak and Identity HM.
*/
static class HashMapSpliterator<K,V> {
final HashMap<K,V> map;
Object current; // current node, can be Entry or TreeNode
int index; // current index, modified on advance/split
int fence; // one past last index
int est; // size estimate
int expectedModCount; // for comodification checks
boolean acceptedNull; // Have we accepted the null key?
// Without this, we can't distinguish
// between being at the very beginning (and
// needing to accept null), or being at the
// end of the list in bin 0. In both cases,
// current == null && index == 0.
HashMapSpliterator(HashMap<K,V> m, int origin,
int fence, int est,
int expectedModCount) {
this.map = m;
this.index = origin;
this.fence = fence;
this.est = est;
this.expectedModCount = expectedModCount;
this.acceptedNull = false;
}
final int getFence() { // initialize fence and size on first use
int hi;
if ((hi = fence) < 0) {
HashMap<K,V> m = map;
est = m.size;
expectedModCount = m.modCount;
hi = fence = m.table.length;
}
return hi;
}
public final long estimateSize() {
getFence(); // force init
return (long) est;
moveRootToFront(tab, root);
}
/**
* Returns a list of non-TreeNodes replacing those linked from
* this node.
*/
final Node<K,V> untreeify(HashMap<K,V> map) {
Node<K,V> hd = null, tl = null;
for (Node<K,V> q = this; q != null; q = q.next) {
Node<K,V> p = map.replacementNode(q, null);
if (tl == null)
hd = p;
else
tl.next = p;
tl = p;
}
static final class KeySpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<K> {
KeySpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
return hd;
}
public KeySpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
if (lo >= mid || current != null) {
/**
* Tree version of putVal.
*/
final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
int h, K k, V v) {
Class<?> kc = null;
boolean searched = false;
TreeNode<K,V> root = (parent != null) ? root() : this;
for (TreeNode<K,V> p = root;;) {
int dir, ph; K pk;
if ((ph = p.hash) > h)
dir = -1;
else if (ph < h)
dir = 1;
else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
return p;
else if ((kc == null &&
(kc = comparableClassFor(k)) == null) ||
(dir = compareComparables(kc, k, pk)) == 0) {
if (!searched) {
TreeNode<K,V> q, ch;
searched = true;
if (((ch = p.left) != null &&
(q = ch.find(h, k, kc)) != null) ||
((ch = p.right) != null &&
(q = ch.find(h, k, kc)) != null))
return q;
}
dir = tieBreakOrder(k, pk);
}
TreeNode<K,V> xp = p;
if ((p = (dir <= 0) ? p.left : p.right) == null) {
Node<K,V> xpn = xp.next;
TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
if (dir <= 0)
xp.left = x;
else
xp.right = x;
xp.next = x;
x.parent = x.prev = xp;
if (xpn != null)
((TreeNode<K,V>)xpn).prev = x;
moveRootToFront(tab, balanceInsertion(root, x));
return null;
} else {
KeySpliterator<K,V> retVal = new KeySpliterator<K,V>(map, lo,
index = mid, est >>>= 1, expectedModCount);
// Only 'this' Spliterator chould check for null.
retVal.acceptedNull = true;
return retVal;
}
}
@SuppressWarnings("unchecked")
public void forEachRemaining(Consumer<? super K> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Object[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = tab.length;
}
else
mc = expectedModCount;
if (!acceptedNull) {
acceptedNull = true;
if (m.nullKeyEntry != null) {
action.accept(m.nullKeyEntry.key);
}
/**
* Removes the given node, that must be present before this call.
* This is messier than typical red-black deletion code because we
* cannot swap the contents of an interior node with a leaf
* successor that is pinned by "next" pointers that are accessible
* independently during traversal. So instead we swap the tree
* linkages. If the current tree appears to have too few nodes,
* the bin is converted back to a plain bin. (The test triggers
* somewhere between 2 and 6 nodes, depending on tree structure).
*/
final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
boolean movable) {
int n;
if (tab == null || (n = tab.length) == 0)
return;
int index = (n - 1) & hash;
TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
if (pred == null)
tab[index] = first = succ;
else
pred.next = succ;
if (succ != null)
succ.prev = pred;
if (first == null)
return;
if (root.parent != null)
root = root.root();
if (root == null || root.right == null ||
(rl = root.left) == null || rl.left == null) {
tab[index] = first.untreeify(map); // too small
return;
}
if (tab.length >= hi && (i = index) >= 0 &&
(i < (index = hi) || current != null)) {
Object p = current;
current = null;
do {
if (p == null) {
p = tab[i++];
if (p instanceof HashMap.TreeBin) {
p = ((HashMap.TreeBin)p).first;
TreeNode<K,V> p = this, pl = left, pr = right, replacement;
if (pl != null && pr != null) {
TreeNode<K,V> s = pr, sl;
while ((sl = s.left) != null) // find successor
s = sl;
boolean c = s.red; s.red = p.red; p.red = c; // swap colors
TreeNode<K,V> sr = s.right;
TreeNode<K,V> pp = p.parent;
if (s == pr) { // p was s's direct parent
p.parent = s;
s.right = p;
}
} else {
HashMap.Entry<K,V> entry;
if (p instanceof HashMap.Entry) {
entry = (HashMap.Entry<K,V>)p;
} else {
entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
else {
TreeNode<K,V> sp = s.parent;
if ((p.parent = sp) != null) {
if (s == sp.left)
sp.left = p;
else
sp.right = p;
}
action.accept(entry.key);
p = entry.next;
if ((s.right = pr) != null)
pr.parent = s;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
p.left = null;
if ((p.right = sr) != null)
sr.parent = p;
if ((s.left = pl) != null)
pl.parent = s;
if ((s.parent = pp) == null)
root = s;
else if (p == pp.left)
pp.left = s;
else
pp.right = s;
if (sr != null)
replacement = sr;
else
replacement = p;
}
else if (pl != null)
replacement = pl;
else if (pr != null)
replacement = pr;
else
replacement = p;
if (replacement != p) {
TreeNode<K,V> pp = replacement.parent = p.parent;
if (pp == null)
root = replacement;
else if (p == pp.left)
pp.left = replacement;
else
pp.right = replacement;
p.left = p.right = p.parent = null;
}
@SuppressWarnings("unchecked")
public boolean tryAdvance(Consumer<? super K> action) {
int hi;
if (action == null)
throw new NullPointerException();
Object[] tab = map.table;
hi = getFence();
TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);
if (!acceptedNull) {
acceptedNull = true;
if (map.nullKeyEntry != null) {
action.accept(map.nullKeyEntry.key);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
if (replacement == p) { // detach
TreeNode<K,V> pp = p.parent;
p.parent = null;
if (pp != null) {
if (p == pp.left)
pp.left = null;
else if (p == pp.right)
pp.right = null;
}
}
if (tab.length >= hi && index >= 0) {
while (current != null || index < hi) {
if (current == null) {
current = tab[index++];
if (current instanceof HashMap.TreeBin) {
current = ((HashMap.TreeBin)current).first;
}
} else {
HashMap.Entry<K,V> entry;
if (current instanceof HashMap.Entry) {
entry = (HashMap.Entry<K,V>)current;
} else {
entry = (HashMap.Entry<K,V>)((TreeNode)current).entry;
}
K k = entry.key;
current = entry.next;
action.accept(k);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
if (movable)
moveRootToFront(tab, r);
}
/**
* Splits nodes in a tree bin into lower and upper tree bins,
* or untreeifies if now too small. Called only from resize;
* see above discussion about split bits and indices.
*
* @param map the map
* @param tab the table for recording bin heads
* @param index the index of the table being split
* @param bit the bit of hash to split on
*/
final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) {
TreeNode<K,V> b = this;
// Relink into lo and hi lists, preserving order
TreeNode<K,V> loHead = null, loTail = null;
TreeNode<K,V> hiHead = null, hiTail = null;
int lc = 0, hc = 0;
for (TreeNode<K,V> e = b, next; e != null; e = next) {
next = (TreeNode<K,V>)e.next;
e.next = null;
if ((e.hash & bit) == 0) {
if ((e.prev = loTail) == null)
loHead = e;
else
loTail.next = e;
loTail = e;
++lc;
}
else {
if ((e.prev = hiTail) == null)
hiHead = e;
else
hiTail.next = e;
hiTail = e;
++hc;
}
return false;
}
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
Spliterator.DISTINCT;
if (loHead != null) {
if (lc <= UNTREEIFY_THRESHOLD)
tab[index] = loHead.untreeify(map);
else {
tab[index] = loHead;
if (hiHead != null) // (else is already treeified)
loHead.treeify(tab);
}
}
static final class ValueSpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<V> {
ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
if (hiHead != null) {
if (hc <= UNTREEIFY_THRESHOLD)
tab[index + bit] = hiHead.untreeify(map);
else {
tab[index + bit] = hiHead;
if (loHead != null)
hiHead.treeify(tab);
}
public ValueSpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
if (lo >= mid || current != null) {
return null;
} else {
ValueSpliterator<K,V> retVal = new ValueSpliterator<K,V>(map,
lo, index = mid, est >>>= 1, expectedModCount);
// Only 'this' Spliterator chould check for null.
retVal.acceptedNull = true;
return retVal;
}
}
@SuppressWarnings("unchecked")
public void forEachRemaining(Consumer<? super V> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Object[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = tab.length;
/* ------------------------------------------------------------ */
// Red-black tree methods, all adapted from CLR
static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> r, pp, rl;
if (p != null && (r = p.right) != null) {
if ((rl = p.right = r.left) != null)
rl.parent = p;
if ((pp = r.parent = p.parent) == null)
(root = r).red = false;
else if (pp.left == p)
pp.left = r;
else
pp.right = r;
r.left = p;
p.parent = r;
}
return root;
}
static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
TreeNode<K,V> p) {
TreeNode<K,V> l, pp, lr;
if (p != null && (l = p.left) != null) {
if ((lr = p.left = l.right) != null)
lr.parent = p;
if ((pp = l.parent = p.parent) == null)
(root = l).red = false;
else if (pp.right == p)
pp.right = l;
else
mc = expectedModCount;
pp.left = l;
l.right = p;
p.parent = l;
}
return root;
}
if (!acceptedNull) {
acceptedNull = true;
if (m.nullKeyEntry != null) {
action.accept(m.nullKeyEntry.value);
static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
TreeNode<K,V> x) {
x.red = true;
for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
if ((xp = x.parent) == null) {
x.red = false;
return x;
}
else if (!xp.red || (xpp = xp.parent) == null)
return root;
if (xp == (xppl = xpp.left)) {
if ((xppr = xpp.right) != null && xppr.red) {
xppr.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
if (tab.length >= hi && (i = index) >= 0 &&
(i < (index = hi) || current != null)) {
Object p = current;
current = null;
do {
if (p == null) {
p = tab[i++];
if (p instanceof HashMap.TreeBin) {
p = ((HashMap.TreeBin)p).first;
else {
if (x == xp.right) {
root = rotateLeft(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
} else {
HashMap.Entry<K,V> entry;
if (p instanceof HashMap.Entry) {
entry = (HashMap.Entry<K,V>)p;
} else {
entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateRight(root, xpp);
}
action.accept(entry.value);
p = entry.next;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
}
@SuppressWarnings("unchecked")
public boolean tryAdvance(Consumer<? super V> action) {
int hi;
if (action == null)
throw new NullPointerException();
Object[] tab = map.table;
hi = getFence();
if (!acceptedNull) {
acceptedNull = true;
if (map.nullKeyEntry != null) {
action.accept(map.nullKeyEntry.value);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
else {
if (xppl != null && xppl.red) {
xppl.red = false;
xp.red = false;
xpp.red = true;
x = xpp;
}
else {
if (x == xp.left) {
root = rotateRight(root, x = xp);
xpp = (xp = x.parent) == null ? null : xp.parent;
}
if (tab.length >= hi && index >= 0) {
while (current != null || index < hi) {
if (current == null) {
current = tab[index++];
if (current instanceof HashMap.TreeBin) {
current = ((HashMap.TreeBin)current).first;
}
} else {
HashMap.Entry<K,V> entry;
if (current instanceof HashMap.Entry) {
entry = (Entry<K,V>)current;
} else {
entry = (Entry<K,V>)((TreeNode)current).entry;
}
V v = entry.value;
current = entry.next;
action.accept(v);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
if (xp != null) {
xp.red = false;
if (xpp != null) {
xpp.red = true;
root = rotateLeft(root, xpp);
}
}
}
return false;
}
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0);
}
}
static final class EntrySpliterator<K,V>
extends HashMapSpliterator<K,V>
implements Spliterator<Map.Entry<K,V>> {
EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est,
int expectedModCount) {
super(m, origin, fence, est, expectedModCount);
static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
TreeNode<K,V> x) {
for (TreeNode<K,V> xp, xpl, xpr;;) {
if (x == null || x == root)
return root;
else if ((xp = x.parent) == null) {
x.red = false;
return x;
}
public EntrySpliterator<K,V> trySplit() {
int hi = getFence(), lo = index, mid = (lo + hi) >>> 1;
if (lo >= mid || current != null) {
return null;
} else {
EntrySpliterator<K,V> retVal = new EntrySpliterator<K,V>(map,
lo, index = mid, est >>>= 1, expectedModCount);
// Only 'this' Spliterator chould check for null.
retVal.acceptedNull = true;
return retVal;
else if (x.red) {
x.red = false;
return root;
}
else if ((xpl = xp.left) == x) {
if ((xpr = xp.right) != null && xpr.red) {
xpr.red = false;
xp.red = true;
root = rotateLeft(root, xp);
xpr = (xp = x.parent) == null ? null : xp.right;
}
@SuppressWarnings("unchecked")
public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
int i, hi, mc;
if (action == null)
throw new NullPointerException();
HashMap<K,V> m = map;
Object[] tab = m.table;
if ((hi = fence) < 0) {
mc = expectedModCount = m.modCount;
hi = fence = tab.length;
if (xpr == null)
x = xp;
else {
TreeNode<K,V> sl = xpr.left, sr = xpr.right;
if ((sr == null || !sr.red) &&
(sl == null || !sl.red)) {
xpr.red = true;
x = xp;
}
else
mc = expectedModCount;
if (!acceptedNull) {
acceptedNull = true;
if (m.nullKeyEntry != null) {
action.accept(m.nullKeyEntry);
else {
if (sr == null || !sr.red) {
if (sl != null)
sl.red = false;
xpr.red = true;
root = rotateRight(root, xpr);
xpr = (xp = x.parent) == null ?
null : xp.right;
}
if (xpr != null) {
xpr.red = (xp == null) ? false : xp.red;
if ((sr = xpr.right) != null)
sr.red = false;
}
if (tab.length >= hi && (i = index) >= 0 &&
(i < (index = hi) || current != null)) {
Object p = current;
current = null;
do {
if (p == null) {
p = tab[i++];
if (p instanceof HashMap.TreeBin) {
p = ((HashMap.TreeBin)p).first;
if (xp != null) {
xp.red = false;
root = rotateLeft(root, xp);
}
} else {
HashMap.Entry<K,V> entry;
if (p instanceof HashMap.Entry) {
entry = (HashMap.Entry<K,V>)p;
} else {
entry = (HashMap.Entry<K,V>)((TreeNode)p).entry;
x = root;
}
action.accept(entry);
p = entry.next;
}
} while (p != null || i < hi);
if (m.modCount != mc)
throw new ConcurrentModificationException();
}
else { // symmetric
if (xpl != null && xpl.red) {
xpl.red = false;
xp.red = true;
root = rotateRight(root, xp);
xpl = (xp = x.parent) == null ? null : xp.left;
}
@SuppressWarnings("unchecked")
public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
int hi;
if (action == null)
throw new NullPointerException();
Object[] tab = map.table;
hi = getFence();
if (!acceptedNull) {
acceptedNull = true;
if (map.nullKeyEntry != null) {
action.accept(map.nullKeyEntry);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
if (xpl == null)
x = xp;
else {
TreeNode<K,V> sl = xpl.left, sr = xpl.right;
if ((sl == null || !sl.red) &&
(sr == null || !sr.red)) {
xpl.red = true;
x = xp;
}
else {
if (sl == null || !sl.red) {
if (sr != null)
sr.red = false;
xpl.red = true;
root = rotateLeft(root, xpl);
xpl = (xp = x.parent) == null ?
null : xp.left;
}
if (tab.length >= hi && index >= 0) {
while (current != null || index < hi) {
if (current == null) {
current = tab[index++];
if (current instanceof HashMap.TreeBin) {
current = ((HashMap.TreeBin)current).first;
if (xpl != null) {
xpl.red = (xp == null) ? false : xp.red;
if ((sl = xpl.left) != null)
sl.red = false;
}
} else {
HashMap.Entry<K,V> e;
if (current instanceof HashMap.Entry) {
e = (Entry<K,V>)current;
} else {
e = (Entry<K,V>)((TreeNode)current).entry;
if (xp != null) {
xp.red = false;
root = rotateRight(root, xp);
}
x = root;
}
current = e.next;
action.accept(e);
if (map.modCount != expectedModCount)
throw new ConcurrentModificationException();
return true;
}
}
}
return false;
}
public int characteristics() {
return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) |
Spliterator.DISTINCT;
/**
* Recursive invariant check
*/
static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
tb = t.prev, tn = (TreeNode<K,V>)t.next;
if (tb != null && tb.next != t)
return false;
if (tn != null && tn.prev != t)
return false;
if (tp != null && t != tp.left && t != tp.right)
return false;
if (tl != null && (tl.parent != t || tl.hash > t.hash))
return false;
if (tr != null && (tr.parent != t || tr.hash < t.hash))
return false;
if (t.red && tl != null && tl.red && tr != null && tr.red)
return false;
if (tl != null && !checkInvariants(tl))
return false;
if (tr != null && !checkInvariants(tr))
return false;
return true;
}
}
}
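The lo/hi relinking in split() above follows the same index arithmetic that resize() applies to ordinary bins: when the table doubles, an entry either keeps its index (the newly significant hash bit is 0) or moves up by exactly the old capacity. A small standalone arithmetic sketch (the SplitIndexDemo class is illustrative, not part of this patch):

class SplitIndexDemo {
    public static void main(String[] args) {
        int oldCap = 16;                       // table doubling from 16 to 32
        int hash = 53;                         // binary 110101
        int index = hash & (oldCap - 1);       // 53 & 15 == 5, the old bucket
        // The lo list keeps (hash & oldCap) == 0 entries at `index`;
        // the hi list moves the rest to `index + oldCap`.
        int newIndex = ((hash & oldCap) == 0) ? index : index + oldCap;
        System.out.println(newIndex);          // 53 & 16 != 0, so 5 + 16 = 21
    }
}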
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -24,9 +24,12 @@
*/
package java.util;
import java.io.*;
import java.util.function.Consumer;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.io.Serializable;
import java.io.IOException;
/**
* <p>Hash table and linked list implementation of the <tt>Map</tt> interface,
......@@ -57,9 +60,9 @@ import java.util.function.BiFunction;
* order they were presented.)
*
* <p>A special {@link #LinkedHashMap(int,float,boolean) constructor} is
* provided to create a <tt>LinkedHashMap</tt> whose order of iteration is the
* order in which its entries were last accessed, from least-recently accessed
* to most-recently (<i>access-order</i>). This kind of map is well-suited to
* provided to create a linked hash map whose order of iteration is the order
* in which its entries were last accessed, from least-recently accessed to
* most-recently (<i>access-order</i>). This kind of map is well-suited to
* building LRU caches. Invoking the <tt>put</tt> or <tt>get</tt> method
* results in an access to the corresponding entry (assuming it exists after
* the invocation completes). The <tt>putAll</tt> method generates one entry
......@@ -155,18 +158,53 @@ import java.util.function.BiFunction;
* @see Hashtable
* @since 1.4
*/
public class LinkedHashMap<K,V>
extends HashMap<K,V>
implements Map<K,V>
{
/*
* Implementation note. A previous version of this class was
* internally structured a little differently. Because superclass
* HashMap now uses trees for some of its nodes, class
* LinkedHashMap.Entry is now treated as intermediary node class
* that can also be converted to tree form. The name of this
* class, LinkedHashMap.Entry, is confusing in several ways in its
* current context, but cannot be changed. Otherwise, even though
* it is not exported outside this package, some existing source
* code is known to have relied on a symbol resolution corner case
* rule in calls to removeEldestEntry that suppressed compilation
* errors due to ambiguous usages. So, we keep the name to
* preserve unmodified compilability.
*
* The changes in node classes also require using two fields
* (head, tail) rather than a pointer to a header node to maintain
* the doubly-linked before/after list. This class also
* previously used a different style of callback methods upon
* access, insertion, and removal.
*/
/**
* HashMap.Node subclass for normal LinkedHashMap entries.
*/
static class Entry<K,V> extends HashMap.Node<K,V> {
Entry<K,V> before, after;
Entry(int hash, K key, V value, Node<K,V> next) {
super(hash, key, value, next);
}
}
private static final long serialVersionUID = 3801124242820219131L;
    /**
     * The head of the doubly linked list.
     */
    private transient Entry<K,V> header;

    /**
     * The head (eldest) of the doubly linked list.
     */
    transient LinkedHashMap.Entry<K,V> head;
/**
* The tail (youngest) of the doubly linked list.
*/
transient LinkedHashMap.Entry<K,V> tail;
/**
* The iteration ordering method for this linked hash map: <tt>true</tt>
......@@ -174,7 +212,125 @@ public class LinkedHashMap<K,V>
*
* @serial
*/
private final boolean accessOrder;
final boolean accessOrder;
// internal utilities
// link at the end of list
private void linkNodeLast(LinkedHashMap.Entry<K,V> p) {
LinkedHashMap.Entry<K,V> last = tail;
tail = p;
if (last == null)
head = p;
else {
p.before = last;
last.after = p;
}
}
// apply src's links to dst
private void transferLinks(LinkedHashMap.Entry<K,V> src,
LinkedHashMap.Entry<K,V> dst) {
LinkedHashMap.Entry<K,V> b = dst.before = src.before;
LinkedHashMap.Entry<K,V> a = dst.after = src.after;
if (b == null)
head = dst;
else
b.after = dst;
if (a == null)
tail = dst;
else
a.before = dst;
}
// overrides of HashMap hook methods
void reinitialize() {
super.reinitialize();
head = tail = null;
}
Node<K,V> newNode(int hash, K key, V value, Node<K,V> e) {
LinkedHashMap.Entry<K,V> p =
new LinkedHashMap.Entry<K,V>(hash, key, value, e);
linkNodeLast(p);
return p;
}
Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
LinkedHashMap.Entry<K,V> t =
new LinkedHashMap.Entry<K,V>(q.hash, q.key, q.value, next);
transferLinks(q, t);
return t;
}
TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
TreeNode<K,V> p = new TreeNode<K,V>(hash, key, value, next);
linkNodeLast(p);
return p;
}
TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
LinkedHashMap.Entry<K,V> q = (LinkedHashMap.Entry<K,V>)p;
TreeNode<K,V> t = new TreeNode<K,V>(q.hash, q.key, q.value, next);
transferLinks(q, t);
return t;
}
void afterNodeRemoval(Node<K,V> e) { // unlink
LinkedHashMap.Entry<K,V> p =
(LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
p.before = p.after = null;
if (b == null)
head = a;
else
b.after = a;
if (a == null)
tail = b;
else
a.before = b;
}
void afterNodeInsertion(boolean evict) { // possibly remove eldest
LinkedHashMap.Entry<K,V> first;
if (evict && (first = head) != null && removeEldestEntry(first)) {
K key = first.key;
removeNode(hash(key), key, null, false, true);
}
}
void afterNodeAccess(Node<K,V> e) { // move node to last
LinkedHashMap.Entry<K,V> last;
if (accessOrder && (last = tail) != e) {
LinkedHashMap.Entry<K,V> p =
(LinkedHashMap.Entry<K,V>)e, b = p.before, a = p.after;
p.after = null;
if (b == null)
head = a;
else
b.after = a;
if (a != null)
a.before = b;
else
last = b;
if (last == null)
head = p;
else {
p.before = last;
last.after = p;
}
tail = p;
++modCount;
}
}
void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
s.writeObject(e.key);
s.writeObject(e.value);
}
}
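linkNodeLast and afterNodeAccess are what maintain the before/after chain; the observable effect when the map is constructed with access order can be seen in a short illustrative snippet (not part of the patch):

import java.util.LinkedHashMap;

class AccessOrderDemo {
    public static void main(String[] args) {
        LinkedHashMap<String,Integer> m =
            new LinkedHashMap<>(16, 0.75f, true);  // true selects access order
        m.put("a", 1); m.put("b", 2); m.put("c", 3);
        m.get("a");                       // afterNodeAccess moves "a" to the tail
        System.out.println(m.keySet());   // prints [b, c, a]
    }
}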
/**
* Constructs an empty insertion-ordered <tt>LinkedHashMap</tt> instance
......@@ -221,8 +377,9 @@ public class LinkedHashMap<K,V>
* @throws NullPointerException if the specified map is null
*/
public LinkedHashMap(Map<? extends K, ? extends V> m) {
super(m);
super();
accessOrder = false;
putMapEntries(m, false);
}
/**
......@@ -243,16 +400,6 @@ public class LinkedHashMap<K,V>
this.accessOrder = accessOrder;
}
/**
* Called by superclass constructors and pseudoconstructors (clone,
* readObject) before any entries are inserted into the map. Initializes
* the chain.
*/
@Override
void init() {
header = new Entry<>(-1, null, null, null);
header.before = header.after = header;
}
/**
* Returns <tt>true</tt> if this map maps one or more keys to the
......@@ -263,14 +410,9 @@ public class LinkedHashMap<K,V>
* specified value
*/
    public boolean containsValue(Object value) {
        // Overridden to take advantage of faster iterator
        if (value==null) {
            for (Entry<?,?> e = header.after; e != header; e = e.after)
                if (e.value==null)
                    return true;
        } else {
            for (Entry<?,?> e = header.after; e != header; e = e.after)
                if (value.equals(e.value))
                    return true;
        }
        return false;
    }

    public boolean containsValue(Object value) {
        for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after) {
            V v = e.value;
            if (v == value || (value != null && value.equals(v)))
                return true;
        }
        return false;
......@@ -292,10 +434,11 @@ public class LinkedHashMap<K,V>
* distinguish these two cases.
*/
public V get(Object key) {
        Entry<K,V> e = (Entry<K,V>)getEntry(key);
        if (e == null)
            return null;
        e.recordAccess(this);
        return e.value;
    }

    public V get(Object key) {
        Node<K,V> e;
        if ((e = getNode(hash(key), key)) == null)
            return null;
        if (accessOrder)
            afterNodeAccess(e);
        return e.value;
    }
......@@ -305,207 +448,288 @@ public class LinkedHashMap<K,V>
*/
public void clear() {
super.clear();
header.before = header.after = header;
head = tail = null;
}
    @Override
    public void forEach(BiConsumer<? super K, ? super V> action) {
        Objects.requireNonNull(action);
        int expectedModCount = modCount;
        for (Entry<K, V> entry = header.after; entry != header; entry = entry.after) {
            action.accept(entry.key, entry.value);

            if (expectedModCount != modCount) {
                throw new ConcurrentModificationException();
            }
        }
    }

    @Override
    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        Objects.requireNonNull(function);
        int expectedModCount = modCount;
        for (Entry<K, V> entry = header.after; entry != header; entry = entry.after) {
            entry.value = function.apply(entry.key, entry.value);

            if (expectedModCount != modCount) {
                throw new ConcurrentModificationException();
            }
        }
    }

    /**
     * Returns <tt>true</tt> if this map should remove its eldest entry.
     * This method is invoked by <tt>put</tt> and <tt>putAll</tt> after
     * inserting a new entry into the map.  It provides the implementor
     * with the opportunity to remove the eldest entry each time a new one
     * is added.  This is useful if the map represents a cache: it allows
     * the map to reduce memory consumption by deleting stale entries.
     *
     * <p>Sample use: this override will allow the map to grow up to 100
     * entries and then delete the eldest entry each time a new entry is
     * added, maintaining a steady state of 100 entries.
     * <pre>
     *     private static final int MAX_ENTRIES = 100;
     *
     *     protected boolean removeEldestEntry(Map.Entry eldest) {
     *        return size() &gt; MAX_ENTRIES;
     *     }
     * </pre>
     *
     * <p>This method typically does not modify the map in any way,
     * instead allowing the map to modify itself as directed by its
     * return value.  It <i>is</i> permitted for this method to modify
     * the map directly, but if it does so, it <i>must</i> return
     * <tt>false</tt> (indicating that the map should not attempt any
     * further modification).  The effects of returning <tt>true</tt>
     * after modifying the map from within this method are unspecified.
     *
     * <p>This implementation merely returns <tt>false</tt> (so that this
     * map acts like a normal map - the eldest element is never removed).
     *
     * @param    eldest The least recently inserted entry in the map, or if
     *           this is an access-ordered map, the least recently accessed
     *           entry.  This is the entry that will be removed if this
     *           method returns <tt>true</tt>.  If the map was empty prior
     *           to the <tt>put</tt> or <tt>putAll</tt> invocation resulting
     *           in this invocation, this will be the entry that was just
     *           inserted; in other words, if the map contains a single
     *           entry, the eldest entry is also the newest.
     * @return   <tt>true</tt> if the eldest entry should be removed
     *           from the map; <tt>false</tt> if it should be retained.
     */
    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
        return false;
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation), the results of
     * the iteration are undefined.  The set supports element removal,
     * which removes the corresponding mapping from the map, via the
     * <tt>Iterator.remove</tt>, <tt>Set.remove</tt>,
     * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt>
     * operations.  It does not support the <tt>add</tt> or <tt>addAll</tt>
     * operations.
     * Its {@link Spliterator} typically provides faster sequential
     * performance but much poorer parallel performance than that of
     * {@code HashMap}.
     *
     * @return a set view of the keys contained in this map
     */
    public Set<K> keySet() {
        Set<K> ks;
        return (ks = keySet) == null ? (keySet = new LinkedKeySet()) : ks;
    }

    final class LinkedKeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { LinkedHashMap.this.clear(); }
        public final Iterator<K> iterator() {
            return new LinkedKeyIterator();
        }
        public final boolean contains(Object o) { return containsKey(o); }
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            return Spliterators.spliterator(this, Spliterator.SIZED |
                                            Spliterator.ORDERED |
                                            Spliterator.DISTINCT);
        }
        public final void forEach(Consumer<? super K> action) {
            if (action == null)
                throw new NullPointerException();
            int mc = modCount;
            for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
                action.accept(e.key);
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
    /**
     * LinkedHashMap entry.
     */
    private static class Entry<K,V> extends HashMap.Entry<K,V> {
        // These fields comprise the doubly linked list used for iteration.
        Entry<K,V> before, after;

        Entry(int hash, K key, V value, Object next) {
            super(hash, key, value, next);
        }

        /**
         * Removes this entry from the linked list.
         */
        private void remove() {
            before.after = after;
            after.before = before;
        }

        /**
         * Inserts this entry before the specified existing entry in the list.
         */
        private void addBefore(Entry<K,V> existingEntry) {
            after  = existingEntry;
            before = existingEntry.before;
            before.after = this;
            after.before = this;
        }

        /**
         * This method is invoked by the superclass whenever the value
         * of a pre-existing entry is read by Map.get or modified by Map.put.
         * If the enclosing Map is access-ordered, it moves the entry
         * to the end of the list; otherwise, it does nothing.
         */
        void recordAccess(HashMap<K,V> m) {
            LinkedHashMap<K,V> lm = (LinkedHashMap<K,V>)m;
            if (lm.accessOrder) {
                lm.modCount++;
                remove();
                addBefore(lm.header);
            }
        }

        void recordRemoval(HashMap<K,V> m) {
            remove();
        }
    }

    private abstract class LinkedHashIterator<T> implements Iterator<T> {
        Entry<K,V> nextEntry    = header.after;
        Entry<K,V> lastReturned = null;

        /**
         * The modCount value that the iterator believes that the backing
         * List should have.  If this expectation is violated, the iterator
         * has detected concurrent modification.
         */
        int expectedModCount = modCount;

        public boolean hasNext() {
            return nextEntry != header;
        }

        public void remove() {
            if (lastReturned == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();

            LinkedHashMap.this.remove(lastReturned.key);
            lastReturned = null;
            expectedModCount = modCount;
        }

        Entry<K,V> nextEntry() {
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (nextEntry == header)
                throw new NoSuchElementException();

            Entry<K,V> e = lastReturned = nextEntry;
            nextEntry = e.after;
            return e;
        }
    }

    private class KeyIterator extends LinkedHashIterator<K> {
        public K next() { return nextEntry().getKey(); }
    }

    private class ValueIterator extends LinkedHashIterator<V> {
        public V next() { return nextEntry().value; }
    }

    private class EntryIterator extends LinkedHashIterator<Map.Entry<K,V>> {
        public Map.Entry<K,V> next() { return nextEntry(); }
    }

    // These Overrides alter the behavior of superclass view iterator() methods
    Iterator<K> newKeyIterator()   { return new KeyIterator();   }
    Iterator<V> newValueIterator() { return new ValueIterator(); }
    Iterator<Map.Entry<K,V>> newEntryIterator() { return new EntryIterator(); }

    /**
     * This override alters behavior of superclass put method. It causes newly
     * allocated entry to get inserted at the end of the linked list and
     * removes the eldest entry if appropriate.
     */
    @Override
    void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) {
        super.addEntry(hash, key, value, bucketIndex, checkIfNeedTree);

        // Remove eldest entry if instructed
        Entry<K,V> eldest = header.after;
        if (removeEldestEntry(eldest)) {
            removeEntryForKey(eldest.key);
        }
    }

    /*
     * Create a new LinkedHashMap.Entry and setup the before/after pointers
     */
    @Override
    HashMap.Entry<K,V> newEntry(int hash, K key, V value, Object next) {
        Entry<K,V> newEntry = new Entry<>(hash, key, value, next);
        newEntry.addBefore(header);
        return newEntry;
    }

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  If the map is
     * modified while an iteration over the collection is in progress
     * (except through the iterator's own <tt>remove</tt> operation),
     * the results of the iteration are undefined.  The collection
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Collection.remove</tt>, <tt>removeAll</tt>,
     * <tt>retainAll</tt> and <tt>clear</tt> operations.  It does not
     * support the <tt>add</tt> or <tt>addAll</tt> operations.
     * Its {@link Spliterator} typically provides faster sequential
     * performance but much poorer parallel performance than that of
     * {@code HashMap}.
     *
     * @return a view of the values contained in this map
     */
    public Collection<V> values() {
        Collection<V> vs;
        return (vs = values) == null ? (values = new LinkedValues()) : vs;
    }

    final class LinkedValues extends AbstractCollection<V> {
        public final int size()                 { return size; }
        public final void clear()               { LinkedHashMap.this.clear(); }
        public final Iterator<V> iterator() {
            return new LinkedValueIterator();
        }
        public final boolean contains(Object o) { return containsValue(o); }
        public final Spliterator<V> spliterator() {
            return Spliterators.spliterator(this, Spliterator.SIZED |
                                            Spliterator.ORDERED);
        }
        public final void forEach(Consumer<? super V> action) {
            if (action == null)
                throw new NullPointerException();
            int mc = modCount;
            for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
                action.accept(e.value);
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  If the map is modified
     * while an iteration over the set is in progress (except through
     * the iterator's own <tt>remove</tt> operation, or through the
     * <tt>setValue</tt> operation on a map entry returned by the
     * iterator) the results of the iteration are undefined.  The set
     * supports element removal, which removes the corresponding
     * mapping from the map, via the <tt>Iterator.remove</tt>,
     * <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and
     * <tt>clear</tt> operations.  It does not support the
     * <tt>add</tt> or <tt>addAll</tt> operations.
     * Its {@link Spliterator} typically provides faster sequential
     * performance but much poorer parallel performance than that of
     * {@code HashMap}.
     *
     * @return a set view of the mappings contained in this map
     */
    public Set<Map.Entry<K,V>> entrySet() {
        Set<Map.Entry<K,V>> es;
        return (es = entrySet) == null ? (entrySet = new LinkedEntrySet()) : es;
    }

    final class LinkedEntrySet extends AbstractSet<Map.Entry<K,V>> {
        public final int size()                 { return size; }
        public final void clear()               { LinkedHashMap.this.clear(); }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new LinkedEntryIterator();
        }
        public final boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Node<K,V> candidate = getNode(hash(key), key);
            return candidate != null && candidate.equals(e);
        }
        public final boolean remove(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
                Object key = e.getKey();
                Object value = e.getValue();
                return removeNode(hash(key), key, value, true, true) != null;
            }
            return false;
        }
        public final Spliterator<Map.Entry<K,V>> spliterator() {
            return Spliterators.spliterator(this, Spliterator.SIZED |
                                            Spliterator.ORDERED |
                                            Spliterator.DISTINCT);
        }
        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null)
                throw new NullPointerException();
            int mc = modCount;
            for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
                action.accept(e);
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }

    // Map overrides

    public void forEach(BiConsumer<? super K, ? super V> action) {
        if (action == null)
            throw new NullPointerException();
        int mc = modCount;
        for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
            action.accept(e.key, e.value);
        if (modCount != mc)
            throw new ConcurrentModificationException();
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null)
            throw new NullPointerException();
        int mc = modCount;
        for (LinkedHashMap.Entry<K,V> e = head; e != null; e = e.after)
            e.value = function.apply(e.key, e.value);
        if (modCount != mc)
            throw new ConcurrentModificationException();
    }

    // Iterators

    abstract class LinkedHashIterator {
        LinkedHashMap.Entry<K,V> next;
        LinkedHashMap.Entry<K,V> current;
        int expectedModCount;

        LinkedHashIterator() {
            next = head;
            expectedModCount = modCount;
            current = null;
        }

        public final boolean hasNext() {
            return next != null;
        }

        final LinkedHashMap.Entry<K,V> nextNode() {
            LinkedHashMap.Entry<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            current = e;
            next = e.after;
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            K key = p.key;
            removeNode(hash(key), key, null, false, false);
            expectedModCount = modCount;
        }
    }

    final class LinkedKeyIterator extends LinkedHashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().getKey(); }
    }

    final class LinkedValueIterator extends LinkedHashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().value; }
    }

    final class LinkedEntryIterator extends LinkedHashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }
/**
* Returns <tt>true</tt> if this map should remove its eldest entry.
* This method is invoked by <tt>put</tt> and <tt>putAll</tt> after
* inserting a new entry into the map. It provides the implementor
* with the opportunity to remove the eldest entry each time a new one
* is added. This is useful if the map represents a cache: it allows
* the map to reduce memory consumption by deleting stale entries.
*
* <p>Sample use: this override will allow the map to grow up to 100
* entries and then delete the eldest entry each time a new entry is
* added, maintaining a steady state of 100 entries.
* <pre>{@code
* private static final int MAX_ENTRIES = 100;
*
* protected boolean removeEldestEntry(Map.Entry eldest) {
* return size() > MAX_ENTRIES;
* }
* }</pre>
*
* <p>This method typically does not modify the map in any way,
* instead allowing the map to modify itself as directed by its
* return value. It <i>is</i> permitted for this method to modify
* the map directly, but if it does so, it <i>must</i> return
* <tt>false</tt> (indicating that the map should not attempt any
* further modification). The effects of returning <tt>true</tt>
* after modifying the map from within this method are unspecified.
*
* <p>This implementation merely returns <tt>false</tt> (so that this
* map acts like a normal map - the eldest element is never removed).
*
* @param eldest The least recently inserted entry in the map, or if
* this is an access-ordered map, the least recently accessed
     *           entry. This is the entry that will be removed if this
* method returns <tt>true</tt>. If the map was empty prior
* to the <tt>put</tt> or <tt>putAll</tt> invocation resulting
* in this invocation, this will be the entry that was just
* inserted; in other words, if the map contains a single
* entry, the eldest entry is also the newest.
* @return <tt>true</tt> if the eldest entry should be removed
* from the map; <tt>false</tt> if it should be retained.
*/
protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
return false;
}
}
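The access-order constructor and the removeEldestEntry hook compose into the LRU cache that the javadoc above describes. A minimal sketch (the LruCache class is illustrative, not part of this patch):

import java.util.LinkedHashMap;
import java.util.Map;

class LruCache<K,V> extends LinkedHashMap<K,V> {
    private final int maxEntries;

    LruCache(int maxEntries) {
        super(16, 0.75f, true);              // iterate in access order
        this.maxEntries = maxEntries;
    }

    @Override
    protected boolean removeEldestEntry(Map.Entry<K,V> eldest) {
        return size() > maxEntries;          // evict once past capacity
    }

    public static void main(String[] args) {
        LruCache<Integer,String> c = new LruCache<>(2);
        c.put(1, "one"); c.put(2, "two");
        c.get(1);                            // touch 1, so 2 becomes eldest
        c.put(3, "three");                   // evicts 2
        System.out.println(c.keySet());      // prints [1, 3]
    }
}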
......@@ -50,9 +50,9 @@ import static java.util.Arrays.*;
"java.util.HashMap$EntryIterator",
"java.util.HashMap$KeyIterator",
"java.util.HashMap$ValueIterator",
"java.util.LinkedHashMap$EntryIterator",
"java.util.LinkedHashMap$KeyIterator",
"java.util.LinkedHashMap$ValueIterator"})
"java.util.LinkedHashMap$LinkedEntryIterator",
"java.util.LinkedHashMap$LinkedKeyIterator",
"java.util.LinkedHashMap$LinkedValueIterator"})
public class Probe {
public static void main (String... args) throws Throwable {
Classes classesAnnotation = (Probe.class).getAnnotation(Classes.class);
......
......@@ -53,8 +53,6 @@ public class CheckRandomHashSeed {
throw new Error("Error in test setup: " + (expectRandom ? "" : "not " ) + "expecting random hashSeed, but " + PROP_NAME + " is " + (propSet ? "" : "not ") + "enabled");
}
testMap(new HashMap());
testMap(new LinkedHashMap());
testMap(new WeakHashMap());
testMap(new Hashtable());
}
......
......@@ -25,7 +25,6 @@
* @test
* @bug 8005698
* @run main InPlaceOpsCollisions -shortrun
* @run main/othervm -Djdk.map.randomseed=true InPlaceOpsCollisions -shortrun
* @summary Ensure overrides of in-place operations in Maps behave well with lots of collisions.
* @author Brent Christian
*/
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.BiConsumer;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import static org.testng.Assert.assertEquals;
/*
* @test
* @bug 8023463
 * @summary Test the case where a bin is treeified and vice versa
* @run testng MapBinToFromTreeTest
*/
@Test
public class MapBinToFromTreeTest {
// Initial capacity of map
    // Should be >= the map capacity for treeifying, see HashMap/ConcurrentHashMap.MIN_TREEIFY_CAPACITY
static final int INITIAL_CAPACITY = 64;
// Maximum size of map
    // Should be > the treeify threshold, see HashMap/ConcurrentHashMap.TREEIFY_THRESHOLD
// Should be > INITIAL_CAPACITY to ensure resize occurs
static final int SIZE = 256;
// Load factor of map
    // A value of 1.0 ensures that the new threshold == capacity
static final float LOAD_FACTOR = 1.0f;
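    // (For example, with INITIAL_CAPACITY 64 and LOAD_FACTOR 1.0 the first
    //  threshold is 64 * 1.0 = 64, so the 65th insertion triggers a resize.)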
@DataProvider(name = "maps")
static Object[][] mapProvider() {
return new Object[][] {
// Pass in the class name as a description for test reporting
// purposes
{ HashMap.class.getName(), new HashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
{ LinkedHashMap.class.getName(), new LinkedHashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
{ ConcurrentHashMap.class.getName(), new ConcurrentHashMap(INITIAL_CAPACITY, LOAD_FACTOR) },
};
}
@Test(dataProvider = "maps")
public void testPutThenGet(String d, Map<HashCodeInteger, Integer> m) {
put(SIZE, m, (i, s) -> {
for (int j = 0; j < s; j++) {
assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
String.format("Map.get(%d)", j));
}
});
}
@Test(dataProvider = "maps")
public void testPutThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
put(SIZE, m, (i, s) -> {
// Note that it is OK to collect to a Set (HashSet) as long as
// integer values are used since these tests only check for
// collisions and other tests will verify more general functionality
Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
Collection<Integer> expected = IntStream.range(0, s).boxed().collect(c);
assertEquals(actual, expected, "Map.keySet()");
});
}
@Test(dataProvider = "maps")
public void testRemoveThenGet(String d, Map<HashCodeInteger, Integer> m) {
put(SIZE, m, (i, s) -> { });
remove(m, (i, s) -> {
for (int j = i + 1; j < SIZE; j++) {
assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
String.format("Map.get(%d)", j));
}
});
}
@Test(dataProvider = "maps")
public void testRemoveThenTraverse(String d, Map<HashCodeInteger, Integer> m) {
put(SIZE, m, (i, s) -> { });
Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
remove(m, (i, s) -> {
Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
Collection<Integer> expected = IntStream.range(i + 1, SIZE).boxed().collect(c);
assertEquals(actual, expected, "Map.keySet()");
});
}
@Test(dataProvider = "maps")
public void testUntreeifyOnResizeWithGet(String d, Map<HashCodeInteger, Integer> m) {
// Fill the map with 64 entries grouped into 4 buckets
put(INITIAL_CAPACITY, m, (i, s) -> { });
for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
// Add further entries in the 0'th bucket so as not to disturb
// other buckets, entries of which may be distributed and/or
// the bucket untreeified on resize
m.put(new HashCodeInteger(i, 0), i);
for (int j = 0; j < INITIAL_CAPACITY; j++) {
assertEquals(m.get(new HashCodeInteger(j)).intValue(), j,
String.format("Map.get(%d) < INITIAL_CAPACITY", j));
}
for (int j = INITIAL_CAPACITY; j <= i; j++) {
assertEquals(m.get(new HashCodeInteger(j, 0)).intValue(), j,
String.format("Map.get(%d) >= INITIAL_CAPACITY", j));
}
}
}
@Test(dataProvider = "maps")
public void testUntreeifyOnResizeWithTraverse(String d, Map<HashCodeInteger, Integer> m) {
// Fill the map with 64 entries grouped into 4 buckets
put(INITIAL_CAPACITY, m, (i, s) -> { });
Collector<Integer, ?, ? extends Collection<Integer>> c = getCollector(m);
for (int i = INITIAL_CAPACITY; i < SIZE; i++) {
// Add further entries in the 0'th bucket so as not to disturb
// other buckets, entries of which may be distributed and/or
// the bucket untreeified on resize
m.put(new HashCodeInteger(i, 0), i);
Collection<Integer> actual = m.keySet().stream().map(e -> e.value).collect(c);
Collection<Integer> expected = IntStream.rangeClosed(0, i).boxed().collect(c);
assertEquals(actual, expected, "Key set");
}
}
Collector<Integer, ?, ? extends Collection<Integer>> getCollector(Map<?, ?> m) {
Collector<Integer, ?, ? extends Collection<Integer>> collector = m instanceof LinkedHashMap
? Collectors.toList()
: Collectors.toSet();
return collector;
}
void put(int size, Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
for (int i = 0; i < size; i++) {
m.put(new HashCodeInteger(i), i);
c.accept(i, m.size());
}
}
void remove(Map<HashCodeInteger, Integer> m, BiConsumer<Integer, Integer> c) {
int size = m.size();
        // Remove all elements, thus ensuring that at some point trees will be
        // converted back to bins
for (int i = 0; i < size; i++) {
m.remove(new HashCodeInteger(i));
c.accept(i, m.size());
}
}
final static class HashCodeInteger implements Comparable<HashCodeInteger> {
final int value;
final int hashcode;
HashCodeInteger(int value) {
this(value, hash(value));
}
HashCodeInteger(int value, int hashcode) {
this.value = value;
this.hashcode = hashcode;
}
static int hash(int i) {
// Assuming 64 entries with keys from 0 to 63 then a map:
// - of capacity 64 will have 4 buckets with 16 entries per-bucket
// - of capacity 128 will have 8 buckets with 8 entries per-bucket
// - of capacity 256 will have 16 buckets with 4 entries per-bucket
//
// Re-sizing will result in re-distribution, doubling the buckets
// and reducing the entries by half. This will result in
            // untreeifying when the number of entries is less than the
            // untreeify threshold (see HashMap/ConcurrentHashMap.UNTREEIFY_THRESHOLD)
return (i % 4) + (i / 4) * INITIAL_CAPACITY;
}
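        // Worked example of the resulting distribution:
        //   hash(0..3) = 0, 1, 2, 3 and hash(4..7) = 64, 65, 66, 67, so at
        //   capacity 64 (index mask 63) keys 0..63 collapse into buckets
        //   0..3, 16 entries each; at capacity 128 (mask 127) the same keys
        //   spread over buckets 0..3 and 64..67, 8 entries each; and so on.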
@Override
public boolean equals(Object obj) {
if (obj instanceof HashCodeInteger) {
HashCodeInteger other = (HashCodeInteger) obj;
return other.value == value;
}
return false;
}
@Override
public int hashCode() {
return hashcode;
}
@Override
public int compareTo(HashCodeInteger o) {
return value - o.value;
}
@Override
public String toString() {
return Integer.toString(value);
}
}
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.util.*;
import java.lang.reflect.Field;
/*
* @test
* @bug 8005698
* @summary Test the case where TreeBin.splitTreeBin() converts a bin back to an Entry list
* @run main TreeBinSplitBackToEntries unused
* @author Brent Christian
*/
public class TreeBinSplitBackToEntries {
private static int EXPECTED_TREE_THRESHOLD = 16;
    // Easiest if this covers one bit higher than 'bit' in splitTreeBin() on the
// call where the TreeBin is converted back to an Entry list
private static int HASHMASK = 0x7F;
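    // (0x7F masks hash codes into 0..127, confining keys to the low seven
    //  index bits, i.e. one bit beyond what a 64-bucket table uses.)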
private static boolean verbose = false;
private static boolean fastFail = false;
private static boolean failed = false;
static void printlnIfVerbose(String msg) {
if (verbose) {System.out.println(msg); }
}
public static void main(String[] args) {
for (String arg : args) {
switch(arg) {
case "-verbose":
verbose = true;
break;
case "-fastfail":
fastFail = true;
break;
}
}
checkTreeThreshold();
testMapHiTree();
testMapLoTree();
if (failed) {
System.out.println("Test Failed");
System.exit(1);
} else {
System.out.println("Test Passed");
}
}
public static void checkTreeThreshold() {
int threshold = -1;
try {
Class treeBinClass = Class.forName("java.util.HashMap$TreeBin");
Field treeThreshold = treeBinClass.getDeclaredField("TREE_THRESHOLD");
treeThreshold.setAccessible(true);
threshold = treeThreshold.getInt(treeBinClass);
} catch (ClassNotFoundException|NoSuchFieldException|IllegalAccessException e) {
e.printStackTrace();
throw new Error("Problem accessing TreeBin.TREE_THRESHOLD", e);
}
check("Expected TREE_THRESHOLD: " + EXPECTED_TREE_THRESHOLD +", found: " + threshold,
threshold == EXPECTED_TREE_THRESHOLD);
printlnIfVerbose("TREE_THRESHOLD: " + threshold);
}
public static void testMapHiTree() {
Object[][] mapKeys = makeHiTreeTestData();
testMapsForKeys(mapKeys, "hiTree");
}
public static void testMapLoTree() {
Object[][] mapKeys = makeLoTreeTestData();
testMapsForKeys(mapKeys, "loTree");
}
public static void testMapsForKeys(Object[][] mapKeys, String desc) {
// loop through data sets
for (Object[] keys_desc : mapKeys) {
Map<Object, Object>[] maps = (Map<Object, Object>[]) new Map[]{
new HashMap<>(4, 0.8f),
new LinkedHashMap<>(4, 0.8f),
};
// for each map type.
for (Map<Object, Object> map : maps) {
Object[] keys = (Object[]) keys_desc[1];
System.out.println(desc + ": testPutThenGet() for " + map.getClass());
testPutThenGet(map, keys);
}
}
}
private static <T> void testPutThenGet(Map<T, T> map, T[] keys) {
for (T key : keys) {
printlnIfVerbose("put()ing 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + ", hashCode=" + Integer.toHexString(key.hashCode()));
map.put(key, key);
}
for (T key : keys) {
check("key: 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + " not found in resulting " + map.getClass().getSimpleName(), map.get(key) != null);
}
}
/* Data to force a non-empty loTree in TreeBin.splitTreeBin() to be converted back
* into an Entry list
*/
private static Object[][] makeLoTreeTestData() {
HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] {
new HashableInteger( 0x23, HASHMASK),
new HashableInteger( 0x123, HASHMASK),
new HashableInteger( 0x323, HASHMASK),
new HashableInteger( 0x523, HASHMASK),
new HashableInteger( 0x723, HASHMASK),
new HashableInteger( 0x923, HASHMASK),
new HashableInteger( 0xB23, HASHMASK),
new HashableInteger( 0xD23, HASHMASK),
new HashableInteger( 0xF23, HASHMASK),
new HashableInteger( 0xF123, HASHMASK),
new HashableInteger( 0x1023, HASHMASK),
new HashableInteger( 0x1123, HASHMASK),
new HashableInteger( 0x1323, HASHMASK),
new HashableInteger( 0x1523, HASHMASK),
new HashableInteger( 0x1723, HASHMASK),
new HashableInteger( 0x1923, HASHMASK),
new HashableInteger( 0x1B23, HASHMASK),
new HashableInteger( 0x1D23, HASHMASK),
new HashableInteger( 0x3123, HASHMASK),
new HashableInteger( 0x3323, HASHMASK),
new HashableInteger( 0x3523, HASHMASK),
new HashableInteger( 0x3723, HASHMASK),
new HashableInteger( 0x1001, HASHMASK),
new HashableInteger( 0x4001, HASHMASK),
new HashableInteger( 0x1, HASHMASK),
};
return new Object[][] {
new Object[]{"Colliding Objects", COLLIDING_OBJECTS},
};
}
/* Data to force the hiTree in TreeBin.splitTreeBin() to be converted back
* into an Entry list
*/
private static Object[][] makeHiTreeTestData() {
HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] {
new HashableInteger( 0x1, HASHMASK),
new HashableInteger( 0x101, HASHMASK),
new HashableInteger( 0x301, HASHMASK),
new HashableInteger( 0x501, HASHMASK),
new HashableInteger( 0x701, HASHMASK),
new HashableInteger( 0x1001, HASHMASK),
new HashableInteger( 0x1101, HASHMASK),
new HashableInteger( 0x1301, HASHMASK),
new HashableInteger( 0x1501, HASHMASK),
new HashableInteger( 0x1701, HASHMASK),
new HashableInteger( 0x4001, HASHMASK),
new HashableInteger( 0x4101, HASHMASK),
new HashableInteger( 0x4301, HASHMASK),
new HashableInteger( 0x4501, HASHMASK),
new HashableInteger( 0x4701, HASHMASK),
new HashableInteger( 0x8001, HASHMASK),
new HashableInteger( 0x8101, HASHMASK),
new HashableInteger( 0x8301, HASHMASK),
new HashableInteger( 0x8501, HASHMASK),
new HashableInteger( 0x8701, HASHMASK),
new HashableInteger( 0x9001, HASHMASK),
new HashableInteger( 0x23, HASHMASK),
new HashableInteger( 0x123, HASHMASK),
new HashableInteger( 0x323, HASHMASK),
new HashableInteger( 0x523, HASHMASK),
};
return new Object[][] {
new Object[]{"Colliding Objects", COLLIDING_OBJECTS},
};
}
static void check(String desc, boolean cond) {
if (!cond) {
fail(desc);
}
}
static void fail(String msg) {
failed = true;
(new Error("Failure: " + msg)).printStackTrace(System.err);
if (fastFail) {
System.exit(1);
}
}
    static final class HashableInteger implements Comparable<HashableInteger> {
final int value;
        final int hashmask; // deliberately duplicated in every instance
HashableInteger(int value, int hashmask) {
this.value = value;
this.hashmask = hashmask;
}
@Override
public boolean equals(Object obj) {
if (obj instanceof HashableInteger) {
HashableInteger other = (HashableInteger) obj;
return other.value == value;
}
return false;
}
@Override
public int hashCode() {
// This version ANDs the mask
return value & hashmask;
}
        @Override
        public int compareTo(HashableInteger o) {
            return Integer.compare(value, o.value); // avoids overflow of value - o.value
        }
@Override
public String toString() {
return Integer.toString(value);
}
}
}
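
For context, here is a minimal standalone sketch (not part of this commit) of the collision pattern these tests rely on: a masked hashCode() sends every key to the same bucket, and because the key type is Comparable, a treeified HashMap bin can still break ties by comparison order. The class name CollidingKey, the 0xFF mask, and the 64-entry count are illustrative assumptions, not values taken from the test above.

// Sketch only: forces every key into one bucket via a masked hashCode(),
// then verifies all lookups still succeed (in a treeified bin, lookups
// stay O(log n) because the keys are Comparable).
import java.util.HashMap;
import java.util.Map;

public class CollisionSketch {
    static final class CollidingKey implements Comparable<CollidingKey> {
        final int value;
        CollidingKey(int value) { this.value = value; }
        @Override public int hashCode() { return value & 0xFF; } // all keys below hash to 0
        @Override public boolean equals(Object o) {
            return o instanceof CollidingKey && ((CollidingKey) o).value == value;
        }
        @Override public int compareTo(CollidingKey o) { return Integer.compare(value, o.value); }
        @Override public String toString() { return Integer.toString(value); }
    }

    public static void main(String[] args) {
        Map<CollidingKey, Integer> map = new HashMap<>();
        for (int i = 0; i < 64; i++) {
            map.put(new CollidingKey(i << 8), i); // low byte always 0 => same bucket
        }
        for (int i = 0; i < 64; i++) {
            Integer v = map.get(new CollidingKey(i << 8));
            if (v == null || v != i) {
                throw new AssertionError("lookup failed for key " + (i << 8));
            }
        }
        System.out.println("all 64 colliding keys found");
    }
}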
......@@ -23,7 +23,7 @@
/**
* @test
* @bug 8020156 8020009 8022326
* @bug 8020156 8020009 8022326 8012913
* @run testng SpliteratorCharacteristics
*/
......@@ -32,6 +32,10 @@ import org.testng.annotations.Test;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
......@@ -47,7 +51,27 @@ import static org.testng.Assert.*;
@Test
public class SpliteratorCharacteristics {
// TreeMap
public void testHashMap() {
assertMapCharacteristics(new HashMap<>(),
Spliterator.SIZED | Spliterator.DISTINCT);
}
public void testHashSet() {
assertSetCharacteristics(new HashSet<>(),
Spliterator.SIZED | Spliterator.DISTINCT);
}
public void testLinkedHashMap() {
assertMapCharacteristics(new LinkedHashMap<>(),
Spliterator.SIZED | Spliterator.DISTINCT |
Spliterator.ORDERED);
}
public void testLinkedHashSet() {
assertSetCharacteristics(new LinkedHashSet<>(),
Spliterator.SIZED | Spliterator.DISTINCT |
Spliterator.ORDERED);
}
public void testTreeMap() {
assertSortedMapCharacteristics(new TreeMap<>(),
......@@ -61,9 +85,6 @@ public class SpliteratorCharacteristics {
Spliterator.SORTED | Spliterator.ORDERED);
}
// TreeSet
public void testTreeSet() {
assertSortedSetCharacteristics(new TreeSet<>(),
Spliterator.SIZED | Spliterator.DISTINCT |
......@@ -76,9 +97,6 @@ public class SpliteratorCharacteristics {
Spliterator.SORTED | Spliterator.ORDERED);
}
// ConcurrentSkipListMap
public void testConcurrentSkipListMap() {
assertSortedMapCharacteristics(new ConcurrentSkipListMap<>(),
Spliterator.CONCURRENT | Spliterator.NONNULL |
......@@ -93,9 +111,6 @@ public class SpliteratorCharacteristics {
Spliterator.ORDERED);
}
// ConcurrentSkipListSet
public void testConcurrentSkipListSet() {
assertSortedSetCharacteristics(new ConcurrentSkipListSet<>(),
Spliterator.CONCURRENT | Spliterator.NONNULL |
......@@ -113,35 +128,58 @@ public class SpliteratorCharacteristics {
//
void assertSortedMapCharacteristics(SortedMap<Integer, String> m, int keyCharacteristics) {
void assertMapCharacteristics(Map<Integer, String> m, int keyCharacteristics) {
assertMapCharacteristics(m, keyCharacteristics, 0);
}
void assertMapCharacteristics(Map<Integer, String> m, int keyCharacteristics, int notValueCharacteristics) {
initMap(m);
boolean hasComparator = m.comparator() != null;
assertCharacteristics(m.keySet(), keyCharacteristics);
assertCharacteristics(m.values(),
keyCharacteristics & ~(Spliterator.DISTINCT | notValueCharacteristics));
assertCharacteristics(m.entrySet(), keyCharacteristics);
if ((keyCharacteristics & Spliterator.SORTED) == 0) {
assertISEComparator(m.keySet());
assertISEComparator(m.values());
assertISEComparator(m.entrySet());
}
}
void assertSetCharacteristics(Set<Integer> s, int keyCharacteristics) {
initSet(s);
assertCharacteristics(s, keyCharacteristics);
if ((keyCharacteristics & Spliterator.SORTED) == 0) {
assertISEComparator(s);
}
}
void assertSortedMapCharacteristics(SortedMap<Integer, String> m, int keyCharacteristics) {
assertMapCharacteristics(m, keyCharacteristics, Spliterator.SORTED);
Set<Integer> keys = m.keySet();
assertCharacteristics(keys, keyCharacteristics);
if (hasComparator) {
if (m.comparator() != null) {
assertNotNullComparator(keys);
}
else {
assertNullComparator(keys);
}
assertCharacteristics(m.values(),
keyCharacteristics & ~(Spliterator.DISTINCT | Spliterator.SORTED));
assertISEComparator(m.values());
assertCharacteristics(m.entrySet(), keyCharacteristics);
assertNotNullComparator(m.entrySet());
}
void assertSortedSetCharacteristics(SortedSet<Integer> s, int keyCharacteristics) {
initSet(s);
boolean hasComparator = s.comparator() != null;
assertSetCharacteristics(s, keyCharacteristics);
assertCharacteristics(s, keyCharacteristics);
if (hasComparator) {
if (s.comparator() != null) {
assertNotNullComparator(s);
}
else {
......@@ -161,27 +199,18 @@ public class SpliteratorCharacteristics {
}
void assertCharacteristics(Collection<?> c, int expectedCharacteristics) {
assertCharacteristics(c.spliterator(), expectedCharacteristics);
}
void assertCharacteristics(Spliterator<?> s, int expectedCharacteristics) {
assertTrue(s.hasCharacteristics(expectedCharacteristics));
assertTrue(c.spliterator().hasCharacteristics(expectedCharacteristics),
"Spliterator characteristics");
}
void assertNullComparator(Collection<?> c) {
assertNullComparator(c.spliterator());
}
void assertNullComparator(Spliterator<?> s) {
assertNull(s.getComparator());
assertNull(c.spliterator().getComparator(),
"Comparator of Spliterator of Collection");
}
void assertNotNullComparator(Collection<?> c) {
assertNotNullComparator(c.spliterator());
}
void assertNotNullComparator(Spliterator<?> s) {
assertNotNull(s.getComparator());
assertNotNull(c.spliterator().getComparator(),
"Comparator of Spliterator of Collection");
}
void assertISEComparator(Collection<?> c) {
......@@ -196,6 +225,6 @@ public class SpliteratorCharacteristics {
catch (IllegalStateException e) {
caught = true;
}
assertTrue(caught);
assertTrue(caught, "Throwing IllegalStateException");
}
}
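
For context, here is a minimal standalone sketch (not part of this commit) of the behavior the new assertions pin down: both map types report SIZED and DISTINCT key spliterators, but only LinkedHashMap reports ORDERED, which is the 8012913 fix. The class name OrderedCheck is an illustrative assumption.

// Sketch only: compares the spliterator characteristics of HashMap and
// LinkedHashMap key sets, mirroring what the tests above assert.
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Spliterator;

public class OrderedCheck {
    public static void main(String[] args) {
        Map<Integer, String> hm = new HashMap<>();
        Map<Integer, String> lhm = new LinkedHashMap<>();
        hm.put(1, "a");
        lhm.put(1, "a");

        Spliterator<Integer> hs = hm.keySet().spliterator();
        Spliterator<Integer> ls = lhm.keySet().spliterator();

        System.out.println(hs.hasCharacteristics(Spliterator.SIZED | Spliterator.DISTINCT)); // true
        System.out.println(hs.hasCharacteristics(Spliterator.ORDERED));                      // false
        System.out.println(ls.hasCharacteristics(
                Spliterator.SIZED | Spliterator.DISTINCT | Spliterator.ORDERED));            // true
    }
}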