8169748: LinkedTransferQueue bulk remove is O(n^2)
8172023: Concurrent spliterators fail to handle exhaustion properly Reviewed-by: martin, psandoz, smarks
This commit is contained in:
parent
1f99fea68c
commit
7f519be836
@ -67,12 +67,12 @@ import java.util.function.Predicate;
|
||||
* asynchronous nature of these deques, determining the current number
|
||||
* of elements requires a traversal of the elements, and so may report
|
||||
* inaccurate results if this collection is modified during traversal.
|
||||
* Additionally, the bulk operations {@code addAll},
|
||||
* {@code removeAll}, {@code retainAll}, {@code containsAll},
|
||||
* and {@code toArray} are <em>not</em> guaranteed
|
||||
* to be performed atomically. For example, an iterator operating
|
||||
* concurrently with an {@code addAll} operation might view only some
|
||||
* of the added elements.
|
||||
*
|
||||
* <p>Bulk operations that add, remove, or examine multiple elements,
|
||||
* such as {@link #addAll}, {@link #removeIf} or {@link #forEach},
|
||||
* are <em>not</em> guaranteed to be performed atomically.
|
||||
* For example, a {@code forEach} traversal concurrent with an {@code
|
||||
* addAll} operation might observe only some of the added elements.
|
||||
*
|
||||
* <p>This class and its iterator implement all of the <em>optional</em>
|
||||
* methods of the {@link Deque} and {@link Iterator} interfaces.
|
||||
@ -683,8 +683,9 @@ public class ConcurrentLinkedDeque<E>
|
||||
*/
|
||||
final Node<E> succ(Node<E> p) {
|
||||
// TODO: should we skip deleted nodes here?
|
||||
Node<E> q = p.next;
|
||||
return (p == q) ? first() : q;
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1416,65 +1417,55 @@ public class ConcurrentLinkedDeque<E>
|
||||
boolean exhausted; // true when no more nodes
|
||||
|
||||
public Spliterator<E> trySplit() {
|
||||
Node<E> p;
|
||||
int b = batch;
|
||||
int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? MAX_BATCH : b + 1;
|
||||
if (!exhausted &&
|
||||
((p = current) != null || (p = first()) != null)) {
|
||||
if (p.item == null && p == (p = p.next))
|
||||
current = p = first();
|
||||
if (p != null && p.next != null) {
|
||||
Object[] a = new Object[n];
|
||||
int i = 0;
|
||||
do {
|
||||
if ((a[i] = p.item) != null)
|
||||
++i;
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
} while (p != null && i < n);
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
if (i > 0) {
|
||||
batch = i;
|
||||
return Spliterators.spliterator
|
||||
(a, 0, i, (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT));
|
||||
}
|
||||
Node<E> p, q;
|
||||
if ((p = current()) == null || (q = p.next) == null)
|
||||
return null;
|
||||
int i = 0, n = batch = Math.min(batch + 1, MAX_BATCH);
|
||||
Object[] a = null;
|
||||
do {
|
||||
final E e;
|
||||
if ((e = p.item) != null) {
|
||||
if (a == null)
|
||||
a = new Object[n];
|
||||
a[i++] = e;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
if (p == (p = q))
|
||||
p = first();
|
||||
} while (p != null && (q = p.next) != null && i < n);
|
||||
setCurrent(p);
|
||||
return (i == 0) ? null :
|
||||
Spliterators.spliterator(a, 0, i, (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT));
|
||||
}
|
||||
|
||||
public void forEachRemaining(Consumer<? super E> action) {
|
||||
Objects.requireNonNull(action);
|
||||
Node<E> p;
|
||||
if (action == null) throw new NullPointerException();
|
||||
if (!exhausted &&
|
||||
((p = current) != null || (p = first()) != null)) {
|
||||
if ((p = current()) != null) {
|
||||
current = null;
|
||||
exhausted = true;
|
||||
do {
|
||||
E e = p.item;
|
||||
final E e;
|
||||
if ((e = p.item) != null)
|
||||
action.accept(e);
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
if (e != null)
|
||||
action.accept(e);
|
||||
} while (p != null);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean tryAdvance(Consumer<? super E> action) {
|
||||
Objects.requireNonNull(action);
|
||||
Node<E> p;
|
||||
if (action == null) throw new NullPointerException();
|
||||
if (!exhausted &&
|
||||
((p = current) != null || (p = first()) != null)) {
|
||||
if ((p = current()) != null) {
|
||||
E e;
|
||||
do {
|
||||
e = p.item;
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
} while (e == null && p != null);
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
setCurrent(p);
|
||||
if (e != null) {
|
||||
action.accept(e);
|
||||
return true;
|
||||
@ -1483,11 +1474,24 @@ public class ConcurrentLinkedDeque<E>
|
||||
return false;
|
||||
}
|
||||
|
||||
private void setCurrent(Node<E> p) {
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
}
|
||||
|
||||
private Node<E> current() {
|
||||
Node<E> p;
|
||||
if ((p = current) == null && !exhausted)
|
||||
setCurrent(p = first());
|
||||
return p;
|
||||
}
|
||||
|
||||
public long estimateSize() { return Long.MAX_VALUE; }
|
||||
|
||||
public int characteristics() {
|
||||
return Spliterator.ORDERED | Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT;
|
||||
return (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -81,12 +81,12 @@ import java.util.function.Predicate;
|
||||
* asynchronous nature of these queues, determining the current number
|
||||
* of elements requires a traversal of the elements, and so may report
|
||||
* inaccurate results if this collection is modified during traversal.
|
||||
* Additionally, the bulk operations {@code addAll},
|
||||
* {@code removeAll}, {@code retainAll}, {@code containsAll},
|
||||
* and {@code toArray} are <em>not</em> guaranteed
|
||||
* to be performed atomically. For example, an iterator operating
|
||||
* concurrently with an {@code addAll} operation might view only some
|
||||
* of the added elements.
|
||||
*
|
||||
* <p>Bulk operations that add, remove, or examine multiple elements,
|
||||
* such as {@link #addAll}, {@link #removeIf} or {@link #forEach},
|
||||
* are <em>not</em> guaranteed to be performed atomically.
|
||||
* For example, a {@code forEach} traversal concurrent with an {@code
|
||||
* addAll} operation might observe only some of the added elements.
|
||||
*
|
||||
* <p>This class and its iterator implement all of the <em>optional</em>
|
||||
* methods of the {@link Queue} and {@link Iterator} interfaces.
|
||||
@ -184,16 +184,30 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
static final class Node<E> {
|
||||
volatile E item;
|
||||
volatile Node<E> next;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new node holding item. Uses relaxed write because item
|
||||
* can only be seen after piggy-backing publication via CAS.
|
||||
*/
|
||||
static <E> Node<E> newNode(E item) {
|
||||
Node<E> node = new Node<E>();
|
||||
ITEM.set(node, item);
|
||||
return node;
|
||||
/**
|
||||
* Constructs a node holding item. Uses relaxed write because
|
||||
* item can only be seen after piggy-backing publication via CAS.
|
||||
*/
|
||||
Node(E item) {
|
||||
ITEM.set(this, item);
|
||||
}
|
||||
|
||||
/** Constructs a dead dummy node. */
|
||||
Node() {}
|
||||
|
||||
void appendRelaxed(Node<E> next) {
|
||||
// assert next != null;
|
||||
// assert this.next == null;
|
||||
NEXT.set(this, next);
|
||||
}
|
||||
|
||||
boolean casItem(E cmp, E val) {
|
||||
// assert item == cmp || item == null;
|
||||
// assert cmp != null;
|
||||
// assert val == null;
|
||||
return ITEM.compareAndSet(this, cmp, val);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -220,7 +234,7 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
* - tail.item may or may not be null.
|
||||
* - it is permitted for tail to lag behind head, that is, for tail
|
||||
* to not be reachable from head!
|
||||
* - tail.next may or may not be self-pointing to tail.
|
||||
* - tail.next may or may not be self-linked.
|
||||
*/
|
||||
private transient volatile Node<E> tail;
|
||||
|
||||
@ -228,7 +242,7 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
* Creates a {@code ConcurrentLinkedQueue} that is initially empty.
|
||||
*/
|
||||
public ConcurrentLinkedQueue() {
|
||||
head = tail = newNode(null);
|
||||
head = tail = new Node<E>();
|
||||
}
|
||||
|
||||
/**
|
||||
@ -243,16 +257,14 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
public ConcurrentLinkedQueue(Collection<? extends E> c) {
|
||||
Node<E> h = null, t = null;
|
||||
for (E e : c) {
|
||||
Node<E> newNode = newNode(Objects.requireNonNull(e));
|
||||
Node<E> newNode = new Node<E>(Objects.requireNonNull(e));
|
||||
if (h == null)
|
||||
h = t = newNode;
|
||||
else {
|
||||
NEXT.set(t, newNode);
|
||||
t = newNode;
|
||||
}
|
||||
else
|
||||
t.appendRelaxed(t = newNode);
|
||||
}
|
||||
if (h == null)
|
||||
h = t = newNode(null);
|
||||
h = t = new Node<E>();
|
||||
head = h;
|
||||
tail = t;
|
||||
}
|
||||
@ -287,14 +299,17 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
* stale pointer that is now off the list.
|
||||
*/
|
||||
final Node<E> succ(Node<E> p) {
|
||||
Node<E> next = p.next;
|
||||
return (p == next) ? head : next;
|
||||
if (p == (p = p.next))
|
||||
p = head;
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries to CAS pred.next (or head, if pred is null) from c to p.
|
||||
* Caller must ensure that we're not unlinking the trailing node.
|
||||
*/
|
||||
private boolean tryCasSuccessor(Node<E> pred, Node<E> c, Node<E> p) {
|
||||
// assert p != null;
|
||||
// assert c.item == null;
|
||||
// assert c != p;
|
||||
if (pred != null)
|
||||
@ -306,6 +321,29 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Collapse dead nodes between pred and q.
|
||||
* @param pred the last known live node, or null if none
|
||||
* @param c the first dead node
|
||||
* @param p the last dead node
|
||||
* @param q p.next: the next live node, or null if at end
|
||||
* @return either old pred or p if pred dead or CAS failed
|
||||
*/
|
||||
private Node<E> skipDeadNodes(Node<E> pred, Node<E> c, Node<E> p, Node<E> q) {
|
||||
// assert pred != c;
|
||||
// assert p != q;
|
||||
// assert c.item == null;
|
||||
// assert p.item == null;
|
||||
if (q == null) {
|
||||
// Never unlink trailing node.
|
||||
if (c == p) return pred;
|
||||
q = p;
|
||||
}
|
||||
return (tryCasSuccessor(pred, c, q)
|
||||
&& (pred == null || ITEM.get(pred) != null))
|
||||
? pred : p;
|
||||
}
|
||||
|
||||
/**
|
||||
* Inserts the specified element at the tail of this queue.
|
||||
* As the queue is unbounded, this method will never return {@code false}.
|
||||
@ -314,7 +352,7 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
* @throws NullPointerException if the specified element is null
|
||||
*/
|
||||
public boolean offer(E e) {
|
||||
final Node<E> newNode = newNode(Objects.requireNonNull(e));
|
||||
final Node<E> newNode = new Node<E>(Objects.requireNonNull(e));
|
||||
|
||||
for (Node<E> t = tail, p = t;;) {
|
||||
Node<E> q = p.next;
|
||||
@ -346,8 +384,7 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
restartFromHead: for (;;) {
|
||||
for (Node<E> h = head, p = h, q;; p = q) {
|
||||
final E item;
|
||||
if ((item = p.item) != null
|
||||
&& ITEM.compareAndSet(p, item, null)) {
|
||||
if ((item = p.item) != null && p.casItem(item, null)) {
|
||||
// Successful CAS is the linearization point
|
||||
// for item to be removed from this queue.
|
||||
if (p != h) // hop two nodes at a time
|
||||
@ -451,19 +488,20 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
public boolean contains(Object o) {
|
||||
if (o == null) return false;
|
||||
restartFromHead: for (;;) {
|
||||
for (Node<E> p = head, c = p, pred = null, q; p != null; p = q) {
|
||||
for (Node<E> p = head, pred = null; p != null; ) {
|
||||
Node<E> q = p.next;
|
||||
final E item;
|
||||
if ((item = p.item) != null && o.equals(item))
|
||||
return true;
|
||||
if (c != p && tryCasSuccessor(pred, c, p))
|
||||
c = p;
|
||||
q = p.next;
|
||||
if (item != null || c != p) {
|
||||
pred = p;
|
||||
c = q;
|
||||
if ((item = p.item) != null) {
|
||||
if (o.equals(item))
|
||||
return true;
|
||||
pred = p; p = q; continue;
|
||||
}
|
||||
for (Node<E> c = p;; q = p.next) {
|
||||
if (q == null || q.item != null) {
|
||||
pred = skipDeadNodes(pred, c, p, q); p = q; break;
|
||||
}
|
||||
if (p == (p = q)) continue restartFromHead;
|
||||
}
|
||||
else if (p == q)
|
||||
continue restartFromHead;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -483,23 +521,22 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
public boolean remove(Object o) {
|
||||
if (o == null) return false;
|
||||
restartFromHead: for (;;) {
|
||||
for (Node<E> p = head, c = p, pred = null, q; p != null; p = q) {
|
||||
for (Node<E> p = head, pred = null; p != null; ) {
|
||||
Node<E> q = p.next;
|
||||
final E item;
|
||||
final boolean removed =
|
||||
(item = p.item) != null
|
||||
&& o.equals(item)
|
||||
&& ITEM.compareAndSet(p, item, null);
|
||||
if (c != p && tryCasSuccessor(pred, c, p))
|
||||
c = p;
|
||||
if (removed)
|
||||
return true;
|
||||
q = p.next;
|
||||
if (item != null || c != p) {
|
||||
pred = p;
|
||||
c = q;
|
||||
if ((item = p.item) != null) {
|
||||
if (o.equals(item) && p.casItem(item, null)) {
|
||||
skipDeadNodes(pred, p, p, q);
|
||||
return true;
|
||||
}
|
||||
pred = p; p = q; continue;
|
||||
}
|
||||
for (Node<E> c = p;; q = p.next) {
|
||||
if (q == null || q.item != null) {
|
||||
pred = skipDeadNodes(pred, c, p, q); p = q; break;
|
||||
}
|
||||
if (p == (p = q)) continue restartFromHead;
|
||||
}
|
||||
else if (p == q)
|
||||
continue restartFromHead;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
@ -525,13 +562,11 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
// Copy c into a private chain of Nodes
|
||||
Node<E> beginningOfTheEnd = null, last = null;
|
||||
for (E e : c) {
|
||||
Node<E> newNode = newNode(Objects.requireNonNull(e));
|
||||
Node<E> newNode = new Node<E>(Objects.requireNonNull(e));
|
||||
if (beginningOfTheEnd == null)
|
||||
beginningOfTheEnd = last = newNode;
|
||||
else {
|
||||
NEXT.set(last, newNode);
|
||||
last = newNode;
|
||||
}
|
||||
else
|
||||
last.appendRelaxed(last = newNode);
|
||||
}
|
||||
if (beginningOfTheEnd == null)
|
||||
return false;
|
||||
@ -677,7 +712,7 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public <T> T[] toArray(T[] a) {
|
||||
if (a == null) throw new NullPointerException();
|
||||
Objects.requireNonNull(a);
|
||||
return (T[]) toArrayInternal(a);
|
||||
}
|
||||
|
||||
@ -757,6 +792,8 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
}
|
||||
}
|
||||
|
||||
// Default implementation of forEachRemaining is "good enough".
|
||||
|
||||
public void remove() {
|
||||
Node<E> l = lastRet;
|
||||
if (l == null) throw new IllegalStateException();
|
||||
@ -806,16 +843,14 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
Node<E> h = null, t = null;
|
||||
for (Object item; (item = s.readObject()) != null; ) {
|
||||
@SuppressWarnings("unchecked")
|
||||
Node<E> newNode = newNode((E) item);
|
||||
Node<E> newNode = new Node<E>((E) item);
|
||||
if (h == null)
|
||||
h = t = newNode;
|
||||
else {
|
||||
NEXT.set(t, newNode);
|
||||
t = newNode;
|
||||
}
|
||||
else
|
||||
t.appendRelaxed(t = newNode);
|
||||
}
|
||||
if (h == null)
|
||||
h = t = newNode(null);
|
||||
h = t = new Node<E>();
|
||||
head = h;
|
||||
tail = t;
|
||||
}
|
||||
@ -828,62 +863,49 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
boolean exhausted; // true when no more nodes
|
||||
|
||||
public Spliterator<E> trySplit() {
|
||||
Node<E> p;
|
||||
int b = batch;
|
||||
int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? MAX_BATCH : b + 1;
|
||||
if (!exhausted &&
|
||||
((p = current) != null || (p = first()) != null) &&
|
||||
p.next != null) {
|
||||
Object[] a = new Object[n];
|
||||
int i = 0;
|
||||
do {
|
||||
if ((a[i] = p.item) != null)
|
||||
++i;
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
} while (p != null && i < n);
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
if (i > 0) {
|
||||
batch = i;
|
||||
return Spliterators.spliterator
|
||||
(a, 0, i, (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT));
|
||||
Node<E> p, q;
|
||||
if ((p = current()) == null || (q = p.next) == null)
|
||||
return null;
|
||||
int i = 0, n = batch = Math.min(batch + 1, MAX_BATCH);
|
||||
Object[] a = null;
|
||||
do {
|
||||
final E e;
|
||||
if ((e = p.item) != null) {
|
||||
if (a == null)
|
||||
a = new Object[n];
|
||||
a[i++] = e;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
if (p == (p = q))
|
||||
p = first();
|
||||
} while (p != null && (q = p.next) != null && i < n);
|
||||
setCurrent(p);
|
||||
return (i == 0) ? null :
|
||||
Spliterators.spliterator(a, 0, i, (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT));
|
||||
}
|
||||
|
||||
public void forEachRemaining(Consumer<? super E> action) {
|
||||
Node<E> p;
|
||||
if (action == null) throw new NullPointerException();
|
||||
if (!exhausted &&
|
||||
((p = current) != null || (p = first()) != null)) {
|
||||
Objects.requireNonNull(action);
|
||||
final Node<E> p;
|
||||
if ((p = current()) != null) {
|
||||
current = null;
|
||||
exhausted = true;
|
||||
do {
|
||||
E e = p.item;
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
if (e != null)
|
||||
action.accept(e);
|
||||
} while (p != null);
|
||||
forEachFrom(action, p);
|
||||
}
|
||||
}
|
||||
|
||||
public boolean tryAdvance(Consumer<? super E> action) {
|
||||
Objects.requireNonNull(action);
|
||||
Node<E> p;
|
||||
if (action == null) throw new NullPointerException();
|
||||
if (!exhausted &&
|
||||
((p = current) != null || (p = first()) != null)) {
|
||||
if ((p = current()) != null) {
|
||||
E e;
|
||||
do {
|
||||
e = p.item;
|
||||
if (p == (p = p.next))
|
||||
p = first();
|
||||
} while (e == null && p != null);
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
setCurrent(p);
|
||||
if (e != null) {
|
||||
action.accept(e);
|
||||
return true;
|
||||
@ -892,11 +914,24 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
return false;
|
||||
}
|
||||
|
||||
private void setCurrent(Node<E> p) {
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
}
|
||||
|
||||
private Node<E> current() {
|
||||
Node<E> p;
|
||||
if ((p = current) == null && !exhausted)
|
||||
setCurrent(p = first());
|
||||
return p;
|
||||
}
|
||||
|
||||
public long estimateSize() { return Long.MAX_VALUE; }
|
||||
|
||||
public int characteristics() {
|
||||
return Spliterator.ORDERED | Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT;
|
||||
return (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT);
|
||||
}
|
||||
}
|
||||
|
||||
@ -963,22 +998,22 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
// c will be CASed to collapse intervening dead nodes between
|
||||
// pred (or head if null) and p.
|
||||
for (Node<E> p = head, c = p, pred = null, q; p != null; p = q) {
|
||||
q = p.next;
|
||||
final E item; boolean pAlive;
|
||||
if (pAlive = ((item = p.item) != null)) {
|
||||
if (filter.test(item)) {
|
||||
if (ITEM.compareAndSet(p, item, null))
|
||||
if (p.casItem(item, null))
|
||||
removed = true;
|
||||
pAlive = false;
|
||||
}
|
||||
}
|
||||
if ((q = p.next) == null || pAlive || --hops == 0) {
|
||||
if (pAlive || q == null || --hops == 0) {
|
||||
// p might already be self-linked here, but if so:
|
||||
// - CASing head will surely fail
|
||||
// - CASing pred's next will be useless but harmless.
|
||||
if (c != p && tryCasSuccessor(pred, c, p))
|
||||
c = p;
|
||||
// if c != p, CAS failed, so abandon old pred
|
||||
if (pAlive || c != p) {
|
||||
if ((c != p && !tryCasSuccessor(pred, c, c = p))
|
||||
|| pAlive) {
|
||||
// if CAS failed or alive, abandon old pred
|
||||
hops = MAX_HOPS;
|
||||
pred = p;
|
||||
c = q;
|
||||
@ -990,35 +1025,40 @@ public class ConcurrentLinkedQueue<E> extends AbstractQueue<E>
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Runs action on each element found during a traversal starting at p.
|
||||
* If p is null, the action is not run.
|
||||
*/
|
||||
void forEachFrom(Consumer<? super E> action, Node<E> p) {
|
||||
for (Node<E> pred = null; p != null; ) {
|
||||
Node<E> q = p.next;
|
||||
final E item;
|
||||
if ((item = p.item) != null) {
|
||||
action.accept(item);
|
||||
pred = p; p = q; continue;
|
||||
}
|
||||
for (Node<E> c = p;; q = p.next) {
|
||||
if (q == null || q.item != null) {
|
||||
pred = skipDeadNodes(pred, c, p, q); p = q; break;
|
||||
}
|
||||
if (p == (p = q)) { pred = null; p = head; break; }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public void forEach(Consumer<? super E> action) {
|
||||
Objects.requireNonNull(action);
|
||||
restartFromHead: for (;;) {
|
||||
for (Node<E> p = head, c = p, pred = null, q; p != null; p = q) {
|
||||
final E item;
|
||||
if ((item = p.item) != null)
|
||||
action.accept(item);
|
||||
if (c != p && tryCasSuccessor(pred, c, p))
|
||||
c = p;
|
||||
q = p.next;
|
||||
if (item != null || c != p) {
|
||||
pred = p;
|
||||
c = q;
|
||||
}
|
||||
else if (p == q)
|
||||
continue restartFromHead;
|
||||
}
|
||||
return;
|
||||
}
|
||||
forEachFrom(action, head);
|
||||
}
|
||||
|
||||
// VarHandle mechanics
|
||||
private static final VarHandle HEAD;
|
||||
private static final VarHandle TAIL;
|
||||
private static final VarHandle ITEM;
|
||||
private static final VarHandle NEXT;
|
||||
static final VarHandle ITEM;
|
||||
static final VarHandle NEXT;
|
||||
static {
|
||||
try {
|
||||
MethodHandles.Lookup l = MethodHandles.lookup();
|
||||
|
@ -45,6 +45,7 @@ import java.util.Spliterators;
|
||||
import java.util.concurrent.locks.Condition;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* An optionally-bounded {@linkplain BlockingDeque blocking deque} based on
|
||||
@ -63,9 +64,8 @@ import java.util.function.Consumer;
|
||||
* contains}, {@link #iterator iterator.remove()}, and the bulk
|
||||
* operations, all of which run in linear time.
|
||||
*
|
||||
* <p>This class and its iterator implement all of the
|
||||
* <em>optional</em> methods of the {@link Collection} and {@link
|
||||
* Iterator} interfaces.
|
||||
* <p>This class and its iterator implement all of the <em>optional</em>
|
||||
* methods of the {@link Collection} and {@link Iterator} interfaces.
|
||||
*
|
||||
* <p>This class is a member of the
|
||||
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
|
||||
@ -195,18 +195,7 @@ public class LinkedBlockingDeque<E>
|
||||
*/
|
||||
public LinkedBlockingDeque(Collection<? extends E> c) {
|
||||
this(Integer.MAX_VALUE);
|
||||
final ReentrantLock lock = this.lock;
|
||||
lock.lock(); // Never contended, but necessary for visibility
|
||||
try {
|
||||
for (E e : c) {
|
||||
if (e == null)
|
||||
throw new NullPointerException();
|
||||
if (!linkLast(new Node<E>(e)))
|
||||
throw new IllegalStateException("Deque full");
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
addAll(c);
|
||||
}
|
||||
|
||||
|
||||
@ -299,6 +288,7 @@ public class LinkedBlockingDeque<E>
|
||||
*/
|
||||
void unlink(Node<E> x) {
|
||||
// assert lock.isHeldByCurrentThread();
|
||||
// assert x.item != null;
|
||||
Node<E> p = x.prev;
|
||||
Node<E> n = x.next;
|
||||
if (p == null) {
|
||||
@ -834,46 +824,65 @@ public class LinkedBlockingDeque<E>
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Add support for more efficient bulk operations.
|
||||
/**
|
||||
* Appends all of the elements in the specified collection to the end of
|
||||
* this deque, in the order that they are returned by the specified
|
||||
* collection's iterator. Attempts to {@code addAll} of a deque to
|
||||
* itself result in {@code IllegalArgumentException}.
|
||||
*
|
||||
* We don't want to acquire the lock for every iteration, but we
|
||||
* also want other threads a chance to interact with the
|
||||
* collection, especially when count is close to capacity.
|
||||
* @param c the elements to be inserted into this deque
|
||||
* @return {@code true} if this deque changed as a result of the call
|
||||
* @throws NullPointerException if the specified collection or any
|
||||
* of its elements are null
|
||||
* @throws IllegalArgumentException if the collection is this deque
|
||||
* @throws IllegalStateException if this deque is full
|
||||
* @see #add(Object)
|
||||
*/
|
||||
public boolean addAll(Collection<? extends E> c) {
|
||||
if (c == this)
|
||||
// As historically specified in AbstractQueue#addAll
|
||||
throw new IllegalArgumentException();
|
||||
|
||||
// /**
|
||||
// * Adds all of the elements in the specified collection to this
|
||||
// * queue. Attempts to addAll of a queue to itself result in
|
||||
// * {@code IllegalArgumentException}. Further, the behavior of
|
||||
// * this operation is undefined if the specified collection is
|
||||
// * modified while the operation is in progress.
|
||||
// *
|
||||
// * @param c collection containing elements to be added to this queue
|
||||
// * @return {@code true} if this queue changed as a result of the call
|
||||
// * @throws ClassCastException {@inheritDoc}
|
||||
// * @throws NullPointerException {@inheritDoc}
|
||||
// * @throws IllegalArgumentException {@inheritDoc}
|
||||
// * @throws IllegalStateException if this deque is full
|
||||
// * @see #add(Object)
|
||||
// */
|
||||
// public boolean addAll(Collection<? extends E> c) {
|
||||
// if (c == null)
|
||||
// throw new NullPointerException();
|
||||
// if (c == this)
|
||||
// throw new IllegalArgumentException();
|
||||
// final ReentrantLock lock = this.lock;
|
||||
// lock.lock();
|
||||
// try {
|
||||
// boolean modified = false;
|
||||
// for (E e : c)
|
||||
// if (linkLast(e))
|
||||
// modified = true;
|
||||
// return modified;
|
||||
// } finally {
|
||||
// lock.unlock();
|
||||
// }
|
||||
// }
|
||||
// Copy c into a private chain of Nodes
|
||||
Node<E> beg = null, end = null;
|
||||
int n = 0;
|
||||
for (E e : c) {
|
||||
Objects.requireNonNull(e);
|
||||
n++;
|
||||
Node<E> newNode = new Node<E>(e);
|
||||
if (beg == null)
|
||||
beg = end = newNode;
|
||||
else {
|
||||
end.next = newNode;
|
||||
newNode.prev = end;
|
||||
end = newNode;
|
||||
}
|
||||
}
|
||||
if (beg == null)
|
||||
return false;
|
||||
|
||||
// Atomically append the chain at the end
|
||||
final ReentrantLock lock = this.lock;
|
||||
lock.lock();
|
||||
try {
|
||||
if (count + n <= capacity) {
|
||||
beg.prev = last;
|
||||
if (first == null)
|
||||
first = beg;
|
||||
else
|
||||
last.next = beg;
|
||||
last = end;
|
||||
count += n;
|
||||
notEmpty.signalAll();
|
||||
return true;
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
// Fall back to historic non-atomic implementation, failing
|
||||
// with IllegalStateException when the capacity is exceeded.
|
||||
return super.addAll(c);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an array containing all of the elements in this deque, in
|
||||
@ -992,7 +1001,9 @@ public class LinkedBlockingDeque<E>
|
||||
* - (possibly multiple) interior removed nodes (p.item == null)
|
||||
*/
|
||||
Node<E> succ(Node<E> p) {
|
||||
return (p == (p = p.next)) ? first : p;
|
||||
if (p == (p = p.next))
|
||||
p = first;
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1049,7 +1060,9 @@ public class LinkedBlockingDeque<E>
|
||||
abstract Node<E> nextNode(Node<E> n);
|
||||
|
||||
private Node<E> succ(Node<E> p) {
|
||||
return (p == (p = nextNode(p))) ? firstNode() : p;
|
||||
if (p == (p = nextNode(p)))
|
||||
p = firstNode();
|
||||
return p;
|
||||
}
|
||||
|
||||
AbstractItr() {
|
||||
@ -1096,7 +1109,7 @@ public class LinkedBlockingDeque<E>
|
||||
lastRet = p;
|
||||
next = null;
|
||||
final ReentrantLock lock = LinkedBlockingDeque.this.lock;
|
||||
final int batchSize = 32;
|
||||
final int batchSize = 64;
|
||||
Object[] es = null;
|
||||
int n, len = 1;
|
||||
do {
|
||||
@ -1175,11 +1188,10 @@ public class LinkedBlockingDeque<E>
|
||||
|
||||
public Spliterator<E> trySplit() {
|
||||
Node<E> h;
|
||||
int b = batch;
|
||||
int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? MAX_BATCH : b + 1;
|
||||
if (!exhausted &&
|
||||
((h = current) != null || (h = first) != null)
|
||||
&& h.next != null) {
|
||||
int n = batch = Math.min(batch + 1, MAX_BATCH);
|
||||
Object[] a = new Object[n];
|
||||
final ReentrantLock lock = LinkedBlockingDeque.this.lock;
|
||||
int i = 0;
|
||||
@ -1199,13 +1211,11 @@ public class LinkedBlockingDeque<E>
|
||||
}
|
||||
else if ((est -= i) < 0L)
|
||||
est = 0L;
|
||||
if (i > 0) {
|
||||
batch = i;
|
||||
if (i > 0)
|
||||
return Spliterators.spliterator
|
||||
(a, 0, i, (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT));
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
@ -1223,7 +1233,8 @@ public class LinkedBlockingDeque<E>
|
||||
e = p.item;
|
||||
p = succ(p);
|
||||
} while (e == null && p != null);
|
||||
exhausted = ((current = p) == null);
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
@ -1288,7 +1299,7 @@ public class LinkedBlockingDeque<E>
|
||||
// Extract batches of elements while holding the lock; then
|
||||
// run the action on the elements while not
|
||||
final ReentrantLock lock = this.lock;
|
||||
final int batchSize = 32; // max number of elements per batch
|
||||
final int batchSize = 64; // max number of elements per batch
|
||||
Object[] es = null; // container for batch of elements
|
||||
int n, len = 0;
|
||||
do {
|
||||
@ -1314,6 +1325,83 @@ public class LinkedBlockingDeque<E>
|
||||
} while (n > 0 && p != null);
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public boolean removeIf(Predicate<? super E> filter) {
|
||||
Objects.requireNonNull(filter);
|
||||
return bulkRemove(filter);
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public boolean removeAll(Collection<?> c) {
|
||||
Objects.requireNonNull(c);
|
||||
return bulkRemove(e -> c.contains(e));
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public boolean retainAll(Collection<?> c) {
|
||||
Objects.requireNonNull(c);
|
||||
return bulkRemove(e -> !c.contains(e));
|
||||
}
|
||||
|
||||
/** Implementation of bulk remove methods. */
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean bulkRemove(Predicate<? super E> filter) {
|
||||
boolean removed = false;
|
||||
Node<E> p = null;
|
||||
final ReentrantLock lock = this.lock;
|
||||
Node<E>[] nodes = null;
|
||||
int n, len = 0;
|
||||
do {
|
||||
// 1. Extract batch of up to 64 elements while holding the lock.
|
||||
long deathRow = 0; // "bitset" of size 64
|
||||
lock.lock();
|
||||
try {
|
||||
if (nodes == null) {
|
||||
if (p == null) p = first;
|
||||
for (Node<E> q = p; q != null; q = succ(q))
|
||||
if (q.item != null && ++len == 64)
|
||||
break;
|
||||
nodes = (Node<E>[]) new Node<?>[len];
|
||||
}
|
||||
for (n = 0; p != null && n < len; p = succ(p))
|
||||
nodes[n++] = p;
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
|
||||
// 2. Run the filter on the elements while lock is free.
|
||||
for (int i = 0; i < n; i++) {
|
||||
final E e;
|
||||
if ((e = nodes[i].item) != null && filter.test(e))
|
||||
deathRow |= 1L << i;
|
||||
}
|
||||
|
||||
// 3. Remove any filtered elements while holding the lock.
|
||||
if (deathRow != 0) {
|
||||
lock.lock();
|
||||
try {
|
||||
for (int i = 0; i < n; i++) {
|
||||
final Node<E> q;
|
||||
if ((deathRow & (1L << i)) != 0L
|
||||
&& (q = nodes[i]).item != null) {
|
||||
unlink(q);
|
||||
removed = true;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
} while (n > 0 && p != null);
|
||||
return removed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves this deque to a stream (that is, serializes it).
|
||||
*
|
||||
|
@ -46,6 +46,7 @@ import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.locks.Condition;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* An optionally-bounded {@linkplain BlockingQueue blocking queue} based on
|
||||
@ -66,9 +67,8 @@ import java.util.function.Consumer;
|
||||
* dynamically created upon each insertion unless this would bring the
|
||||
* queue above capacity.
|
||||
*
|
||||
* <p>This class and its iterator implement all of the
|
||||
* <em>optional</em> methods of the {@link Collection} and {@link
|
||||
* Iterator} interfaces.
|
||||
* <p>This class and its iterator implement all of the <em>optional</em>
|
||||
* methods of the {@link Collection} and {@link Iterator} interfaces.
|
||||
*
|
||||
* <p>This class is a member of the
|
||||
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
|
||||
@ -507,17 +507,17 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
}
|
||||
|
||||
/**
|
||||
* Unlinks interior Node p with predecessor trail.
|
||||
* Unlinks interior Node p with predecessor pred.
|
||||
*/
|
||||
void unlink(Node<E> p, Node<E> trail) {
|
||||
void unlink(Node<E> p, Node<E> pred) {
|
||||
// assert putLock.isHeldByCurrentThread();
|
||||
// assert takeLock.isHeldByCurrentThread();
|
||||
// p.next is not changed, to allow iterators that are
|
||||
// traversing p to maintain their weak-consistency guarantee.
|
||||
p.item = null;
|
||||
trail.next = p.next;
|
||||
pred.next = p.next;
|
||||
if (last == p)
|
||||
last = trail;
|
||||
last = pred;
|
||||
if (count.getAndDecrement() == capacity)
|
||||
notFull.signal();
|
||||
}
|
||||
@ -537,11 +537,11 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
if (o == null) return false;
|
||||
fullyLock();
|
||||
try {
|
||||
for (Node<E> trail = head, p = trail.next;
|
||||
for (Node<E> pred = head, p = pred.next;
|
||||
p != null;
|
||||
trail = p, p = p.next) {
|
||||
pred = p, p = p.next) {
|
||||
if (o.equals(p.item)) {
|
||||
unlink(p, trail);
|
||||
unlink(p, pred);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -740,7 +740,9 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
* - (possibly multiple) interior removed nodes (p.item == null)
|
||||
*/
|
||||
Node<E> succ(Node<E> p) {
|
||||
return (p == (p = p.next)) ? head.next : p;
|
||||
if (p == (p = p.next))
|
||||
p = head.next;
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -756,16 +758,18 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
return new Itr();
|
||||
}
|
||||
|
||||
/**
|
||||
* Weakly-consistent iterator.
|
||||
*
|
||||
* Lazily updated ancestor field provides expected O(1) remove(),
|
||||
* but still O(n) in the worst case, whenever the saved ancestor
|
||||
* is concurrently deleted.
|
||||
*/
|
||||
private class Itr implements Iterator<E> {
|
||||
/*
|
||||
* Basic weakly-consistent iterator. At all times hold the next
|
||||
* item to hand out so that if hasNext() reports true, we will
|
||||
* still have it to return even if lost race with a take etc.
|
||||
*/
|
||||
|
||||
private Node<E> next;
|
||||
private E nextItem;
|
||||
private Node<E> next; // Node holding nextItem
|
||||
private E nextItem; // next item to hand out
|
||||
private Node<E> lastRet;
|
||||
private Node<E> ancestor; // Helps unlink lastRet on remove()
|
||||
|
||||
Itr() {
|
||||
fullyLock();
|
||||
@ -807,7 +811,7 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
if ((p = next) == null) return;
|
||||
lastRet = p;
|
||||
next = null;
|
||||
final int batchSize = 32;
|
||||
final int batchSize = 64;
|
||||
Object[] es = null;
|
||||
int n, len = 1;
|
||||
do {
|
||||
@ -840,19 +844,17 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
}
|
||||
|
||||
public void remove() {
|
||||
if (lastRet == null)
|
||||
Node<E> p = lastRet;
|
||||
if (p == null)
|
||||
throw new IllegalStateException();
|
||||
lastRet = null;
|
||||
fullyLock();
|
||||
try {
|
||||
Node<E> node = lastRet;
|
||||
lastRet = null;
|
||||
for (Node<E> trail = head, p = trail.next;
|
||||
p != null;
|
||||
trail = p, p = p.next) {
|
||||
if (p == node) {
|
||||
unlink(p, trail);
|
||||
break;
|
||||
}
|
||||
if (p.item != null) {
|
||||
if (ancestor == null)
|
||||
ancestor = head;
|
||||
ancestor = findPred(p, ancestor);
|
||||
unlink(p, ancestor);
|
||||
}
|
||||
} finally {
|
||||
fullyUnlock();
|
||||
@ -877,11 +879,10 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
|
||||
public Spliterator<E> trySplit() {
|
||||
Node<E> h;
|
||||
int b = batch;
|
||||
int n = (b <= 0) ? 1 : (b >= MAX_BATCH) ? MAX_BATCH : b + 1;
|
||||
if (!exhausted &&
|
||||
((h = current) != null || (h = head.next) != null)
|
||||
&& h.next != null) {
|
||||
int n = batch = Math.min(batch + 1, MAX_BATCH);
|
||||
Object[] a = new Object[n];
|
||||
int i = 0;
|
||||
Node<E> p = current;
|
||||
@ -900,13 +901,11 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
}
|
||||
else if ((est -= i) < 0L)
|
||||
est = 0L;
|
||||
if (i > 0) {
|
||||
batch = i;
|
||||
if (i > 0)
|
||||
return Spliterators.spliterator
|
||||
(a, 0, i, (Spliterator.ORDERED |
|
||||
Spliterator.NONNULL |
|
||||
Spliterator.CONCURRENT));
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
@ -923,7 +922,8 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
e = p.item;
|
||||
p = succ(p);
|
||||
} while (e == null && p != null);
|
||||
exhausted = ((current = p) == null);
|
||||
if ((current = p) == null)
|
||||
exhausted = true;
|
||||
} finally {
|
||||
fullyUnlock();
|
||||
}
|
||||
@ -987,7 +987,7 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
void forEachFrom(Consumer<? super E> action, Node<E> p) {
|
||||
// Extract batches of elements while holding the lock; then
|
||||
// run the action on the elements while not
|
||||
final int batchSize = 32; // max number of elements per batch
|
||||
final int batchSize = 64; // max number of elements per batch
|
||||
Object[] es = null; // container for batch of elements
|
||||
int n, len = 0;
|
||||
do {
|
||||
@ -1013,6 +1013,97 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
|
||||
} while (n > 0 && p != null);
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public boolean removeIf(Predicate<? super E> filter) {
|
||||
Objects.requireNonNull(filter);
|
||||
return bulkRemove(filter);
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public boolean removeAll(Collection<?> c) {
|
||||
Objects.requireNonNull(c);
|
||||
return bulkRemove(e -> c.contains(e));
|
||||
}
|
||||
|
||||
/**
|
||||
* @throws NullPointerException {@inheritDoc}
|
||||
*/
|
||||
public boolean retainAll(Collection<?> c) {
|
||||
Objects.requireNonNull(c);
|
||||
return bulkRemove(e -> !c.contains(e));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the predecessor of live node p, given a node that was
|
||||
* once a live ancestor of p (or head); allows unlinking of p.
|
||||
*/
|
||||
Node<E> findPred(Node<E> p, Node<E> ancestor) {
|
||||
// assert p.item != null;
|
||||
if (ancestor.item == null)
|
||||
ancestor = head;
|
||||
// Fails with NPE if precondition not satisfied
|
||||
for (Node<E> q; (q = ancestor.next) != p; )
|
||||
ancestor = q;
|
||||
return ancestor;
|
||||
}
|
||||
|
||||
/** Implementation of bulk remove methods. */
|
||||
@SuppressWarnings("unchecked")
|
||||
private boolean bulkRemove(Predicate<? super E> filter) {
|
||||
boolean removed = false;
|
||||
Node<E> p = null, ancestor = head;
|
||||
Node<E>[] nodes = null;
|
||||
int n, len = 0;
|
||||
do {
|
||||
// 1. Extract batch of up to 64 elements while holding the lock.
|
||||
long deathRow = 0; // "bitset" of size 64
|
||||
fullyLock();
|
||||
try {
|
||||
if (nodes == null) {
|
||||
if (p == null) p = head.next;
|
||||
for (Node<E> q = p; q != null; q = succ(q))
|
||||
if (q.item != null && ++len == 64)
|
||||
break;
|
||||
nodes = (Node<E>[]) new Node<?>[len];
|
||||
}
|
||||
for (n = 0; p != null && n < len; p = succ(p))
|
||||
nodes[n++] = p;
|
||||
} finally {
|
||||
fullyUnlock();
|
||||
}
|
||||
|
||||
// 2. Run the filter on the elements while lock is free.
|
||||
for (int i = 0; i < n; i++) {
|
||||
final E e;
|
||||
if ((e = nodes[i].item) != null && filter.test(e))
|
||||
deathRow |= 1L << i;
|
||||
}
|
||||
|
||||
// 3. Remove any filtered elements while holding the lock.
|
||||
if (deathRow != 0) {
|
||||
fullyLock();
|
||||
try {
|
||||
for (int i = 0; i < n; i++) {
|
||||
final Node<E> q;
|
||||
if ((deathRow & (1L << i)) != 0L
|
||||
&& (q = nodes[i]).item != null) {
|
||||
ancestor = findPred(q, ancestor);
|
||||
unlink(q, ancestor);
|
||||
removed = true;
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
fullyUnlock();
|
||||
}
|
||||
}
|
||||
} while (n > 0 && p != null);
|
||||
return removed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves this queue to a stream (that is, serializes it).
|
||||
*
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -43,6 +43,7 @@ import java.util.Collection;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.Objects;
|
||||
import java.util.PriorityQueue;
|
||||
import java.util.Queue;
|
||||
import java.util.SortedSet;
|
||||
@ -62,16 +63,15 @@ import java.util.function.Consumer;
|
||||
* non-comparable objects (doing so results in
|
||||
* {@code ClassCastException}).
|
||||
*
|
||||
* <p>This class and its iterator implement all of the
|
||||
* <em>optional</em> methods of the {@link Collection} and {@link
|
||||
* Iterator} interfaces. The Iterator provided in method {@link
|
||||
* #iterator()} and the Spliterator provided in method {@link #spliterator()}
|
||||
* are <em>not</em> guaranteed to traverse the elements of
|
||||
* the PriorityBlockingQueue in any particular order. If you need
|
||||
* ordered traversal, consider using
|
||||
* {@code Arrays.sort(pq.toArray())}. Also, method {@code drainTo}
|
||||
* can be used to <em>remove</em> some or all elements in priority
|
||||
* order and place them in another collection.
|
||||
* <p>This class and its iterator implement all of the <em>optional</em>
|
||||
* methods of the {@link Collection} and {@link Iterator} interfaces.
|
||||
* The Iterator provided in method {@link #iterator()} and the
|
||||
* Spliterator provided in method {@link #spliterator()} are <em>not</em>
|
||||
* guaranteed to traverse the elements of the PriorityBlockingQueue in
|
||||
* any particular order. If you need ordered traversal, consider using
|
||||
* {@code Arrays.sort(pq.toArray())}. Also, method {@code drainTo} can
|
||||
* be used to <em>remove</em> some or all elements in priority order and
|
||||
* place them in another collection.
|
||||
*
|
||||
* <p>Operations on this class make no guarantees about the ordering
|
||||
* of elements with equal priority. If you need to enforce an
|
||||
@ -437,15 +437,14 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
|
||||
*/
|
||||
private void heapify() {
|
||||
Object[] array = queue;
|
||||
int n = size;
|
||||
int half = (n >>> 1) - 1;
|
||||
int n = size, i = (n >>> 1) - 1;
|
||||
Comparator<? super E> cmp = comparator;
|
||||
if (cmp == null) {
|
||||
for (int i = half; i >= 0; i--)
|
||||
for (; i >= 0; i--)
|
||||
siftDownComparable(i, (E) array[i], array, n);
|
||||
}
|
||||
else {
|
||||
for (int i = half; i >= 0; i--)
|
||||
for (; i >= 0; i--)
|
||||
siftDownUsingComparator(i, (E) array[i], array, n, cmp);
|
||||
}
|
||||
}
|
||||
@ -730,8 +729,7 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
|
||||
* @throws IllegalArgumentException {@inheritDoc}
|
||||
*/
|
||||
public int drainTo(Collection<? super E> c, int maxElements) {
|
||||
if (c == null)
|
||||
throw new NullPointerException();
|
||||
Objects.requireNonNull(c);
|
||||
if (c == this)
|
||||
throw new IllegalArgumentException();
|
||||
if (maxElements <= 0)
|
||||
@ -935,21 +933,22 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
|
||||
* Immutable snapshot spliterator that binds to elements "late".
|
||||
*/
|
||||
final class PBQSpliterator implements Spliterator<E> {
|
||||
Object[] array;
|
||||
Object[] array; // null until late-bound-initialized
|
||||
int index;
|
||||
int fence;
|
||||
|
||||
PBQSpliterator() {}
|
||||
|
||||
PBQSpliterator(Object[] array, int index, int fence) {
|
||||
this.array = array;
|
||||
this.index = index;
|
||||
this.fence = fence;
|
||||
}
|
||||
|
||||
final int getFence() {
|
||||
int hi;
|
||||
if ((hi = fence) < 0)
|
||||
hi = fence = (array = toArray()).length;
|
||||
return hi;
|
||||
private int getFence() {
|
||||
if (array == null)
|
||||
fence = (array = toArray()).length;
|
||||
return fence;
|
||||
}
|
||||
|
||||
public PBQSpliterator trySplit() {
|
||||
@ -958,25 +957,19 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
|
||||
new PBQSpliterator(array, lo, index = mid);
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void forEachRemaining(Consumer<? super E> action) {
|
||||
Object[] a; int i, hi; // hoist accesses and checks from loop
|
||||
if (action == null)
|
||||
throw new NullPointerException();
|
||||
if ((a = array) == null)
|
||||
fence = (a = toArray()).length;
|
||||
if ((hi = fence) <= a.length &&
|
||||
(i = index) >= 0 && i < (index = hi)) {
|
||||
do { action.accept((E)a[i]); } while (++i < hi);
|
||||
}
|
||||
Objects.requireNonNull(action);
|
||||
final int hi = getFence(), lo = index;
|
||||
final Object[] a = array;
|
||||
index = hi; // ensure exhaustion
|
||||
for (int i = lo; i < hi; i++)
|
||||
action.accept((E) a[i]);
|
||||
}
|
||||
|
||||
public boolean tryAdvance(Consumer<? super E> action) {
|
||||
if (action == null)
|
||||
throw new NullPointerException();
|
||||
Objects.requireNonNull(action);
|
||||
if (getFence() > index && index >= 0) {
|
||||
@SuppressWarnings("unchecked") E e = (E) array[index++];
|
||||
action.accept(e);
|
||||
action.accept((E) array[index++]);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -985,7 +978,9 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
|
||||
public long estimateSize() { return getFence() - index; }
|
||||
|
||||
public int characteristics() {
|
||||
return Spliterator.NONNULL | Spliterator.SIZED | Spliterator.SUBSIZED;
|
||||
return (Spliterator.NONNULL |
|
||||
Spliterator.SIZED |
|
||||
Spliterator.SUBSIZED);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1007,7 +1002,7 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
|
||||
* @since 1.8
|
||||
*/
|
||||
public Spliterator<E> spliterator() {
|
||||
return new PBQSpliterator(null, 0, -1);
|
||||
return new PBQSpliterator();
|
||||
}
|
||||
|
||||
// VarHandle mechanics
|
||||
|
@ -254,7 +254,7 @@ public class RemoveMicroBenchmark {
|
||||
// "iterations=%d size=%d, warmup=%1g, filter=\"%s\"%n",
|
||||
// iterations, size, warmupSeconds, filter);
|
||||
|
||||
final ArrayList<Integer> al = new ArrayList<Integer>(size);
|
||||
final ArrayList<Integer> al = new ArrayList<>(size);
|
||||
|
||||
// Populate collections with random data
|
||||
final ThreadLocalRandom rnd = ThreadLocalRandom.current();
|
||||
@ -333,7 +333,7 @@ public class RemoveMicroBenchmark {
|
||||
Supplier<Collection<Integer>> supplier,
|
||||
ArrayList<Integer> al) {
|
||||
return List.of(
|
||||
new Job(description + " .removeIf") {
|
||||
new Job(description + " removeIf") {
|
||||
public void work() throws Throwable {
|
||||
Collection<Integer> x = supplier.get();
|
||||
int[] sum = new int[1];
|
||||
@ -342,7 +342,21 @@ public class RemoveMicroBenchmark {
|
||||
x.addAll(al);
|
||||
x.removeIf(n -> { sum[0] += n; return true; });
|
||||
check.sum(sum[0]);}}},
|
||||
new Job(description + " .removeAll") {
|
||||
new Job(description + " removeIf rnd-two-pass") {
|
||||
public void work() throws Throwable {
|
||||
ThreadLocalRandom rnd = ThreadLocalRandom.current();
|
||||
Collection<Integer> x = supplier.get();
|
||||
int[] sum = new int[1];
|
||||
for (int i = 0; i < iterations; i++) {
|
||||
sum[0] = 0;
|
||||
x.addAll(al);
|
||||
x.removeIf(n -> {
|
||||
boolean b = rnd.nextBoolean();
|
||||
if (b) sum[0] += n;
|
||||
return b; });
|
||||
x.removeIf(n -> { sum[0] += n; return true; });
|
||||
check.sum(sum[0]);}}},
|
||||
new Job(description + " removeAll") {
|
||||
public void work() throws Throwable {
|
||||
Collection<Integer> x = supplier.get();
|
||||
int[] sum = new int[1];
|
||||
@ -352,7 +366,7 @@ public class RemoveMicroBenchmark {
|
||||
x.addAll(al);
|
||||
x.removeAll(universe);
|
||||
check.sum(sum[0]);}}},
|
||||
new Job(description + " .retainAll") {
|
||||
new Job(description + " retainAll") {
|
||||
public void work() throws Throwable {
|
||||
Collection<Integer> x = supplier.get();
|
||||
int[] sum = new int[1];
|
||||
@ -375,6 +389,28 @@ public class RemoveMicroBenchmark {
|
||||
it.remove();
|
||||
}
|
||||
check.sum(sum[0]);}}},
|
||||
new Job(description + " Iterator.remove-rnd-two-pass") {
|
||||
public void work() throws Throwable {
|
||||
ThreadLocalRandom rnd = ThreadLocalRandom.current();
|
||||
Collection<Integer> x = supplier.get();
|
||||
int[] sum = new int[1];
|
||||
for (int i = 0; i < iterations; i++) {
|
||||
sum[0] = 0;
|
||||
x.addAll(al);
|
||||
for (Iterator<Integer> it = x.iterator();
|
||||
it.hasNext(); ) {
|
||||
Integer e = it.next();
|
||||
if (rnd.nextBoolean()) {
|
||||
sum[0] += e;
|
||||
it.remove();
|
||||
}
|
||||
}
|
||||
for (Iterator<Integer> it = x.iterator();
|
||||
it.hasNext(); ) {
|
||||
sum[0] += it.next();
|
||||
it.remove();
|
||||
}
|
||||
check.sum(sum[0]);}}},
|
||||
new Job(description + " clear") {
|
||||
public void work() throws Throwable {
|
||||
Collection<Integer> x = supplier.get();
|
||||
|
@ -0,0 +1,355 @@
|
||||
/*
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is available under and governed by the GNU General Public
|
||||
* License version 2 only, as published by the Free Software Foundation.
|
||||
* However, the following notice accompanied the original version of this
|
||||
* file:
|
||||
*
|
||||
* Written by Martin Buchholz with assistance from members of JCP
|
||||
* JSR-166 Expert Group and released to the public domain, as
|
||||
* explained at http://creativecommons.org/publicdomain/zero/1.0/
|
||||
*/
|
||||
|
||||
/*
|
||||
* @test
|
||||
* @modules java.base/java.util.concurrent:open
|
||||
* @run testng WhiteBox
|
||||
* @summary White box tests of implementation details
|
||||
*/
|
||||
|
||||
import static org.testng.Assert.*;
|
||||
import org.testng.annotations.DataProvider;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.ObjectInputStream;
|
||||
import java.io.ObjectOutputStream;
|
||||
import java.lang.invoke.MethodHandles;
|
||||
import java.lang.invoke.VarHandle;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ConcurrentLinkedQueue;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
@Test
|
||||
public class WhiteBox {
|
||||
final ThreadLocalRandom rnd = ThreadLocalRandom.current();
|
||||
final VarHandle HEAD, TAIL, ITEM, NEXT;
|
||||
|
||||
WhiteBox() throws ReflectiveOperationException {
|
||||
Class<?> qClass = ConcurrentLinkedQueue.class;
|
||||
Class<?> nodeClass = Class.forName(qClass.getName() + "$Node");
|
||||
MethodHandles.Lookup lookup
|
||||
= MethodHandles.privateLookupIn(qClass, MethodHandles.lookup());
|
||||
HEAD = lookup.findVarHandle(qClass, "head", nodeClass);
|
||||
TAIL = lookup.findVarHandle(qClass, "tail", nodeClass);
|
||||
NEXT = lookup.findVarHandle(nodeClass, "next", nodeClass);
|
||||
ITEM = lookup.findVarHandle(nodeClass, "item", Object.class);
|
||||
}
|
||||
|
||||
Object head(ConcurrentLinkedQueue q) { return HEAD.getVolatile(q); }
|
||||
Object tail(ConcurrentLinkedQueue q) { return TAIL.getVolatile(q); }
|
||||
Object item(Object node) { return ITEM.getVolatile(node); }
|
||||
Object next(Object node) { return NEXT.getVolatile(node); }
|
||||
|
||||
int nodeCount(ConcurrentLinkedQueue q) {
|
||||
int i = 0;
|
||||
for (Object p = head(q); p != null; ) {
|
||||
i++;
|
||||
if (p == (p = next(p))) p = head(q);
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
void assertIsSelfLinked(Object node) {
|
||||
assertSame(next(node), node);
|
||||
assertNull(item(node));
|
||||
}
|
||||
|
||||
void assertIsNotSelfLinked(Object node) {
|
||||
assertNotSame(node, next(node));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void addRemove() {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
assertInvariants(q);
|
||||
assertNull(item(head(q)));
|
||||
assertEquals(nodeCount(q), 1);
|
||||
q.add(1);
|
||||
assertEquals(nodeCount(q), 2);
|
||||
assertInvariants(q);
|
||||
q.remove(1);
|
||||
assertEquals(nodeCount(q), 1);
|
||||
assertInvariants(q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Traversal actions that visit every node and do nothing, but
|
||||
* have side effect of squeezing out dead nodes.
|
||||
*/
|
||||
@DataProvider
|
||||
public Object[][] traversalActions() {
|
||||
return List.<Consumer<ConcurrentLinkedQueue>>of(
|
||||
q -> q.forEach(e -> {}),
|
||||
q -> assertFalse(q.contains(new Object())),
|
||||
q -> assertFalse(q.remove(new Object())),
|
||||
q -> q.spliterator().forEachRemaining(e -> {}),
|
||||
q -> q.stream().collect(toList()),
|
||||
q -> assertFalse(q.removeIf(e -> false)),
|
||||
q -> assertFalse(q.removeAll(List.of())))
|
||||
.stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
|
||||
}
|
||||
|
||||
@Test(dataProvider = "traversalActions")
|
||||
public void traversalOperationsCollapseNodes(
|
||||
Consumer<ConcurrentLinkedQueue> traversalAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
Object oldHead;
|
||||
int n = 1 + rnd.nextInt(5);
|
||||
for (int i = 0; i < n; i++) q.add(i);
|
||||
assertInvariants(q);
|
||||
assertEquals(nodeCount(q), n + 1);
|
||||
oldHead = head(q);
|
||||
traversalAction.accept(q); // collapses head node
|
||||
assertIsSelfLinked(oldHead);
|
||||
assertInvariants(q);
|
||||
assertEquals(nodeCount(q), n);
|
||||
// Iterator.remove does not currently try to collapse dead nodes
|
||||
for (Iterator it = q.iterator(); it.hasNext(); ) {
|
||||
it.next();
|
||||
it.remove();
|
||||
}
|
||||
assertEquals(nodeCount(q), n);
|
||||
assertInvariants(q);
|
||||
oldHead = head(q);
|
||||
traversalAction.accept(q); // collapses all nodes
|
||||
if (n > 1) assertIsSelfLinked(oldHead);
|
||||
assertEquals(nodeCount(q), 1);
|
||||
assertInvariants(q);
|
||||
|
||||
for (int i = 0; i < n + 1; i++) q.add(i);
|
||||
assertEquals(nodeCount(q), n + 2);
|
||||
oldHead = head(q);
|
||||
assertEquals(0, q.poll()); // 2 leading nodes collapsed
|
||||
assertIsSelfLinked(oldHead);
|
||||
assertEquals(nodeCount(q), n);
|
||||
assertTrue(q.remove(n));
|
||||
assertEquals(nodeCount(q), n);
|
||||
traversalAction.accept(q); // trailing node is never collapsed
|
||||
}
|
||||
|
||||
@Test(dataProvider = "traversalActions")
|
||||
public void traversalOperationsCollapseLeadingNodes(
|
||||
Consumer<ConcurrentLinkedQueue> traversalAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
Object oldHead;
|
||||
int n = 1 + rnd.nextInt(5);
|
||||
for (int i = 0; i < n; i++) q.add(i);
|
||||
assertEquals(nodeCount(q), n + 1);
|
||||
oldHead = head(q);
|
||||
traversalAction.accept(q);
|
||||
assertInvariants(q);
|
||||
assertEquals(nodeCount(q), n);
|
||||
assertIsSelfLinked(oldHead);
|
||||
}
|
||||
|
||||
@Test(dataProvider = "traversalActions")
|
||||
public void traversalOperationsDoNotSelfLinkInteriorNodes(
|
||||
Consumer<ConcurrentLinkedQueue> traversalAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
int c;
|
||||
int n = 3 + rnd.nextInt(3);
|
||||
for (int i = 0; i < n; i++) q.add(i);
|
||||
Object oneNode;
|
||||
for (oneNode = head(q);
|
||||
! (item(oneNode) != null && item(oneNode).equals(1));
|
||||
oneNode = next(oneNode))
|
||||
;
|
||||
Object next = next(oneNode);
|
||||
c = nodeCount(q);
|
||||
for (Iterator it = q.iterator(); it.hasNext(); )
|
||||
if (it.next().equals(1)) it.remove();
|
||||
assertEquals(nodeCount(q), c - 1); // iterator detached head!
|
||||
assertNull(item(oneNode));
|
||||
assertSame(next, next(oneNode));
|
||||
assertInvariants(q);
|
||||
c = nodeCount(q);
|
||||
traversalAction.accept(q);
|
||||
assertEquals(nodeCount(q), c - 1);
|
||||
assertSame(next, next(oneNode)); // un-linked, but not self-linked
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks that traversal operations collapse a random pattern of
|
||||
* dead nodes as could normally only occur with a race.
|
||||
*/
|
||||
@Test(dataProvider = "traversalActions")
|
||||
public void traversalOperationsCollapseRandomNodes(
|
||||
Consumer<ConcurrentLinkedQueue> traversalAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
int n = rnd.nextInt(6);
|
||||
for (int i = 0; i < n; i++) q.add(i);
|
||||
ArrayList nulledOut = new ArrayList();
|
||||
for (Object p = head(q); p != null; p = next(p))
|
||||
if (item(p) != null && rnd.nextBoolean()) {
|
||||
nulledOut.add(item(p));
|
||||
ITEM.setVolatile(p, null);
|
||||
}
|
||||
traversalAction.accept(q);
|
||||
int c = nodeCount(q);
|
||||
assertEquals(q.size(), c - (q.contains(n - 1) ? 0 : 1));
|
||||
for (int i = 0; i < n; i++)
|
||||
assertTrue(nulledOut.contains(i) ^ q.contains(i));
|
||||
}
|
||||
|
||||
/**
|
||||
* Traversal actions that remove every element, and are also
|
||||
* expected to squeeze out dead nodes.
|
||||
*/
|
||||
@DataProvider
|
||||
public Object[][] bulkRemovalActions() {
|
||||
return List.<Consumer<ConcurrentLinkedQueue>>of(
|
||||
q -> q.clear(),
|
||||
q -> assertTrue(q.removeIf(e -> true)),
|
||||
q -> assertTrue(q.retainAll(List.of())))
|
||||
.stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
|
||||
}
|
||||
|
||||
@Test(dataProvider = "bulkRemovalActions")
|
||||
public void bulkRemovalOperationsCollapseNodes(
|
||||
Consumer<ConcurrentLinkedQueue> bulkRemovalAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
int n = 1 + rnd.nextInt(5);
|
||||
for (int i = 0; i < n; i++) q.add(i);
|
||||
bulkRemovalAction.accept(q);
|
||||
assertEquals(nodeCount(q), 1);
|
||||
assertInvariants(q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Actions that remove the first element, and are expected to
|
||||
* leave at most one slack dead node at head.
|
||||
*/
|
||||
@DataProvider
|
||||
public Object[][] pollActions() {
|
||||
return List.<Consumer<ConcurrentLinkedQueue>>of(
|
||||
q -> assertNotNull(q.poll()),
|
||||
q -> assertNotNull(q.remove()))
|
||||
.stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
|
||||
}
|
||||
|
||||
@Test(dataProvider = "pollActions")
|
||||
public void pollActionsOneNodeSlack(
|
||||
Consumer<ConcurrentLinkedQueue> pollAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
int n = 1 + rnd.nextInt(5);
|
||||
for (int i = 0; i < n; i++) q.add(i);
|
||||
assertEquals(nodeCount(q), n + 1);
|
||||
for (int i = 0; i < n; i++) {
|
||||
int c = nodeCount(q);
|
||||
boolean slack = item(head(q)) == null;
|
||||
if (slack) assertNotNull(item(next(head(q))));
|
||||
pollAction.accept(q);
|
||||
assertEquals(nodeCount(q), q.isEmpty() ? 1 : c - (slack ? 2 : 0));
|
||||
}
|
||||
assertInvariants(q);
|
||||
}
|
||||
|
||||
/**
|
||||
* Actions that append an element, and are expected to
|
||||
* leave at most one slack node at tail.
|
||||
*/
|
||||
@DataProvider
|
||||
public Object[][] addActions() {
|
||||
return List.<Consumer<ConcurrentLinkedQueue>>of(
|
||||
q -> q.add(1),
|
||||
q -> q.offer(1))
|
||||
.stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
|
||||
}
|
||||
|
||||
@Test(dataProvider = "addActions")
|
||||
public void addActionsOneNodeSlack(
|
||||
Consumer<ConcurrentLinkedQueue> addAction) {
|
||||
ConcurrentLinkedQueue q = new ConcurrentLinkedQueue();
|
||||
int n = 1 + rnd.nextInt(5);
|
||||
for (int i = 0; i < n; i++) {
|
||||
boolean slack = next(tail(q)) != null;
|
||||
addAction.accept(q);
|
||||
if (slack)
|
||||
assertNull(next(tail(q)));
|
||||
else {
|
||||
assertNotNull(next(tail(q)));
|
||||
assertNull(next(next(tail(q))));
|
||||
}
|
||||
assertInvariants(q);
|
||||
}
|
||||
}
|
||||
|
||||
byte[] serialBytes(Object o) {
|
||||
try {
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
ObjectOutputStream oos = new ObjectOutputStream(bos);
|
||||
oos.writeObject(o);
|
||||
oos.flush();
|
||||
oos.close();
|
||||
return bos.toByteArray();
|
||||
} catch (Exception fail) {
|
||||
throw new AssertionError(fail);
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
<T> T serialClone(T o) {
|
||||
try {
|
||||
ObjectInputStream ois = new ObjectInputStream
|
||||
(new ByteArrayInputStream(serialBytes(o)));
|
||||
T clone = (T) ois.readObject();
|
||||
assertNotSame(o, clone);
|
||||
assertSame(o.getClass(), clone.getClass());
|
||||
return clone;
|
||||
} catch (Exception fail) {
|
||||
throw new AssertionError(fail);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSerialization() {
|
||||
ConcurrentLinkedQueue q = serialClone(new ConcurrentLinkedQueue());
|
||||
assertInvariants(q);
|
||||
}
|
||||
|
||||
/** Checks conditions which should always be true. */
void assertInvariants(ConcurrentLinkedQueue q) {
    assertNotNull(head(q));
    assertNotNull(tail(q));
    // head is never self-linked (but tail may!)
    // If a freshly-read head appears self-linked, that read must have
    // raced with a concurrent update: re-reading head must yield a
    // different node, otherwise the invariant is violated.
    for (Object h; next(h = head(q)) == h; )
        assertNotSame(h, head(q)); // must be update race
}
|
||||
}
|
408
jdk/test/java/util/concurrent/LinkedTransferQueue/WhiteBox.java
Normal file
408
jdk/test/java/util/concurrent/LinkedTransferQueue/WhiteBox.java
Normal file
@ -0,0 +1,408 @@
|
||||
/*
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file is available under and governed by the GNU General Public
|
||||
* License version 2 only, as published by the Free Software Foundation.
|
||||
* However, the following notice accompanied the original version of this
|
||||
* file:
|
||||
*
|
||||
* Written by Martin Buchholz with assistance from members of JCP
|
||||
* JSR-166 Expert Group and released to the public domain, as
|
||||
* explained at http://creativecommons.org/publicdomain/zero/1.0/
|
||||
*/
|
||||
|
||||
/*
|
||||
* @test
|
||||
* @modules java.base/java.util.concurrent:open
|
||||
* @run testng WhiteBox
|
||||
* @summary White box tests of implementation details
|
||||
*/
|
||||
|
||||
import static org.testng.Assert.*;
|
||||
import org.testng.annotations.DataProvider;
|
||||
import org.testng.annotations.Test;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.ObjectInputStream;
|
||||
import java.io.ObjectOutputStream;
|
||||
import java.lang.invoke.MethodHandles;
|
||||
import java.lang.invoke.VarHandle;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.LinkedTransferQueue;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
@Test
public class WhiteBox {
    final ThreadLocalRandom rnd = ThreadLocalRandom.current();
    // Reflective handles onto LinkedTransferQueue's private head/tail
    // fields and its Node's item/next fields.
    final VarHandle HEAD, TAIL, ITEM, NEXT;
    // Copy of the queue's private static SWEEP_THRESHOLD constant.
    final int SWEEP_THRESHOLD;

    /**
     * Opens java.util.concurrent internals reflectively; relies on the
     * jtreg "@modules java.base/java.util.concurrent:open" declaration
     * in this file's test header.
     */
    public WhiteBox() throws ReflectiveOperationException {
        Class<?> qClass = LinkedTransferQueue.class;
        Class<?> nodeClass = Class.forName(qClass.getName() + "$Node");
        MethodHandles.Lookup lookup
            = MethodHandles.privateLookupIn(qClass, MethodHandles.lookup());
        HEAD = lookup.findVarHandle(qClass, "head", nodeClass);
        TAIL = lookup.findVarHandle(qClass, "tail", nodeClass);
        NEXT = lookup.findVarHandle(nodeClass, "next", nodeClass);
        ITEM = lookup.findVarHandle(nodeClass, "item", Object.class);
        SWEEP_THRESHOLD = (int)
            lookup.findStaticVarHandle(qClass, "SWEEP_THRESHOLD", int.class)
            .get();
    }
|
||||
|
||||
    // Volatile reads of the queue's head/tail and of a node's item/next,
    // via the VarHandles set up in the constructor.
    Object head(LinkedTransferQueue q) { return HEAD.getVolatile(q); }
    Object tail(LinkedTransferQueue q) { return TAIL.getVolatile(q); }
    Object item(Object node) { return ITEM.getVolatile(node); }
    Object next(Object node) { return NEXT.getVolatile(node); }
||||
|
||||
int nodeCount(LinkedTransferQueue q) {
|
||||
int i = 0;
|
||||
for (Object p = head(q); p != null; ) {
|
||||
i++;
|
||||
if (p == (p = next(p))) p = head(q);
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
int tailCount(LinkedTransferQueue q) {
|
||||
int i = 0;
|
||||
for (Object p = tail(q); p != null; ) {
|
||||
i++;
|
||||
if (p == (p = next(p))) p = head(q);
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
Object findNode(LinkedTransferQueue q, Object e) {
|
||||
for (Object p = head(q); p != null; ) {
|
||||
if (item(p) != null && e.equals(item(p)))
|
||||
return p;
|
||||
if (p == (p = next(p))) p = head(q);
|
||||
}
|
||||
throw new AssertionError("not found");
|
||||
}
|
||||
|
||||
Iterator iteratorAt(LinkedTransferQueue q, Object e) {
|
||||
for (Iterator it = q.iterator(); it.hasNext(); )
|
||||
if (it.next().equals(e))
|
||||
return it;
|
||||
throw new AssertionError("not found");
|
||||
}
|
||||
|
||||
/** Asserts that node has been unlinked: next points to itself, item is null. */
void assertIsSelfLinked(Object node) {
    assertSame(next(node), node);
    assertNull(item(node));
}
void assertIsNotSelfLinked(Object node) {
    assertNotSame(node, next(node));
}

/** add then remove: one data node appears, then collapses away. */
@Test
public void addRemove() {
    LinkedTransferQueue q = new LinkedTransferQueue();
    assertInvariants(q);
    // an empty queue holds a single node with null item and null next
    assertNull(next(head(q)));
    assertNull(item(head(q)));
    q.add(1);
    assertEquals(nodeCount(q), 2);
    assertInvariants(q);
    q.remove(1);
    // the dead node was squeezed out, back to one node
    assertEquals(nodeCount(q), 1);
    assertInvariants(q);
}
|
||||
|
||||
/**
 * Traversal actions that visit every node and do nothing, but
 * have side effect of squeezing out dead nodes.
 */
@DataProvider
public Object[][] traversalActions() {
    return List.<Consumer<LinkedTransferQueue>>of(
        q -> q.forEach(e -> {}),
        q -> assertFalse(q.contains(new Object())),
        q -> assertFalse(q.remove(new Object())),
        q -> q.spliterator().forEachRemaining(e -> {}),
        q -> q.stream().collect(toList()),
        q -> assertFalse(q.removeIf(e -> false)),
        q -> assertFalse(q.removeAll(List.of())))
        .stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
}

/** Any full traversal should unlink the dead leading node. */
@Test(dataProvider = "traversalActions")
public void traversalOperationsCollapseLeadingNodes(
    Consumer<LinkedTransferQueue> traversalAction) {
    LinkedTransferQueue q = new LinkedTransferQueue();
    Object oldHead;
    int n = 1 + rnd.nextInt(5);
    for (int i = 0; i < n; i++) q.add(i);
    // n elements plus the (dead) initial node
    assertEquals(nodeCount(q), n + 1);
    oldHead = head(q);
    traversalAction.accept(q);
    assertInvariants(q);
    // the dead leading node was collapsed away ...
    assertEquals(nodeCount(q), n);
    // ... and self-linked after being unlinked
    assertIsSelfLinked(oldHead);
}
|
||||
|
||||
/**
 * Iterator.remove on already-advanced iterators is used to fabricate
 * an interior dead node (p3), then checks that a subsequent traversal
 * squeezes it out.  Statement order here is load-bearing.
 */
@Test(dataProvider = "traversalActions")
public void traversalOperationsCollapseInteriorNodes(
    Consumer<LinkedTransferQueue> traversalAction) {
    LinkedTransferQueue q = new LinkedTransferQueue();
    int n = 6;
    for (int i = 0; i < n; i++) q.add(i);

    // We must be quite devious to reliably create an interior dead node
    Object p0 = findNode(q, 0);
    Object p1 = findNode(q, 1);
    Object p2 = findNode(q, 2);
    Object p3 = findNode(q, 3);
    Object p4 = findNode(q, 4);
    Object p5 = findNode(q, 5);

    Iterator it1 = iteratorAt(q, 1);
    Iterator it2 = iteratorAt(q, 2);

    it2.remove(); // causes it2's ancestor to advance to 1
    assertSame(next(p1), p3);
    assertSame(next(p2), p3);
    assertNull(item(p2));
    it1.remove(); // removes it2's ancestor
    assertSame(next(p0), p3);
    assertSame(next(p1), p3);
    assertSame(next(p2), p3);
    assertNull(item(p1));
    assertEquals(it2.next(), 3);
    it2.remove(); // it2's ancestor can't unlink

    assertSame(next(p0), p3); // p3 is now interior dead node
    assertSame(next(p1), p4); // it2 uselessly CASed p1.next
    assertSame(next(p2), p3);
    assertSame(next(p3), p4);
    assertInvariants(q);

    // the traversal should squeeze out exactly the interior dead node
    int c = nodeCount(q);
    traversalAction.accept(q);
    assertEquals(nodeCount(q), c - 1);

    assertSame(next(p0), p4);
    assertSame(next(p1), p4);
    assertSame(next(p2), p3);
    assertSame(next(p3), p4);
    assertInvariants(q);

    // trailing nodes are not unlinked
    Iterator it5 = iteratorAt(q, 5); it5.remove();
    traversalAction.accept(q);
    assertSame(next(p4), p5);
    assertNull(next(p5));
    assertEquals(nodeCount(q), c - 1);
}
|
||||
|
||||
/**
 * Checks that traversal operations collapse a random pattern of
 * dead nodes as could normally only occur with a race.
 */
@Test(dataProvider = "traversalActions")
public void traversalOperationsCollapseRandomNodes(
    Consumer<LinkedTransferQueue> traversalAction) {
    LinkedTransferQueue q = new LinkedTransferQueue();
    int n = rnd.nextInt(6);
    for (int i = 0; i < n; i++) q.add(i);
    ArrayList nulledOut = new ArrayList();
    // kill a random subset of nodes by nulling their items directly
    for (Object p = head(q); p != null; p = next(p))
        if (rnd.nextBoolean()) {
            nulledOut.add(item(p));
            ITEM.setVolatile(p, null);
        }
    traversalAction.accept(q);
    int c = nodeCount(q);
    // one extra node survives unless the last element is still alive
    // (trailing dead nodes are not unlinked)
    assertEquals(q.size(), c - (q.contains(n - 1) ? 0 : 1));
    // each element either was nulled out or is still present, never both
    for (int i = 0; i < n; i++)
        assertTrue(nulledOut.contains(i) ^ q.contains(i));
}
|
||||
|
||||
/**
 * Traversal actions that remove every element, and are also
 * expected to squeeze out dead nodes.
 */
@DataProvider
public Object[][] bulkRemovalActions() {
    return List.<Consumer<LinkedTransferQueue>>of(
        q -> q.clear(),
        q -> assertTrue(q.removeIf(e -> true)),
        q -> assertTrue(q.retainAll(List.of())))
        .stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
}

/** Bulk removal must leave a single node, not a chain of dead ones. */
@Test(dataProvider = "bulkRemovalActions")
public void bulkRemovalOperationsCollapseNodes(
    Consumer<LinkedTransferQueue> bulkRemovalAction) {
    LinkedTransferQueue q = new LinkedTransferQueue();
    int n = 1 + rnd.nextInt(5);
    for (int i = 0; i < n; i++) q.add(i);
    bulkRemovalAction.accept(q);
    // all dead nodes collapsed down to one
    assertEquals(nodeCount(q), 1);
    assertInvariants(q);
}
|
||||
|
||||
/**
 * Actions that remove the first element, and are expected to
 * leave at most one slack dead node at head.
 */
@DataProvider
public Object[][] pollActions() {
    return List.<Consumer<LinkedTransferQueue>>of(
        q -> assertNotNull(q.poll()),
        q -> { try { assertNotNull(q.poll(1L, TimeUnit.DAYS)); }
               catch (Throwable x) { throw new AssertionError(x); }},
        q -> { try { assertNotNull(q.take()); }
               catch (Throwable x) { throw new AssertionError(x); }},
        q -> assertNotNull(q.remove()))
        .stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
}

@Test(dataProvider = "pollActions")
public void pollActionsOneNodeSlack(
    Consumer<LinkedTransferQueue> pollAction) {
    LinkedTransferQueue q = new LinkedTransferQueue();
    int n = 1 + rnd.nextInt(5);
    for (int i = 0; i < n; i++) q.add(i);
    // n elements plus the dead initial node
    assertEquals(nodeCount(q), n + 1);
    for (int i = 0; i < n; i++) {
        int c = nodeCount(q);
        // "slack" == one dead node currently sits at head
        boolean slack = item(head(q)) == null;
        if (slack) assertNotNull(item(next(head(q))));
        pollAction.accept(q);
        // a poll either leaves one new dead node (count unchanged) or,
        // if there was already slack, collapses both dead nodes;
        // an emptied queue always collapses to a single node
        assertEquals(nodeCount(q), q.isEmpty() ? 1 : c - (slack ? 2 : 0));
    }
    assertInvariants(q);
}
|
||||
|
||||
/**
 * Actions that append an element, and are expected to
 * leave at most one slack node at tail.
 */
@DataProvider
public Object[][] addActions() {
    return List.<Consumer<LinkedTransferQueue>>of(
        q -> q.add(1),
        q -> q.offer(1))
        .stream().map(x -> new Object[]{ x }).toArray(Object[][]::new);
}

@Test(dataProvider = "addActions")
public void addActionsOneNodeSlack(
    Consumer<LinkedTransferQueue> addAction) {
    LinkedTransferQueue q = new LinkedTransferQueue();
    int n = 1 + rnd.nextInt(9);
    for (int i = 0; i < n; i++) {
        // "slack" == tail currently lags the true last node by one
        boolean slack = next(tail(q)) != null;
        addAction.accept(q);
        if (slack)
            // the append should have advanced tail, eliminating the slack
            assertNull(next(tail(q)));
        else {
            // tail now lags by exactly one node, never more
            assertNotNull(next(tail(q)));
            assertNull(next(next(tail(q))));
        }
        assertInvariants(q);
    }
}
|
||||
|
||||
byte[] serialBytes(Object o) {
|
||||
try {
|
||||
ByteArrayOutputStream bos = new ByteArrayOutputStream();
|
||||
ObjectOutputStream oos = new ObjectOutputStream(bos);
|
||||
oos.writeObject(o);
|
||||
oos.flush();
|
||||
oos.close();
|
||||
return bos.toByteArray();
|
||||
} catch (Exception fail) {
|
||||
throw new AssertionError(fail);
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
<T> T serialClone(T o) {
|
||||
try {
|
||||
ObjectInputStream ois = new ObjectInputStream
|
||||
(new ByteArrayInputStream(serialBytes(o)));
|
||||
T clone = (T) ois.readObject();
|
||||
assertNotSame(o, clone);
|
||||
assertSame(o.getClass(), clone.getClass());
|
||||
return clone;
|
||||
} catch (Exception fail) {
|
||||
throw new AssertionError(fail);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSerialization() {
|
||||
LinkedTransferQueue q = serialClone(new LinkedTransferQueue());
|
||||
assertInvariants(q);
|
||||
}
|
||||
|
||||
/**
 * Timed polls that time out leave cancelled nodes behind; checks that
 * no more than SWEEP_THRESHOLD of them accumulate.  Runs as a test via
 * the class-level @Test annotation.
 */
public void cancelledNodeSweeping() throws Throwable {
    // SWEEP_THRESHOLD must be a power of two
    assertEquals(SWEEP_THRESHOLD & (SWEEP_THRESHOLD - 1), 0);
    LinkedTransferQueue q = new LinkedTransferQueue();
    Thread blockHead = null;
    // Randomly park a consumer at the head to pin the leading node.
    if (rnd.nextBoolean()) {
        blockHead = new Thread(
            // interrupt is the intended shutdown signal; swallow it
            () -> { try { q.take(); } catch (InterruptedException ok) {}});
        blockHead.start();
        while (nodeCount(q) != 2) { Thread.yield(); }
        assertTrue(q.hasWaitingConsumer());
        assertEquals(q.getWaitingConsumerCount(), 1);
    }
    int initialNodeCount = nodeCount(q);

    // Some dead nodes do in fact accumulate ...
    if (blockHead != null)
        while (nodeCount(q) < initialNodeCount + SWEEP_THRESHOLD / 2)
            q.poll(1L, TimeUnit.MICROSECONDS);

    // ... but no more than SWEEP_THRESHOLD nodes accumulate
    for (int i = rnd.nextInt(SWEEP_THRESHOLD * 10); i-->0; )
        q.poll(1L, TimeUnit.MICROSECONDS);
    assertTrue(nodeCount(q) <= initialNodeCount + SWEEP_THRESHOLD);

    if (blockHead != null) {
        blockHead.interrupt();
        blockHead.join();
    }
}
|
||||
|
||||
/** Checks conditions which should always be true. */
void assertInvariants(LinkedTransferQueue q) {
    assertNotNull(head(q));
    assertNotNull(tail(q));
    // head is never self-linked (but tail may!)
    // If a freshly-read head appears self-linked, that read must have
    // raced with a concurrent update: re-reading head must yield a
    // different node, otherwise the invariant is violated.
    for (Object h; next(h = head(q)) == h; )
        assertNotSame(h, head(q)); // must be update race
}
|
||||
}
|
@ -753,6 +753,31 @@ public class Collection8Test extends JSR166TestCase {
|
||||
});
|
||||
}
|
||||
|
||||
/**
 * Concurrent Spliterators, once exhausted, stay exhausted.
 */
public void testStickySpliteratorExhaustion() throws Throwable {
    if (!impl.isConcurrent()) return;
    if (!testImplementationDetails) return;
    final ThreadLocalRandom rnd = ThreadLocalRandom.current();
    // any element reaching this consumer fails the test
    final Consumer alwaysThrows = e -> { throw new AssertionError(); };
    final Collection c = impl.emptyCollection();
    final Spliterator s = c.spliterator();
    // exhaust the spliterator via a randomly chosen method
    if (rnd.nextBoolean()) {
        assertFalse(s.tryAdvance(alwaysThrows));
    } else {
        s.forEachRemaining(alwaysThrows);
    }
    final Object one = impl.makeElement(1);
    // Spliterator should not notice added element
    c.add(one);
    if (rnd.nextBoolean()) {
        assertFalse(s.tryAdvance(alwaysThrows));
    } else {
        s.forEachRemaining(alwaysThrows);
    }
}
|
||||
|
||||
/**
|
||||
* Motley crew of threads concurrently randomly hammer the collection.
|
||||
*/
|
||||
|
Loading…
Reference in New Issue
Block a user