/**
 * Shared internal API for dual stacks and queues.
 */
static abstract class Transferer {
    /**
     * Performs a put or take.
     *
     * @param e if non-null, the item to be handed to a consumer;
     *          if null, requests that transfer return an item
     *          offered by producer.
     * @param timed if this operation should timeout
     * @param nanos the timeout, in nanoseconds
     * @return if non-null, the item provided or received; if null,
     *         the operation failed due to timeout or interrupt --
     *         the caller can distinguish which of these occurred
     *         by checking Thread.interrupted.
     */
    abstract Object transfer(Object e, boolean timed, long nanos);
}
/** Dual stack */
static final class TransferStack extends Transferer {
    /*
     * This extends Scherer-Scott dual stack algorithm, differing,
     * among other ways, by using "covering" nodes rather than
     * bit-marked pointers: Fulfilling operations push on marker
     * nodes (with FULFILLING bit set in mode) to reserve a spot
     * to match a waiting node.
     */

    /* Modes for SNodes, ORed together in node fields */
    /** Node represents an unfulfilled consumer */
    static final int REQUEST    = 0;
    /** Node represents an unfulfilled producer */
    static final int DATA       = 1;
    /** Node is fulfilling another unfulfilled DATA or REQUEST */
    static final int FULFILLING = 2;

    /** Return true if m has fulfilling bit set */
    static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }
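For reference, here is a minimal, self-contained sketch (the class ModeBitsDemo is illustrative, not part of the JDK) showing how these mode bits combine: the low bit distinguishes DATA from REQUEST, and a fulfiller ORs in FULFILLING when it pushes its marker node.

import java.util.concurrent.SynchronousQueue; // not needed; constants copied for the demo

public class ModeBitsDemo {
    static final int REQUEST    = 0;
    static final int DATA       = 1;
    static final int FULFILLING = 2;

    static boolean isFulfilling(int m) { return (m & FULFILLING) != 0; }

    public static void main(String[] args) {
        int fulfillingProducer = FULFILLING | DATA;     // == 3
        int fulfillingConsumer = FULFILLING | REQUEST;  // == 2
        System.out.println(isFulfilling(DATA));               // false: plain waiter
        System.out.println(isFulfilling(fulfillingProducer)); // true
        System.out.println(isFulfilling(fulfillingConsumer)); // true
    }
}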
    /** Node class for TransferStacks. */
    static final class SNode {
        volatile SNode next;     // next node in stack
        volatile SNode match;    // the node matched to this
        volatile Thread waiter;  // to control park/unpark
        Object item;             // data; or null for REQUESTs
        int mode;
        // Note: item and mode fields don't need to be volatile
        // since they are always written before, and read after,
        // other volatile/atomic operations.

        SNode(Object item) {
            this.item = item;
        }

        /**
         * Tries to match node s to this node, if so, waking up thread.
         * Fulfillers call tryMatch to identify their waiters.
         * Waiters block until they have been matched.
         *
         * @param s the node to match
         * @return true if successfully matched to s
         */
        boolean tryMatch(SNode s) {
            if (match == null &&
                matchUpdater.compareAndSet(this, null, s)) {
                Thread w = waiter;
                if (w != null) {    // waiters need at most one unpark
                    waiter = null;
                    LockSupport.unpark(w);
                }
                return true;
            }
            return match == s;
        }

        /**
         * Tries to cancel a wait by matching node to itself.
         */
        void tryCancel() {
            matchUpdater.compareAndSet(this, null, this);
        }

        boolean isCancelled() {
            return match == this;
        }
    }
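The "CAS the field to the node itself" trick used by tryCancel/isCancelled is worth isolating. Below is a stripped-down sketch of that idiom (the Cell class and its names are hypothetical; only the pattern mirrors the SNode code above): whichever of cancel and match wins the CAS on the null slot decides the node's fate, and a self-reference unambiguously means "cancelled".

import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;

public class SelfCasDemo {
    static class Cell {
        volatile Cell match;   // null = waiting, other node = matched, this = cancelled

        static final AtomicReferenceFieldUpdater<Cell, Cell> matchUpdater =
            AtomicReferenceFieldUpdater.newUpdater(Cell.class, Cell.class, "match");

        boolean tryMatch(Cell s) { return matchUpdater.compareAndSet(this, null, s); }
        void tryCancel()         { matchUpdater.compareAndSet(this, null, this); }
        boolean isCancelled()    { return match == this; }
    }

    public static void main(String[] args) {
        Cell waiter = new Cell();
        waiter.tryCancel();                              // the waiter gives up first
        System.out.println(waiter.isCancelled());        // true
        System.out.println(waiter.tryMatch(new Cell())); // false: CAS lost to the cancel
    }
}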
    /** The head (top) of the stack */
    volatile SNode head;
    /**
     * Puts or takes an item.
     */
    Object transfer(Object e, boolean timed, long nanos) {
        /*
         * Basic algorithm is to loop trying one of three actions:
         *
         * 1. If apparently empty or already containing nodes of same
         *    mode, try to push node on stack and wait for a match,
         *    returning it, or null if cancelled.
         *
         * 2. If apparently containing node of complementary mode,
         *    try to push a fulfilling node on to stack, match
         *    with corresponding waiting node, pop both from
         *    stack, and return matched item. The matching or
         *    unlinking might not actually be necessary because of
         *    other threads performing action 3:
         *
         * 3. If top of stack already holds another fulfilling node,
         *    help it out by doing its match and/or pop
         *    operations, and then continue. The code for helping
         *    is essentially the same as for fulfilling, except
         *    that it doesn't return the item.
         */

        SNode s = null; // constructed/reused as needed
        int mode = (e == null) ? REQUEST : DATA;

        for (;;) {
            SNode h = head;
            if (h == null || h.mode == mode) {  // empty or same-mode
                if (timed && nanos <= 0) {      // can't wait
                    if (h != null && h.isCancelled())
                        casHead(h, h.next);     // pop cancelled node
                    else
                        return null;
                } else if (casHead(h, s = snode(s, e, h, mode))) {
                    SNode m = awaitFulfill(s, timed, nanos);
                    if (m == s) {               // wait was cancelled
                        clean(s);
                        return null;
                    }
                    if ((h = head) != null && h.next == s)
                        casHead(h, s.next);     // help s's fulfiller
                    return mode == REQUEST ? m.item : s.item;
                }
            } else if (!isFulfilling(h.mode)) { // try to fulfill
                if (h.isCancelled())            // already cancelled
                    casHead(h, h.next);         // pop and retry
                else if (casHead(h, s = snode(s, e, h, FULFILLING|mode))) {
                    for (;;) { // loop until matched or waiters disappear
                        SNode m = s.next;       // m is s's match
                        if (m == null) {        // all waiters are gone
                            casHead(s, null);   // pop fulfill node
                            s = null;           // use new node next time
                            break;              // restart main loop
                        }
                        SNode mn = m.next;
                        if (m.tryMatch(s)) {
                            casHead(s, mn);     // pop both s and m
                            return (mode == REQUEST) ? m.item : s.item;
                        } else                  // lost match
                            s.casNext(m, mn);   // help unlink
                    }
                }
            } else {                            // help a fulfiller
                SNode m = h.next;               // m is h's match
                if (m == null)                  // waiter is gone
                    casHead(h, null);           // pop fulfilling node
                else {
                    SNode mn = m.next;
                    if (m.tryMatch(h))          // help match
                        casHead(h, mn);         // pop both h and m
                    else                        // lost match
                        h.casNext(m, mn);       // help unlink
                }
            }
        }
    }
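From the caller's point of view, this transfer loop is what makes put() and take() meet each other directly. A small usage sketch (demo class name is illustrative) of the default, non-fair SynchronousQueue backed by TransferStack:

import java.util.concurrent.SynchronousQueue;

public class HandoffDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> q = new SynchronousQueue<>();

        Thread consumer = new Thread(() -> {
            try {
                System.out.println("consumer got: " + q.take()); // blocks until put()
            } catch (InterruptedException ignored) { }
        });
        consumer.start();

        q.put("hello");      // blocks until the consumer's take() matches it
        consumer.join();
        System.out.println("queue size is always " + q.size()); // 0: no capacity
    }
}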
    /**
     * Creates or resets fields of a node. Called only from transfer
     * where the node to push on stack is lazily created and
     * reused when possible to help reduce intervals between reads
     * and CASes of head and to avoid surges of garbage when CASes
     * to push nodes fail due to contention.
     */
    static SNode snode(SNode s, Object e, SNode next, int mode) {
        if (s == null) s = new SNode(e);
        s.mode = mode;
        s.next = next;
        return s;
    }
    /**
     * Spins/blocks until node s is matched by a fulfill operation.
     *
     * @param s the waiting node
     * @param timed true if timed wait
     * @param nanos timeout value
     * @return matched node, or s if cancelled
     */
    SNode awaitFulfill(SNode s, boolean timed, long nanos) {
        /*
         * When a node/thread is about to block, it sets its waiter
         * field and then rechecks state at least one more time
         * before actually parking, thus covering race vs
         * fulfiller noticing that waiter is non-null so should be
         * woken.
         *
         * When invoked by nodes that appear at the point of call
         * to be at the head of the stack, calls to park are
         * preceded by spins to avoid blocking when producers and
         * consumers are arriving very close in time. This can
         * happen enough to bother only on multiprocessors.
         *
         * The order of checks for returning out of main loop
         * reflects fact that interrupts have precedence over
         * normal returns, which have precedence over
         * timeouts. (So, on timeout, one last check for match is
         * done before giving up.) Except that calls from untimed
         * SynchronousQueue.{poll/offer} don't check interrupts
         * and don't wait at all, so are trapped in transfer
         * method rather than calling awaitFulfill.
         */
        long lastTime = (timed) ? System.nanoTime() : 0;
        Thread w = Thread.currentThread();
        SNode h = head;
        int spins = (shouldSpin(s) ?
                     (timed ? maxTimedSpins : maxUntimedSpins) : 0);
        for (;;) {
            if (w.isInterrupted())
                s.tryCancel();
            SNode m = s.match;
            if (m != null)
                return m;
            if (timed) {
                long now = System.nanoTime();
                nanos -= now - lastTime;
                lastTime = now;
                if (nanos <= 0) {
                    s.tryCancel();
                    continue;
                }
            }
            if (spins > 0)
                spins = shouldSpin(s) ? (spins - 1) : 0;
            else if (s.waiter == null)
                s.waiter = w; // establish waiter so can park next iter
            else if (!timed)
                LockSupport.park(this);
            else if (nanos > spinForTimeoutThreshold)
                LockSupport.parkNanos(this, nanos);
        }
    }
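The spin-then-park protocol described in the comment above (publish the waiter thread, recheck the result, only then park; the fulfiller writes the result first and unparks at most once) can be isolated into a minimal sketch. The class and field names below are illustrative, not the JDK code:

import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

public class SpinParkDemo {
    static final AtomicReference<String> result = new AtomicReference<>();
    static volatile Thread waiter;

    public static void main(String[] args) throws InterruptedException {
        Thread waiting = new Thread(() -> {
            int spins = 32;                          // brief spin before blocking
            while (result.get() == null) {
                if (spins > 0)
                    --spins;
                else if (waiter == null)
                    waiter = Thread.currentThread(); // publish, park on a later pass
                else
                    LockSupport.park();              // result is rechecked at loop top
            }
            System.out.println("got: " + result.get());
        });
        waiting.start();

        Thread.sleep(10);
        result.set("fulfilled");                     // write the result first...
        Thread w = waiter;
        if (w != null)
            LockSupport.unpark(w);                   // ...then wake the waiter once
        waiting.join();
    }
}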
    /**
     * Returns true if node s is at head or there is an active
     * fulfiller.
     */
    boolean shouldSpin(SNode s) {
        SNode h = head;
        return (h == s || h == null || isFulfilling(h.mode));
    }
    /** The number of CPUs, for spin control */
    static final int NCPUS = Runtime.getRuntime().availableProcessors();

    /**
     * The number of times to spin before blocking in timed waits.
     * The value is empirically derived -- it works well across a
     * variety of processors and OSes. Empirically, the best value
     * seems not to vary with number of CPUs (beyond 2) so is just
     * a constant.
     */
    static final int maxTimedSpins = (NCPUS < 2) ? 0 : 32;

    /**
     * The number of times to spin before blocking in untimed waits.
     * This is greater than timed value because untimed waits spin
     * faster since they don't need to check times on each spin.
     */
    static final int maxUntimedSpins = maxTimedSpins * 16;

    /**
     * The number of nanoseconds for which it is faster to spin
     * rather than to use timed park. A rough estimate suffices.
     */
    static final long spinForTimeoutThreshold = 1000L;
    /**
     * Unlinks s from the stack.
     */
    void clean(SNode s) {
        s.item = null;   // forget item
        s.waiter = null; // forget thread

        /*
         * At worst we may need to traverse entire stack to unlink
         * s. If there are multiple concurrent calls to clean, we
         * might not see s if another thread has already removed
         * it. But we can stop when we see any node known to
         * follow s. We use s.next unless it too is cancelled, in
         * which case we try the node one past. We don't check any
         * further because we don't want to doubly traverse just to
         * find sentinel.
         */

        SNode past = s.next;
        if (past != null && past.isCancelled())
            past = past.next;

        // Absorb cancelled nodes at head
        SNode p;
        while ((p = head) != null && p != past && p.isCancelled())
            casHead(p, p.next);

        // Unsplice embedded nodes
        while (p != null && p != past) {
            SNode n = p.next;
            if (n != null && n.isCancelled())
                p.casNext(n, n.next);
            else
                p = n;
        }
    }
/** Dual Queue */
static final class TransferQueue extends Transferer {
    /*
     * This extends Scherer-Scott dual queue algorithm, differing,
     * among other ways, by using modes within nodes rather than
     * marked pointers. The algorithm is a little simpler than
     * that for stacks because fulfillers do not need explicit
     * nodes, and matching is done by CAS'ing QNode.item field
     * from non-null to null (for put) or vice versa (for take).
     */
    /** Node class for TransferQueue. */
    static final class QNode {
        volatile QNode next;    // next node in queue
        volatile Object item;   // CAS'ed to or from null
        volatile Thread waiter; // to control park/unpark
        final boolean isData;

        /**
         * Tries to cancel by CAS'ing ref to this as item.
         */
        void tryCancel(Object cmp) {
            itemUpdater.compareAndSet(this, cmp, this);
        }

        boolean isCancelled() {
            return item == this;
        }

        /**
         * Returns true if this node is known to be off the queue
         * because its next pointer has been forgotten due to
         * an advanceHead operation.
         */
        boolean isOffList() {
            return next == this;
        }
    }
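As the TransferQueue class comment notes, matching in the queue is done entirely on the item slot: a take fulfills a waiting put by CAS'ing its item from non-null to null, and a put fulfills a waiting take by CAS'ing from null to the offered item. A standalone sketch of that rule, using plain AtomicReference (names are illustrative only):

import java.util.concurrent.atomic.AtomicReference;

public class ItemCasDemo {
    public static void main(String[] args) {
        // Waiting producer: its node holds the offered item.
        AtomicReference<Object> producerItem = new AtomicReference<>("data");
        Object taken = producerItem.get();
        boolean fulfilled = producerItem.compareAndSet(taken, null); // take fulfills put
        System.out.println(fulfilled + ", got " + taken);            // true, got data

        // Waiting consumer: its node holds null until a producer fills it.
        AtomicReference<Object> consumerItem = new AtomicReference<>(null);
        boolean handedOff = consumerItem.compareAndSet(null, "data"); // put fulfills take
        System.out.println(handedOff + ", slot now " + consumerItem.get());
    }
}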
    /** Head of queue */
    transient volatile QNode head;
    /** Tail of queue */
    transient volatile QNode tail;
    /**
     * Reference to a cancelled node that might not yet have been
     * unlinked from queue because it was the last inserted node
     * when it cancelled.
     */
    transient volatile QNode cleanMe;

    TransferQueue() {
        QNode h = new QNode(null, false); // initialize to dummy node.
        head = h;
        tail = h;
    }
    /**
     * Puts or takes an item.
     */
    Object transfer(Object e, boolean timed, long nanos) {
        /* Basic algorithm is to loop trying to take either of
         * two actions:
         *
         * 1. If queue apparently empty or holding same-mode nodes,
         *    try to add node to queue of waiters, wait to be
         *    fulfilled (or cancelled) and return matching item.
         *
         * 2. If queue apparently contains waiting items, and this
         *    call is of complementary mode, try to fulfill by CAS'ing
         *    item field of waiting node and dequeuing it, and then
         *    returning matching item.
         *
         * In each case, along the way, check for and try to help
         * advance head and tail on behalf of other stalled/slow
         * threads.
         *
         * The loop starts off with a null check guarding against
         * seeing uninitialized head or tail values. This never
         * happens in current SynchronousQueue, but could if
         * callers held non-volatile/final ref to the
         * transferer. The check is here anyway because it places
         * null checks at top of loop, which is usually faster
         * than having them implicitly interspersed.
         */

        QNode s = null; // constructed/reused as needed
        boolean isData = (e != null);

        for (;;) {
            QNode t = tail;
            QNode h = head;
            if (t == null || h == null)         // saw uninitialized value
                continue;                       // spin

            if (h == t || t.isData == isData) { // empty or same-mode
                QNode tn = t.next;
                if (t != tail)                  // inconsistent read
                    continue;
                if (tn != null) {               // lagging tail
                    advanceTail(t, tn);
                    continue;
                }
                if (timed && nanos <= 0)        // can't wait
                    return null;
                if (s == null)
                    s = new QNode(e, isData);
                if (!t.casNext(null, s))        // failed to link in
                    continue;

                advanceTail(t, s);              // swing tail and wait
                Object x = awaitFulfill(s, e, timed, nanos);
                if (x == s) {                   // wait was cancelled
                    clean(t, s);
                    return null;
                }

                if (!s.isOffList()) {           // not already unlinked
                    advanceHead(t, s);          // unlink if head
                    if (x != null)              // and forget fields
                        s.item = s;
                    s.waiter = null;
                }
                return (x != null) ? x : e;

            } else {                            // complementary-mode
                QNode m = h.next;               // node to fulfill
                if (t != tail || m == null || h != head)
                    continue;                   // inconsistent read

                Object x = m.item;
                if (isData == (x != null) ||    // m already fulfilled
                    x == m ||                   // m cancelled
                    !m.casItem(x, e)) {         // lost CAS
                    advanceHead(h, m);          // dequeue and retry
                    continue;
                }

                advanceHead(h, m);              // successfully fulfilled
                LockSupport.unpark(m.waiter);
                return (x != null) ? x : e;
            }
        }
    }
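This FIFO transfer loop is what backs the fair mode of SynchronousQueue. A short usage sketch (class name illustrative; FIFO matching itself is hard to observe deterministically from a tiny example, so this only exercises the API path):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

public class FairModeDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<Integer> fair = new SynchronousQueue<>(true); // TransferQueue

        Thread producer = new Thread(() -> {
            try {
                fair.put(42);                       // waits for a taker
            } catch (InterruptedException ignored) { }
        });
        producer.start();

        Integer v = fair.poll(1, TimeUnit.SECONDS); // matches the waiting put
        System.out.println("received: " + v);       // 42
        producer.join();
    }
}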
    /**
     * Tries to cas nh as new head; if successful, unlink
     * old head's next node to avoid garbage retention.
     */
    void advanceHead(QNode h, QNode nh) {
        if (h == head &&
            headUpdater.compareAndSet(this, h, nh))
            h.next = h; // forget old next
    }
    /**
     * Tries to cas nt as new tail.
     */
    void advanceTail(QNode t, QNode nt) {
        if (tail == t)
            tailUpdater.compareAndSet(this, t, nt);
    }
    /**
     * Spins/blocks until node s is fulfilled.
     *
     * @param s the waiting node
     * @param e the comparison value for checking match
     * @param timed true if timed wait
     * @param nanos timeout value
     * @return matched item, or s if cancelled
     */
    Object awaitFulfill(QNode s, Object e, boolean timed, long nanos) {
        /* Same idea as TransferStack.awaitFulfill */
        long lastTime = (timed) ? System.nanoTime() : 0;
        Thread w = Thread.currentThread();
        int spins = ((head.next == s) ?
                     (timed ? maxTimedSpins : maxUntimedSpins) : 0);
        for (;;) {
            if (w.isInterrupted())
                s.tryCancel(e);
            Object x = s.item;
            if (x != e)
                return x;
            if (timed) {
                long now = System.nanoTime();
                nanos -= now - lastTime;
                lastTime = now;
                if (nanos <= 0) {
                    s.tryCancel(e);
                    continue;
                }
            }
            if (spins > 0)
                --spins;
            else if (s.waiter == null)
                s.waiter = w;
            else if (!timed)
                LockSupport.park(this);
            else if (nanos > spinForTimeoutThreshold)
                LockSupport.parkNanos(this, nanos);
        }
    }
    /**
     * Gets rid of cancelled node s with original predecessor pred.
     */
    void clean(QNode pred, QNode s) {
        s.waiter = null; // forget thread
        /*
         * At any given time, exactly one node on list cannot be
         * deleted -- the last inserted node. To accommodate this,
         * if we cannot delete s, we save its predecessor as
         * "cleanMe", deleting the previously saved version
         * first. At least one of node s or the node previously
         * saved can always be deleted, so this always terminates.
         */
        while (pred.next == s) {  // Return early if already unlinked
            QNode h = head;
            QNode hn = h.next;    // Absorb cancelled first node as head
            if (hn != null && hn.isCancelled()) {
                advanceHead(h, hn);
                continue;
            }
            QNode t = tail;       // Ensure consistent read for tail
            if (t == h)
                return;
            QNode tn = t.next;
            if (t != tail)
                continue;
            if (tn != null) {
                advanceTail(t, tn);
                continue;
            }
            if (s != t) {         // If not tail, try to unsplice
                QNode sn = s.next;
                if (sn == s || pred.casNext(s, sn))
                    return;
            }
            QNode dp = cleanMe;
            if (dp != null) {     // Try unlinking previous cancelled node
                QNode d = dp.next;
                QNode dn;
                if (d == null ||               // d is gone or
                    d == dp ||                 // d is off list or
                    !d.isCancelled() ||        // d not cancelled or
                    (d != t &&                 // d not tail and
                     (dn = d.next) != null &&  //   has successor
                     dn != d &&                //   that is on list
                     dp.casNext(d, dn)))       // d unspliced
                    casCleanMe(dp, null);
                if (dp == pred)
                    return;       // s is already saved node
            } else if (casCleanMe(null, pred))
                return;           // Postpone cleaning s
        }
    }
/**
 * The transferer. Set only in constructor, but cannot be declared
 * as final without further complicating serialization. Since
 * this is accessed only at most once per public method, there
 * isn't a noticeable performance penalty for using volatile
 * instead of final here.
 */
private transient volatile Transferer transferer;

/**
 * Creates a <tt>SynchronousQueue</tt> with nonfair access policy.
 */
public SynchronousQueue() {
    this(false);
}

/**
 * Creates a <tt>SynchronousQueue</tt> with the specified fairness policy.
 *
 * @param fair if true, waiting threads contend in FIFO order for
 *        access; otherwise the order is unspecified.
 */
public SynchronousQueue(boolean fair) {
    transferer = (fair) ? new TransferQueue() : new TransferStack();
}
/**
 * Adds the specified element to this queue, waiting if necessary for
 * another thread to receive it.
 *
 * @throws InterruptedException {@inheritDoc}
 * @throws NullPointerException {@inheritDoc}
 */
public void put(E o) throws InterruptedException {
    if (o == null) throw new NullPointerException();
    if (transferer.transfer(o, false, 0) == null) {
        Thread.interrupted();
        throw new InterruptedException();
    }
}

/**
 * Inserts the specified element into this queue, waiting if necessary
 * up to the specified wait time for another thread to receive it.
 *
 * @return <tt>true</tt> if successful, or <tt>false</tt> if the
 *         specified waiting time elapses before a consumer appears.
 * @throws InterruptedException {@inheritDoc}
 * @throws NullPointerException {@inheritDoc}
 */
public boolean offer(E o, long timeout, TimeUnit unit)
    throws InterruptedException {
    if (o == null) throw new NullPointerException();
    if (transferer.transfer(o, true, unit.toNanos(timeout)) != null)
        return true;
    if (!Thread.interrupted())
        return false;
    throw new InterruptedException();
}

/**
 * Inserts the specified element into this queue, if another thread is
 * waiting to receive it.
 *
 * @param e the element to add
 * @return <tt>true</tt> if the element was added to this queue, else
 *         <tt>false</tt>
 * @throws NullPointerException if the specified element is null
 */
public boolean offer(E e) {
    if (e == null) throw new NullPointerException();
    return transferer.transfer(e, true, 0) != null;
}

/**
 * Retrieves and removes the head of this queue, waiting if necessary
 * for another thread to insert it.
 *
 * @return the head of this queue
 * @throws InterruptedException {@inheritDoc}
 */
public E take() throws InterruptedException {
    Object e = transferer.transfer(null, false, 0);
    if (e != null)
        return (E)e;
    Thread.interrupted();
    throw new InterruptedException();
}

/**
 * Retrieves and removes the head of this queue, waiting
 * if necessary up to the specified wait time, for another thread
 * to insert it.
 *
 * @return the head of this queue, or <tt>null</tt> if the
 *         specified waiting time elapses before an element is present.
 * @throws InterruptedException {@inheritDoc}
 */
public E poll(long timeout, TimeUnit unit) throws InterruptedException {
    Object e = transferer.transfer(null, true, unit.toNanos(timeout));
    if (e != null || !Thread.interrupted())
        return (E)e;
    throw new InterruptedException();
}

/**
 * Retrieves and removes the head of this queue, if another thread
 * is currently making an element available.
 *
 * @return the head of this queue, or <tt>null</tt> if no
 *         element is available.
 */
public E poll() {
    return (E)transferer.transfer(null, true, 0);
}
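Note how the non-blocking and timed variants all funnel into transfer with timed == true: with nobody waiting on the other side they fail immediately or after the timeout. A quick sketch (class name illustrative):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

public class OfferPollDemo {
    public static void main(String[] args) throws InterruptedException {
        SynchronousQueue<String> q = new SynchronousQueue<>();

        System.out.println(q.offer("x"));                              // false: no waiting consumer
        System.out.println(q.poll());                                  // null: no waiting producer
        System.out.println(q.offer("x", 100, TimeUnit.MILLISECONDS));  // false after ~100 ms
        System.out.println(q.poll(100, TimeUnit.MILLISECONDS));        // null after ~100 ms
    }
}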
/*
 * To cope with serialization strategy in the 1.5 version of
 * SynchronousQueue, we declare some unused classes and fields
 * that exist solely to enable serializability across versions.
 * These fields are never used, so are initialized only if this
 * object is ever serialized or deserialized.
 */
/**
 * Save the state to a stream (that is, serialize it).
 *
 * @param s the stream
 */
private void writeObject(java.io.ObjectOutputStream s)
    throws java.io.IOException {
    boolean fair = transferer instanceof TransferQueue;
    if (fair) {
        qlock = new ReentrantLock(true);
        waitingProducers = new FifoWaitQueue();
        waitingConsumers = new FifoWaitQueue();
    } else {
        qlock = new ReentrantLock();
        waitingProducers = new LifoWaitQueue();
        waitingConsumers = new LifoWaitQueue();
    }
    s.defaultWriteObject();
}

private void readObject(final java.io.ObjectInputStream s)
    throws java.io.IOException, ClassNotFoundException {
    s.defaultReadObject();
    if (waitingProducers instanceof FifoWaitQueue)
        transferer = new TransferQueue();
    else
        transferer = new TransferStack();
}
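A round-trip sketch of this compatibility code (class name illustrative): since a SynchronousQueue never holds elements, effectively only the fairness mode survives serialization, reconstructed from which legacy wait-queue type was written.

import java.io.*;
import java.util.concurrent.SynchronousQueue;

public class SerializationDemo {
    public static void main(String[] args) throws Exception {
        SynchronousQueue<String> fair = new SynchronousQueue<>(true);

        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(fair);                       // triggers writeObject above
        }

        try (ObjectInputStream in = new ObjectInputStream(
                 new ByteArrayInputStream(bytes.toByteArray()))) {
            @SuppressWarnings("unchecked")
            SynchronousQueue<String> copy = (SynchronousQueue<String>) in.readObject();
            System.out.println(copy.offer("x"));         // false: still a zero-capacity queue
        }
    }
}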