提交 0c4a5dce 编写于 作者: M mduigou

8011920: Main streams implementation

8012542: Stream methods on Collection
Reviewed-by: dholmes, mduigou
Contributed-by: Brian Goetz <brian.goetz@oracle.com>, Mike Duigou <mike.duigou@oracle.com>, Paul Sandoz <paul.sandoz@oracle.com>
上级 a655c4a6
...@@ -142,6 +142,7 @@ CORE_PKGS = \ ...@@ -142,6 +142,7 @@ CORE_PKGS = \
java.util.prefs \ java.util.prefs \
java.util.regex \ java.util.regex \
java.util.spi \ java.util.spi \
java.util.stream \
java.util.zip \ java.util.zip \
javax.accessibility \ javax.accessibility \
javax.activation \ javax.activation \
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
package java.util; package java.util;
import java.util.function.Predicate; import java.util.function.Predicate;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;
/** /**
* The root interface in the <i>collection hierarchy</i>. A collection * The root interface in the <i>collection hierarchy</i>. A collection
...@@ -499,9 +501,28 @@ public interface Collection<E> extends Iterable<E> { ...@@ -499,9 +501,28 @@ public interface Collection<E> extends Iterable<E> {
/** /**
* Creates a {@link Spliterator} over the elements in this collection. * Creates a {@link Spliterator} over the elements in this collection.
* *
* <p>The {@code Spliterator} reports {@link Spliterator#SIZED}. * <p>The returned {@code Spliterator} must report the characteristic
* Implementations should document the reporting of additional * {@link Spliterator#SIZED}; implementations should document any additional
* characteristic values. * characteristic values reported by the returned Spliterator.
*
* <p>The default implementation should be overridden by subclasses that
* can return a more efficient spliterator. In order to
* preserve expected laziness behavior for the {@link #stream()} and
 * {@link #parallelStream()} methods, spliterators should either have the
* characteristic of {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <em><a href="Spliterator.html#binding">late-binding</a></em>.
* If none of these is practical, the overriding class should describe the
* spliterator's documented policy of binding and structural interference,
* and should override the {@link #stream()} and {@link #parallelStream()}
* methods to create streams using a {@code Supplier} of the spliterator,
* as in:
* <pre>{@code
* Stream<E> s = StreamSupport.stream(() -> spliterator(), spliteratorCharacteristics)
* }</pre>
* <p>These requirements ensure that streams produced by the
* {@link #stream()} and {@link #parallelStream()} methods will reflect the
* contents of the collection as of initiation of the terminal stream
* operation.
* *
* @implSpec * @implSpec
* The default implementation creates a * The default implementation creates a
...@@ -510,7 +531,7 @@ public interface Collection<E> extends Iterable<E> { ...@@ -510,7 +531,7 @@ public interface Collection<E> extends Iterable<E> {
* <em>fail-fast</em> properties of the collection's iterator. * <em>fail-fast</em> properties of the collection's iterator.
* *
* @implNote * @implNote
* The created {@code Spliterator} additionally reports * The returned {@code Spliterator} additionally reports
* {@link Spliterator#SUBSIZED}. * {@link Spliterator#SUBSIZED}.
* *
* @return a {@code Spliterator} over the elements in this collection * @return a {@code Spliterator} over the elements in this collection
...@@ -519,4 +540,44 @@ public interface Collection<E> extends Iterable<E> { ...@@ -519,4 +540,44 @@ public interface Collection<E> extends Iterable<E> {
default Spliterator<E> spliterator() { default Spliterator<E> spliterator() {
return Spliterators.spliterator(this, 0); return Spliterators.spliterator(this, 0);
} }
/**
* Returns a sequential {@code Stream} with this collection as its source.
*
* <p>This method should be overridden when the {@link #spliterator()}
* method cannot return a spliterator that is {@code IMMUTABLE},
* {@code CONCURRENT}, or <em>late-binding</em>. (See {@link #spliterator()}
* for details.)
*
* @implSpec
* The default implementation creates a sequential {@code Stream} from the
* collection's {@code Spliterator}.
*
* @return a sequential {@code Stream} over the elements in this collection
* @since 1.8
*/
default Stream<E> stream() {
return StreamSupport.stream(spliterator());
}
/**
* Returns a possibly parallel {@code Stream} with this collection as its
* source. It is allowable for this method to return a sequential stream.
*
* <p>This method should be overridden when the {@link #spliterator()}
* method cannot return a spliterator that is {@code IMMUTABLE},
* {@code CONCURRENT}, or <em>late-binding</em>. (See {@link #spliterator()}
* for details.)
*
* @implSpec
* The default implementation creates a parallel {@code Stream} from the
* collection's {@code Spliterator}.
*
* @return a possibly parallel {@code Stream} over the elements in this
* collection
* @since 1.8
*/
default Stream<E> parallelStream() {
return StreamSupport.parallelStream(spliterator());
}
} }
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.Objects;
import java.util.Spliterator;
import java.util.function.IntFunction;
import java.util.function.Supplier;
/**
* Abstract base class for "pipeline" classes, which are the core
* implementations of the Stream interface and its primitive specializations.
* Manages construction and evaluation of stream pipelines.
*
* <p>An {@code AbstractPipeline} represents an initial portion of a stream
* pipeline, encapsulating a stream source and zero or more intermediate
* operations. The individual {@code AbstractPipeline} objects are often
* referred to as <em>stages</em>, where each stage describes either the stream
* source or an intermediate operation.
*
* <p>A concrete intermediate stage is generally built from an
* {@code AbstractPipeline}, a shape-specific pipeline class which extends it
* (e.g., {@code IntPipeline}) which is also abstract, and an operation-specific
* concrete class which extends that. {@code AbstractPipeline} contains most of
* the mechanics of evaluating the pipeline, and implements methods that will be
* used by the operation; the shape-specific classes add helper methods for
* dealing with collection of results into the appropriate shape-specific
* containers.
*
* <p>After chaining a new intermediate operation, or executing a terminal
* operation, the stream is considered to be consumed, and no more intermediate
* or terminal operations are permitted on this stream instance.
*
* <p>{@code AbstractPipeline} implements a number of methods that are
* specified in {@link BaseStream}, though it does not implement
* {@code BaseStream} directly. Subclasses of {@code AbstractPipeline}
* will generally implement {@code BaseStream}.
*
* @implNote
 * <p>For sequential streams, and parallel streams without
 * <a href="package-summary.html#StreamOps">stateful intermediate
 * operations</a>, pipeline evaluation is done in a single
 * pass that "jams" all the operations together. For parallel streams with
 * stateful operations, execution is divided into segments, where each
 * stateful operation marks the end of a segment, and each segment is
 * evaluated separately and the result used as the input to the next
 * segment. In all cases, the source data is not consumed until a terminal
 * operation begins.
*
* @param <E_IN> type of input elements
* @param <E_OUT> type of output elements
* @param <S> type of the subclass implementing {@code BaseStream}
* @since 1.8
*/
abstract class AbstractPipeline<E_IN, E_OUT, S extends BaseStream<E_OUT, S>>
        extends PipelineHelper<E_OUT> {

    /**
     * Backlink to the head of the pipeline chain (self if this is the source
     * stage).
     */
    private final AbstractPipeline sourceStage;

    /**
     * The "upstream" pipeline, or null if this is the source stage.
     */
    private final AbstractPipeline previousStage;

    /**
     * The operation flags for the intermediate operation represented by this
     * pipeline object.
     */
    protected final int sourceOrOpFlags;

    /**
     * The next stage in the pipeline, or null if this is the last stage.
     * Effectively final at the point of linking to the next pipeline.
     */
    private AbstractPipeline nextStage;

    /**
     * The number of intermediate operations between this pipeline object
     * and the stream source if sequential, or the previous stateful operation
     * if parallel. Valid at the point of pipeline preparation for evaluation.
     */
    private int depth;

    /**
     * The combined source and operation flags for the source and all operations
     * up to and including the operation represented by this pipeline object.
     * Valid at the point of pipeline preparation for evaluation.
     */
    private int combinedFlags;

    /**
     * The source spliterator. Only valid for the head pipeline.
     * Before the pipeline is consumed if non-null then {@code sourceSupplier}
     * must be null. After the pipeline is consumed if non-null then is set to
     * null.
     */
    private Spliterator<?> sourceSpliterator;

    /**
     * The source supplier. Only valid for the head pipeline. Before the
     * pipeline is consumed if non-null then {@code sourceSpliterator} must be
     * null. After the pipeline is consumed if non-null then is set to null.
     */
    private Supplier<? extends Spliterator<?>> sourceSupplier;

    /**
     * True if this pipeline has been linked or consumed.
     */
    private boolean linkedOrConsumed;

    /**
     * True if there are any stateful ops in the pipeline; only valid for the
     * source stage.
     */
    private boolean sourceAnyStateful;

    /**
     * True if pipeline is parallel, otherwise the pipeline is sequential; only
     * valid for the source stage.
     */
    private boolean parallel;

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Supplier<Spliterator>} describing the stream source
     * @param sourceFlags The source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel True if the pipeline is parallel
     */
    AbstractPipeline(Supplier<? extends Spliterator<?>> source,
                     int sourceFlags, boolean parallel) {
        this.previousStage = null;
        this.sourceSupplier = source;
        this.sourceStage = this;
        this.sourceOrOpFlags = sourceFlags & StreamOpFlag.STREAM_MASK;
        // The following is an optimization of:
        // StreamOpFlag.combineOpFlags(sourceOrOpFlags, StreamOpFlag.INITIAL_OPS_VALUE);
        this.combinedFlags = (~(sourceOrOpFlags << 1)) & StreamOpFlag.INITIAL_OPS_VALUE;
        this.depth = 0;
        this.parallel = parallel;
    }

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Spliterator} describing the stream source
     * @param sourceFlags the source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    AbstractPipeline(Spliterator<?> source,
                     int sourceFlags, boolean parallel) {
        this.previousStage = null;
        this.sourceSpliterator = source;
        this.sourceStage = this;
        this.sourceOrOpFlags = sourceFlags & StreamOpFlag.STREAM_MASK;
        // The following is an optimization of:
        // StreamOpFlag.combineOpFlags(sourceOrOpFlags, StreamOpFlag.INITIAL_OPS_VALUE);
        this.combinedFlags = (~(sourceOrOpFlags << 1)) & StreamOpFlag.INITIAL_OPS_VALUE;
        this.depth = 0;
        this.parallel = parallel;
    }

    /**
     * Constructor for appending an intermediate operation stage onto an
     * existing pipeline.
     *
     * @param previousStage the upstream pipeline stage
     * @param opFlags the operation flags for the new stage, described in
     *        {@link StreamOpFlag}
     * @throws IllegalStateException if the upstream stage has already been
     *         linked to another stage or consumed by a terminal operation
     */
    AbstractPipeline(AbstractPipeline<?, E_IN, ?> previousStage, int opFlags) {
        if (previousStage.linkedOrConsumed)
            throw new IllegalStateException("stream has already been operated upon");
        // Linking consumes the upstream stage: no further ops may be chained off it.
        previousStage.linkedOrConsumed = true;
        previousStage.nextStage = this;
        this.previousStage = previousStage;
        this.sourceOrOpFlags = opFlags & StreamOpFlag.OP_MASK;
        this.combinedFlags = StreamOpFlag.combineOpFlags(opFlags, previousStage.combinedFlags);
        this.sourceStage = previousStage.sourceStage;
        if (opIsStateful())
            sourceStage.sourceAnyStateful = true;
        this.depth = previousStage.depth + 1;
    }

    // Terminal evaluation methods

    /**
     * Evaluate the pipeline with a terminal operation to produce a result.
     *
     * @param <R> the type of result
     * @param terminalOp the terminal operation to be applied to the pipeline.
     * @return the result
     * @throws IllegalStateException if this stage has already been linked or
     *         consumed
     */
    final <R> R evaluate(TerminalOp<E_OUT, R> terminalOp) {
        assert getOutputShape() == terminalOp.inputShape();
        if (linkedOrConsumed)
            throw new IllegalStateException("stream has already been operated upon");
        linkedOrConsumed = true;
        // Unchecked casts to R: the result type is determined by the terminal op.
        return isParallel()
               ? (R) terminalOp.evaluateParallel(this, sourceSpliterator(terminalOp.getOpFlags()))
               : (R) terminalOp.evaluateSequential(this, sourceSpliterator(terminalOp.getOpFlags()));
    }

    /**
     * Collect the elements output from the pipeline stage.
     *
     * @param generator the array generator to be used to create array instances
     * @return a flat array-backed Node that holds the collected output elements
     * @throws IllegalStateException if this stage has already been linked or
     *         consumed
     */
    final Node<E_OUT> evaluateToArrayNode(IntFunction<E_OUT[]> generator) {
        if (linkedOrConsumed)
            throw new IllegalStateException("stream has already been operated upon");
        linkedOrConsumed = true;
        // If the last intermediate operation is stateful then
        // evaluate directly to avoid an extra collection step
        if (isParallel() && previousStage != null && opIsStateful()) {
            return opEvaluateParallel(previousStage, previousStage.sourceSpliterator(0), generator);
        }
        else {
            return evaluate(sourceSpliterator(0), true, generator);
        }
    }

    /**
     * Gets the source stage spliterator if this pipeline stage is the source
     * stage. The pipeline is consumed after this method is called and
     * returns successfully.
     *
     * @return the source stage spliterator
     * @throws IllegalStateException if this pipeline stage is not the source
     *         stage.
     */
    final Spliterator<E_OUT> sourceStageSpliterator() {
        if (this != sourceStage)
            throw new IllegalStateException();
        if (linkedOrConsumed)
            throw new IllegalStateException("stream has already been operated upon");
        linkedOrConsumed = true;
        // Exactly one of sourceSpliterator/sourceSupplier is non-null before
        // consumption; whichever is present is handed out and nulled.
        if (sourceStage.sourceSpliterator != null) {
            Spliterator<E_OUT> s = sourceStage.sourceSpliterator;
            sourceStage.sourceSpliterator = null;
            return s;
        }
        else if (sourceStage.sourceSupplier != null) {
            Spliterator<E_OUT> s = (Spliterator<E_OUT>) sourceStage.sourceSupplier.get();
            sourceStage.sourceSupplier = null;
            return s;
        }
        else {
            throw new IllegalStateException("source already consumed");
        }
    }

    // BaseStream

    /**
     * Implements {@link BaseStream#sequential()}.
     * Parallelism is a property of the source stage shared by the whole
     * pipeline, so the flag is flipped there.
     *
     * @return this stage, as the subclass stream type
     */
    public final S sequential() {
        sourceStage.parallel = false;
        return (S) this;
    }

    /**
     * Implements {@link BaseStream#parallel()}.
     * Parallelism is a property of the source stage shared by the whole
     * pipeline, so the flag is flipped there.
     *
     * @return this stage, as the subclass stream type
     */
    public final S parallel() {
        sourceStage.parallel = true;
        return (S) this;
    }

    // Primitive specialization use co-variant overrides, hence is not final

    /**
     * Implements {@link BaseStream#spliterator()}.
     * Consumes the pipeline; for a source stage backed by a supplier the
     * returned spliterator is lazy (late-binding).
     */
    public Spliterator<E_OUT> spliterator() {
        if (linkedOrConsumed)
            throw new IllegalStateException("stream has already been operated upon");
        linkedOrConsumed = true;
        if (this == sourceStage) {
            if (sourceStage.sourceSpliterator != null) {
                Spliterator<E_OUT> s = sourceStage.sourceSpliterator;
                sourceStage.sourceSpliterator = null;
                return s;
            }
            else if (sourceStage.sourceSupplier != null) {
                Supplier<Spliterator<E_OUT>> s = sourceStage.sourceSupplier;
                sourceStage.sourceSupplier = null;
                return lazySpliterator(s);
            }
            else {
                throw new IllegalStateException("source already consumed");
            }
        }
        else {
            // Not the source stage: wrap the (lazily obtained) source
            // spliterator with this stage's upstream operations.
            return wrap(this, () -> sourceSpliterator(0), isParallel());
        }
    }

    /**
     * Implements {@link BaseStream#isParallel()}.
     * Reads the flag from the source stage, which holds it for the whole
     * pipeline.
     */
    public final boolean isParallel() {
        return sourceStage.parallel;
    }

    /**
     * Returns the composition of stream flags of the stream source and all
     * intermediate operations.
     *
     * @return the composition of stream flags of the stream source and all
     *         intermediate operations
     * @see StreamOpFlag
     */
    final int getStreamFlags() {
        return StreamOpFlag.toStreamFlags(combinedFlags);
    }

    /**
     * Prepare the pipeline for a parallel execution. As the pipeline is built,
     * the flags and depth indicators are set up for a sequential execution.
     * If the execution is parallel, and there are any stateful operations, then
     * some of these need to be adjusted, as well as adjusting for flags from
     * the terminal operation (such as back-propagating UNORDERED).
     * Need not be called for a sequential execution.
     *
     * @param terminalFlags Operation flags for the terminal operation
     */
    private void parallelPrepare(int terminalFlags) {
        AbstractPipeline backPropagationHead = sourceStage;
        if (sourceStage.sourceAnyStateful) {
            int depth = 1;
            // Walk the stages in order; u is the upstream stage of p.
            for (AbstractPipeline u = sourceStage, p = sourceStage.nextStage;
                 p != null;
                 u = p, p = p.nextStage) {
                int thisOpFlags = p.sourceOrOpFlags;
                if (p.opIsStateful()) {
                    // If the stateful operation is a short-circuit operation
                    // then move the back propagation head forwards
                    // NOTE: there are no size-injecting ops
                    if (StreamOpFlag.SHORT_CIRCUIT.isKnown(thisOpFlags)) {
                        backPropagationHead = p;
                    }
                    // Each stateful op begins a new segment: reset depth.
                    depth = 0;
                    // The following injects size, it is equivalent to:
                    // StreamOpFlag.combineOpFlags(StreamOpFlag.IS_SIZED, p.combinedFlags);
                    thisOpFlags = (thisOpFlags & ~StreamOpFlag.NOT_SIZED) | StreamOpFlag.IS_SIZED;
                }
                p.depth = depth++;
                p.combinedFlags = StreamOpFlag.combineOpFlags(thisOpFlags, u.combinedFlags);
            }
        }
        // Apply the upstream terminal flags
        if (terminalFlags != 0) {
            int upstreamTerminalFlags = terminalFlags & StreamOpFlag.UPSTREAM_TERMINAL_OP_MASK;
            for (AbstractPipeline p = backPropagationHead; p.nextStage != null; p = p.nextStage) {
                p.combinedFlags = StreamOpFlag.combineOpFlags(upstreamTerminalFlags, p.combinedFlags);
            }
            combinedFlags = StreamOpFlag.combineOpFlags(terminalFlags, combinedFlags);
        }
    }

    /**
     * Get the source spliterator for this pipeline stage. For a sequential or
     * stateless parallel pipeline, this is the source spliterator. For a
     * stateful parallel pipeline, this is a spliterator describing the results
     * of all computations up to and including the most recent stateful
     * operation.
     *
     * @throws IllegalStateException if the source has already been consumed
     */
    private Spliterator<?> sourceSpliterator(int terminalFlags) {
        // Get the source spliterator of the pipeline
        Spliterator<?> spliterator = null;
        if (sourceStage.sourceSpliterator != null) {
            spliterator = sourceStage.sourceSpliterator;
            sourceStage.sourceSpliterator = null;
        }
        else if (sourceStage.sourceSupplier != null) {
            spliterator = (Spliterator<?>) sourceStage.sourceSupplier.get();
            sourceStage.sourceSupplier = null;
        }
        else {
            throw new IllegalStateException("source already consumed");
        }
        if (isParallel()) {
            // @@@ Merge parallelPrepare with the loop below and use the
            //     spliterator characteristics to determine if SIZED
            //     should be injected
            parallelPrepare(terminalFlags);
            // Adapt the source spliterator, evaluating each stateful op
            // in the pipeline up to and including this pipeline stage
            for (AbstractPipeline u = sourceStage, p = sourceStage.nextStage, e = this;
                 u != e;
                 u = p, p = p.nextStage) {
                if (p.opIsStateful()) {
                    spliterator = p.opEvaluateParallelLazy(u, spliterator);
                }
            }
        }
        else if (terminalFlags != 0) {
            combinedFlags = StreamOpFlag.combineOpFlags(terminalFlags, combinedFlags);
        }
        return spliterator;
    }

    // PipelineHelper

    @Override
    final <P_IN> long exactOutputSizeIfKnown(Spliterator<P_IN> spliterator) {
        // Size is only meaningful if the SIZED flag survived all operations.
        return StreamOpFlag.SIZED.isKnown(getStreamAndOpFlags()) ? spliterator.getExactSizeIfKnown() : -1;
    }

    @Override
    final <P_IN, S extends Sink<E_OUT>> S wrapAndCopyInto(S sink, Spliterator<P_IN> spliterator) {
        // NOTE: type parameter S here shadows the class type parameter S.
        copyInto(wrapSink(Objects.requireNonNull(sink)), spliterator);
        return sink;
    }

    @Override
    final <P_IN> void copyInto(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator) {
        Objects.requireNonNull(wrappedSink);
        if (!StreamOpFlag.SHORT_CIRCUIT.isKnown(getStreamAndOpFlags())) {
            // No short-circuiting op present: push every element.
            wrappedSink.begin(spliterator.getExactSizeIfKnown());
            spliterator.forEachRemaining(wrappedSink);
            wrappedSink.end();
        }
        else {
            // A short-circuiting op may cancel the traversal early.
            copyIntoWithCancel(wrappedSink, spliterator);
        }
    }

    @Override
    final <P_IN> void copyIntoWithCancel(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator) {
        // Walk upstream to the nearest stage with depth 0; that stage's
        // shape-specific forEachWithCancel performs the traversal.
        AbstractPipeline p = AbstractPipeline.this;
        while (p.depth > 0) {
            p = p.previousStage;
        }
        wrappedSink.begin(spliterator.getExactSizeIfKnown());
        p.forEachWithCancel(spliterator, wrappedSink);
        wrappedSink.end();
    }

    @Override
    final int getStreamAndOpFlags() {
        return combinedFlags;
    }

    final boolean isOrdered() {
        return StreamOpFlag.ORDERED.isKnown(combinedFlags);
    }

    @Override
    final <P_IN> Sink<P_IN> wrapSink(Sink<E_OUT> sink) {
        Objects.requireNonNull(sink);
        // Wrap from the last stage back toward the source, so the returned
        // sink accepts input elements of the upstream type.
        for (AbstractPipeline p=AbstractPipeline.this; p.depth > 0; p=p.previousStage) {
            sink = p.opWrapSink(p.previousStage.combinedFlags, sink);
        }
        return (Sink<P_IN>) sink;
    }

    @Override
    @SuppressWarnings("unchecked")
    final <P_IN> Node<E_OUT> evaluate(Spliterator<P_IN> spliterator,
                                      boolean flatten,
                                      IntFunction<E_OUT[]> generator) {
        if (isParallel()) {
            // @@@ Optimize if op of this pipeline stage is a stateful op
            return evaluateToNode(this, spliterator, flatten, generator);
        }
        else {
            Node.Builder<E_OUT> nb = makeNodeBuilder(
                    exactOutputSizeIfKnown(spliterator), generator);
            return wrapAndCopyInto(nb, spliterator).build();
        }
    }

    // Shape-specific abstract methods, implemented by XxxPipeline classes

    /**
     * Get the output shape of the pipeline. If the pipeline is the head,
     * then its output shape corresponds to the shape of the source.
     * Otherwise, its output shape corresponds to the output shape of the
     * associated operation.
     *
     * @return the output shape
     */
    abstract StreamShape getOutputShape();

    /**
     * Collect elements output from a pipeline into a Node that holds elements
     * of this shape.
     *
     * @param helper the pipeline helper describing the pipeline stages
     * @param spliterator the source spliterator
     * @param flattenTree true if the returned node should be flattened
     * @param generator the array generator
     * @return a Node holding the output of the pipeline
     */
    abstract <P_IN> Node<E_OUT> evaluateToNode(PipelineHelper<E_OUT> helper,
                                               Spliterator<P_IN> spliterator,
                                               boolean flattenTree,
                                               IntFunction<E_OUT[]> generator);

    /**
     * Create a spliterator that wraps a source spliterator, compatible with
     * this stream shape, and operations associated with a {@link
     * PipelineHelper}.
     *
     * @param ph the pipeline helper describing the pipeline stages
     * @param supplier the supplier of a spliterator
     * @return a wrapping spliterator compatible with this shape
     */
    abstract <P_IN> Spliterator<E_OUT> wrap(PipelineHelper<E_OUT> ph,
                                            Supplier<Spliterator<P_IN>> supplier,
                                            boolean isParallel);

    /**
     * Create a lazy spliterator that wraps and obtains the supplied
     * spliterator when a method is invoked on the lazy spliterator.
     *
     * @param supplier the supplier of a spliterator
     * @return a lazy (late-binding) spliterator over the supplied spliterator
     */
    abstract Spliterator<E_OUT> lazySpliterator(Supplier<? extends Spliterator<E_OUT>> supplier);

    /**
     * Traverse the elements of a spliterator compatible with this stream shape,
     * pushing those elements into a sink. If the sink requests cancellation,
     * no further elements will be pulled or pushed.
     *
     * @param spliterator the spliterator to pull elements from
     * @param sink the sink to push elements to
     */
    abstract void forEachWithCancel(Spliterator<E_OUT> spliterator, Sink<E_OUT> sink);

    /**
     * Make a node builder compatible with this stream shape.
     *
     * @param exactSizeIfKnown if {@literal >=0}, then a node builder will be created that
     *        has a fixed capacity of at most sizeIfKnown elements. If {@literal < 0},
     *        then the node builder has an unfixed capacity. A fixed capacity node
     *        builder will throw exceptions if an element is added after builder has
     *        reached capacity, or is built before the builder has reached capacity.
     * @param generator the array generator to be used to create instances of a
     *        T[] array. For implementations supporting primitive nodes, this parameter
     *        may be ignored.
     * @return a node builder
     */
    abstract Node.Builder<E_OUT> makeNodeBuilder(long exactSizeIfKnown,
                                                 IntFunction<E_OUT[]> generator);

    // Op-specific abstract methods, implemented by the operation class

    /**
     * Returns whether this operation is stateful or not. If it is stateful,
     * then the method
     * {@link #opEvaluateParallel(PipelineHelper, java.util.Spliterator, java.util.function.IntFunction)}
     * must be overridden.
     *
     * @return {@code true} if this operation is stateful
     */
    abstract boolean opIsStateful();

    /**
     * Accepts a {@code Sink} which will receive the results of this operation,
     * and return a {@code Sink} which accepts elements of the input type of
     * this operation and which performs the operation, passing the results to
     * the provided {@code Sink}.
     *
     * @apiNote
     * The implementation may use the {@code flags} parameter to optimize the
     * sink wrapping. For example, if the input is already {@code DISTINCT},
     * the implementation for the {@code Stream#distinct()} method could just
     * return the sink it was passed.
     *
     * @param flags The combined stream and operation flags up to, but not
     *        including, this operation
     * @param sink sink to which elements should be sent after processing
     * @return a sink which accepts elements, performs the operation upon
     *         each element, and passes the results (if any) to the provided
     *         {@code Sink}.
     */
    abstract Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink);

    /**
     * Performs a parallel evaluation of the operation using the specified
     * {@code PipelineHelper} which describes the upstream intermediate
     * operations. Only called on stateful operations. If {@link
     * #opIsStateful()} returns true then implementations must override the
     * default implementation.
     *
     * @implSpec The default implementation always throws
     * {@code UnsupportedOperationException}.
     *
     * @param helper the pipeline helper describing the pipeline stages
     * @param spliterator the source {@code Spliterator}
     * @param generator the array generator
     * @return a {@code Node} describing the result of the evaluation
     */
    <P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
                                          Spliterator<P_IN> spliterator,
                                          IntFunction<E_OUT[]> generator) {
        throw new UnsupportedOperationException("Parallel evaluation is not supported");
    }

    /**
     * Returns a {@code Spliterator} describing a parallel evaluation of the
     * operation, using the specified {@code PipelineHelper} which describes the
     * upstream intermediate operations. Only called on stateful operations.
     * It is not necessary (though acceptable) to do a full computation of the
     * result here; it is preferable, if possible, to describe the result via a
     * lazily evaluated spliterator.
     *
     * @implSpec The default implementation behaves as if:
     * <pre>{@code
     *     return evaluateParallel(helper, i -> (E_OUT[]) new
     * Object[i]).spliterator();
     * }</pre>
     * and is suitable for implementations that cannot do better than a full
     * synchronous evaluation.
     *
     * @param helper the pipeline helper
     * @param spliterator the source {@code Spliterator}
     * @return a {@code Spliterator} describing the result of the evaluation
     */
    <P_IN> Spliterator<E_OUT> opEvaluateParallelLazy(PipelineHelper<E_OUT> helper,
                                                     Spliterator<P_IN> spliterator) {
        return opEvaluateParallel(helper, spliterator, i -> (E_OUT[]) new Object[i]).spliterator();
    }
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
/**
* Base class for a data structure for gathering elements into a buffer and then
* iterating them. Maintains an array of increasingly sized arrays, so there is
* no copying cost associated with growing the data structure.
* @since 1.8
*/
abstract class AbstractSpinedBuffer {

    /**
     * Minimum power-of-two for the first chunk.
     */
    public static final int MIN_CHUNK_POWER = 4;

    /**
     * Minimum size for the first chunk.
     */
    public static final int MIN_CHUNK_SIZE = 1 << MIN_CHUNK_POWER;

    /**
     * Max power-of-two for chunks.
     */
    public static final int MAX_CHUNK_POWER = 30;

    /**
     * Minimum array size for array-of-chunks.
     */
    public static final int MIN_SPINE_SIZE = 8;

    /**
     * log2 of the size of the first chunk.
     */
    protected final int initialChunkPower;

    /**
     * Index of the *next* element to write; may point into, or just outside of,
     * the current chunk.
     */
    protected int elementIndex;

    /**
     * Index of the *current* chunk in the spine array, if the spine array is
     * non-null.
     */
    protected int spineIndex;

    /**
     * Count of elements in all prior chunks.
     */
    protected long[] priorElementCount;

    /**
     * Construct with an initial capacity of 16.
     */
    protected AbstractSpinedBuffer() {
        this.initialChunkPower = MIN_CHUNK_POWER;
    }

    /**
     * Construct with a specified initial capacity.
     *
     * @param initialCapacity The minimum expected number of elements
     * @throws IllegalArgumentException if the capacity is negative
     */
    protected AbstractSpinedBuffer(int initialCapacity) {
        if (initialCapacity < 0) {
            throw new IllegalArgumentException("Illegal Capacity: " + initialCapacity);
        }
        // ceil(log2(initialCapacity)), floored at the minimum chunk power.
        int requiredPower = Integer.SIZE - Integer.numberOfLeadingZeros(initialCapacity - 1);
        this.initialChunkPower = Math.max(MIN_CHUNK_POWER, requiredPower);
    }

    /**
     * Is the buffer currently empty?
     */
    public boolean isEmpty() {
        return spineIndex == 0 && elementIndex == 0;
    }

    /**
     * How many elements are currently in the buffer?
     */
    public long count() {
        if (spineIndex == 0) {
            // Still in the first chunk; no prior chunks to account for.
            return elementIndex;
        }
        return priorElementCount[spineIndex] + elementIndex;
    }

    /**
     * How big should the nth chunk be?
     */
    protected int chunkSize(int n) {
        if (n == 0 || n == 1) {
            // The first two chunks share the initial size.
            return 1 << initialChunkPower;
        }
        // Subsequent chunks double in size, capped at 2^MAX_CHUNK_POWER.
        return 1 << Math.min(initialChunkPower + n - 1, MAX_CHUNK_POWER);
    }

    /**
     * Remove all data from the buffer
     */
    public abstract void clear();
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.IntFunction;
/**
 * Factory methods for transforming streams into duplicate-free streams, using
 * {@link Object#equals(Object)} to determine equality.
 *
 * @since 1.8
 */
final class DistinctOps {

    private DistinctOps() { }

    /**
     * Appends a "distinct" operation to the provided stream, and returns the
     * new stream.
     *
     * @param <T> the type of both input and output elements
     * @param upstream a reference stream with element type T
     * @return the new stream
     */
    static <T> ReferencePipeline<T, T> makeRef(AbstractPipeline<?, T, ?> upstream) {
        return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE,
                                                      StreamOpFlag.IS_DISTINCT | StreamOpFlag.NOT_SIZED) {

            @Override
            <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
                                              Spliterator<P_IN> spliterator,
                                              IntFunction<T[]> generator) {
                if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
                    // No-op: the upstream is already known to be distinct
                    return helper.evaluate(spliterator, false, generator);
                }
                else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
                    // If the stream is SORTED then it should also be ORDERED so the following will also
                    // preserve the sort order
                    TerminalOp<T, LinkedHashSet<T>> reduceOp
                            = ReduceOps.<T, LinkedHashSet<T>>makeRef(LinkedHashSet::new, LinkedHashSet::add,
                                                                     LinkedHashSet::addAll);
                    return Nodes.node(reduceOp.evaluateParallel(helper, spliterator));
                }
                else {
                    // Holder of null state since ConcurrentHashMap does not support null values
                    AtomicBoolean seenNull = new AtomicBoolean(false);
                    ConcurrentHashMap<T, Boolean> map = new ConcurrentHashMap<>();
                    TerminalOp<T, Void> forEachOp = ForEachOps.makeRef(t -> {
                        if (t == null)
                            seenNull.set(true);
                        else
                            map.putIfAbsent(t, Boolean.TRUE);
                    }, false);
                    forEachOp.evaluateParallel(helper, spliterator);

                    // If null has been seen then copy the key set into a HashSet that supports null values
                    // and add null
                    Set<T> keys = map.keySet();
                    if (seenNull.get()) {
                        // TODO Implement a more efficient set-union view, rather than copying
                        keys = new HashSet<>(keys);
                        keys.add(null);
                    }
                    return Nodes.node(keys);
                }
            }

            @Override
            Sink<T> opWrapSink(int flags, Sink<T> sink) {
                Objects.requireNonNull(sink);

                if (StreamOpFlag.DISTINCT.isKnown(flags)) {
                    // Upstream already distinct; nothing to filter out
                    return sink;
                } else if (StreamOpFlag.SORTED.isKnown(flags)) {
                    // Sorted input: equal elements are adjacent, so comparing
                    // each element against the last one emitted suffices.
                    return new Sink.ChainedReference<T>(sink) {
                        boolean seenNull;
                        T lastSeen;

                        @Override
                        public void begin(long size) {
                            seenNull = false;
                            lastSeen = null;
                            // Size of the distinct result is not known in advance
                            downstream.begin(-1);
                        }

                        @Override
                        public void end() {
                            seenNull = false;
                            lastSeen = null;
                            downstream.end();
                        }

                        @Override
                        public void accept(T t) {
                            if (t == null) {
                                if (!seenNull) {
                                    seenNull = true;
                                    downstream.accept(lastSeen = null);
                                }
                            } else if (lastSeen == null || !t.equals(lastSeen)) {
                                downstream.accept(lastSeen = t);
                            }
                        }
                    };
                } else {
                    // General case: remember every element seen so far.
                    // HashSet accepts null elements, so no special null handling
                    // is needed on this path.
                    return new Sink.ChainedReference<T>(sink) {
                        Set<T> seen;

                        @Override
                        public void begin(long size) {
                            seen = new HashSet<>();
                            downstream.begin(-1);
                        }

                        @Override
                        public void end() {
                            seen = null;
                            downstream.end();
                        }

                        @Override
                        public void accept(T t) {
                            // Set.add returns true only on first occurrence;
                            // a single hash probe instead of contains()+add()
                            if (seen.add(t)) {
                                downstream.accept(t);
                            }
                        }
                    };
                }
            }
        };
    }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.DoubleSummaryStatistics;
import java.util.Objects;
import java.util.OptionalDouble;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.DoubleBinaryOperator;
import java.util.function.DoubleConsumer;
import java.util.function.DoubleFunction;
import java.util.function.DoublePredicate;
import java.util.function.DoubleToIntFunction;
import java.util.function.DoubleToLongFunction;
import java.util.function.DoubleUnaryOperator;
import java.util.function.IntFunction;
import java.util.function.ObjDoubleConsumer;
import java.util.function.Supplier;
/**
 * Abstract base class for an intermediate pipeline stage or pipeline source
 * stage whose elements are of type {@code double}.
 *
 * @param <E_IN> type of elements in the upstream source
 *
 * @since 1.8
 */
abstract class DoublePipeline<E_IN>
        extends AbstractPipeline<E_IN, Double, DoubleStream>
        implements DoubleStream {

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Supplier<Spliterator>} describing the stream source
     * @param sourceFlags the source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    DoublePipeline(Supplier<? extends Spliterator<Double>> source,
                   int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Spliterator} describing the stream source
     * @param sourceFlags the source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    DoublePipeline(Spliterator<Double> source,
                   int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }

    /**
     * Constructor for appending an intermediate operation onto an existing
     * pipeline.
     *
     * @param upstream the upstream element source.
     * @param opFlags the operation flags
     */
    DoublePipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
        super(upstream, opFlags);
    }

    /**
     * Adapt a {@code Sink<Double>} to a {@code DoubleConsumer}, ideally simply
     * by casting.
     */
    private static DoubleConsumer adapt(Sink<Double> sink) {
        if (sink instanceof DoubleConsumer) {
            return (DoubleConsumer) sink;
        } else {
            // Boxed fallback; Tripwire reports the accidental boxing when enabled
            if (Tripwire.ENABLED)
                Tripwire.trip(AbstractPipeline.class,
                              "using DoubleStream.adapt(Sink<Double> s)");
            return sink::accept;
        }
    }

    /**
     * Adapt a {@code Spliterator<Double>} to a {@code Spliterator.OfDouble}.
     *
     * @implNote
     * The implementation attempts to cast to a Spliterator.OfDouble, and throws
     * an exception if this cast is not possible.
     */
    private static Spliterator.OfDouble adapt(Spliterator<Double> s) {
        if (s instanceof Spliterator.OfDouble) {
            return (Spliterator.OfDouble) s;
        } else {
            if (Tripwire.ENABLED)
                Tripwire.trip(AbstractPipeline.class,
                              "using DoubleStream.adapt(Spliterator<Double> s)");
            throw new UnsupportedOperationException("DoubleStream.adapt(Spliterator<Double> s)");
        }
    }


    // Shape-specific methods

    @Override
    final StreamShape getOutputShape() {
        return StreamShape.DOUBLE_VALUE;
    }

    @Override
    final <P_IN> Node<Double> evaluateToNode(PipelineHelper<Double> helper,
                                             Spliterator<P_IN> spliterator,
                                             boolean flattenTree,
                                             IntFunction<Double[]> generator) {
        return Nodes.collectDouble(helper, spliterator, flattenTree);
    }

    @Override
    final <P_IN> Spliterator<Double> wrap(PipelineHelper<Double> ph,
                                          Supplier<Spliterator<P_IN>> supplier,
                                          boolean isParallel) {
        return new StreamSpliterators.DoubleWrappingSpliterator<>(ph, supplier, isParallel);
    }

    @Override
    final Spliterator.OfDouble lazySpliterator(Supplier<? extends Spliterator<Double>> supplier) {
        return new StreamSpliterators.DelegatingSpliterator.OfDouble((Supplier<Spliterator.OfDouble>) supplier);
    }

    @Override
    final void forEachWithCancel(Spliterator<Double> spliterator, Sink<Double> sink) {
        Spliterator.OfDouble spl = adapt(spliterator);
        DoubleConsumer adaptedSink = adapt(sink);
        // Pull elements until the sink cancels or the source is exhausted
        do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
    }

    @Override
    final Node.Builder<Double> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Double[]> generator) {
        return Nodes.doubleBuilder(exactSizeIfKnown);
    }


    // DoubleStream

    @Override
    public final PrimitiveIterator.OfDouble iterator() {
        return Spliterators.iteratorFromSpliterator(spliterator());
    }

    @Override
    public final Spliterator.OfDouble spliterator() {
        return adapt(super.spliterator());
    }

    // Stateless intermediate ops from DoubleStream

    @Override
    public final Stream<Double> boxed() {
        return mapToObj(Double::valueOf);
    }

    @Override
    public final DoubleStream map(DoubleUnaryOperator mapper) {
        Objects.requireNonNull(mapper);
        return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
                                       StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedDouble(sink) {
                    @Override
                    public void accept(double t) {
                        downstream.accept(mapper.applyAsDouble(t));
                    }
                };
            }
        };
    }

    @Override
    public final <U> Stream<U> mapToObj(DoubleFunction<? extends U> mapper) {
        Objects.requireNonNull(mapper);
        return new ReferencePipeline.StatelessOp<Double, U>(this, StreamShape.DOUBLE_VALUE,
                                                            StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<U> sink) {
                return new Sink.ChainedDouble(sink) {
                    @Override
                    public void accept(double t) {
                        downstream.accept(mapper.apply(t));
                    }
                };
            }
        };
    }

    @Override
    public final IntStream mapToInt(DoubleToIntFunction mapper) {
        Objects.requireNonNull(mapper);
        return new IntPipeline.StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
                                                   StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedDouble(sink) {
                    @Override
                    public void accept(double t) {
                        downstream.accept(mapper.applyAsInt(t));
                    }
                };
            }
        };
    }

    @Override
    public final LongStream mapToLong(DoubleToLongFunction mapper) {
        Objects.requireNonNull(mapper);
        return new LongPipeline.StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
                                                    StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedDouble(sink) {
                    @Override
                    public void accept(double t) {
                        downstream.accept(mapper.applyAsLong(t));
                    }
                };
            }
        };
    }

    @Override
    public final DoubleStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
        // Fail fast on a null mapper, consistent with the other mapping ops
        Objects.requireNonNull(mapper);
        return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
                                       StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedDouble(sink) {
                    public void accept(double t) {
                        // We can do better that this too; optimize for depth=0 case and just grab spliterator and forEach it
                        DoubleStream result = mapper.apply(t);
                        if (result != null)
                            result.sequential().forEach(i -> downstream.accept(i));
                    }
                };
            }
        };
    }

    @Override
    public DoubleStream unordered() {
        if (!isOrdered())
            return this;
        return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE, StreamOpFlag.NOT_ORDERED) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
                return sink;
            }
        };
    }

    @Override
    public final DoubleStream filter(DoublePredicate predicate) {
        Objects.requireNonNull(predicate);
        return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
                                       StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedDouble(sink) {
                    @Override
                    public void accept(double t) {
                        if (predicate.test(t))
                            downstream.accept(t);
                    }
                };
            }
        };
    }

    @Override
    public final DoubleStream peek(DoubleConsumer consumer) {
        Objects.requireNonNull(consumer);
        return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
                                       0) {
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedDouble(sink) {
                    @Override
                    public void accept(double t) {
                        consumer.accept(t);
                        downstream.accept(t);
                    }
                };
            }
        };
    }

    // Stateful intermediate ops from DoubleStream

    @Override
    public final DoubleStream limit(long maxSize) {
        if (maxSize < 0)
            throw new IllegalArgumentException(Long.toString(maxSize));
        return SliceOps.makeDouble(this, (long) 0, maxSize);
    }

    @Override
    public final DoubleStream substream(long startingOffset) {
        if (startingOffset < 0)
            throw new IllegalArgumentException(Long.toString(startingOffset));
        if (startingOffset == 0)
            return this;
        else {
            long limit = -1;
            return SliceOps.makeDouble(this, startingOffset, limit);
        }
    }

    @Override
    public final DoubleStream substream(long startingOffset, long endingOffset) {
        if (startingOffset < 0 || endingOffset < startingOffset)
            throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
        return SliceOps.makeDouble(this, startingOffset, endingOffset - startingOffset);
    }

    @Override
    public final DoubleStream sorted() {
        return SortedOps.makeDouble(this);
    }

    @Override
    public final DoubleStream distinct() {
        // While functional and quick to implement, this approach is not very efficient.
        // An efficient version requires a double-specific map/set implementation.
        return boxed().distinct().mapToDouble(i -> (double) i);
    }

    // Terminal ops from DoubleStream

    @Override
    public void forEach(DoubleConsumer consumer) {
        evaluate(ForEachOps.makeDouble(consumer, false));
    }

    @Override
    public void forEachOrdered(DoubleConsumer consumer) {
        evaluate(ForEachOps.makeDouble(consumer, true));
    }

    @Override
    public final double sum() {
        // TODO: better algorithm to compensate for errors
        return reduce(0.0, Double::sum);
    }

    @Override
    public final OptionalDouble min() {
        return reduce(Math::min);
    }

    @Override
    public final OptionalDouble max() {
        return reduce(Math::max);
    }

    @Override
    public final OptionalDouble average() {
        // avg[0] holds the count, avg[1] the running sum
        double[] avg = collect(() -> new double[2],
                               (ll, i) -> {
                                   ll[0]++;
                                   ll[1] += i;
                               },
                               (ll, rr) -> {
                                   ll[0] += rr[0];
                                   ll[1] += rr[1];
                               });
        return avg[0] > 0
               ? OptionalDouble.of(avg[1] / avg[0])
               : OptionalDouble.empty();
    }

    @Override
    public final long count() {
        // Accumulate in a long; an int accumulator (mapToInt(e -> 1).sum())
        // would overflow for streams with more than Integer.MAX_VALUE elements.
        return mapToLong(e -> 1L).sum();
    }

    @Override
    public final DoubleSummaryStatistics summaryStatistics() {
        return collect(DoubleSummaryStatistics::new, DoubleSummaryStatistics::accept,
                       DoubleSummaryStatistics::combine);
    }

    @Override
    public final double reduce(double identity, DoubleBinaryOperator op) {
        return evaluate(ReduceOps.makeDouble(identity, op));
    }

    @Override
    public final OptionalDouble reduce(DoubleBinaryOperator op) {
        return evaluate(ReduceOps.makeDouble(op));
    }

    @Override
    public final <R> R collect(Supplier<R> resultFactory,
                               ObjDoubleConsumer<R> accumulator,
                               BiConsumer<R, R> combiner) {
        // Adapt the mutating BiConsumer combiner to the BinaryOperator shape
        // expected by the reduce machinery
        BinaryOperator<R> operator = (left, right) -> {
            combiner.accept(left, right);
            return left;
        };
        return evaluate(ReduceOps.makeDouble(resultFactory, accumulator, operator));
    }

    @Override
    public final boolean anyMatch(DoublePredicate predicate) {
        return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.ANY));
    }

    @Override
    public final boolean allMatch(DoublePredicate predicate) {
        return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.ALL));
    }

    @Override
    public final boolean noneMatch(DoublePredicate predicate) {
        return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.NONE));
    }

    @Override
    public final OptionalDouble findFirst() {
        return evaluate(FindOps.makeDouble(true));
    }

    @Override
    public final OptionalDouble findAny() {
        return evaluate(FindOps.makeDouble(false));
    }

    @Override
    public final double[] toArray() {
        return Nodes.flattenDouble((Node.OfDouble) evaluateToArrayNode(Double[]::new))
                        .asDoubleArray();
    }

    //

    /**
     * Source stage of a DoubleStream
     *
     * @param <E_IN> type of elements in the upstream source
     */
    static class Head<E_IN> extends DoublePipeline<E_IN> {
        /**
         * Constructor for the source stage of a DoubleStream.
         *
         * @param source {@code Supplier<Spliterator>} describing the stream
         *               source
         * @param sourceFlags the source flags for the stream source, described
         *                    in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Supplier<? extends Spliterator<Double>> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }

        /**
         * Constructor for the source stage of a DoubleStream.
         *
         * @param source {@code Spliterator} describing the stream source
         * @param sourceFlags the source flags for the stream source, described
         *                    in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Spliterator<Double> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }

        @Override
        final boolean opIsStateful() {
            throw new UnsupportedOperationException();
        }

        @Override
        final Sink<E_IN> opWrapSink(int flags, Sink<Double> sink) {
            throw new UnsupportedOperationException();
        }

        // Optimized sequential terminal operations for the head of the pipeline

        @Override
        public void forEach(DoubleConsumer consumer) {
            if (!isParallel()) {
                adapt(sourceStageSpliterator()).forEachRemaining(consumer);
            }
            else {
                super.forEach(consumer);
            }
        }

        @Override
        public void forEachOrdered(DoubleConsumer consumer) {
            if (!isParallel()) {
                adapt(sourceStageSpliterator()).forEachRemaining(consumer);
            }
            else {
                super.forEachOrdered(consumer);
            }
        }
    }

    /**
     * Base class for a stateless intermediate stage of a DoubleStream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    abstract static class StatelessOp<E_IN> extends DoublePipeline<E_IN> {
        /**
         * Construct a new DoubleStream by appending a stateless intermediate
         * operation to an existing stream.
         *
         * @param upstream the upstream pipeline stage
         * @param inputShape the stream shape for the upstream pipeline stage
         * @param opFlags operation flags for the new stage
         */
        StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
                    StreamShape inputShape,
                    int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }

        @Override
        final boolean opIsStateful() {
            return false;
        }
    }

    /**
     * Base class for a stateful intermediate stage of a DoubleStream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    abstract static class StatefulOp<E_IN> extends DoublePipeline<E_IN> {
        /**
         * Construct a new DoubleStream by appending a stateful intermediate
         * operation to an existing stream.
         *
         * @param upstream the upstream pipeline stage
         * @param inputShape the stream shape for the upstream pipeline stage
         * @param opFlags operation flags for the new stage
         */
        StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
                   StreamShape inputShape,
                   int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }

        @Override
        final boolean opIsStateful() {
            return true;
        }

        @Override
        abstract <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
                                                        Spliterator<P_IN> spliterator,
                                                        IntFunction<Double[]> generator);
    }
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.IntSummaryStatistics;
import java.util.Objects;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.IntBinaryOperator;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.IntPredicate;
import java.util.function.IntToDoubleFunction;
import java.util.function.IntToLongFunction;
import java.util.function.IntUnaryOperator;
import java.util.function.ObjIntConsumer;
import java.util.function.Supplier;
/**
* Abstract base class for an intermediate pipeline stage or pipeline source
* stage implementing whose elements are of type {@code int}.
*
* @param <E_IN> type of elements in the upstream source
* @since 1.8
*/
abstract class IntPipeline<E_IN>
extends AbstractPipeline<E_IN, Integer, IntStream>
implements IntStream {
    /**
     * Constructor for the head of a stream pipeline.
     *
     * <p>All pipeline state is managed by {@link AbstractPipeline}.
     *
     * @param source {@code Supplier<Spliterator>} describing the stream source
     * @param sourceFlags The source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    IntPipeline(Supplier<? extends Spliterator<Integer>> source,
                int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }
    /**
     * Constructor for the head of a stream pipeline.
     *
     * <p>All pipeline state is managed by {@link AbstractPipeline}.
     *
     * @param source {@code Spliterator} describing the stream source
     * @param sourceFlags The source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    IntPipeline(Spliterator<Integer> source,
                int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }
    /**
     * Constructor for appending an intermediate operation onto an existing
     * pipeline.
     *
     * <p>All pipeline state is managed by {@link AbstractPipeline}.
     *
     * @param upstream the upstream element source
     * @param opFlags the operation flags for the new operation
     */
    IntPipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
        super(upstream, opFlags);
    }
/**
* Adapt a {@code Sink<Integer> to an {@code IntConsumer}, ideally simply
* by casting.
*/
private static IntConsumer adapt(Sink<Integer> sink) {
if (sink instanceof IntConsumer) {
return (IntConsumer) sink;
}
else {
if (Tripwire.ENABLED)
Tripwire.trip(AbstractPipeline.class,
"using IntStream.adapt(Sink<Integer> s)");
return sink::accept;
}
}
/**
* Adapt a {@code Spliterator<Integer>} to a {@code Spliterator.OfInt}.
*
* @implNote
* The implementation attempts to cast to a Spliterator.OfInt, and throws an
* exception if this cast is not possible.
*/
private static Spliterator.OfInt adapt(Spliterator<Integer> s) {
if (s instanceof Spliterator.OfInt) {
return (Spliterator.OfInt) s;
}
else {
if (Tripwire.ENABLED)
Tripwire.trip(AbstractPipeline.class,
"using IntStream.adapt(Spliterator<Integer> s)");
throw new UnsupportedOperationException("IntStream.adapt(Spliterator<Integer> s)");
}
}
// Shape-specific methods
    // This pipeline stage produces int-valued elements
    @Override
    final StreamShape getOutputShape() {
        return StreamShape.INT_VALUE;
    }
    // Collects the pipeline output into an int-specialized Node
    @Override
    final <P_IN> Node<Integer> evaluateToNode(PipelineHelper<Integer> helper,
                                              Spliterator<P_IN> spliterator,
                                              boolean flattenTree,
                                              IntFunction<Integer[]> generator) {
        return Nodes.collectInt(helper, spliterator, flattenTree);
    }
    // Wraps a source spliterator with the pipeline stages as an int spliterator
    @Override
    final <P_IN> Spliterator<Integer> wrap(PipelineHelper<Integer> ph,
                                           Supplier<Spliterator<P_IN>> supplier,
                                           boolean isParallel) {
        return new StreamSpliterators.IntWrappingSpliterator<>(ph, supplier, isParallel);
    }
    // Defers obtaining the underlying spliterator until first use.
    // The unchecked cast assumes the supplier yields a Spliterator.OfInt.
    @Override
    final Spliterator.OfInt lazySpliterator(Supplier<? extends Spliterator<Integer>> supplier) {
        return new StreamSpliterators.DelegatingSpliterator.OfInt((Supplier<Spliterator.OfInt>) supplier);
    }
@Override
final void forEachWithCancel(Spliterator<Integer> spliterator, Sink<Integer> sink) {
Spliterator.OfInt spl = adapt(spliterator);
IntConsumer adaptedSink = adapt(sink);
do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
}
    // Builds into an int-specialized node; the boxed generator is not needed
    @Override
    final Node.Builder<Integer> makeNodeBuilder(long exactSizeIfKnown,
                                                IntFunction<Integer[]> generator) {
        return Nodes.intBuilder(exactSizeIfKnown);
    }
// IntStream
    // Primitive iterator view backed by this pipeline's spliterator
    @Override
    public final PrimitiveIterator.OfInt iterator() {
        return Spliterators.iteratorFromSpliterator(spliterator());
    }
    // Narrows the boxed spliterator from AbstractPipeline to OfInt
    @Override
    public final Spliterator.OfInt spliterator() {
        return adapt(super.spliterator());
    }
// Stateless intermediate ops from IntStream
    // Widens each int element to long.
    // NOTE(review): clears SORTED/DISTINCT conservatively — int-to-long
    // widening looks order- and distinctness-preserving; confirm intent.
    @Override
    public final LongStream longs() {
        return new LongPipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        downstream.accept((long) t);
                    }
                };
            }
        };
    }
    // Widens each int element to double
    @Override
    public final DoubleStream doubles() {
        return new DoublePipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                                       StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        downstream.accept((double) t);
                    }
                };
            }
        };
    }
    // Boxes each int element to Integer via mapToObj
    @Override
    public final Stream<Integer> boxed() {
        return mapToObj(Integer::valueOf);
    }
    // Stateless element-wise transform; mapping may break sortedness and
    // distinctness, so those flags are cleared
    @Override
    public final IntStream map(IntUnaryOperator mapper) {
        Objects.requireNonNull(mapper);
        return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                        StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        downstream.accept(mapper.applyAsInt(t));
                    }
                };
            }
        };
    }
    // Maps each int to an object, switching to a reference-shaped stage
    @Override
    public final <U> Stream<U> mapToObj(IntFunction<? extends U> mapper) {
        Objects.requireNonNull(mapper);
        return new ReferencePipeline.StatelessOp<Integer, U>(this, StreamShape.INT_VALUE,
                                                             StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<U> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        downstream.accept(mapper.apply(t));
                    }
                };
            }
        };
    }
    // Maps each int to a long, switching to a long-shaped stage
    @Override
    public final LongStream mapToLong(IntToLongFunction mapper) {
        Objects.requireNonNull(mapper);
        return new LongPipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        downstream.accept(mapper.applyAsLong(t));
                    }
                };
            }
        };
    }
    // Maps each int to a double, switching to a double-shaped stage
    @Override
    public final DoubleStream mapToDouble(IntToDoubleFunction mapper) {
        Objects.requireNonNull(mapper);
        return new DoublePipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                                       StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        downstream.accept(mapper.applyAsDouble(t));
                    }
                };
            }
        };
    }
@Override
public final IntStream flatMap(IntFunction<? extends IntStream> mapper) {
return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
@Override
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
return new Sink.ChainedInt(sink) {
public void accept(int t) {
// We can do better that this too; optimize for depth=0 case and just grab spliterator and forEach it
IntStream result = mapper.apply(t);
if (result != null)
result.sequential().forEach(i -> downstream.accept(i));
}
};
}
};
}
    // Drops the ORDERED flag; a no-op if the stream is already unordered
    @Override
    public IntStream unordered() {
        if (!isOrdered())
            return this;
        return new StatelessOp<Integer>(this, StreamShape.INT_VALUE, StreamOpFlag.NOT_ORDERED) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
                return sink;
            }
        };
    }
    // Keeps only elements matching the predicate; result size is unknown
    // up front, so SIZED is cleared
    @Override
    public final IntStream filter(IntPredicate predicate) {
        Objects.requireNonNull(predicate);
        return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                        StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        if (predicate.test(t))
                            downstream.accept(t);
                    }
                };
            }
        };
    }
    // Invokes the consumer on each element as it flows past; elements and
    // stream flags are unchanged (op flags are 0)
    @Override
    public final IntStream peek(IntConsumer consumer) {
        Objects.requireNonNull(consumer);
        return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
                                        0) {
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedInt(sink) {
                    @Override
                    public void accept(int t) {
                        consumer.accept(t);
                        downstream.accept(t);
                    }
                };
            }
        };
    }
// Stateful intermediate ops from IntStream
    // Shared implementation for limit/substream; limit == -1 means "no limit"
    // (see the substream(long) caller)
    private IntStream slice(long skip, long limit) {
        return SliceOps.makeInt(this, skip, limit);
    }
@Override
public final IntStream limit(long maxSize) {
if (maxSize < 0)
throw new IllegalArgumentException(Long.toString(maxSize));
return slice(0, maxSize);
}
@Override
public final IntStream substream(long startingOffset) {
if (startingOffset < 0)
throw new IllegalArgumentException(Long.toString(startingOffset));
if (startingOffset == 0)
return this;
else
return slice(startingOffset, -1);
}
@Override
public final IntStream substream(long startingOffset, long endingOffset) {
if (startingOffset < 0 || endingOffset < startingOffset)
throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
return slice(startingOffset, endingOffset - startingOffset);
}
    // Stateful sort op; implementation lives in SortedOps
    @Override
    public final IntStream sorted() {
        return SortedOps.makeInt(this);
    }
    @Override
    public final IntStream distinct() {
        // While functional and quick to implement, this approach is not very efficient.
        // An efficient version requires an int-specific map/set implementation.
        // (Boxes every element and reuses the reference distinct op.)
        return boxed().distinct().mapToInt(i -> i);
    }
// Terminal ops from IntStream
    // Unordered traversal (second argument false = no ordering constraint)
    @Override
    public void forEach(IntConsumer action) {
        evaluate(ForEachOps.makeInt(action, false));
    }
    // Ordered traversal (second argument true = respect encounter order)
    @Override
    public void forEachOrdered(IntConsumer action) {
        evaluate(ForEachOps.makeInt(action, true));
    }
    // Reduction with identity 0; note int overflow wraps silently
    @Override
    public final int sum() {
        return reduce(0, Integer::sum);
    }
    // Empty stream yields OptionalInt.empty()
    @Override
    public final OptionalInt min() {
        return reduce(Math::min);
    }
    // Empty stream yields OptionalInt.empty()
    @Override
    public final OptionalInt max() {
        return reduce(Math::max);
    }
    // Counts by summing 1L per element in a LongStream, so counts beyond
    // Integer.MAX_VALUE do not overflow
    @Override
    public final long count() {
        return longs().map(e -> 1L).sum();
    }
    @Override
    public final OptionalDouble average() {
        // avg[0] holds the count, avg[1] the running sum
        long[] avg = collect(() -> new long[2],
                             (ll, i) -> {
                                 ll[0]++;
                                 ll[1] += i;
                             },
                             (ll, rr) -> {
                                 ll[0] += rr[0];
                                 ll[1] += rr[1];
                             });
        // Empty stream has count 0 and yields OptionalDouble.empty()
        return avg[0] > 0
               ? OptionalDouble.of((double) avg[1] / avg[0])
               : OptionalDouble.empty();
    }
    // Mutable reduction into an IntSummaryStatistics accumulator
    @Override
    public final IntSummaryStatistics summaryStatistics() {
        return collect(IntSummaryStatistics::new, IntSummaryStatistics::accept,
                       IntSummaryStatistics::combine);
    }
    // Reduction with an identity value; always produces a result
    @Override
    public final int reduce(int identity, IntBinaryOperator op) {
        return evaluate(ReduceOps.makeInt(identity, op));
    }
    // Reduction without an identity; empty stream yields OptionalInt.empty()
    @Override
    public final OptionalInt reduce(IntBinaryOperator op) {
        return evaluate(ReduceOps.makeInt(op));
    }
@Override
public final <R> R collect(Supplier<R> resultFactory,
ObjIntConsumer<R> accumulator,
BiConsumer<R, R> combiner) {
BinaryOperator<R> operator = (left, right) -> {
combiner.accept(left, right);
return left;
};
return evaluate(ReduceOps.makeInt(resultFactory, accumulator, operator));
}
    // Short-circuiting: true as soon as any element matches
    @Override
    public final boolean anyMatch(IntPredicate predicate) {
        return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.ANY));
    }
    // Short-circuiting: false as soon as any element fails the predicate
    @Override
    public final boolean allMatch(IntPredicate predicate) {
        return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.ALL));
    }
    // Short-circuiting: false as soon as any element matches
    @Override
    public final boolean noneMatch(IntPredicate predicate) {
        return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.NONE));
    }
@Override
public final OptionalInt findFirst() {
return evaluate(FindOps.makeInt(true));
}
@Override
public final OptionalInt findAny() {
return evaluate(FindOps.makeInt(false));
}
@Override
public final int[] toArray() {
return Nodes.flattenInt((Node.OfInt) evaluateToArrayNode(Integer[]::new))
.asIntArray();
}
//
    /**
     * Source stage of an IntStream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    static class Head<E_IN> extends IntPipeline<E_IN> {
        /**
         * Constructor for the source stage of an IntStream.
         *
         * @param source {@code Supplier<Spliterator>} describing the stream
         *        source
         * @param sourceFlags the source flags for the stream source, described
         *        in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Supplier<? extends Spliterator<Integer>> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }
        /**
         * Constructor for the source stage of an IntStream.
         *
         * @param source {@code Spliterator} describing the stream source
         * @param sourceFlags the source flags for the stream source, described
         *        in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Spliterator<Integer> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }
        // A head stage is a source, not an operation, so the op methods
        // must never be invoked on it.
        @Override
        final boolean opIsStateful() {
            throw new UnsupportedOperationException();
        }
        @Override
        final Sink<E_IN> opWrapSink(int flags, Sink<Integer> sink) {
            throw new UnsupportedOperationException();
        }
        // Optimized sequential terminal operations for the head of the pipeline
        @Override
        public void forEach(IntConsumer action) {
            // With no intermediate ops and no parallelism, traverse the
            // source spliterator directly instead of building a terminal op.
            if (!isParallel()) {
                adapt(sourceStageSpliterator()).forEachRemaining(action);
            }
            else {
                super.forEach(action);
            }
        }
        @Override
        public void forEachOrdered(IntConsumer action) {
            // Sequential traversal is inherently ordered; same fast path.
            if (!isParallel()) {
                adapt(sourceStageSpliterator()).forEachRemaining(action);
            }
            else {
                super.forEachOrdered(action);
            }
        }
    }
    /**
     * Base class for a stateless intermediate stage of an IntStream
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    abstract static class StatelessOp<E_IN> extends IntPipeline<E_IN> {
        /**
         * Construct a new IntStream by appending a stateless intermediate
         * operation to an existing stream.
         * @param upstream The upstream pipeline stage
         * @param inputShape The stream shape for the upstream pipeline stage
         * @param opFlags Operation flags for the new stage
         */
        StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
                    StreamShape inputShape,
                    int opFlags) {
            super(upstream, opFlags);
            // The upstream must produce elements of the shape this op consumes.
            assert upstream.getOutputShape() == inputShape;
        }
        @Override
        final boolean opIsStateful() {
            return false;
        }
    }
    /**
     * Base class for a stateful intermediate stage of an IntStream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    abstract static class StatefulOp<E_IN> extends IntPipeline<E_IN> {
        /**
         * Construct a new IntStream by appending a stateful intermediate
         * operation to an existing stream.
         * @param upstream The upstream pipeline stage
         * @param inputShape The stream shape for the upstream pipeline stage
         * @param opFlags Operation flags for the new stage
         */
        StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
                   StreamShape inputShape,
                   int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }
        @Override
        final boolean opIsStateful() {
            return true;
        }
        // Stateful ops cannot use the default parallel evaluation, so each
        // concrete subclass must supply its own.
        @Override
        abstract <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
                                                        Spliterator<P_IN> spliterator,
                                                        IntFunction<Integer[]> generator);
    }
}
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.LongSummaryStatistics;
import java.util.Objects;
import java.util.OptionalDouble;
import java.util.OptionalLong;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiConsumer;
import java.util.function.BinaryOperator;
import java.util.function.IntFunction;
import java.util.function.LongBinaryOperator;
import java.util.function.LongConsumer;
import java.util.function.LongFunction;
import java.util.function.LongPredicate;
import java.util.function.LongToDoubleFunction;
import java.util.function.LongToIntFunction;
import java.util.function.LongUnaryOperator;
import java.util.function.ObjLongConsumer;
import java.util.function.Supplier;
/**
* Abstract base class for an intermediate pipeline stage or pipeline source
 * stage of a pipeline whose elements are of type {@code long}.
*
* @param <E_IN> type of elements in the upstream source
* @since 1.8
*/
abstract class LongPipeline<E_IN>
        extends AbstractPipeline<E_IN, Long, LongStream>
        implements LongStream {

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Supplier<Spliterator>} describing the stream source
     * @param sourceFlags the source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    LongPipeline(Supplier<? extends Spliterator<Long>> source,
                 int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Spliterator} describing the stream source
     * @param sourceFlags the source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    LongPipeline(Spliterator<Long> source,
                 int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }

    /**
     * Constructor for appending an intermediate operation onto an existing
     * pipeline.
     *
     * @param upstream the upstream element source.
     * @param opFlags the operation flags
     */
    LongPipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
        super(upstream, opFlags);
    }

    /**
     * Adapt a {@code Sink<Long>} to a {@code LongConsumer}, ideally simply
     * by casting.
     */
    private static LongConsumer adapt(Sink<Long> sink) {
        if (sink instanceof LongConsumer) {
            return (LongConsumer) sink;
        } else {
            if (Tripwire.ENABLED)
                Tripwire.trip(AbstractPipeline.class,
                              "using LongStream.adapt(Sink<Long> s)");
            return sink::accept;
        }
    }

    /**
     * Adapt a {@code Spliterator<Long>} to a {@code Spliterator.OfLong}.
     *
     * @implNote
     * The implementation attempts to cast to a Spliterator.OfLong, and throws
     * an exception if this cast is not possible.
     */
    private static Spliterator.OfLong adapt(Spliterator<Long> s) {
        if (s instanceof Spliterator.OfLong) {
            return (Spliterator.OfLong) s;
        } else {
            if (Tripwire.ENABLED)
                Tripwire.trip(AbstractPipeline.class,
                              "using LongStream.adapt(Spliterator<Long> s)");
            throw new UnsupportedOperationException("LongStream.adapt(Spliterator<Long> s)");
        }
    }


    // Shape-specific methods

    @Override
    final StreamShape getOutputShape() {
        return StreamShape.LONG_VALUE;
    }

    @Override
    final <P_IN> Node<Long> evaluateToNode(PipelineHelper<Long> helper,
                                           Spliterator<P_IN> spliterator,
                                           boolean flattenTree,
                                           IntFunction<Long[]> generator) {
        return Nodes.collectLong(helper, spliterator, flattenTree);
    }

    @Override
    final <P_IN> Spliterator<Long> wrap(PipelineHelper<Long> ph,
                                        Supplier<Spliterator<P_IN>> supplier,
                                        boolean isParallel) {
        return new StreamSpliterators.LongWrappingSpliterator<>(ph, supplier, isParallel);
    }

    @Override
    final Spliterator.OfLong lazySpliterator(Supplier<? extends Spliterator<Long>> supplier) {
        return new StreamSpliterators.DelegatingSpliterator.OfLong((Supplier<Spliterator.OfLong>) supplier);
    }

    @Override
    final void forEachWithCancel(Spliterator<Long> spliterator, Sink<Long> sink) {
        Spliterator.OfLong spl = adapt(spliterator);
        LongConsumer adaptedSink = adapt(sink);
        // Advance one element at a time so a short-circuiting sink can stop
        // the traversal as soon as cancellation is requested.
        do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
    }

    @Override
    final Node.Builder<Long> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Long[]> generator) {
        // The generator is ignored: long-valued nodes are backed by long[].
        return Nodes.longBuilder(exactSizeIfKnown);
    }


    // LongStream

    @Override
    public final PrimitiveIterator.OfLong iterator() {
        return Spliterators.iteratorFromSpliterator(spliterator());
    }

    @Override
    public final Spliterator.OfLong spliterator() {
        return adapt(super.spliterator());
    }

    // Stateless intermediate ops from LongStream

    @Override
    public final DoubleStream doubles() {
        return new DoublePipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                                    StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        downstream.accept((double) t);
                    }
                };
            }
        };
    }

    @Override
    public final Stream<Long> boxed() {
        return mapToObj(Long::valueOf);
    }

    @Override
    public final LongStream map(LongUnaryOperator mapper) {
        Objects.requireNonNull(mapper);
        return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        downstream.accept(mapper.applyAsLong(t));
                    }
                };
            }
        };
    }

    @Override
    public final <U> Stream<U> mapToObj(LongFunction<? extends U> mapper) {
        Objects.requireNonNull(mapper);
        return new ReferencePipeline.StatelessOp<Long, U>(this, StreamShape.LONG_VALUE,
                                                          StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<U> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        downstream.accept(mapper.apply(t));
                    }
                };
            }
        };
    }

    @Override
    public final IntStream mapToInt(LongToIntFunction mapper) {
        Objects.requireNonNull(mapper);
        return new IntPipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                                 StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        downstream.accept(mapper.applyAsInt(t));
                    }
                };
            }
        };
    }

    @Override
    public final DoubleStream mapToDouble(LongToDoubleFunction mapper) {
        Objects.requireNonNull(mapper);
        return new DoublePipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                                    StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        downstream.accept(mapper.applyAsDouble(t));
                    }
                };
            }
        };
    }

    @Override
    public final LongStream flatMap(LongFunction<? extends LongStream> mapper) {
        // Fail fast on a null mapper, consistent with map()/mapToObj()/etc.,
        // rather than deferring the NPE to pipeline evaluation.
        Objects.requireNonNull(mapper);
        return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        // We can do better than this too; optimize for depth=0 case and just grab spliterator and forEach it
                        LongStream result = mapper.apply(t);
                        if (result != null)
                            result.sequential().forEach(i -> downstream.accept(i));
                    }
                };
            }
        };
    }

    @Override
    public LongStream unordered() {
        if (!isOrdered())
            return this;
        return new StatelessOp<Long>(this, StreamShape.LONG_VALUE, StreamOpFlag.NOT_ORDERED) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
                // No per-element transformation; this stage only clears the
                // ORDERED flag.
                return sink;
            }
        };
    }

    @Override
    public final LongStream filter(LongPredicate predicate) {
        Objects.requireNonNull(predicate);
        return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                     StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        if (predicate.test(t))
                            downstream.accept(t);
                    }
                };
            }
        };
    }

    @Override
    public final LongStream peek(LongConsumer consumer) {
        Objects.requireNonNull(consumer);
        return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
                                     0) {
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedLong(sink) {
                    @Override
                    public void accept(long t) {
                        consumer.accept(t);
                        downstream.accept(t);
                    }
                };
            }
        };
    }

    // Stateful intermediate ops from LongStream

    private LongStream slice(long skip, long limit) {
        return SliceOps.makeLong(this, skip, limit);
    }

    @Override
    public final LongStream limit(long maxSize) {
        if (maxSize < 0)
            throw new IllegalArgumentException(Long.toString(maxSize));
        return slice(0, maxSize);
    }

    @Override
    public final LongStream substream(long startingOffset) {
        if (startingOffset < 0)
            throw new IllegalArgumentException(Long.toString(startingOffset));
        if (startingOffset == 0)
            return this;
        else
            return slice(startingOffset, -1);
    }

    @Override
    public final LongStream substream(long startingOffset, long endingOffset) {
        if (startingOffset < 0 || endingOffset < startingOffset)
            throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
        return slice(startingOffset, endingOffset - startingOffset);
    }

    @Override
    public final LongStream sorted() {
        return SortedOps.makeLong(this);
    }

    @Override
    public final LongStream distinct() {
        // While functional and quick to implement, this approach is not very efficient.
        // An efficient version requires a long-specific map/set implementation.
        return boxed().distinct().mapToLong(i -> (long) i);
    }

    // Terminal ops from LongStream

    @Override
    public void forEach(LongConsumer action) {
        evaluate(ForEachOps.makeLong(action, false));
    }

    @Override
    public void forEachOrdered(LongConsumer action) {
        evaluate(ForEachOps.makeLong(action, true));
    }

    @Override
    public final long sum() {
        // use better algorithm to compensate for intermediate overflow?
        return reduce(0, Long::sum);
    }

    @Override
    public final OptionalLong min() {
        return reduce(Math::min);
    }

    @Override
    public final OptionalLong max() {
        return reduce(Math::max);
    }

    @Override
    public final OptionalDouble average() {
        // Mutable-reduce into a two-slot accumulator:
        // avg[0] = element count, avg[1] = running sum.
        long[] avg = collect(() -> new long[2],
                             (ll, i) -> {
                                 ll[0]++;
                                 ll[1] += i;
                             },
                             (ll, rr) -> {
                                 ll[0] += rr[0];
                                 ll[1] += rr[1];
                             });
        return avg[0] > 0
               ? OptionalDouble.of((double) avg[1] / avg[0])
               : OptionalDouble.empty();
    }

    @Override
    public final long count() {
        return map(e -> 1L).sum();
    }

    @Override
    public final LongSummaryStatistics summaryStatistics() {
        return collect(LongSummaryStatistics::new, LongSummaryStatistics::accept,
                       LongSummaryStatistics::combine);
    }

    @Override
    public final long reduce(long identity, LongBinaryOperator op) {
        return evaluate(ReduceOps.makeLong(identity, op));
    }

    @Override
    public final OptionalLong reduce(LongBinaryOperator op) {
        return evaluate(ReduceOps.makeLong(op));
    }

    @Override
    public final <R> R collect(Supplier<R> resultFactory,
                               ObjLongConsumer<R> accumulator,
                               BiConsumer<R, R> combiner) {
        // Adapt the BiConsumer-style combiner to the BinaryOperator shape
        // expected by ReduceOps: merge 'right' into 'left', return 'left'.
        BinaryOperator<R> operator = (left, right) -> {
            combiner.accept(left, right);
            return left;
        };
        return evaluate(ReduceOps.makeLong(resultFactory, accumulator, operator));
    }

    @Override
    public final boolean anyMatch(LongPredicate predicate) {
        return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.ANY));
    }

    @Override
    public final boolean allMatch(LongPredicate predicate) {
        return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.ALL));
    }

    @Override
    public final boolean noneMatch(LongPredicate predicate) {
        return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.NONE));
    }

    @Override
    public final OptionalLong findFirst() {
        return evaluate(FindOps.makeLong(true));
    }

    @Override
    public final OptionalLong findAny() {
        return evaluate(FindOps.makeLong(false));
    }

    @Override
    public final long[] toArray() {
        return Nodes.flattenLong((Node.OfLong) evaluateToArrayNode(Long[]::new)).asLongArray();
    }


    //

    /**
     * Source stage of a LongPipeline.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    static class Head<E_IN> extends LongPipeline<E_IN> {
        /**
         * Constructor for the source stage of a LongStream.
         *
         * @param source {@code Supplier<Spliterator>} describing the stream
         *        source
         * @param sourceFlags the source flags for the stream source, described
         *        in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Supplier<? extends Spliterator<Long>> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }

        /**
         * Constructor for the source stage of a LongStream.
         *
         * @param source {@code Spliterator} describing the stream source
         * @param sourceFlags the source flags for the stream source, described
         *        in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Spliterator<Long> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }

        // A head stage is a source, not an operation, so the op methods
        // must never be invoked on it.
        @Override
        final boolean opIsStateful() {
            throw new UnsupportedOperationException();
        }

        @Override
        final Sink<E_IN> opWrapSink(int flags, Sink<Long> sink) {
            throw new UnsupportedOperationException();
        }

        // Optimized sequential terminal operations for the head of the pipeline

        @Override
        public void forEach(LongConsumer action) {
            if (!isParallel()) {
                adapt(sourceStageSpliterator()).forEachRemaining(action);
            } else {
                super.forEach(action);
            }
        }

        @Override
        public void forEachOrdered(LongConsumer action) {
            if (!isParallel()) {
                adapt(sourceStageSpliterator()).forEachRemaining(action);
            } else {
                super.forEachOrdered(action);
            }
        }
    }

    /**
     * Base class for a stateless intermediate stage of a LongStream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    abstract static class StatelessOp<E_IN> extends LongPipeline<E_IN> {
        /**
         * Construct a new LongStream by appending a stateless intermediate
         * operation to an existing stream.
         * @param upstream The upstream pipeline stage
         * @param inputShape The stream shape for the upstream pipeline stage
         * @param opFlags Operation flags for the new stage
         */
        StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
                    StreamShape inputShape,
                    int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }

        @Override
        final boolean opIsStateful() {
            return false;
        }
    }

    /**
     * Base class for a stateful intermediate stage of a LongStream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @since 1.8
     */
    abstract static class StatefulOp<E_IN> extends LongPipeline<E_IN> {
        /**
         * Construct a new LongStream by appending a stateful intermediate
         * operation to an existing stream.
         * @param upstream The upstream pipeline stage
         * @param inputShape The stream shape for the upstream pipeline stage
         * @param opFlags Operation flags for the new stage
         */
        StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
                   StreamShape inputShape,
                   int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }

        @Override
        final boolean opIsStateful() {
            return true;
        }

        // Stateful ops cannot use the default parallel evaluation, so each
        // concrete subclass must supply its own.
        @Override
        abstract <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
                                                      Spliterator<P_IN> spliterator,
                                                      IntFunction<Long[]> generator);
    }
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collection;
import java.util.Deque;
import java.util.List;
import java.util.Objects;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.concurrent.CountedCompleter;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.LongConsumer;
/**
* Factory methods for constructing implementations of {@link Node} and
* {@link Node.Builder} and their primitive specializations. Fork/Join tasks
* for collecting output from a {@link PipelineHelper} to a {@link Node} and
* flattening {@link Node}s.
*
* @since 1.8
*/
final class Nodes {
    // Suppresses instantiation: Nodes is a static utility class only.
    private Nodes() {
        throw new Error("no instances");
    }
    /**
     * The maximum size of an array that can be allocated.
     */
    static final long MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
    // Shared, stateless empty-node instances, one per stream shape.
    private static final Node EMPTY_NODE = new EmptyNode.OfRef();
    private static final Node.OfInt EMPTY_INT_NODE = new EmptyNode.OfInt();
    private static final Node.OfLong EMPTY_LONG_NODE = new EmptyNode.OfLong();
    private static final Node.OfDouble EMPTY_DOUBLE_NODE = new EmptyNode.OfDouble();
// General shape-based node creation methods
/**
* Produces an empty node whose count is zero, has no children and no content.
*
* @param <T> the type of elements of the created node
* @param shape the shape of the node to be created
* @return an empty node.
*/
@SuppressWarnings("unchecked")
static <T> Node<T> emptyNode(StreamShape shape) {
switch (shape) {
case REFERENCE: return (Node<T>) EMPTY_NODE;
case INT_VALUE: return (Node<T>) EMPTY_INT_NODE;
case LONG_VALUE: return (Node<T>) EMPTY_LONG_NODE;
case DOUBLE_VALUE: return (Node<T>) EMPTY_DOUBLE_NODE;
default:
throw new IllegalStateException("Unknown shape " + shape);
}
}
    /**
     * Produces a concatenated {@link Node} that has two or more children.
     * <p>The count of the concatenated node is equal to the sum of the count
     * of each child. Traversal of the concatenated node traverses the content
     * of each child in encounter order of the list of children. Splitting a
     * spliterator obtained from the concatenated node preserves the encounter
     * order of the list of children.
     *
     * <p>The result may be a concatenated node, the input sole node if the size
     * of the list is 1, or an empty node.
     *
     * @param <T> the type of elements of the concatenated node
     * @param shape the shape of the concatenated node to be created
     * @param nodes the input nodes
     * @return a {@code Node} covering the elements of the input nodes
     * @throws IllegalStateException if all {@link Node} elements of the list
     * are not an instance of type supported by this factory.
     */
    @SuppressWarnings("unchecked")
    static <T> Node<T> conc(StreamShape shape, List<? extends Node<T>> nodes) {
        int size = nodes.size();
        if (size == 0)
            return emptyNode(shape);
        else if (size == 1)
            return nodes.get(0);
        else {
            // Create a right-balanced tree when there are more than 2 nodes.
            // Each shape-specific branch seeds a conc node with the last two
            // list elements, then folds leftwards so traversal of the tree
            // visits children in encounter order.
            switch (shape) {
                case REFERENCE: {
                    List<Node<T>> refNodes = (List<Node<T>>) nodes;
                    ConcNode<T> c = new ConcNode<>(refNodes.get(size - 2), refNodes.get(size - 1));
                    for (int i = size - 3; i >= 0; i--) {
                        c = new ConcNode<>(refNodes.get(i), c);
                    }
                    return c;
                }
                case INT_VALUE: {
                    List<? extends Node.OfInt> intNodes = (List<? extends Node.OfInt>) nodes;
                    IntConcNode c = new IntConcNode(intNodes.get(size - 2), intNodes.get(size - 1));
                    for (int i = size - 3; i >= 0; i--) {
                        c = new IntConcNode(intNodes.get(i), c);
                    }
                    return (Node<T>) c;
                }
                case LONG_VALUE: {
                    List<? extends Node.OfLong> longNodes = (List<? extends Node.OfLong>) nodes;
                    LongConcNode c = new LongConcNode(longNodes.get(size - 2), longNodes.get(size - 1));
                    for (int i = size - 3; i >= 0; i--) {
                        c = new LongConcNode(longNodes.get(i), c);
                    }
                    return (Node<T>) c;
                }
                case DOUBLE_VALUE: {
                    List<? extends Node.OfDouble> doubleNodes = (List<? extends Node.OfDouble>) nodes;
                    DoubleConcNode c = new DoubleConcNode(doubleNodes.get(size - 2), doubleNodes.get(size - 1));
                    for (int i = size - 3; i >= 0; i--) {
                        c = new DoubleConcNode(doubleNodes.get(i), c);
                    }
                    return (Node<T>) c;
                }
                default:
                    throw new IllegalStateException("Unknown shape " + shape);
            }
        }
    }
    /**
     * Truncate a {@link Node}, returning a node describing a subsequence of
     * the contents of the input node.
     *
     * @param <T> the type of elements of the input node and truncated node
     * @param input the input node
     * @param from the starting offset to include in the truncated node (inclusive)
     * @param to the ending offset to include in the truncated node (exclusive)
     * @param generator the array factory (only used for reference nodes)
     * @return the truncated node
     */
    @SuppressWarnings("unchecked")
    static <T> Node<T> truncateNode(Node<T> input, long from, long to, IntFunction<T[]> generator) {
        StreamShape shape = input.getShape();
        long size = truncatedSize(input.count(), from, to);
        if (size == 0)
            return emptyNode(shape);
        else if (from == 0 && to >= input.count())
            // The requested range covers the whole node; no copy needed.
            return input;
        // Each shape-specific branch does the same two-phase copy:
        // first discard 'from' leading elements, then feed up to 'size'
        // elements into a fixed-size node builder.
        switch (shape) {
            case REFERENCE: {
                Spliterator<T> spliterator = input.spliterator();
                Node.Builder<T> nodeBuilder = Nodes.builder(size, generator);
                nodeBuilder.begin(size);
                for (int i = 0; i < from && spliterator.tryAdvance(e -> { }); i++) { }
                for (int i = 0; (i < size) && spliterator.tryAdvance(nodeBuilder); i++) { }
                nodeBuilder.end();
                return nodeBuilder.build();
            }
            case INT_VALUE: {
                Spliterator.OfInt spliterator = ((Node.OfInt) input).spliterator();
                Node.Builder.OfInt nodeBuilder = Nodes.intBuilder(size);
                nodeBuilder.begin(size);
                // Casts select the primitive tryAdvance overload, avoiding boxing.
                for (int i = 0; i < from && spliterator.tryAdvance((IntConsumer) e -> { }); i++) { }
                for (int i = 0; (i < size) && spliterator.tryAdvance((IntConsumer) nodeBuilder); i++) { }
                nodeBuilder.end();
                return (Node<T>) nodeBuilder.build();
            }
            case LONG_VALUE: {
                Spliterator.OfLong spliterator = ((Node.OfLong) input).spliterator();
                Node.Builder.OfLong nodeBuilder = Nodes.longBuilder(size);
                nodeBuilder.begin(size);
                for (int i = 0; i < from && spliterator.tryAdvance((LongConsumer) e -> { }); i++) { }
                for (int i = 0; (i < size) && spliterator.tryAdvance((LongConsumer) nodeBuilder); i++) { }
                nodeBuilder.end();
                return (Node<T>) nodeBuilder.build();
            }
            case DOUBLE_VALUE: {
                Spliterator.OfDouble spliterator = ((Node.OfDouble) input).spliterator();
                Node.Builder.OfDouble nodeBuilder = Nodes.doubleBuilder(size);
                nodeBuilder.begin(size);
                for (int i = 0; i < from && spliterator.tryAdvance((DoubleConsumer) e -> { }); i++) { }
                for (int i = 0; (i < size) && spliterator.tryAdvance((DoubleConsumer) nodeBuilder); i++) { }
                nodeBuilder.end();
                return (Node<T>) nodeBuilder.build();
            }
            default:
                throw new IllegalStateException("Unknown shape " + shape);
        }
    }
private static long truncatedSize(long size, long from, long to) {
if (from >= 0)
size = Math.max(0, size - from);
long limit = to - from;
if (limit >= 0)
size = Math.min(size, limit);
return size;
}
    // Reference-based node methods
    /**
     * Produces a {@link Node} describing an array.
     *
     * <p>The node will hold a reference to the array and will not make a copy.
     * Later modification of the array is therefore visible through the node.
     *
     * @param <T> the type of elements held by the node
     * @param array the array
     * @return a node holding an array
     */
    static <T> Node<T> node(T[] array) {
        return new ArrayNode<>(array);
    }
    /**
     * Produces a {@link Node} describing a {@link Collection}.
     * <p>
     * The node will hold a reference to the collection and will not make a
     * copy. Later modification of the collection is therefore visible through
     * the node.
     *
     * @param <T> the type of elements held by the node
     * @param c the collection
     * @return a node holding a collection
     */
    static <T> Node<T> node(Collection<T> c) {
        return new CollectionNode<>(c);
    }
/**
* Produces a {@link Node.Builder}.
*
* @param exactSizeIfKnown -1 if a variable size builder is requested,
* otherwise the exact capacity desired. A fixed capacity builder will
* fail if the wrong number of elements are added to the builder.
* @param generator the array factory
* @param <T> the type of elements of the node builder
* @return a {@code Node.Builder}
*/
static <T> Node.Builder<T> builder(long exactSizeIfKnown, IntFunction<T[]> generator) {
return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
? new FixedNodeBuilder<>(exactSizeIfKnown, generator)
: builder();
}
/**
* Produces a variable size @{link Node.Builder}.
*
* @param <T> the type of elements of the node builder
* @return a {@code Node.Builder}
*/
static <T> Node.Builder<T> builder() {
return new SpinedNodeBuilder<>();
}
// Int nodes
/**
* Produces a {@link Node.OfInt} describing an int[] array.
*
* <p>The node will hold a reference to the array and will not make a copy.
*
* @param array the array
* @return a node holding an array
*/
static Node.OfInt node(int[] array) {
return new IntArrayNode(array);
}
/**
* Produces a {@link Node.Builder.OfInt}.
*
* @param exactSizeIfKnown -1 if a variable size builder is requested,
* otherwise the exact capacity desired. A fixed capacity builder will
* fail if the wrong number of elements are added to the builder.
* @return a {@code Node.Builder.OfInt}
*/
static Node.Builder.OfInt intBuilder(long exactSizeIfKnown) {
return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
? new IntFixedNodeBuilder(exactSizeIfKnown)
: intBuilder();
}
/**
* Produces a variable size @{link Node.Builder.OfInt}.
*
* @return a {@code Node.Builder.OfInt}
*/
static Node.Builder.OfInt intBuilder() {
return new IntSpinedNodeBuilder();
}
// Long nodes
/**
* Produces a {@link Node.OfLong} describing a long[] array.
* <p>
* The node will hold a reference to the array and will not make a copy.
*
* @param array the array
* @return a node holding an array
*/
static Node.OfLong node(final long[] array) {
return new LongArrayNode(array);
}
/**
* Produces a {@link Node.Builder.OfLong}.
*
* @param exactSizeIfKnown -1 if a variable size builder is requested,
* otherwise the exact capacity desired. A fixed capacity builder will
* fail if the wrong number of elements are added to the builder.
* @return a {@code Node.Builder.OfLong}
*/
static Node.Builder.OfLong longBuilder(long exactSizeIfKnown) {
return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
? new LongFixedNodeBuilder(exactSizeIfKnown)
: longBuilder();
}
/**
* Produces a variable size @{link Node.Builder.OfLong}.
*
* @return a {@code Node.Builder.OfLong}
*/
static Node.Builder.OfLong longBuilder() {
return new LongSpinedNodeBuilder();
}
// Double nodes
/**
* Produces a {@link Node.OfDouble} describing a double[] array.
*
* <p>The node will hold a reference to the array and will not make a copy.
*
* @param array the array
* @return a node holding an array
*/
static Node.OfDouble node(final double[] array) {
return new DoubleArrayNode(array);
}
/**
* Produces a {@link Node.Builder.OfDouble}.
*
* @param exactSizeIfKnown -1 if a variable size builder is requested,
* otherwise the exact capacity desired. A fixed capacity builder will
* fail if the wrong number of elements are added to the builder.
* @return a {@code Node.Builder.OfDouble}
*/
static Node.Builder.OfDouble doubleBuilder(long exactSizeIfKnown) {
return (exactSizeIfKnown >= 0 && exactSizeIfKnown < MAX_ARRAY_SIZE)
? new DoubleFixedNodeBuilder(exactSizeIfKnown)
: doubleBuilder();
}
/**
* Produces a variable size @{link Node.Builder.OfDouble}.
*
* @return a {@code Node.Builder.OfDouble}
*/
static Node.Builder.OfDouble doubleBuilder() {
return new DoubleSpinedNodeBuilder();
}
// Parallel evaluation of pipelines to nodes
    /**
     * Collect, in parallel, elements output from a pipeline and describe those
     * elements with a {@link Node}.
     *
     * @implSpec
     * If the exact size of the output from the pipeline is known and the source
     * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
     * then a flat {@link Node} will be returned whose content is an array,
     * since the size is known the array can be constructed in advance and
     * output elements can be placed into the array concurrently by leaf
     * tasks at the correct offsets. If the exact size is not known, output
     * elements are collected into a conc-node whose shape mirrors that
     * of the computation. This conc-node can then be flattened in
     * parallel to produce a flat {@code Node} if desired.
     *
     * @param <P_IN> the type of elements from the source Spliterator
     * @param <P_OUT> the type of elements output from the pipeline
     * @param helper the pipeline helper describing the pipeline
     * @param spliterator the source spliterator
     * @param flattenTree whether a conc node should be flattened into a node
     * describing an array before returning
     * @param generator the array generator
     * @return a {@link Node} describing the output elements
     */
    public static <P_IN, P_OUT> Node<P_OUT> collect(PipelineHelper<P_OUT> helper,
                                                    Spliterator<P_IN> spliterator,
                                                    boolean flattenTree,
                                                    IntFunction<P_OUT[]> generator) {
        long size = helper.exactOutputSizeIfKnown(spliterator);
        if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
            // Known size: build the result array up front and let leaf tasks
            // write into it concurrently at computed offsets.
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            P_OUT[] array = generator.apply((int) size);
            new SizedCollectorTask.OfRef<>(spliterator, helper, array).invoke();
            return node(array);
        } else {
            // Unknown size: collect into a conc-node tree, optionally
            // flattening it to an array-backed node afterwards.
            Node<P_OUT> node = new CollectorTask<>(helper, generator, spliterator).invoke();
            return flattenTree ? flatten(node, generator) : node;
        }
    }
    /**
     * Collect, in parallel, elements output from an int-valued pipeline and
     * describe those elements with a {@link Node.OfInt}.
     *
     * @implSpec
     * If the exact size of the output from the pipeline is known and the source
     * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
     * then a flat {@link Node} will be returned whose content is an array,
     * since the size is known the array can be constructed in advance and
     * output elements can be placed into the array concurrently by leaf
     * tasks at the correct offsets. If the exact size is not known, output
     * elements are collected into a conc-node whose shape mirrors that
     * of the computation. This conc-node can then be flattened in
     * parallel to produce a flat {@code Node.OfInt} if desired.
     *
     * @param <P_IN> the type of elements from the source Spliterator
     * @param helper the pipeline helper describing the pipeline
     * @param spliterator the source spliterator
     * @param flattenTree whether a conc node should be flattened into a node
     * describing an array before returning
     * @return a {@link Node.OfInt} describing the output elements
     */
    public static <P_IN> Node.OfInt collectInt(PipelineHelper<Integer> helper,
                                               Spliterator<P_IN> spliterator,
                                               boolean flattenTree) {
        long size = helper.exactOutputSizeIfKnown(spliterator);
        if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
            // Known size: allocate the int[] up front and fill concurrently.
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            int[] array = new int[(int) size];
            new SizedCollectorTask.OfInt<>(spliterator, helper, array).invoke();
            return node(array);
        }
        else {
            // Unknown size: collect into a conc-node, optionally flattening.
            Node.OfInt node = new IntCollectorTask<>(helper, spliterator).invoke();
            return flattenTree ? flattenInt(node) : node;
        }
    }
    /**
     * Collect, in parallel, elements output from a long-valued pipeline and
     * describe those elements with a {@link Node.OfLong}.
     *
     * @implSpec
     * If the exact size of the output from the pipeline is known and the source
     * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
     * then a flat {@link Node} will be returned whose content is an array,
     * since the size is known the array can be constructed in advance and
     * output elements can be placed into the array concurrently by leaf
     * tasks at the correct offsets. If the exact size is not known, output
     * elements are collected into a conc-node whose shape mirrors that
     * of the computation. This conc-node can then be flattened in
     * parallel to produce a flat {@code Node.OfLong} if desired.
     *
     * @param <P_IN> the type of elements from the source Spliterator
     * @param helper the pipeline helper describing the pipeline
     * @param spliterator the source spliterator
     * @param flattenTree whether a conc node should be flattened into a node
     * describing an array before returning
     * @return a {@link Node.OfLong} describing the output elements
     */
    public static <P_IN> Node.OfLong collectLong(PipelineHelper<Long> helper,
                                                 Spliterator<P_IN> spliterator,
                                                 boolean flattenTree) {
        long size = helper.exactOutputSizeIfKnown(spliterator);
        if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
            // Known size: allocate the long[] up front and fill concurrently.
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            long[] array = new long[(int) size];
            new SizedCollectorTask.OfLong<>(spliterator, helper, array).invoke();
            return node(array);
        }
        else {
            // Unknown size: collect into a conc-node, optionally flattening.
            Node.OfLong node = new LongCollectorTask<>(helper, spliterator).invoke();
            return flattenTree ? flattenLong(node) : node;
        }
    }
    /**
     * Collect, in parallel, elements output from a double-valued pipeline and
     * describe those elements with a {@link Node.OfDouble}.
     *
     * @implSpec
     * If the exact size of the output from the pipeline is known and the source
     * {@link Spliterator} has the {@link Spliterator#SUBSIZED} characteristic,
     * then a flat {@link Node} will be returned whose content is an array,
     * since the size is known the array can be constructed in advance and
     * output elements can be placed into the array concurrently by leaf
     * tasks at the correct offsets. If the exact size is not known, output
     * elements are collected into a conc-node whose shape mirrors that
     * of the computation. This conc-node can then be flattened in
     * parallel to produce a flat {@code Node.OfDouble} if desired.
     *
     * @param <P_IN> the type of elements from the source Spliterator
     * @param helper the pipeline helper describing the pipeline
     * @param spliterator the source spliterator
     * @param flattenTree whether a conc node should be flattened into a node
     * describing an array before returning
     * @return a {@link Node.OfDouble} describing the output elements
     */
    public static <P_IN> Node.OfDouble collectDouble(PipelineHelper<Double> helper,
                                                     Spliterator<P_IN> spliterator,
                                                     boolean flattenTree) {
        long size = helper.exactOutputSizeIfKnown(spliterator);
        if (size >= 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
            // Known size: allocate the double[] up front and fill concurrently.
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            double[] array = new double[(int) size];
            new SizedCollectorTask.OfDouble<>(spliterator, helper, array).invoke();
            return node(array);
        }
        else {
            // Unknown size: collect into a conc-node, optionally flattening.
            Node.OfDouble node = new DoubleCollectorTask<>(helper, spliterator).invoke();
            return flattenTree ? flattenDouble(node) : node;
        }
    }
// Parallel flattening of nodes
/**
* Flatten, in parallel, a {@link Node}. A flattened node is one that has
* no children. If the node is already flat, it is simply returned.
*
* @implSpec
* If a new node is to be created, the generator is used to create an array
* whose length is {@link Node#count()}. Then the node tree is traversed
* and leaf node elements are placed in the array concurrently by leaf tasks
* at the correct offsets.
*
* @param <T> type of elements contained by the node
* @param node the node to flatten
* @param generator the array factory used to create array instances
* @return a flat {@code Node}
*/
public static <T> Node<T> flatten(Node<T> node, IntFunction<T[]> generator) {
if (node.getChildCount() > 0) {
T[] array = generator.apply((int) node.count());
new ToArrayTask.OfRef<>(node, array, 0).invoke();
return node(array);
} else {
return node;
}
}
/**
* Flatten, in parallel, a {@link Node.OfInt}. A flattened node is one that
* has no children. If the node is already flat, it is simply returned.
*
* @implSpec
* If a new node is to be created, a new int[] array is created whose length
* is {@link Node#count()}. Then the node tree is traversed and leaf node
* elements are placed in the array concurrently by leaf tasks at the
* correct offsets.
*
* @param node the node to flatten
* @return a flat {@code Node.OfInt}
*/
public static Node.OfInt flattenInt(Node.OfInt node) {
if (node.getChildCount() > 0) {
int[] array = new int[(int) node.count()];
new ToArrayTask.OfInt(node, array, 0).invoke();
return node(array);
} else {
return node;
}
}
/**
* Flatten, in parallel, a {@link Node.OfLong}. A flattened node is one that
* has no children. If the node is already flat, it is simply returned.
*
* @implSpec
* If a new node is to be created, a new long[] array is created whose length
* is {@link Node#count()}. Then the node tree is traversed and leaf node
* elements are placed in the array concurrently by leaf tasks at the
* correct offsets.
*
* @param node the node to flatten
* @return a flat {@code Node.OfLong}
*/
public static Node.OfLong flattenLong(Node.OfLong node) {
if (node.getChildCount() > 0) {
long[] array = new long[(int) node.count()];
new ToArrayTask.OfLong(node, array, 0).invoke();
return node(array);
} else {
return node;
}
}
/**
* Flatten, in parallel, a {@link Node.OfDouble}. A flattened node is one that
* has no children. If the node is already flat, it is simply returned.
*
* @implSpec
* If a new node is to be created, a new double[] array is created whose length
* is {@link Node#count()}. Then the node tree is traversed and leaf node
* elements are placed in the array concurrently by leaf tasks at the
* correct offsets.
*
* @param node the node to flatten
* @return a flat {@code Node.OfDouble}
*/
public static Node.OfDouble flattenDouble(Node.OfDouble node) {
if (node.getChildCount() > 0) {
double[] array = new double[(int) node.count()];
new ToArrayTask.OfDouble(node, array, 0).invoke();
return node(array);
} else {
return node;
}
}
// Implementations
    /**
     * Base class for immutable, empty nodes.  The nested subclasses specialize
     * the spliterator and array accessors for the reference and primitive
     * stream shapes.
     */
    private static abstract class EmptyNode<T, T_ARR, T_CONS> implements Node<T> {
        EmptyNode() { }
        @Override
        public T[] asArray(IntFunction<T[]> generator) {
            return generator.apply(0);
        }
        // No elements, so there is nothing to copy; serves as the copyInto
        // implementation for both the reference and primitive specializations
        // via the T_ARR type parameter.
        public void copyInto(T_ARR array, int offset) { }
        @Override
        public long count() {
            return 0;
        }
        // No elements, so traversal is a no-op.
        public void forEach(T_CONS consumer) { }
        private static class OfRef<T> extends EmptyNode<T, T[], Consumer<? super T>> {
            private OfRef() {
                super();
            }
            @Override
            public Spliterator<T> spliterator() {
                return Spliterators.emptySpliterator();
            }
        }
        private static final class OfInt
                extends EmptyNode<Integer, int[], IntConsumer>
                implements Node.OfInt {
            OfInt() { } // Avoid creation of special accessor
            @Override
            public Spliterator.OfInt spliterator() {
                return Spliterators.emptyIntSpliterator();
            }
            @Override
            public int[] asIntArray() {
                return EMPTY_INT_ARRAY;
            }
        }
        private static final class OfLong
                extends EmptyNode<Long, long[], LongConsumer>
                implements Node.OfLong {
            OfLong() { } // Avoid creation of special accessor
            @Override
            public Spliterator.OfLong spliterator() {
                return Spliterators.emptyLongSpliterator();
            }
            @Override
            public long[] asLongArray() {
                return EMPTY_LONG_ARRAY;
            }
        }
        private static final class OfDouble
                extends EmptyNode<Double, double[], DoubleConsumer>
                implements Node.OfDouble {
            OfDouble() { } // Avoid creation of special accessor
            @Override
            public Spliterator.OfDouble spliterator() {
                return Spliterators.emptyDoubleSpliterator();
            }
            @Override
            public double[] asDoubleArray() {
                return EMPTY_DOUBLE_ARRAY;
            }
        }
    }
    /**
     * Node class for a reference array.
     *
     * <p>The node holds a reference to the array; it does not make a copy.
     * {@code curSize} tracks the number of valid leading elements, which lets
     * subclasses fill the array incrementally.
     */
    private static class ArrayNode<T> implements Node<T> {
        final T[] array;
        // Number of valid elements at the front of the array
        int curSize;
        @SuppressWarnings("unchecked")
        ArrayNode(long size, IntFunction<T[]> generator) {
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            this.array = generator.apply((int) size);
            this.curSize = 0;
        }
        ArrayNode(T[] array) {
            this.array = array;
            this.curSize = array.length;
        }
        // Node
        @Override
        public Spliterator<T> spliterator() {
            return Arrays.spliterator(array, 0, curSize);
        }
        @Override
        public void copyInto(T[] dest, int destOffset) {
            System.arraycopy(array, 0, dest, destOffset, curSize);
        }
        @Override
        public T[] asArray(IntFunction<T[]> generator) {
            // Only a completely-filled array may be exposed directly;
            // a partially-filled array indicates an inconsistent state.
            if (array.length == curSize) {
                return array;
            } else {
                throw new IllegalStateException();
            }
        }
        @Override
        public long count() {
            return curSize;
        }
        // Traversable
        @Override
        public void forEach(Consumer<? super T> consumer) {
            for (int i = 0; i < curSize; i++) {
                consumer.accept(array[i]);
            }
        }
        //
        @Override
        public String toString() {
            // First %d is the remaining (unfilled) capacity of the array
            return String.format("ArrayNode[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
    /**
     * Node class wrapping a {@link Collection}.
     *
     * <p>The node holds a reference to the collection; it does not make a
     * copy, so the collection is assumed not to change while the node is in
     * use.
     */
    private static final class CollectionNode<T> implements Node<T> {
        private final Collection<T> c;
        CollectionNode(Collection<T> c) {
            this.c = c;
        }
        // Node
        @Override
        public Spliterator<T> spliterator() {
            return c.stream().spliterator();
        }
        @Override
        public void copyInto(T[] array, int offset) {
            for (T t : c)
                array[offset++] = t;
        }
        @Override
        @SuppressWarnings("unchecked")
        public T[] asArray(IntFunction<T[]> generator) {
            return c.toArray(generator.apply(c.size()));
        }
        @Override
        public long count() {
            return c.size();
        }
        @Override
        public void forEach(Consumer<? super T> consumer) {
            c.forEach(consumer);
        }
        //
        @Override
        public String toString() {
            return String.format("CollectionNode[%d][%s]", c.size(), c);
        }
    }
    /**
     * Node class for an internal node with two or more children.
     *
     * <p>Concatenates a left and a right child node; elements of the left
     * child logically precede elements of the right child.
     */
    static final class ConcNode<T> implements Node<T> {
        private final Node<T> left;
        private final Node<T> right;
        private final long size;
        ConcNode(Node<T> left, Node<T> right) {
            this.left = left;
            this.right = right;
            // The Node count will be required when the Node spliterator is
            // obtained and it is cheaper to aggressively calculate bottom up
            // as the tree is built rather than later on from the top down
            // traversing the tree
            this.size = left.count() + right.count();
        }
        // Node
        @Override
        public Spliterator<T> spliterator() {
            return new Nodes.InternalNodeSpliterator.OfRef<>(this);
        }
        @Override
        public int getChildCount() {
            return 2;
        }
        @Override
        public Node<T> getChild(int i) {
            if (i == 0) return left;
            if (i == 1) return right;
            throw new IndexOutOfBoundsException();
        }
        @Override
        public void copyInto(T[] array, int offset) {
            Objects.requireNonNull(array);
            // Left child occupies [offset, offset + left.count()), right
            // child follows immediately after.
            left.copyInto(array, offset);
            right.copyInto(array, offset + (int) left.count());
        }
        @Override
        public T[] asArray(IntFunction<T[]> generator) {
            T[] array = generator.apply((int) count());
            copyInto(array, 0);
            return array;
        }
        @Override
        public long count() {
            return size;
        }
        @Override
        public void forEach(Consumer<? super T> consumer) {
            left.forEach(consumer);
            right.forEach(consumer);
        }
        @Override
        public String toString() {
            // Only render contents for small trees to keep output readable
            if (count() < 32) {
                return String.format("ConcNode[%s.%s]", left, right);
            } else {
                return String.format("ConcNode[size=%d]", count());
            }
        }
    }
    /**
     * Abstract class for spliterator for all internal node classes.
     *
     * <p>Splitting hands out spliterators over child nodes, left to right.
     * Traversal with {@code tryAdvance} performs a depth-first, left-to-right
     * search for non-empty leaf nodes using an explicit stack, delegating to
     * each leaf node's spliterator in turn.  Once partial traversal has begun
     * this spliterator can no longer be split.
     */
    private static abstract class InternalNodeSpliterator<T,
                                                          S extends Spliterator<T>,
                                                          N extends Node<T>, C>
            implements Spliterator<T> {
        // Node we are pointing to
        // null if full traversal has occurred
        N curNode;
        // next child of curNode to consume
        int curChildIndex;
        // The spliterator of the curNode if that node is last and has no children.
        // This spliterator will be delegated to for splitting and traversing.
        // null if curNode has children
        S lastNodeSpliterator;
        // spliterator used while traversing with tryAdvance
        // null if no partial traversal has occurred
        S tryAdvanceSpliterator;
        // node stack used when traversing to search and find leaf nodes
        // null if no partial traversal has occurred
        Deque<N> tryAdvanceStack;
        InternalNodeSpliterator(N curNode) {
            this.curNode = curNode;
        }
        /**
         * Initiate a stack containing, in left-to-right order, the child nodes
         * covered by this spliterator
         */
        protected final Deque<N> initStack() {
            // Bias size to the case where leaf nodes are close to this node
            // 8 is the minimum initial capacity for the ArrayDeque implementation
            Deque<N> stack = new ArrayDeque<>(8);
            // unchecked cast: children of an N-typed node are assumed to be N
            for (int i = curNode.getChildCount() - 1; i >= curChildIndex; i--)
                stack.addFirst((N) curNode.getChild(i));
            return stack;
        }
        /**
         * Depth first search, in left-to-right order, of the node tree, using
         * an explicit stack, to find the next non-empty leaf node.
         */
        protected final N findNextLeafNode(Deque<N> stack) {
            N n = null;
            while ((n = stack.pollFirst()) != null) {
                if (n.getChildCount() == 0) {
                    // Leaf node; only return it if it has elements
                    if (n.count() > 0)
                        return n;
                } else {
                    // Internal node; push children right-to-left so the
                    // leftmost child is popped first
                    for (int i = n.getChildCount() - 1; i >= 0; i--)
                        stack.addFirst((N) n.getChild(i));
                }
            }
            return null;
        }
        protected final boolean internalTryAdvance(C consumer) {
            if (curNode == null)
                return false;
            if (tryAdvanceSpliterator == null) {
                if (lastNodeSpliterator == null) {
                    // Initiate the node stack
                    tryAdvanceStack = initStack();
                    N leaf = findNextLeafNode(tryAdvanceStack);
                    if (leaf != null)
                        tryAdvanceSpliterator = (S) leaf.spliterator();
                    else {
                        // A non-empty leaf node was not found
                        // No elements to traverse
                        curNode = null;
                        return false;
                    }
                }
                else
                    tryAdvanceSpliterator = lastNodeSpliterator;
            }
            boolean hasNext = tryAdvance(tryAdvanceSpliterator, consumer);
            if (!hasNext) {
                if (lastNodeSpliterator == null) {
                    // Advance to the spliterator of the next non-empty leaf node
                    Node<T> leaf = findNextLeafNode(tryAdvanceStack);
                    if (leaf != null) {
                        tryAdvanceSpliterator = (S) leaf.spliterator();
                        // Since the node is not-empty the spliterator can be advanced
                        return tryAdvance(tryAdvanceSpliterator, consumer);
                    }
                }
                // No more elements to traverse
                curNode = null;
            }
            return hasNext;
        }
        // Advance the given leaf-node spliterator with the shape-specific
        // consumer type; implemented by each specialization below.
        protected abstract boolean tryAdvance(S spliterator, C consumer);
        @Override
        @SuppressWarnings("unchecked")
        public S trySplit() {
            if (curNode == null || tryAdvanceSpliterator != null)
                return null; // Cannot split if fully or partially traversed
            else if (lastNodeSpliterator != null)
                return (S) lastNodeSpliterator.trySplit();
            else if (curChildIndex < curNode.getChildCount() - 1)
                // Hand off the next child's spliterator as the split
                return (S) curNode.getChild(curChildIndex++).spliterator();
            else {
                // Descend into the last remaining child
                curNode = (N) curNode.getChild(curChildIndex);
                if (curNode.getChildCount() == 0) {
                    lastNodeSpliterator = (S) curNode.spliterator();
                    return (S) lastNodeSpliterator.trySplit();
                }
                else {
                    curChildIndex = 0;
                    return (S) curNode.getChild(curChildIndex++).spliterator();
                }
            }
        }
        @Override
        public long estimateSize() {
            if (curNode == null)
                return 0;
            // Will not reflect the effects of partial traversal.
            // This is compliant with the specification
            if (lastNodeSpliterator != null)
                return lastNodeSpliterator.estimateSize();
            else {
                long size = 0;
                for (int i = curChildIndex; i < curNode.getChildCount(); i++)
                    size += curNode.getChild(i).count();
                return size;
            }
        }
        @Override
        public int characteristics() {
            return Spliterator.SIZED;
        }
        private static final class OfRef<T>
                extends InternalNodeSpliterator<T, Spliterator<T>, Node<T>, Consumer<? super T>> {
            OfRef(Node<T> curNode) {
                super(curNode);
            }
            @Override
            public boolean tryAdvance(Consumer<? super T> consumer) {
                return internalTryAdvance(consumer);
            }
            @Override
            protected boolean tryAdvance(Spliterator<T> spliterator,
                                         Consumer<? super T> consumer) {
                return spliterator.tryAdvance(consumer);
            }
            @Override
            public void forEachRemaining(Consumer<? super T> consumer) {
                if (curNode == null)
                    return;
                if (tryAdvanceSpliterator == null) {
                    // No partial traversal yet: visit each leaf node in bulk
                    if (lastNodeSpliterator == null) {
                        Deque<Node<T>> stack = initStack();
                        Node<T> leaf;
                        while ((leaf = findNextLeafNode(stack)) != null) {
                            leaf.forEach(consumer);
                        }
                        curNode = null;
                    }
                    else
                        lastNodeSpliterator.forEachRemaining(consumer);
                }
                else
                    // Partial traversal in progress: continue element-by-element
                    while(tryAdvance(consumer)) { }
            }
        }
        private static final class OfInt
                extends InternalNodeSpliterator<Integer, Spliterator.OfInt, Node.OfInt, IntConsumer>
                implements Spliterator.OfInt {
            OfInt(Node.OfInt cur) {
                super(cur);
            }
            @Override
            public boolean tryAdvance(IntConsumer consumer) {
                return internalTryAdvance(consumer);
            }
            @Override
            protected boolean tryAdvance(Spliterator.OfInt spliterator,
                                         IntConsumer consumer) {
                return spliterator.tryAdvance(consumer);
            }
            @Override
            public void forEachRemaining(IntConsumer consumer) {
                if (curNode == null)
                    return;
                if (tryAdvanceSpliterator == null) {
                    // No partial traversal yet: visit each leaf node in bulk
                    if (lastNodeSpliterator == null) {
                        Deque<Node.OfInt> stack = initStack();
                        Node.OfInt leaf;
                        while ((leaf = findNextLeafNode(stack)) != null) {
                            leaf.forEach(consumer);
                        }
                        curNode = null;
                    }
                    else
                        lastNodeSpliterator.forEachRemaining(consumer);
                }
                else
                    // Partial traversal in progress: continue element-by-element
                    while(tryAdvance(consumer)) { }
            }
        }
        private static final class OfLong
                extends InternalNodeSpliterator<Long, Spliterator.OfLong, Node.OfLong, LongConsumer>
                implements Spliterator.OfLong {
            OfLong(Node.OfLong cur) {
                super(cur);
            }
            @Override
            public boolean tryAdvance(LongConsumer consumer) {
                return internalTryAdvance(consumer);
            }
            @Override
            protected boolean tryAdvance(Spliterator.OfLong spliterator,
                                         LongConsumer consumer) {
                return spliterator.tryAdvance(consumer);
            }
            @Override
            public void forEachRemaining(LongConsumer consumer) {
                if (curNode == null)
                    return;
                if (tryAdvanceSpliterator == null) {
                    // No partial traversal yet: visit each leaf node in bulk
                    if (lastNodeSpliterator == null) {
                        Deque<Node.OfLong> stack = initStack();
                        Node.OfLong leaf;
                        while ((leaf = findNextLeafNode(stack)) != null) {
                            leaf.forEach(consumer);
                        }
                        curNode = null;
                    }
                    else
                        lastNodeSpliterator.forEachRemaining(consumer);
                }
                else
                    // Partial traversal in progress: continue element-by-element
                    while(tryAdvance(consumer)) { }
            }
        }
        private static final class OfDouble
                extends InternalNodeSpliterator<Double, Spliterator.OfDouble, Node.OfDouble, DoubleConsumer>
                implements Spliterator.OfDouble {
            OfDouble(Node.OfDouble cur) {
                super(cur);
            }
            @Override
            public boolean tryAdvance(DoubleConsumer consumer) {
                return internalTryAdvance(consumer);
            }
            @Override
            protected boolean tryAdvance(Spliterator.OfDouble spliterator,
                                         DoubleConsumer consumer) {
                return spliterator.tryAdvance(consumer);
            }
            @Override
            public void forEachRemaining(DoubleConsumer consumer) {
                if (curNode == null)
                    return;
                if (tryAdvanceSpliterator == null) {
                    // No partial traversal yet: visit each leaf node in bulk
                    if (lastNodeSpliterator == null) {
                        Deque<Node.OfDouble> stack = initStack();
                        Node.OfDouble leaf;
                        while ((leaf = findNextLeafNode(stack)) != null) {
                            leaf.forEach(consumer);
                        }
                        curNode = null;
                    }
                    else
                        lastNodeSpliterator.forEachRemaining(consumer);
                }
                else
                    // Partial traversal in progress: continue element-by-element
                    while(tryAdvance(consumer)) { }
            }
        }
    }
    /**
     * Fixed-sized builder class for reference nodes.
     *
     * <p>The builder is its own resulting node: once exactly
     * {@code array.length} elements have been accepted, {@link #build()}
     * returns {@code this}.  Accepting more or fewer elements than the fixed
     * size is an error.
     */
    private static final class FixedNodeBuilder<T>
            extends ArrayNode<T>
            implements Node.Builder<T> {
        FixedNodeBuilder(long size, IntFunction<T[]> generator) {
            super(size, generator);
            assert size < MAX_ARRAY_SIZE;
        }
        @Override
        public Node<T> build() {
            if (curSize < array.length)
                throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
                                                              curSize, array.length));
            return this;
        }
        @Override
        public void begin(long size) {
            if (size != array.length)
                throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
                                                              size, array.length));
            curSize = 0;
        }
        @Override
        public void accept(T t) {
            if (curSize < array.length) {
                array[curSize++] = t;
            } else {
                throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
                                                              array.length));
            }
        }
        @Override
        public void end() {
            if (curSize < array.length)
                throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
                                                              curSize, array.length));
        }
        @Override
        public String toString() {
            return String.format("FixedNodeBuilder[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
    /**
     * Variable-sized builder class for reference nodes.
     *
     * <p>The builder is backed by a {@link SpinedBuffer} and is its own
     * resulting node.  The {@code building} flag is used only for assertion
     * checking that node accessors are not invoked while a begin/end cycle
     * is in progress.
     */
    private static final class SpinedNodeBuilder<T>
            extends SpinedBuffer<T>
            implements Node<T>, Node.Builder<T> {
        // true between begin() and end(); assertion-checking only
        private boolean building = false;
        SpinedNodeBuilder() {} // Avoid creation of special accessor
        @Override
        public Spliterator<T> spliterator() {
            assert !building : "during building";
            return super.spliterator();
        }
        @Override
        public void forEach(Consumer<? super T> consumer) {
            assert !building : "during building";
            super.forEach(consumer);
        }
        //
        @Override
        public void begin(long size) {
            assert !building : "was already building";
            building = true;
            clear();
            ensureCapacity(size);
        }
        @Override
        public void accept(T t) {
            assert building : "not building";
            super.accept(t);
        }
        @Override
        public void end() {
            assert building : "was not building";
            building = false;
            // @@@ check begin(size) and size
        }
        @Override
        public void copyInto(T[] array, int offset) {
            assert !building : "during building";
            super.copyInto(array, offset);
        }
        @Override
        public T[] asArray(IntFunction<T[]> arrayFactory) {
            assert !building : "during building";
            return super.asArray(arrayFactory);
        }
        @Override
        public Node<T> build() {
            assert !building : "during building";
            return this;
        }
    }
    // Shared empty arrays returned by the asXxxArray() methods of the empty
    // primitive node implementations
    private static final int[] EMPTY_INT_ARRAY = new int[0];
    private static final long[] EMPTY_LONG_ARRAY = new long[0];
    private static final double[] EMPTY_DOUBLE_ARRAY = new double[0];
    /**
     * Base class for the primitive conc nodes, concatenating a left and a
     * right child node of the same shape; elements of the left child
     * logically precede elements of the right child.
     */
    private abstract static class AbstractPrimitiveConcNode<E, N extends Node<E>>
            implements Node<E> {
        final N left;
        final N right;
        final long size;
        AbstractPrimitiveConcNode(N left, N right) {
            this.left = left;
            this.right = right;
            // The Node count will be required when the Node spliterator is
            // obtained and it is cheaper to aggressively calculate bottom up as
            // the tree is built rather than later on by traversing the tree
            this.size = left.count() + right.count();
        }
        @Override
        public int getChildCount() {
            return 2;
        }
        @Override
        public N getChild(int i) {
            if (i == 0) return left;
            if (i == 1) return right;
            throw new IndexOutOfBoundsException();
        }
        @Override
        public long count() {
            return size;
        }
        @Override
        public String toString() {
            // Only render contents for small trees to keep output readable
            if (count() < 32)
                return String.format("%s[%s.%s]", this.getClass().getName(), left, right);
            else
                return String.format("%s[size=%d]", this.getClass().getName(), count());
        }
    }
    /**
     * Node class for an int[] array.
     *
     * <p>The node holds a reference to the array; it does not make a copy.
     * {@code curSize} tracks the number of valid leading elements.
     */
    private static class IntArrayNode implements Node.OfInt {
        final int[] array;
        // Number of valid elements at the front of the array
        int curSize;
        IntArrayNode(long size) {
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            this.array = new int[(int) size];
            this.curSize = 0;
        }
        IntArrayNode(int[] array) {
            this.array = array;
            this.curSize = array.length;
        }
        // Node
        @Override
        public Spliterator.OfInt spliterator() {
            return Arrays.spliterator(array, 0, curSize);
        }
        @Override
        public int[] asIntArray() {
            // Expose the backing array directly when completely filled,
            // otherwise return a right-sized copy
            if (array.length == curSize) {
                return array;
            } else {
                return Arrays.copyOf(array, curSize);
            }
        }
        @Override
        public void copyInto(int[] dest, int destOffset) {
            System.arraycopy(array, 0, dest, destOffset, curSize);
        }
        @Override
        public long count() {
            return curSize;
        }
        @Override
        public void forEach(IntConsumer consumer) {
            for (int i = 0; i < curSize; i++) {
                consumer.accept(array[i]);
            }
        }
        @Override
        public String toString() {
            // First %d is the remaining (unfilled) capacity of the array
            return String.format("IntArrayNode[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
    /**
     * Node class for a long[] array.
     *
     * <p>The node holds a reference to the array; it does not make a copy.
     * {@code curSize} tracks the number of valid leading elements.
     */
    private static class LongArrayNode implements Node.OfLong {
        final long[] array;
        // Number of valid elements at the front of the array
        int curSize;
        LongArrayNode(long size) {
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            this.array = new long[(int) size];
            this.curSize = 0;
        }
        LongArrayNode(long[] array) {
            this.array = array;
            this.curSize = array.length;
        }
        @Override
        public Spliterator.OfLong spliterator() {
            return Arrays.spliterator(array, 0, curSize);
        }
        @Override
        public long[] asLongArray() {
            // Expose the backing array directly when completely filled,
            // otherwise return a right-sized copy
            if (array.length == curSize) {
                return array;
            } else {
                return Arrays.copyOf(array, curSize);
            }
        }
        @Override
        public void copyInto(long[] dest, int destOffset) {
            System.arraycopy(array, 0, dest, destOffset, curSize);
        }
        @Override
        public long count() {
            return curSize;
        }
        @Override
        public void forEach(LongConsumer consumer) {
            for (int i = 0; i < curSize; i++) {
                consumer.accept(array[i]);
            }
        }
        @Override
        public String toString() {
            // First %d is the remaining (unfilled) capacity of the array
            return String.format("LongArrayNode[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
    /**
     * Node class for a double[] array.
     *
     * <p>The node holds a reference to the array; it does not make a copy.
     * {@code curSize} tracks the number of valid leading elements.
     */
    private static class DoubleArrayNode implements Node.OfDouble {
        final double[] array;
        // Number of valid elements at the front of the array
        int curSize;
        DoubleArrayNode(long size) {
            if (size >= MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            this.array = new double[(int) size];
            this.curSize = 0;
        }
        DoubleArrayNode(double[] array) {
            this.array = array;
            this.curSize = array.length;
        }
        @Override
        public Spliterator.OfDouble spliterator() {
            return Arrays.spliterator(array, 0, curSize);
        }
        @Override
        public double[] asDoubleArray() {
            // Expose the backing array directly when completely filled,
            // otherwise return a right-sized copy
            if (array.length == curSize) {
                return array;
            } else {
                return Arrays.copyOf(array, curSize);
            }
        }
        @Override
        public void copyInto(double[] dest, int destOffset) {
            System.arraycopy(array, 0, dest, destOffset, curSize);
        }
        @Override
        public long count() {
            return curSize;
        }
        @Override
        public void forEach(DoubleConsumer consumer) {
            for (int i = 0; i < curSize; i++) {
                consumer.accept(array[i]);
            }
        }
        @Override
        public String toString() {
            // First %d is the remaining (unfilled) capacity of the array
            return String.format("DoubleArrayNode[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
    /**
     * Conc node of two int-valued child nodes; elements of the left child
     * logically precede elements of the right child.
     */
    static final class IntConcNode
            extends AbstractPrimitiveConcNode<Integer, Node.OfInt>
            implements Node.OfInt {
        IntConcNode(Node.OfInt left, Node.OfInt right) {
            super(left, right);
        }
        @Override
        public void forEach(IntConsumer consumer) {
            left.forEach(consumer);
            right.forEach(consumer);
        }
        @Override
        public Spliterator.OfInt spliterator() {
            return new InternalNodeSpliterator.OfInt(this);
        }
        @Override
        public void copyInto(int[] array, int offset) {
            // Left child occupies [offset, offset + left.count()), right
            // child follows immediately after
            left.copyInto(array, offset);
            right.copyInto(array, offset + (int) left.count());
        }
        @Override
        public int[] asIntArray() {
            int[] array = new int[(int) count()];
            copyInto(array, 0);
            return array;
        }
    }
    /**
     * Conc node of two long-valued child nodes; elements of the left child
     * logically precede elements of the right child.
     */
    static final class LongConcNode
            extends AbstractPrimitiveConcNode<Long, Node.OfLong>
            implements Node.OfLong {
        LongConcNode(Node.OfLong left, Node.OfLong right) {
            super(left, right);
        }
        @Override
        public void forEach(LongConsumer consumer) {
            left.forEach(consumer);
            right.forEach(consumer);
        }
        @Override
        public Spliterator.OfLong spliterator() {
            return new InternalNodeSpliterator.OfLong(this);
        }
        @Override
        public void copyInto(long[] array, int offset) {
            // Left child occupies [offset, offset + left.count()), right
            // child follows immediately after
            left.copyInto(array, offset);
            right.copyInto(array, offset + (int) left.count());
        }
        @Override
        public long[] asLongArray() {
            long[] array = new long[(int) count()];
            copyInto(array, 0);
            return array;
        }
    }
    /**
     * Conc node of two double-valued child nodes; elements of the left child
     * logically precede elements of the right child.
     */
    static final class DoubleConcNode
            extends AbstractPrimitiveConcNode<Double, Node.OfDouble>
            implements Node.OfDouble {
        DoubleConcNode(Node.OfDouble left, Node.OfDouble right) {
            super(left, right);
        }
        @Override
        public void forEach(DoubleConsumer consumer) {
            left.forEach(consumer);
            right.forEach(consumer);
        }
        @Override
        public Spliterator.OfDouble spliterator() {
            return new InternalNodeSpliterator.OfDouble(this);
        }
        @Override
        public void copyInto(double[] array, int offset) {
            // Left child occupies [offset, offset + left.count()), right
            // child follows immediately after
            left.copyInto(array, offset);
            right.copyInto(array, offset + (int) left.count());
        }
        @Override
        public double[] asDoubleArray() {
            double[] array = new double[(int) count()];
            copyInto(array, 0);
            return array;
        }
    }
    /**
     * Fixed-sized builder class for int nodes.
     *
     * <p>The builder is its own resulting node: once exactly
     * {@code array.length} elements have been accepted, {@link #build()}
     * returns {@code this}.  Accepting more or fewer elements than the fixed
     * size is an error.
     */
    private static final class IntFixedNodeBuilder
            extends IntArrayNode
            implements Node.Builder.OfInt {
        IntFixedNodeBuilder(long size) {
            super(size);
            assert size < MAX_ARRAY_SIZE;
        }
        @Override
        public Node.OfInt build() {
            if (curSize < array.length) {
                throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
                                                              curSize, array.length));
            }
            return this;
        }
        @Override
        public void begin(long size) {
            if (size != array.length) {
                throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
                                                              size, array.length));
            }
            curSize = 0;
        }
        @Override
        public void accept(int i) {
            if (curSize < array.length) {
                array[curSize++] = i;
            } else {
                throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
                                                              array.length));
            }
        }
        @Override
        public void end() {
            if (curSize < array.length) {
                throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
                                                              curSize, array.length));
            }
        }
        @Override
        public String toString() {
            return String.format("IntFixedNodeBuilder[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
    /**
     * Fixed-sized builder class for long nodes.
     *
     * <p>The builder is its own resulting node: once exactly
     * {@code array.length} elements have been accepted, {@link #build()}
     * returns {@code this}.  Accepting more or fewer elements than the fixed
     * size is an error.
     */
    private static final class LongFixedNodeBuilder
            extends LongArrayNode
            implements Node.Builder.OfLong {
        LongFixedNodeBuilder(long size) {
            super(size);
            assert size < MAX_ARRAY_SIZE;
        }
        @Override
        public Node.OfLong build() {
            if (curSize < array.length) {
                throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
                                                              curSize, array.length));
            }
            return this;
        }
        @Override
        public void begin(long size) {
            if (size != array.length) {
                throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
                                                              size, array.length));
            }
            curSize = 0;
        }
        @Override
        public void accept(long i) {
            if (curSize < array.length) {
                array[curSize++] = i;
            } else {
                throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
                                                              array.length));
            }
        }
        @Override
        public void end() {
            if (curSize < array.length) {
                throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
                                                              curSize, array.length));
            }
        }
        @Override
        public String toString() {
            return String.format("LongFixedNodeBuilder[%d][%s]",
                                 array.length - curSize, Arrays.toString(array));
        }
    }
/**
 * Fixed-size node builder for {@code double} elements.  Exactly
 * {@code size} values must be accepted between {@code begin} and
 * {@code end}; afterwards the builder reports itself as the built node.
 */
private static final class DoubleFixedNodeBuilder
        extends DoubleArrayNode
        implements Node.Builder.OfDouble {

    DoubleFixedNodeBuilder(long size) {
        super(size);
        assert size < MAX_ARRAY_SIZE;
    }

    @Override
    public Node.OfDouble build() {
        // The node is usable only once every slot has been filled.
        if (curSize >= array.length)
            return this;
        throw new IllegalStateException(String.format("Current size %d is less than fixed size %d",
                                                      curSize, array.length));
    }

    @Override
    public void begin(long size) {
        // The announced size must match the fixed capacity exactly.
        if (size != array.length)
            throw new IllegalStateException(String.format("Begin size %d is not equal to fixed size %d",
                                                          size, array.length));
        curSize = 0;
    }

    @Override
    public void accept(double i) {
        if (curSize >= array.length)
            throw new IllegalStateException(String.format("Accept exceeded fixed size of %d",
                                                          array.length));
        array[curSize++] = i;
    }

    @Override
    public void end() {
        if (curSize >= array.length)
            return;
        throw new IllegalStateException(String.format("End size %d is less than fixed size %d",
                                                      curSize, array.length));
    }

    @Override
    public String toString() {
        return String.format("DoubleFixedNodeBuilder[%d][%s]",
                             array.length - curSize, Arrays.toString(array));
    }
}
/**
 * Variable-size node builder for {@code int} elements backed by a
 * spined buffer.  The {@code building} flag (checked only under
 * assertions, so there is no cost with assertions disabled) guards
 * against using the node view while elements are still being accepted.
 */
private static final class IntSpinedNodeBuilder
        extends SpinedBuffer.OfInt
        implements Node.OfInt, Node.Builder.OfInt {
    private boolean building = false;

    IntSpinedNodeBuilder() {} // Avoid creation of special accessor

    @Override
    public Spliterator.OfInt spliterator() {
        assert !building : "during building";
        return super.spliterator();
    }

    @Override
    public void forEach(IntConsumer consumer) {
        assert !building : "during building";
        super.forEach(consumer);
    }

    // Node.Builder implementation
    @Override
    public void begin(long size) {
        assert !building : "was already building";
        building = true;
        clear();
        // Pre-size the spine for the expected element count when known.
        ensureCapacity(size);
    }

    @Override
    public void accept(int i) {
        assert building : "not building";
        super.accept(i);
    }

    @Override
    public void end() {
        assert building : "was not building";
        building = false;
        // @@@ check begin(size) and size
    }

    @Override
    public void copyInto(int[] array, int offset) throws IndexOutOfBoundsException {
        assert !building : "during building";
        super.copyInto(array, offset);
    }

    @Override
    public int[] asIntArray() {
        assert !building : "during building";
        return super.asIntArray();
    }

    @Override
    public Node.OfInt build() {
        // The buffer itself is the node; no copy is made.
        assert !building : "during building";
        return this;
    }
}
/**
 * Variable-size node builder for {@code long} elements backed by a
 * spined buffer.  The {@code building} flag (checked only under
 * assertions) guards against using the node view while elements are
 * still being accepted.
 */
private static final class LongSpinedNodeBuilder
        extends SpinedBuffer.OfLong
        implements Node.OfLong, Node.Builder.OfLong {
    private boolean building = false;

    LongSpinedNodeBuilder() {} // Avoid creation of special accessor

    @Override
    public Spliterator.OfLong spliterator() {
        assert !building : "during building";
        return super.spliterator();
    }

    @Override
    public void forEach(LongConsumer consumer) {
        assert !building : "during building";
        super.forEach(consumer);
    }

    // Node.Builder implementation
    @Override
    public void begin(long size) {
        assert !building : "was already building";
        building = true;
        clear();
        // Pre-size the spine for the expected element count when known.
        ensureCapacity(size);
    }

    @Override
    public void accept(long i) {
        assert building : "not building";
        super.accept(i);
    }

    @Override
    public void end() {
        assert building : "was not building";
        building = false;
        // @@@ check begin(size) and size
    }

    @Override
    public void copyInto(long[] array, int offset) {
        assert !building : "during building";
        super.copyInto(array, offset);
    }

    @Override
    public long[] asLongArray() {
        assert !building : "during building";
        return super.asLongArray();
    }

    @Override
    public Node.OfLong build() {
        // The buffer itself is the node; no copy is made.
        assert !building : "during building";
        return this;
    }
}
/**
 * Variable-size node builder for {@code double} elements backed by a
 * spined buffer.  The {@code building} flag (checked only under
 * assertions) guards against using the node view while elements are
 * still being accepted.
 */
private static final class DoubleSpinedNodeBuilder
        extends SpinedBuffer.OfDouble
        implements Node.OfDouble, Node.Builder.OfDouble {
    private boolean building = false;

    DoubleSpinedNodeBuilder() {} // Avoid creation of special accessor

    @Override
    public Spliterator.OfDouble spliterator() {
        assert !building : "during building";
        return super.spliterator();
    }

    @Override
    public void forEach(DoubleConsumer consumer) {
        assert !building : "during building";
        super.forEach(consumer);
    }

    // Node.Builder implementation
    @Override
    public void begin(long size) {
        assert !building : "was already building";
        building = true;
        clear();
        // Pre-size the spine for the expected element count when known.
        ensureCapacity(size);
    }

    @Override
    public void accept(double i) {
        assert building : "not building";
        super.accept(i);
    }

    @Override
    public void end() {
        assert building : "was not building";
        building = false;
        // @@@ check begin(size) and size
    }

    @Override
    public void copyInto(double[] array, int offset) {
        assert !building : "during building";
        super.copyInto(array, offset);
    }

    @Override
    public double[] asDoubleArray() {
        assert !building : "during building";
        return super.asDoubleArray();
    }

    @Override
    public Node.OfDouble build() {
        // The buffer itself is the node; no copy is made.
        assert !building : "during building";
        return this;
    }
}
/**
 * Fork-join task that decomposes a SUBSIZED spliterator and writes the
 * pipeline's output directly into a pre-sized destination array, each
 * leaf writing its own disjoint [offset, offset + length) slice.  The
 * task itself serves as the {@code Sink} for its leaf computation.
 */
private static abstract class SizedCollectorTask<P_IN, P_OUT, T_SINK extends Sink<P_OUT>,
                                                 K extends SizedCollectorTask<P_IN, P_OUT, T_SINK, K>>
        extends CountedCompleter<Void>
        implements Sink<P_OUT> {
    protected final Spliterator<P_IN> spliterator;
    protected final PipelineHelper<P_OUT> helper;
    protected final long targetSize;   // leaf-size threshold for splitting
    protected long offset;             // start of this task's array slice
    protected long length;             // length of this task's array slice
    // For Sink implementation
    protected int index, fence;

    // Root constructor: covers the whole array.
    SizedCollectorTask(Spliterator<P_IN> spliterator,
                       PipelineHelper<P_OUT> helper,
                       int arrayLength) {
        assert spliterator.hasCharacteristics(Spliterator.SUBSIZED);
        this.spliterator = spliterator;
        this.helper = helper;
        this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize());
        this.offset = 0;
        this.length = arrayLength;
    }

    // Child constructor: covers a sub-slice of the parent's range.
    SizedCollectorTask(K parent, Spliterator<P_IN> spliterator,
                       long offset, long length, int arrayLength) {
        super(parent);
        assert spliterator.hasCharacteristics(Spliterator.SUBSIZED);
        this.spliterator = spliterator;
        this.helper = parent.helper;
        this.targetSize = parent.targetSize;
        this.offset = offset;
        this.length = length;
        // Reject slices that fall outside the destination array.
        if (offset < 0 || length < 0 || (offset + length - 1 >= arrayLength)) {
            throw new IllegalArgumentException(
                    String.format("offset and length interval [%d, %d + %d) is not within array size interval [0, %d)",
                                  offset, offset, length, arrayLength));
        }
    }

    @Override
    public void compute() {
        // Iterative divide-and-conquer: fork the left split, continue
        // looping on the right remainder until it is leaf-sized.
        SizedCollectorTask<P_IN, P_OUT, T_SINK, K> task = this;
        while (true) {
            Spliterator<P_IN> leftSplit;
            if (!AbstractTask.suggestSplit(task.spliterator, task.targetSize)
                || ((leftSplit = task.spliterator.trySplit()) == null)) {
                if (task.offset + task.length >= MAX_ARRAY_SIZE)
                    throw new IllegalArgumentException("Stream size exceeds max array size");
                // Unchecked cast; safe because concrete subclasses use
                // themselves as the T_SINK type argument.
                T_SINK sink = (T_SINK) task;
                task.helper.wrapAndCopyInto(sink, task.spliterator);
                task.propagateCompletion();
                return;
            }
            else {
                task.setPendingCount(1);
                // SUBSIZED guarantees the right half is exactly the
                // remainder, so slice offsets can be computed up front.
                long leftSplitSize = leftSplit.estimateSize();
                task.makeChild(leftSplit, task.offset, leftSplitSize).fork();
                task = task.makeChild(task.spliterator, task.offset + leftSplitSize,
                                      task.length - leftSplitSize);
            }
        }
    }

    /** Creates a child task covering the given slice of the array. */
    abstract K makeChild(Spliterator<P_IN> spliterator, long offset, long size);

    @Override
    public void begin(long size) {
        if(size > length)
            throw new IllegalStateException("size passed to Sink.begin exceeds array length");
        // Casts are safe: offset/length were validated against an int
        // array length in the constructor.
        index = (int) offset;
        fence = (int) offset + (int) length;
    }

    /** Reference specialization; writes into a {@code P_OUT[]}. */
    static final class OfRef<P_IN, P_OUT>
            extends SizedCollectorTask<P_IN, P_OUT, Sink<P_OUT>, OfRef<P_IN, P_OUT>>
            implements Sink<P_OUT> {
        private final P_OUT[] array;

        OfRef(Spliterator<P_IN> spliterator, PipelineHelper<P_OUT> helper, P_OUT[] array) {
            super(spliterator, helper, array.length);
            this.array = array;
        }

        OfRef(OfRef<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator,
              long offset, long length) {
            super(parent, spliterator, offset, length, parent.array.length);
            this.array = parent.array;
        }

        @Override
        OfRef<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator,
                                     long offset, long size) {
            return new OfRef<>(this, spliterator, offset, size);
        }

        @Override
        public void accept(P_OUT value) {
            // Guards against a pipeline that produces more elements
            // than the slice was sized for.
            if (index >= fence) {
                throw new IndexOutOfBoundsException(Integer.toString(index));
            }
            array[index++] = value;
        }
    }

    /** {@code int} specialization; writes into an {@code int[]}. */
    static final class OfInt<P_IN>
            extends SizedCollectorTask<P_IN, Integer, Sink.OfInt, OfInt<P_IN>>
            implements Sink.OfInt {
        private final int[] array;

        OfInt(Spliterator<P_IN> spliterator, PipelineHelper<Integer> helper, int[] array) {
            super(spliterator, helper, array.length);
            this.array = array;
        }

        OfInt(SizedCollectorTask.OfInt<P_IN> parent, Spliterator<P_IN> spliterator,
              long offset, long length) {
            super(parent, spliterator, offset, length, parent.array.length);
            this.array = parent.array;
        }

        @Override
        SizedCollectorTask.OfInt<P_IN> makeChild(Spliterator<P_IN> spliterator,
                                                 long offset, long size) {
            return new SizedCollectorTask.OfInt<>(this, spliterator, offset, size);
        }

        @Override
        public void accept(int value) {
            if (index >= fence) {
                throw new IndexOutOfBoundsException(Integer.toString(index));
            }
            array[index++] = value;
        }
    }

    /** {@code long} specialization; writes into a {@code long[]}. */
    static final class OfLong<P_IN>
            extends SizedCollectorTask<P_IN, Long, Sink.OfLong, OfLong<P_IN>>
            implements Sink.OfLong {
        private final long[] array;

        OfLong(Spliterator<P_IN> spliterator, PipelineHelper<Long> helper, long[] array) {
            super(spliterator, helper, array.length);
            this.array = array;
        }

        OfLong(SizedCollectorTask.OfLong<P_IN> parent, Spliterator<P_IN> spliterator,
               long offset, long length) {
            super(parent, spliterator, offset, length, parent.array.length);
            this.array = parent.array;
        }

        @Override
        SizedCollectorTask.OfLong<P_IN> makeChild(Spliterator<P_IN> spliterator,
                                                  long offset, long size) {
            return new SizedCollectorTask.OfLong<>(this, spliterator, offset, size);
        }

        @Override
        public void accept(long value) {
            if (index >= fence) {
                throw new IndexOutOfBoundsException(Integer.toString(index));
            }
            array[index++] = value;
        }
    }

    /** {@code double} specialization; writes into a {@code double[]}. */
    static final class OfDouble<P_IN>
            extends SizedCollectorTask<P_IN, Double, Sink.OfDouble, OfDouble<P_IN>>
            implements Sink.OfDouble {
        private final double[] array;

        OfDouble(Spliterator<P_IN> spliterator, PipelineHelper<Double> helper, double[] array) {
            super(spliterator, helper, array.length);
            this.array = array;
        }

        OfDouble(SizedCollectorTask.OfDouble<P_IN> parent, Spliterator<P_IN> spliterator,
                 long offset, long length) {
            super(parent, spliterator, offset, length, parent.array.length);
            this.array = parent.array;
        }

        @Override
        SizedCollectorTask.OfDouble<P_IN> makeChild(Spliterator<P_IN> spliterator,
                                                    long offset, long size) {
            return new SizedCollectorTask.OfDouble<>(this, spliterator, offset, size);
        }

        @Override
        public void accept(double value) {
            if (index >= fence) {
                throw new IndexOutOfBoundsException(Integer.toString(index));
            }
            array[index++] = value;
        }
    }
}
/**
 * Fork-join task that flattens a tree of {@code Node}s into a single
 * destination array.  Each leaf node copies itself into the array at
 * the offset computed from the counts of its preceding siblings.
 */
private static abstract class ToArrayTask<T, T_NODE extends Node<T>,
                                          K extends ToArrayTask<T, T_NODE, K>>
        extends CountedCompleter<Void> {
    protected final T_NODE node;
    protected final int offset;   // where this node's elements start in the array

    ToArrayTask(T_NODE node, int offset) {
        this.node = node;
        this.offset = offset;
    }

    ToArrayTask(K parent, T_NODE node, int offset) {
        super(parent);
        this.node = node;
        this.offset = offset;
    }

    /** Copies this (leaf) node's contents into the array at {@code offset}. */
    abstract void copyNodeToArray();

    /** Creates a child task for the given child node and array offset. */
    abstract K makeChild(int childIndex, int offset);

    @Override
    public void compute() {
        // Iterative descent: fork all children but the last, then loop
        // on the last child in this thread.
        ToArrayTask<T, T_NODE, K> task = this;
        while (true) {
            if (task.node.getChildCount() == 0) {
                task.copyNodeToArray();
                task.propagateCompletion();
                return;
            }
            else {
                task.setPendingCount(task.node.getChildCount() - 1);
                // Accumulate sibling counts to compute each child's offset.
                int size = 0;
                int i = 0;
                for (;i < task.node.getChildCount() - 1; i++) {
                    K leftTask = task.makeChild(i, task.offset + size);
                    size += leftTask.node.count();
                    leftTask.fork();
                }
                task = task.makeChild(i, task.offset + size);
            }
        }
    }

    /** Reference specialization targeting a {@code T[]}. */
    private static final class OfRef<T>
            extends ToArrayTask<T, Node<T>, OfRef<T>> {
        private final T[] array;

        private OfRef(Node<T> node, T[] array, int offset) {
            super(node, offset);
            this.array = array;
        }

        private OfRef(OfRef<T> parent, Node<T> node, int offset) {
            super(parent, node, offset);
            this.array = parent.array;
        }

        @Override
        OfRef<T> makeChild(int childIndex, int offset) {
            return new OfRef<>(this, node.getChild(childIndex), offset);
        }

        @Override
        void copyNodeToArray() {
            node.copyInto(array, offset);
        }
    }

    /** {@code int} specialization targeting an {@code int[]}. */
    private static final class OfInt
            extends ToArrayTask<Integer, Node.OfInt, OfInt> {
        private final int[] array;

        private OfInt(Node.OfInt node, int[] array, int offset) {
            super(node, offset);
            this.array = array;
        }

        private OfInt(OfInt parent, Node.OfInt node, int offset) {
            super(parent, node, offset);
            this.array = parent.array;
        }

        @Override
        OfInt makeChild(int childIndex, int offset) {
            return new OfInt(this, node.getChild(childIndex), offset);
        }

        @Override
        void copyNodeToArray() {
            node.copyInto(array, offset);
        }
    }

    /** {@code long} specialization targeting a {@code long[]}. */
    private static final class OfLong
            extends ToArrayTask<Long, Node.OfLong, OfLong> {
        private final long[] array;

        private OfLong(Node.OfLong node, long[] array, int offset) {
            super(node, offset);
            this.array = array;
        }

        private OfLong(OfLong parent, Node.OfLong node, int offset) {
            super(parent, node, offset);
            this.array = parent.array;
        }

        @Override
        OfLong makeChild(int childIndex, int offset) {
            return new OfLong(this, node.getChild(childIndex), offset);
        }

        @Override
        void copyNodeToArray() {
            node.copyInto(array, offset);
        }
    }

    /** {@code double} specialization targeting a {@code double[]}. */
    private static final class OfDouble
            extends ToArrayTask<Double, Node.OfDouble, OfDouble> {
        private final double[] array;

        private OfDouble(Node.OfDouble node, double[] array, int offset) {
            super(node, offset);
            this.array = array;
        }

        private OfDouble(OfDouble parent, Node.OfDouble node, int offset) {
            super(parent, node, offset);
            this.array = parent.array;
        }

        @Override
        OfDouble makeChild(int childIndex, int offset) {
            return new OfDouble(this, node.getChild(childIndex), offset);
        }

        @Override
        void copyNodeToArray() {
            node.copyInto(array, offset);
        }
    }
}
/**
 * Fork-join task that collects the output of a pipeline into a
 * conc-tree of {@code Node}s: each leaf collects into a node builder,
 * and completed sibling results are fused into a {@code ConcNode}.
 */
private static final class CollectorTask<P_IN, P_OUT>
        extends AbstractTask<P_IN, P_OUT, Node<P_OUT>, CollectorTask<P_IN, P_OUT>> {
    private final PipelineHelper<P_OUT> helper;
    private final IntFunction<P_OUT[]> generator;   // array factory for sized builders

    CollectorTask(PipelineHelper<P_OUT> helper,
                  IntFunction<P_OUT[]> generator,
                  Spliterator<P_IN> spliterator) {
        super(helper, spliterator);
        this.helper = helper;
        this.generator = generator;
    }

    CollectorTask(CollectorTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
        super(parent, spliterator);
        helper = parent.helper;
        generator = parent.generator;
    }

    @Override
    protected CollectorTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
        return new CollectorTask<>(this, spliterator);
    }

    @Override
    protected Node<P_OUT> doLeaf() {
        // A fixed-size builder is used when the exact output size is
        // known, otherwise a growable (spined) builder.
        Node.Builder<P_OUT> builder
                = builder(helper.exactOutputSizeIfKnown(spliterator),
                          generator);
        return helper.wrapAndCopyInto(builder, spliterator).build();
    }

    @Override
    public void onCompletion(CountedCompleter<?> caller) {
        if (!isLeaf()) {
            // Fuse the two completed children into an internal conc node.
            setLocalResult(new ConcNode<>(leftChild.getLocalResult(), rightChild.getLocalResult()));
        }
        super.onCompletion(caller);
    }
}
/**
 * Fork-join task that collects the output of an {@code int} pipeline
 * into a conc-tree of {@code Node.OfInt}s.
 */
private static final class IntCollectorTask<P_IN>
        extends AbstractTask<P_IN, Integer, Node.OfInt, IntCollectorTask<P_IN>> {
    private final PipelineHelper<Integer> helper;

    IntCollectorTask(PipelineHelper<Integer> helper, Spliterator<P_IN> spliterator) {
        super(helper, spliterator);
        this.helper = helper;
    }

    IntCollectorTask(IntCollectorTask<P_IN> parent, Spliterator<P_IN> spliterator) {
        super(parent, spliterator);
        helper = parent.helper;
    }

    @Override
    protected IntCollectorTask<P_IN> makeChild(Spliterator<P_IN> spliterator) {
        return new IntCollectorTask<>(this, spliterator);
    }

    @Override
    protected Node.OfInt doLeaf() {
        // Fixed-size builder when the output size is known, else spined.
        Node.Builder.OfInt builder = intBuilder(helper.exactOutputSizeIfKnown(spliterator));
        return helper.wrapAndCopyInto(builder, spliterator).build();
    }

    @Override
    public void onCompletion(CountedCompleter<?> caller) {
        if (!isLeaf()) {
            // Fuse the two completed children into an internal conc node.
            setLocalResult(new IntConcNode(leftChild.getLocalResult(), rightChild.getLocalResult()));
        }
        super.onCompletion(caller);
    }
}
/**
 * Fork-join task that collects the output of a {@code long} pipeline
 * into a conc-tree of {@code Node.OfLong}s.
 */
private static final class LongCollectorTask<P_IN>
        extends AbstractTask<P_IN, Long, Node.OfLong, LongCollectorTask<P_IN>> {
    private final PipelineHelper<Long> helper;

    LongCollectorTask(PipelineHelper<Long> helper, Spliterator<P_IN> spliterator) {
        super(helper, spliterator);
        this.helper = helper;
    }

    LongCollectorTask(LongCollectorTask<P_IN> parent, Spliterator<P_IN> spliterator) {
        super(parent, spliterator);
        helper = parent.helper;
    }

    @Override
    protected LongCollectorTask<P_IN> makeChild(Spliterator<P_IN> spliterator) {
        return new LongCollectorTask<>(this, spliterator);
    }

    @Override
    protected Node.OfLong doLeaf() {
        // Fixed-size builder when the output size is known, else spined.
        Node.Builder.OfLong builder = longBuilder(helper.exactOutputSizeIfKnown(spliterator));
        return helper.wrapAndCopyInto(builder, spliterator).build();
    }

    @Override
    public void onCompletion(CountedCompleter<?> caller) {
        if (!isLeaf()) {
            // Fuse the two completed children into an internal conc node.
            setLocalResult(new LongConcNode(leftChild.getLocalResult(), rightChild.getLocalResult()));
        }
        super.onCompletion(caller);
    }
}
/**
 * Fork-join task that collects the output of a {@code double} pipeline
 * into a conc-tree of {@code Node.OfDouble}s.
 */
private static final class DoubleCollectorTask<P_IN>
        extends AbstractTask<P_IN, Double, Node.OfDouble, DoubleCollectorTask<P_IN>> {
    private final PipelineHelper<Double> helper;

    DoubleCollectorTask(PipelineHelper<Double> helper, Spliterator<P_IN> spliterator) {
        super(helper, spliterator);
        this.helper = helper;
    }

    DoubleCollectorTask(DoubleCollectorTask<P_IN> parent, Spliterator<P_IN> spliterator) {
        super(parent, spliterator);
        helper = parent.helper;
    }

    @Override
    protected DoubleCollectorTask<P_IN> makeChild(Spliterator<P_IN> spliterator) {
        return new DoubleCollectorTask<>(this, spliterator);
    }

    @Override
    protected Node.OfDouble doLeaf() {
        // Fixed-size builder when the output size is known, else spined.
        Node.Builder.OfDouble builder
                = doubleBuilder(helper.exactOutputSizeIfKnown(spliterator));
        return helper.wrapAndCopyInto(builder, spliterator).build();
    }

    @Override
    public void onCompletion(CountedCompleter<?> caller) {
        if (!isLeaf()) {
            // Fuse the two completed children into an internal conc node.
            setLocalResult(new DoubleConcNode(leftChild.getLocalResult(), rightChild.getLocalResult()));
        }
        super.onCompletion(caller);
    }
}
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.OptionalLong;
import java.util.Spliterator;
import java.util.concurrent.CountedCompleter;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.DoubleBinaryOperator;
import java.util.function.IntBinaryOperator;
import java.util.function.LongBinaryOperator;
import java.util.function.ObjDoubleConsumer;
import java.util.function.ObjIntConsumer;
import java.util.function.ObjLongConsumer;
import java.util.function.Supplier;
/**
 * Factory for creating instances of {@code TerminalOp} that implement
 * reductions.
 *
 * @since 1.8
 */
final class ReduceOps {
private ReduceOps() { }
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * reference values.
 *
 * @param <T> the type of the input elements
 * @param <U> the type of the result
 * @param seed the identity element for the reduction
 * @param reducer the accumulating function that incorporates an additional
 *        input element into the result
 * @param combiner the combining function that combines two intermediate
 *        results
 * @return a {@code TerminalOp} implementing the reduction
 */
public static <T, U> TerminalOp<T, U>
makeRef(U seed, BiFunction<U, ? super T, U> reducer, BinaryOperator<U> combiner) {
    // Note: seed is deliberately not null-checked; a null identity may
    // be legitimate for some reductions.
    Objects.requireNonNull(reducer);
    Objects.requireNonNull(combiner);
    // Box supplies the mutable 'state' field holding the accumulation.
    class ReducingSink extends Box<U> implements AccumulatingSink<T, U, ReducingSink> {
        @Override
        public void begin(long size) {
            // Each leaf sink starts from the identity value.
            state = seed;
        }
        @Override
        public void accept(T t) {
            state = reducer.apply(state, t);
        }
        @Override
        public void combine(ReducingSink other) {
            state = combiner.apply(state, other.state);
        }
    }
    return new ReduceOp<T, U, ReducingSink>(StreamShape.REFERENCE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * reference values producing an optional reference result.
 *
 * @param <T> The type of the input elements, and the type of the result
 * @param operator The reducing function
 * @return A {@code TerminalOp} implementing the reduction
 */
public static <T> TerminalOp<T, Optional<T>>
makeRef(BinaryOperator<T> operator) {
    Objects.requireNonNull(operator);
    class ReducingSink
            implements AccumulatingSink<T, Optional<T>, ReducingSink> {
        private boolean empty;   // true until the first element is seen
        private T state;

        @Override
        public void begin(long size) {
            empty = true;
            state = null;
        }

        @Override
        public void accept(T t) {
            if (empty) {
                // The first element becomes the initial accumulation state.
                empty = false;
                state = t;
            } else {
                state = operator.apply(state, t);
            }
        }

        @Override
        public Optional<T> get() {
            return empty ? Optional.empty() : Optional.of(state);
        }

        @Override
        public void combine(ReducingSink other) {
            // An empty sibling contributes nothing to the reduction.
            if (!other.empty)
                accept(other.state);
        }
    }
    return new ReduceOp<T, Optional<T>, ReducingSink>(StreamShape.REFERENCE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a mutable reduce on
 * reference values.
 *
 * @param <T> the type of the input elements
 * @param <R> the type of the result
 * @param collector a {@code Collector} defining the reduction
 * @return a {@code ReduceOp} implementing the reduction
 */
public static <T,R> TerminalOp<T, R>
makeRef(Collector<? super T,R> collector) {
    // Extract the collector's components once, up front; also serves as
    // the null check on the collector itself.
    Supplier<R> supplier = Objects.requireNonNull(collector).resultSupplier();
    BiFunction<R, ? super T, R> accumulator = collector.accumulator();
    BinaryOperator<R> combiner = collector.combiner();
    class ReducingSink extends Box<R>
            implements AccumulatingSink<T, R, ReducingSink> {
        @Override
        public void begin(long size) {
            state = supplier.get();
        }
        @Override
        public void accept(T t) {
            // The accumulator may mutate 'state' in place and return it,
            // or return a new result; only store when it differs.
            R newResult = accumulator.apply(state, t);
            if (state != newResult)
                state = newResult;
        }
        @Override
        public void combine(ReducingSink other) {
            state = combiner.apply(state, other.state);
        }
    }
    return new ReduceOp<T, R, ReducingSink>(StreamShape.REFERENCE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
        @Override
        public int getOpFlags() {
            // Propagate the collector's UNORDERED characteristic to the
            // pipeline so upstream ordering constraints can be dropped.
            return collector.characteristics().contains(Collector.Characteristics.UNORDERED)
                   ? StreamOpFlag.NOT_ORDERED
                   : 0;
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a mutable reduce on
 * reference values.
 *
 * @param <T> the type of the input elements
 * @param <R> the type of the result
 * @param seedFactory a factory to produce a new base accumulator
 * @param accumulator a function to incorporate an element into an
 *        accumulator
 * @param reducer a function to combine an accumulator into another
 * @return a {@code TerminalOp} implementing the reduction
 */
public static <T, R> TerminalOp<T, R>
makeRef(Supplier<R> seedFactory,
        BiConsumer<R, ? super T> accumulator,
        BiConsumer<R,R> reducer) {
    Objects.requireNonNull(seedFactory);
    Objects.requireNonNull(accumulator);
    Objects.requireNonNull(reducer);
    // Box supplies the mutable 'state' field holding the accumulator.
    class ReducingSink extends Box<R>
            implements AccumulatingSink<T, R, ReducingSink> {
        @Override
        public void begin(long size) {
            // Every leaf gets its own fresh accumulator instance.
            state = seedFactory.get();
        }
        @Override
        public void accept(T t) {
            accumulator.accept(state, t);
        }
        @Override
        public void combine(ReducingSink other) {
            reducer.accept(state, other.state);
        }
    }
    return new ReduceOp<T, R, ReducingSink>(StreamShape.REFERENCE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * {@code int} values.
 *
 * @param identity the identity for the combining function
 * @param operator the combining function
 * @return a {@code TerminalOp} implementing the reduction
 */
public static TerminalOp<Integer, Integer>
makeInt(int identity, IntBinaryOperator operator) {
    Objects.requireNonNull(operator);
    // Primitive state avoids boxing on the accumulation hot path.
    class ReducingSink
            implements AccumulatingSink<Integer, Integer, ReducingSink>, Sink.OfInt {
        private int state;
        @Override
        public void begin(long size) {
            state = identity;
        }
        @Override
        public void accept(int t) {
            state = operator.applyAsInt(state, t);
        }
        @Override
        public Integer get() {
            return state;
        }
        @Override
        public void combine(ReducingSink other) {
            accept(other.state);
        }
    }
    return new ReduceOp<Integer, Integer, ReducingSink>(StreamShape.INT_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * {@code int} values, producing an optional integer result.
 *
 * @param operator the combining function
 * @return a {@code TerminalOp} implementing the reduction
 */
public static TerminalOp<Integer, OptionalInt>
makeInt(IntBinaryOperator operator) {
    Objects.requireNonNull(operator);
    class ReducingSink
            implements AccumulatingSink<Integer, OptionalInt, ReducingSink>, Sink.OfInt {
        private boolean empty;   // true until the first element is seen
        private int state;

        @Override
        public void begin(long size) {
            empty = true;
            state = 0;
        }

        @Override
        public void accept(int t) {
            if (empty) {
                // The first element becomes the initial accumulation state.
                empty = false;
                state = t;
            }
            else {
                state = operator.applyAsInt(state, t);
            }
        }

        @Override
        public OptionalInt get() {
            return empty ? OptionalInt.empty() : OptionalInt.of(state);
        }

        @Override
        public void combine(ReducingSink other) {
            // An empty sibling contributes nothing to the reduction.
            if (!other.empty)
                accept(other.state);
        }
    }
    return new ReduceOp<Integer, OptionalInt, ReducingSink>(StreamShape.INT_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a mutable reduce on
 * {@code int} values.
 *
 * @param <R> The type of the result
 * @param supplier a factory to produce a new accumulator of the result type
 * @param accumulator a function to incorporate an int into an
 *        accumulator
 * @param combiner a function to combine an accumulator into another
 * @return A {@code ReduceOp} implementing the reduction
 */
public static <R> TerminalOp<Integer, R>
makeInt(Supplier<R> supplier,
        ObjIntConsumer<R> accumulator,
        BinaryOperator<R> combiner) {
    Objects.requireNonNull(supplier);
    Objects.requireNonNull(accumulator);
    Objects.requireNonNull(combiner);
    // Box supplies the mutable 'state' field holding the accumulator.
    class ReducingSink extends Box<R>
            implements AccumulatingSink<Integer, R, ReducingSink>, Sink.OfInt {
        @Override
        public void begin(long size) {
            // Every leaf gets its own fresh accumulator instance.
            state = supplier.get();
        }
        @Override
        public void accept(int t) {
            accumulator.accept(state, t);
        }
        @Override
        public void combine(ReducingSink other) {
            state = combiner.apply(state, other.state);
        }
    }
    return new ReduceOp<Integer, R, ReducingSink>(StreamShape.INT_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * {@code long} values.
 *
 * @param identity the identity for the combining function
 * @param operator the combining function
 * @return a {@code TerminalOp} implementing the reduction
 */
public static TerminalOp<Long, Long>
makeLong(long identity, LongBinaryOperator operator) {
    Objects.requireNonNull(operator);
    // Primitive state avoids boxing on the accumulation hot path.
    class ReducingSink
            implements AccumulatingSink<Long, Long, ReducingSink>, Sink.OfLong {
        private long state;
        @Override
        public void begin(long size) {
            state = identity;
        }
        @Override
        public void accept(long t) {
            state = operator.applyAsLong(state, t);
        }
        @Override
        public Long get() {
            return state;
        }
        @Override
        public void combine(ReducingSink other) {
            accept(other.state);
        }
    }
    return new ReduceOp<Long, Long, ReducingSink>(StreamShape.LONG_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * {@code long} values, producing an optional long result.
 *
 * @param operator the combining function
 * @return a {@code TerminalOp} implementing the reduction
 */
public static TerminalOp<Long, OptionalLong>
makeLong(LongBinaryOperator operator) {
    Objects.requireNonNull(operator);
    class ReducingSink
            implements AccumulatingSink<Long, OptionalLong, ReducingSink>, Sink.OfLong {
        private boolean empty;   // true until the first element is seen
        private long state;

        @Override
        public void begin(long size) {
            empty = true;
            state = 0;
        }

        @Override
        public void accept(long t) {
            if (empty) {
                // The first element becomes the initial accumulation state.
                empty = false;
                state = t;
            }
            else {
                state = operator.applyAsLong(state, t);
            }
        }

        @Override
        public OptionalLong get() {
            return empty ? OptionalLong.empty() : OptionalLong.of(state);
        }

        @Override
        public void combine(ReducingSink other) {
            // An empty sibling contributes nothing to the reduction.
            if (!other.empty)
                accept(other.state);
        }
    }
    return new ReduceOp<Long, OptionalLong, ReducingSink>(StreamShape.LONG_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a mutable reduce on
 * {@code long} values.
 *
 * @param <R> the type of the result
 * @param supplier a factory to produce a new accumulator of the result type
 * @param accumulator a function to incorporate a long into an
 *        accumulator
 * @param combiner a function to combine an accumulator into another
 * @return a {@code TerminalOp} implementing the reduction
 */
public static <R> TerminalOp<Long, R>
makeLong(Supplier<R> supplier,
         ObjLongConsumer<R> accumulator,
         BinaryOperator<R> combiner) {
    Objects.requireNonNull(supplier);
    Objects.requireNonNull(accumulator);
    Objects.requireNonNull(combiner);
    // Box supplies the mutable 'state' field holding the accumulator.
    class ReducingSink extends Box<R>
            implements AccumulatingSink<Long, R, ReducingSink>, Sink.OfLong {
        @Override
        public void begin(long size) {
            // Every leaf gets its own fresh accumulator instance.
            state = supplier.get();
        }
        @Override
        public void accept(long t) {
            accumulator.accept(state, t);
        }
        @Override
        public void combine(ReducingSink other) {
            state = combiner.apply(state, other.state);
        }
    }
    return new ReduceOp<Long, R, ReducingSink>(StreamShape.LONG_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * {@code double} values.
 *
 * @param identity the identity for the combining function
 * @param operator the combining function
 * @return a {@code TerminalOp} implementing the reduction
 */
public static TerminalOp<Double, Double>
makeDouble(double identity, DoubleBinaryOperator operator) {
    Objects.requireNonNull(operator);
    // Primitive state avoids boxing on the accumulation hot path.
    class ReducingSink
            implements AccumulatingSink<Double, Double, ReducingSink>, Sink.OfDouble {
        private double state;
        @Override
        public void begin(long size) {
            state = identity;
        }
        @Override
        public void accept(double t) {
            state = operator.applyAsDouble(state, t);
        }
        @Override
        public Double get() {
            return state;
        }
        @Override
        public void combine(ReducingSink other) {
            accept(other.state);
        }
    }
    return new ReduceOp<Double, Double, ReducingSink>(StreamShape.DOUBLE_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
/**
 * Constructs a {@code TerminalOp} that implements a functional reduce on
 * {@code double} values, producing an optional double result.
 *
 * @param operator the combining function
 * @return a {@code TerminalOp} implementing the reduction
 */
public static TerminalOp<Double, OptionalDouble>
makeDouble(DoubleBinaryOperator operator) {
    Objects.requireNonNull(operator);
    class ReducingSink
            implements AccumulatingSink<Double, OptionalDouble, ReducingSink>, Sink.OfDouble {
        private boolean empty;   // true until the first element is seen
        private double state;

        @Override
        public void begin(long size) {
            empty = true;
            state = 0;
        }

        @Override
        public void accept(double t) {
            if (empty) {
                // The first element becomes the initial accumulation state.
                empty = false;
                state = t;
            }
            else {
                state = operator.applyAsDouble(state, t);
            }
        }

        @Override
        public OptionalDouble get() {
            return empty ? OptionalDouble.empty() : OptionalDouble.of(state);
        }

        @Override
        public void combine(ReducingSink other) {
            // An empty sibling contributes nothing to the reduction.
            if (!other.empty)
                accept(other.state);
        }
    }
    return new ReduceOp<Double, OptionalDouble, ReducingSink>(StreamShape.DOUBLE_VALUE) {
        @Override
        public ReducingSink makeSink() {
            return new ReducingSink();
        }
    };
}
    /**
     * Constructs a {@code TerminalOp} that implements a mutable reduce on
     * {@code double} values.
     *
     * @param <R> the type of the result
     * @param supplier a factory to produce a new accumulator of the result type
     * @param accumulator a function to incorporate a double into an
     *        accumulator
     * @param combiner a function to combine an accumulator into another
     * @return a {@code TerminalOp} implementing the reduction
     */
    public static <R> TerminalOp<Double, R>
    makeDouble(Supplier<R> supplier,
               ObjDoubleConsumer<R> accumulator,
               BinaryOperator<R> combiner) {
        Objects.requireNonNull(supplier);
        Objects.requireNonNull(accumulator);
        Objects.requireNonNull(combiner);
        // Local sink type: Box<R> supplies the single mutable result cell.
        class ReducingSink extends Box<R>
                implements AccumulatingSink<Double, R, ReducingSink>, Sink.OfDouble {
            @Override
            public void begin(long size) {
                // A fresh accumulator per sink instance / traversal.
                state = supplier.get();
            }
            @Override
            public void accept(double t) {
                accumulator.accept(state, t);
            }
            @Override
            public void combine(ReducingSink other) {
                // Merge another leaf's accumulator; relies on combiner
                // being associative for parallel correctness.
                state = combiner.apply(state, other.state);
            }
        }
        return new ReduceOp<Double, R, ReducingSink>(StreamShape.DOUBLE_VALUE) {
            @Override
            public ReducingSink makeSink() {
                return new ReducingSink();
            }
        };
    }
/**
* A type of {@code TerminalSink} that implements an associative reducing
* operation on elements of type {@code T} and producing a result of type
* {@code R}.
*
* @param <T> the type of input element to the combining operation
* @param <R> the result type
* @param <K> the type of the {@code AccumulatingSink}.
*/
private interface AccumulatingSink<T, R, K extends AccumulatingSink<T, R, K>>
extends TerminalSink<T, R> {
public void combine(K other);
}
/**
* State box for a single state element, used as a base class for
* {@code AccumulatingSink} instances
*
* @param <U> The type of the state element
*/
private static abstract class Box<U> {
U state;
Box() {} // Avoid creation of special accessor
public U get() {
return state;
}
}
    /**
     * A {@code TerminalOp} that evaluates a stream pipeline and sends the
     * output into an {@code AccumulatingSink}, which performs a reduce
     * operation. The {@code AccumulatingSink} must represent an associative
     * reducing operation.
     *
     * @param <T> the output type of the stream pipeline
     * @param <R> the result type of the reducing operation
     * @param <S> the type of the {@code AccumulatingSink}
     */
    private static abstract class ReduceOp<T, R, S extends AccumulatingSink<T, R, S>>
            implements TerminalOp<T, R> {
        private final StreamShape inputShape;
        /**
         * Create a {@code ReduceOp} of the specified stream shape which uses
         * the specified {@code Supplier} to create accumulating sinks.
         *
         * @param shape The shape of the stream pipeline
         */
        ReduceOp(StreamShape shape) {
            inputShape = shape;
        }
        /**
         * Returns a fresh, empty accumulating sink for one traversal (or one
         * leaf of a parallel decomposition).
         */
        public abstract S makeSink();
        @Override
        public StreamShape inputShape() {
            return inputShape;
        }
        @Override
        public <P_IN> R evaluateSequential(PipelineHelper<T> helper,
                                           Spliterator<P_IN> spliterator) {
            // Sequential: push everything into one sink and read its result.
            return helper.wrapAndCopyInto(makeSink(), spliterator).get();
        }
        @Override
        public <P_IN> R evaluateParallel(PipelineHelper<T> helper,
                                         Spliterator<P_IN> spliterator) {
            // Parallel: fork/join decomposition; leaves are combined pairwise.
            return new ReduceTask<>(this, helper, spliterator).invoke().get();
        }
    }
/**
* A {@code ForkJoinTask} for performing a parallel reduce operation.
*/
private static final class ReduceTask<P_IN, P_OUT, R,
S extends AccumulatingSink<P_OUT, R, S>>
extends AbstractTask<P_IN, P_OUT, S, ReduceTask<P_IN, P_OUT, R, S>> {
private final ReduceOp<P_OUT, R, S> op;
ReduceTask(ReduceOp<P_OUT, R, S> op,
PipelineHelper<P_OUT> helper,
Spliterator<P_IN> spliterator) {
super(helper, spliterator);
this.op = op;
}
ReduceTask(ReduceTask<P_IN, P_OUT, R, S> parent,
Spliterator<P_IN> spliterator) {
super(parent, spliterator);
this.op = parent.op;
}
@Override
protected ReduceTask<P_IN, P_OUT, R, S> makeChild(Spliterator<P_IN> spliterator) {
return new ReduceTask<>(this, spliterator);
}
@Override
protected S doLeaf() {
return helper.wrapAndCopyInto(op.makeSink(), spliterator);
}
@Override
public void onCompletion(CountedCompleter caller) {
if (!isLeaf()) {
S leftResult = leftChild.getLocalResult();
leftResult.combine(rightChild.getLocalResult());
setLocalResult(leftResult);
}
// GC spliterator, left and right child
super.onCompletion(caller);
}
}
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.Comparator;
import java.util.Comparators;
import java.util.Iterator;
import java.util.Objects;
import java.util.Optional;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.Function;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.LongConsumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongFunction;
/**
 * Abstract base class for an intermediate pipeline stage or pipeline source
 * stage of a {@code Stream}, whose elements are of type {@code P_OUT}.
*
* @param <P_IN> type of elements in the upstream source
* @param <P_OUT> type of elements in produced by this stage
*
* @since 1.8
*/
abstract class ReferencePipeline<P_IN, P_OUT>
        extends AbstractPipeline<P_IN, P_OUT, Stream<P_OUT>>
        implements Stream<P_OUT> {

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Supplier<Spliterator>} describing the stream source
     * @param sourceFlags the source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    ReferencePipeline(Supplier<? extends Spliterator<?>> source,
                      int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }

    /**
     * Constructor for the head of a stream pipeline.
     *
     * @param source {@code Spliterator} describing the stream source
     * @param sourceFlags The source flags for the stream source, described in
     *        {@link StreamOpFlag}
     * @param parallel {@code true} if the pipeline is parallel
     */
    ReferencePipeline(Spliterator<?> source,
                      int sourceFlags, boolean parallel) {
        super(source, sourceFlags, parallel);
    }

    /**
     * Constructor for appending an intermediate operation onto an existing
     * pipeline.
     *
     * @param upstream the upstream element source.
     * @param opFlags the operation flags for this stage, described in
     *        {@link StreamOpFlag}
     */
    ReferencePipeline(AbstractPipeline<?, P_IN, ?> upstream, int opFlags) {
        super(upstream, opFlags);
    }

    // Shape-specific methods

    @Override
    final StreamShape getOutputShape() {
        return StreamShape.REFERENCE;
    }

    @Override
    final <P_IN> Node<P_OUT> evaluateToNode(PipelineHelper<P_OUT> helper,
                                            Spliterator<P_IN> spliterator,
                                            boolean flattenTree,
                                            IntFunction<P_OUT[]> generator) {
        return Nodes.collect(helper, spliterator, flattenTree, generator);
    }

    @Override
    final <P_IN> Spliterator<P_OUT> wrap(PipelineHelper<P_OUT> ph,
                                         Supplier<Spliterator<P_IN>> supplier,
                                         boolean isParallel) {
        return new StreamSpliterators.WrappingSpliterator<>(ph, supplier, isParallel);
    }

    @Override
    final Spliterator<P_OUT> lazySpliterator(Supplier<? extends Spliterator<P_OUT>> supplier) {
        return new StreamSpliterators.DelegatingSpliterator<>(supplier);
    }

    @Override
    final void forEachWithCancel(Spliterator<P_OUT> spliterator, Sink<P_OUT> sink) {
        // Pull one element at a time so cancellation can be observed between
        // elements (needed by short-circuiting terminal operations).
        do { } while (!sink.cancellationRequested() && spliterator.tryAdvance(sink));
    }

    @Override
    final Node.Builder<P_OUT> makeNodeBuilder(long exactSizeIfKnown, IntFunction<P_OUT[]> generator) {
        return Nodes.builder(exactSizeIfKnown, generator);
    }

    // BaseStream

    @Override
    public final Iterator<P_OUT> iterator() {
        return Spliterators.iteratorFromSpliterator(spliterator());
    }

    // Stream

    // Stateless intermediate operations from Stream

    /**
     * Returns an equivalent stream with the ORDERED flag cleared; a no-op
     * (returns {@code this}) if this stream is already unordered.
     */
    @Override
    public Stream<P_OUT> unordered() {
        if (!isOrdered())
            return this;
        return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE, StreamOpFlag.NOT_ORDERED) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
                // Only the flags change; elements pass through untouched.
                return sink;
            }
        };
    }

    @Override
    public final Stream<P_OUT> filter(Predicate<? super P_OUT> predicate) {
        Objects.requireNonNull(predicate);
        // Filtering may drop elements, so the SIZED characteristic is cleared.
        return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE,
                                             StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    @Override
                    public void accept(P_OUT u) {
                        if (predicate.test(u))
                            downstream.accept(u);
                    }
                };
            }
        };
    }

    @Override
    public final <R> Stream<R> map(Function<? super P_OUT, ? extends R> mapper) {
        Objects.requireNonNull(mapper);
        // Mapping preserves size but may break sortedness/distinctness.
        return new StatelessOp<P_OUT, R>(this, StreamShape.REFERENCE,
                                         StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<R> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    @Override
                    public void accept(P_OUT u) {
                        downstream.accept(mapper.apply(u));
                    }
                };
            }
        };
    }

    @Override
    public final IntStream mapToInt(ToIntFunction<? super P_OUT> mapper) {
        Objects.requireNonNull(mapper);
        return new IntPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
                                                  StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    @Override
                    public void accept(P_OUT u) {
                        downstream.accept(mapper.applyAsInt(u));
                    }
                };
            }
        };
    }

    @Override
    public final LongStream mapToLong(ToLongFunction<? super P_OUT> mapper) {
        Objects.requireNonNull(mapper);
        return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
                                                   StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    @Override
                    public void accept(P_OUT u) {
                        downstream.accept(mapper.applyAsLong(u));
                    }
                };
            }
        };
    }

    @Override
    public final DoubleStream mapToDouble(ToDoubleFunction<? super P_OUT> mapper) {
        Objects.requireNonNull(mapper);
        return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
                                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    @Override
                    public void accept(P_OUT u) {
                        downstream.accept(mapper.applyAsDouble(u));
                    }
                };
            }
        };
    }

    @Override
    public final <R> Stream<R> flatMap(Function<? super P_OUT, ? extends Stream<? extends R>> mapper) {
        Objects.requireNonNull(mapper);
        // We can do better than this, by polling cancellationRequested when stream is infinite
        return new StatelessOp<P_OUT, R>(this, StreamShape.REFERENCE,
                                         StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<R> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    public void accept(P_OUT u) {
                        // We can do better that this too; optimize for depth=0 case and just grab spliterator and forEach it
                        Stream<? extends R> result = mapper.apply(u);
                        // A null result from the mapper is treated as an empty stream.
                        if (result != null)
                            result.sequential().forEach(downstream);
                    }
                };
            }
        };
    }

    @Override
    public final IntStream flatMapToInt(Function<? super P_OUT, ? extends IntStream> mapper) {
        Objects.requireNonNull(mapper);
        // We can do better than this, by polling cancellationRequested when stream is infinite
        return new IntPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
                                                  StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    // Adapt the boxed downstream to a primitive consumer once,
                    // rather than per element.
                    IntConsumer downstreamAsInt = downstream::accept;
                    public void accept(P_OUT u) {
                        // We can do better that this too; optimize for depth=0 case and just grab spliterator and forEach it
                        IntStream result = mapper.apply(u);
                        if (result != null)
                            result.sequential().forEach(downstreamAsInt);
                    }
                };
            }
        };
    }

    @Override
    public final DoubleStream flatMapToDouble(Function<? super P_OUT, ? extends DoubleStream> mapper) {
        Objects.requireNonNull(mapper);
        // We can do better than this, by polling cancellationRequested when stream is infinite
        return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
                                                     StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    // Adapt the boxed downstream to a primitive consumer once,
                    // rather than per element.
                    DoubleConsumer downstreamAsDouble = downstream::accept;
                    public void accept(P_OUT u) {
                        // We can do better that this too; optimize for depth=0 case and just grab spliterator and forEach it
                        DoubleStream result = mapper.apply(u);
                        if (result != null)
                            result.sequential().forEach(downstreamAsDouble);
                    }
                };
            }
        };
    }

    @Override
    public final LongStream flatMapToLong(Function<? super P_OUT, ? extends LongStream> mapper) {
        Objects.requireNonNull(mapper);
        // We can do better than this, by polling cancellationRequested when stream is infinite
        return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
                                                   StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    // Adapt the boxed downstream to a primitive consumer once,
                    // rather than per element.
                    LongConsumer downstreamAsLong = downstream::accept;
                    public void accept(P_OUT u) {
                        // We can do better that this too; optimize for depth=0 case and just grab spliterator and forEach it
                        LongStream result = mapper.apply(u);
                        if (result != null)
                            result.sequential().forEach(downstreamAsLong);
                    }
                };
            }
        };
    }

    @Override
    public final Stream<P_OUT> peek(Consumer<? super P_OUT> tee) {
        Objects.requireNonNull(tee);
        // peek preserves every stream characteristic, hence op flags of 0.
        return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE,
                                             0) {
            @Override
            Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
                return new Sink.ChainedReference<P_OUT>(sink) {
                    @Override
                    public void accept(P_OUT u) {
                        tee.accept(u);
                        downstream.accept(u);
                    }
                };
            }
        };
    }

    // Stateful intermediate operations from Stream

    @Override
    public final Stream<P_OUT> distinct() {
        return DistinctOps.makeRef(this);
    }

    @Override
    public final Stream<P_OUT> sorted() {
        return SortedOps.makeRef(this);
    }

    @Override
    public final Stream<P_OUT> sorted(Comparator<? super P_OUT> comparator) {
        return SortedOps.makeRef(this, comparator);
    }

    /**
     * Common implementation for limit/substream: skip the first {@code skip}
     * elements and emit at most {@code limit} (no limit if negative).
     */
    private Stream<P_OUT> slice(long skip, long limit) {
        return SliceOps.makeRef(this, skip, limit);
    }

    @Override
    public final Stream<P_OUT> limit(long maxSize) {
        if (maxSize < 0)
            throw new IllegalArgumentException(Long.toString(maxSize));
        return slice(0, maxSize);
    }

    @Override
    public final Stream<P_OUT> substream(long startingOffset) {
        if (startingOffset < 0)
            throw new IllegalArgumentException(Long.toString(startingOffset));
        if (startingOffset == 0)
            return this;
        else
            return slice(startingOffset, -1);
    }

    @Override
    public final Stream<P_OUT> substream(long startingOffset, long endingOffset) {
        if (startingOffset < 0 || endingOffset < startingOffset)
            throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
        return slice(startingOffset, endingOffset - startingOffset);
    }

    // Terminal operations from Stream

    @Override
    public void forEach(Consumer<? super P_OUT> action) {
        evaluate(ForEachOps.makeRef(action, false));
    }

    @Override
    public void forEachOrdered(Consumer<? super P_OUT> action) {
        evaluate(ForEachOps.makeRef(action, true));
    }

    @Override
    @SuppressWarnings("unchecked")
    public final <A> A[] toArray(IntFunction<A[]> generator) {
        // Since A has no relation to U (not possible to declare that A is an upper bound of U)
        // there will be no static type checking.
        // Therefore use a raw type and assume A == U rather than propagating the separation of A and U
        // throughout the code-base.
        // The runtime type of U is never checked for equality with the component type of the runtime type of A[].
        // Runtime checking will be performed when an element is stored in A[], thus if A is not a
        // super type of U an ArrayStoreException will be thrown.
        IntFunction rawGenerator = (IntFunction) generator;
        return (A[]) Nodes.flatten(evaluateToArrayNode(rawGenerator), rawGenerator)
                          .asArray(rawGenerator);
    }

    @Override
    public final Object[] toArray() {
        return toArray(Object[]::new);
    }

    @Override
    public final boolean anyMatch(Predicate<? super P_OUT> predicate) {
        return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.ANY));
    }

    @Override
    public final boolean allMatch(Predicate<? super P_OUT> predicate) {
        return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.ALL));
    }

    @Override
    public final boolean noneMatch(Predicate<? super P_OUT> predicate) {
        return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.NONE));
    }

    @Override
    public final Optional<P_OUT> findFirst() {
        return evaluate(FindOps.makeRef(true));
    }

    @Override
    public final Optional<P_OUT> findAny() {
        return evaluate(FindOps.makeRef(false));
    }

    @Override
    public final P_OUT reduce(final P_OUT identity, final BinaryOperator<P_OUT> accumulator) {
        return evaluate(ReduceOps.makeRef(identity, accumulator, accumulator));
    }

    @Override
    public final Optional<P_OUT> reduce(BinaryOperator<P_OUT> accumulator) {
        return evaluate(ReduceOps.makeRef(accumulator));
    }

    @Override
    public final <R> R reduce(R identity, BiFunction<R, ? super P_OUT, R> accumulator, BinaryOperator<R> combiner) {
        return evaluate(ReduceOps.makeRef(identity, accumulator, combiner));
    }

    @Override
    public final <R> R collect(Collector<? super P_OUT, R> collector) {
        // Fast path: a CONCURRENT collector on a parallel stream (when
        // ordering is not required) can accumulate into a single shared
        // container from multiple threads, skipping the merge step.
        if (isParallel()
                && (collector.characteristics().contains(Collector.Characteristics.CONCURRENT))
                && (!isOrdered() || collector.characteristics().contains(Collector.Characteristics.UNORDERED))) {
            R container = collector.resultSupplier().get();
            BiFunction<R, ? super P_OUT, R> accumulator = collector.accumulator();
            forEach(u -> accumulator.apply(container, u));
            return container;
        }
        return evaluate(ReduceOps.makeRef(collector));
    }

    @Override
    public final <R> R collect(Supplier<R> resultFactory,
                               BiConsumer<R, ? super P_OUT> accumulator,
                               BiConsumer<R, R> combiner) {
        return evaluate(ReduceOps.makeRef(resultFactory, accumulator, combiner));
    }

    @Override
    public final Optional<P_OUT> max(Comparator<? super P_OUT> comparator) {
        return reduce(Comparators.greaterOf(comparator));
    }

    @Override
    public final Optional<P_OUT> min(Comparator<? super P_OUT> comparator) {
        return reduce(Comparators.lesserOf(comparator));
    }

    @Override
    public final long count() {
        // Counting is a sum-reduction of 1 per element.
        return mapToLong(e -> 1L).sum();
    }

    //

    /**
     * Source stage of a ReferencePipeline.
     *
     * @param <E_IN> type of elements in the upstream source
     * @param <E_OUT> type of elements in produced by this stage
     * @since 1.8
     */
    static class Head<E_IN, E_OUT> extends ReferencePipeline<E_IN, E_OUT> {
        /**
         * Constructor for the source stage of a Stream.
         *
         * @param source {@code Supplier<Spliterator>} describing the stream
         *        source
         * @param sourceFlags the source flags for the stream source, described
         *        in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Supplier<? extends Spliterator<?>> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }

        /**
         * Constructor for the source stage of a Stream.
         *
         * @param source {@code Spliterator} describing the stream source
         * @param sourceFlags the source flags for the stream source, described
         *        in {@link StreamOpFlag}
         * @param parallel {@code true} if the pipeline is parallel
         */
        Head(Spliterator<?> source,
             int sourceFlags, boolean parallel) {
            super(source, sourceFlags, parallel);
        }

        @Override
        final boolean opIsStateful() {
            // A head is not an operation; these op-methods must never be called.
            throw new UnsupportedOperationException();
        }

        @Override
        final Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink) {
            throw new UnsupportedOperationException();
        }

        // Optimized sequential terminal operations for the head of the pipeline

        @Override
        public void forEach(Consumer<? super E_OUT> action) {
            if (!isParallel()) {
                // No intermediate ops: traverse the source spliterator directly.
                sourceStageSpliterator().forEachRemaining(action);
            }
            else {
                super.forEach(action);
            }
        }

        @Override
        public void forEachOrdered(Consumer<? super E_OUT> action) {
            if (!isParallel()) {
                // No intermediate ops: traverse the source spliterator directly.
                sourceStageSpliterator().forEachRemaining(action);
            }
            else {
                super.forEachOrdered(action);
            }
        }
    }

    /**
     * Base class for a stateless intermediate stage of a Stream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @param <E_OUT> type of elements in produced by this stage
     * @since 1.8
     */
    abstract static class StatelessOp<E_IN, E_OUT>
            extends ReferencePipeline<E_IN, E_OUT> {
        /**
         * Construct a new Stream by appending a stateless intermediate
         * operation to an existing stream.
         *
         * @param upstream The upstream pipeline stage
         * @param inputShape The stream shape for the upstream pipeline stage
         * @param opFlags Operation flags for the new stage
         */
        StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
                    StreamShape inputShape,
                    int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }

        @Override
        final boolean opIsStateful() {
            return false;
        }
    }

    /**
     * Base class for a stateful intermediate stage of a Stream.
     *
     * @param <E_IN> type of elements in the upstream source
     * @param <E_OUT> type of elements in produced by this stage
     * @since 1.8
     */
    abstract static class StatefulOp<E_IN, E_OUT>
            extends ReferencePipeline<E_IN, E_OUT> {
        /**
         * Construct a new Stream by appending a stateful intermediate operation
         * to an existing stream.
         * @param upstream The upstream pipeline stage
         * @param inputShape The stream shape for the upstream pipeline stage
         * @param opFlags Operation flags for the new stage
         */
        StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
                   StreamShape inputShape,
                   int opFlags) {
            super(upstream, opFlags);
            assert upstream.getOutputShape() == inputShape;
        }

        @Override
        final boolean opIsStateful() {
            return true;
        }

        // Stateful ops must provide their own parallel evaluation strategy.
        @Override
        abstract <P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
                                                       Spliterator<P_IN> spliterator,
                                                       IntFunction<E_OUT[]> generator);
    }
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.ArrayList;
import java.util.List;
import java.util.Spliterator;
import java.util.concurrent.CountedCompleter;
import java.util.function.IntFunction;
/**
 * Factory for instances of short-circuiting stateful intermediate operations
 * that produce subsequences of their input stream.
*
* @since 1.8
*/
final class SliceOps {
// No instances
private SliceOps() { }
    /**
     * Appends a "slice" operation to the provided stream. The slice operation
     * may be skip-only, limit-only, or skip-and-limit.
     *
     * @param <T> the type of both input and output elements
     * @param upstream a reference stream with element type T
     * @param skip the number of elements to skip. Must be >= 0.
     * @param limit the maximum size of the resulting stream, or -1 if no limit
     *        is to be imposed
     * @return the sliced stream
     */
    public static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
                                        long skip, long limit) {
        if (skip < 0)
            throw new IllegalArgumentException("Skip must be non-negative: " + skip);
        return new ReferencePipeline.StatefulOp<T,T>(upstream, StreamShape.REFERENCE,
                                                     flags(limit)) {
            @Override
            <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
                                              Spliterator<P_IN> spliterator,
                                              IntFunction<T[]> generator) {
                return new SliceTask<>(this, helper, spliterator, generator, skip, limit).invoke();
            }
            @Override
            Sink<T> opWrapSink(int flags, Sink<T> sink) {
                return new Sink.ChainedReference<T>(sink) {
                    long n = skip;                              // elements still to skip
                    long m = limit >= 0 ? limit : Long.MAX_VALUE; // elements still to emit
                    @Override
                    public void accept(T t) {
                        if (n == 0) {
                            if (m > 0) {
                                m--;
                                downstream.accept(t);
                            }
                        }
                        else {
                            n--;
                        }
                    }
                    @Override
                    public boolean cancellationRequested() {
                        // Short-circuit once the limit is exhausted.
                        return m == 0 || downstream.cancellationRequested();
                    }
                };
            }
        };
    }
    /**
     * Appends a "slice" operation to the provided IntStream. The slice
     * operation may be skip-only, limit-only, or skip-and-limit.
     *
     * @param upstream An IntStream
     * @param skip The number of elements to skip. Must be >= 0.
     * @param limit The maximum size of the resulting stream, or -1 if no limit
     *        is to be imposed
     * @return the sliced stream
     */
    public static IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream,
                                    long skip, long limit) {
        if (skip < 0)
            throw new IllegalArgumentException("Skip must be non-negative: " + skip);
        return new IntPipeline.StatefulOp<Integer>(upstream, StreamShape.INT_VALUE,
                                                   flags(limit)) {
            @Override
            <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
                                                    Spliterator<P_IN> spliterator,
                                                    IntFunction<Integer[]> generator) {
                return new SliceTask<>(this, helper, spliterator, generator, skip, limit).invoke();
            }
            @Override
            Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
                return new Sink.ChainedInt(sink) {
                    long n = skip;                              // elements still to skip
                    long m = limit >= 0 ? limit : Long.MAX_VALUE; // elements still to emit
                    @Override
                    public void accept(int t) {
                        if (n == 0) {
                            if (m > 0) {
                                m--;
                                downstream.accept(t);
                            }
                        }
                        else {
                            n--;
                        }
                    }
                    @Override
                    public boolean cancellationRequested() {
                        // Short-circuit once the limit is exhausted.
                        return m == 0 || downstream.cancellationRequested();
                    }
                };
            }
        };
    }
    /**
     * Appends a "slice" operation to the provided LongStream. The slice
     * operation may be skip-only, limit-only, or skip-and-limit.
     *
     * @param upstream A LongStream
     * @param skip The number of elements to skip. Must be >= 0.
     * @param limit The maximum size of the resulting stream, or -1 if no limit
     *        is to be imposed
     * @return the sliced stream
     */
    public static LongStream makeLong(AbstractPipeline<?, Long, ?> upstream,
                                      long skip, long limit) {
        if (skip < 0)
            throw new IllegalArgumentException("Skip must be non-negative: " + skip);
        return new LongPipeline.StatefulOp<Long>(upstream, StreamShape.LONG_VALUE,
                                                 flags(limit)) {
            @Override
            <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
                                                 Spliterator<P_IN> spliterator,
                                                 IntFunction<Long[]> generator) {
                return new SliceTask<>(this, helper, spliterator, generator, skip, limit).invoke();
            }
            @Override
            Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
                return new Sink.ChainedLong(sink) {
                    long n = skip;                              // elements still to skip
                    long m = limit >= 0 ? limit : Long.MAX_VALUE; // elements still to emit
                    @Override
                    public void accept(long t) {
                        if (n == 0) {
                            if (m > 0) {
                                m--;
                                downstream.accept(t);
                            }
                        }
                        else {
                            n--;
                        }
                    }
                    @Override
                    public boolean cancellationRequested() {
                        // Short-circuit once the limit is exhausted.
                        return m == 0 || downstream.cancellationRequested();
                    }
                };
            }
        };
    }
    /**
     * Appends a "slice" operation to the provided DoubleStream. The slice
     * operation may be skip-only, limit-only, or skip-and-limit.
     *
     * @param upstream A DoubleStream
     * @param skip The number of elements to skip. Must be >= 0.
     * @param limit The maximum size of the resulting stream, or -1 if no limit
     *        is to be imposed
     * @return the sliced stream
     */
    public static DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream,
                                          long skip, long limit) {
        if (skip < 0)
            throw new IllegalArgumentException("Skip must be non-negative: " + skip);
        return new DoublePipeline.StatefulOp<Double>(upstream, StreamShape.DOUBLE_VALUE,
                                                     flags(limit)) {
            @Override
            <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
                                                   Spliterator<P_IN> spliterator,
                                                   IntFunction<Double[]> generator) {
                return new SliceTask<>(this, helper, spliterator, generator, skip, limit).invoke();
            }
            @Override
            Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
                return new Sink.ChainedDouble(sink) {
                    long n = skip;                              // elements still to skip
                    long m = limit >= 0 ? limit : Long.MAX_VALUE; // elements still to emit
                    @Override
                    public void accept(double t) {
                        if (n == 0) {
                            if (m > 0) {
                                m--;
                                downstream.accept(t);
                            }
                        }
                        else {
                            n--;
                        }
                    }
                    @Override
                    public boolean cancellationRequested() {
                        // Short-circuit once the limit is exhausted.
                        return m == 0 || downstream.cancellationRequested();
                    }
                };
            }
        };
    }
private static int flags(long limit) {
return StreamOpFlag.NOT_SIZED | ((limit != -1) ? StreamOpFlag.IS_SHORT_CIRCUIT : 0);
}
// Parallel strategy -- two cases
// IF we have full size information
// - decompose, keeping track of each leaf's (offset, size)
// - calculate leaf only if intersection between (offset, size) and desired slice
// - Construct a Node containing the appropriate sections of the appropriate leaves
// IF we don't
// - decompose, and calculate size of each leaf
// - on complete of any node, compute completed initial size from the root, and if big enough, cancel later nodes
// - @@@ this can be significantly improved
// @@@ Currently we don't do the sized version at all
// @@@ Should take into account ORDERED flag; if not ORDERED, we can limit in temporal order instead
/**
 * {@code ForkJoinTask} implementing slice computation.
 *
 * <p>Each interior leaf collects its chunk of elements into a {@link Node}
 * and records its size; once absolute position information is known (all
 * earlier siblings have completed), leaves outside the requested
 * {@code [targetOffset, targetOffset + targetSize)} range are cancelled and
 * the surviving nodes are truncated and concatenated.
 *
 * @param <P_IN> Input element type to the stream pipeline
 * @param <P_OUT> Output element type from the stream pipeline
 */
private static final class SliceTask<P_IN, P_OUT>
        extends AbstractShortCircuitTask<P_IN, P_OUT, Node<P_OUT>, SliceTask<P_IN, P_OUT>> {
    // The slice operation itself; input and output element types are the same.
    private final AbstractPipeline<P_OUT, P_OUT, ?> op;
    // Factory for arrays of the output element type.
    private final IntFunction<P_OUT[]> generator;
    // Requested slice: skip targetOffset elements, keep targetSize (-1 = unbounded).
    private final long targetOffset, targetSize;
    // Number of elements produced by this subtree; valid once 'completed' is true.
    private long thisNodeSize;

    // True when thisNodeSize is final. Volatile because leftSize() reads it
    // from other worker threads while the computation is still in flight.
    private volatile boolean completed;

    SliceTask(AbstractPipeline<?, P_OUT, ?> op,
              PipelineHelper<P_OUT> helper,
              Spliterator<P_IN> spliterator,
              IntFunction<P_OUT[]> generator,
              long offset, long size) {
        super(helper, spliterator);
        // Cast is safe: a slice does not change the element type.
        this.op = (AbstractPipeline<P_OUT, P_OUT, ?>) op;
        this.generator = generator;
        this.targetOffset = offset;
        this.targetSize = size;
    }

    /** Child-task constructor: inherits the slice parameters from the parent. */
    SliceTask(SliceTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
        super(parent, spliterator);
        this.op = parent.op;
        this.generator = parent.generator;
        this.targetOffset = parent.targetOffset;
        this.targetSize = parent.targetSize;
    }

    @Override
    protected SliceTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
        return new SliceTask<>(this, spliterator);
    }

    @Override
    protected final Node<P_OUT> getEmptyResult() {
        return Nodes.emptyNode(op.getOutputShape());
    }

    @Override
    protected final Node<P_OUT> doLeaf() {
        if (isRoot()) {
            // The whole computation fit in a single leaf: evaluate
            // sequentially through the slice op itself, short-circuiting
            // if the pipeline supports it.
            long sizeIfKnown = StreamOpFlag.SIZED.isPreserved(op.sourceOrOpFlags)
                               ? op.exactOutputSizeIfKnown(spliterator)
                               : -1;
            final Node.Builder<P_OUT> nb = op.makeNodeBuilder(sizeIfKnown, generator);
            Sink<P_OUT> opSink = op.opWrapSink(op.sourceOrOpFlags, nb);

            if (!StreamOpFlag.SHORT_CIRCUIT.isKnown(op.sourceOrOpFlags))
                helper.wrapAndCopyInto(opSink, spliterator);
            else
                helper.copyIntoWithCancel(helper.wrapSink(opSink), spliterator);
            return nb.build();
        }
        else {
            // Interior leaf: collect everything; truncation happens later in
            // visit() once absolute offsets are known.
            Node<P_OUT> node = helper.wrapAndCopyInto(helper.makeNodeBuilder(-1, generator),
                                                      spliterator).build();
            thisNodeSize = node.count();
            completed = true;
            return node;
        }
    }

    @Override
    public final void onCompletion(CountedCompleter<?> caller) {
        if (!isLeaf()) {
            thisNodeSize = leftChild.thisNodeSize + rightChild.thisNodeSize;
            completed = true;

            if (isRoot()) {
                // Only collect nodes once absolute size information is known
                ArrayList<Node<P_OUT>> nodes = new ArrayList<>();
                visit(nodes, 0);
                Node<P_OUT> result;
                if (nodes.size() == 0)
                    result = Nodes.emptyNode(op.getOutputShape());
                else if (nodes.size() == 1)
                    result = nodes.get(0);
                else
                    // This will create a tree of depth 1 and will not be a sub-tree
                    // for leaf nodes within the required range
                    result = Nodes.conc(op.getOutputShape(), nodes);
                setLocalResult(result);
            }
        }

        // If enough elements have completed at the front of the tree to
        // satisfy the slice, later (right-hand) subtasks cannot contribute
        // and are cancelled.
        if (targetSize >= 0) {
            if (((SliceTask<P_IN, P_OUT>) getRoot()).leftSize() >= targetOffset + targetSize)
                cancelLaterNodes();
        }

        // Don't call super.onCompletion(), we don't look at the child nodes until farther up the tree
    }

    /** Compute the cumulative size of the longest leading prefix of completed children */
    private long leftSize() {
        if (completed)
            return thisNodeSize;
        else if (isLeaf())
            return 0;
        else {
            long leftSize = 0;
            // Iterates exactly twice: first the left child, then the right.
            for (SliceTask<P_IN, P_OUT> child = leftChild, p = null; child != p;
                 p = child, child = rightChild) {
                if (child.completed)
                    leftSize += child.thisNodeSize;
                else {
                    // First incomplete child bounds the known prefix.
                    leftSize += child.leftSize();
                    break;
                }
            }
            return leftSize;
        }
    }

    /**
     * Walk the tree left-to-right, appending to {@code results} the
     * (possibly truncated) node of each leaf that intersects the requested
     * range. {@code offset} is the absolute index of this subtree's first
     * element.
     */
    private void visit(List<Node<P_OUT>> results, int offset) {
        if (!isLeaf()) {
            // Same two-step left/right traversal as in leftSize().
            for (SliceTask<P_IN, P_OUT> child = leftChild, p = null; child != p;
                 p = child, child = rightChild) {
                child.visit(results, offset);
                offset += child.thisNodeSize;
            }
        }
        else {
            if (results.size() == 0) {
                // First intersecting leaf: may need trimming on both ends.
                if (offset + thisNodeSize >= targetOffset)
                    results.add(truncateNode(getLocalResult(),
                                             Math.max(0, targetOffset - offset),
                                             targetSize >= 0 ? Math.max(0, offset + thisNodeSize - (targetOffset + targetSize)) : 0));
            }
            else {
                // Subsequent leaves: only the right end may need trimming.
                if (targetSize == -1 || offset < targetOffset + targetSize) {
                    results.add(truncateNode(getLocalResult(),
                                             0,
                                             targetSize >= 0 ? Math.max(0, offset + thisNodeSize - (targetOffset + targetSize)) : 0));
                }
            }
        }
    }

    /**
     * Return a new node describing the result of truncating an existing Node
     * at the left and/or right.
     */
    private Node<P_OUT> truncateNode(Node<P_OUT> input,
                                     long skipLeft, long skipRight) {
        if (skipLeft == 0 && skipRight == 0)
            return input;
        else {
            return Nodes.truncateNode(input, skipLeft, thisNodeSize - skipRight, generator);
        }
    }
}
// @@@ Currently unused -- optimization for when all sizes are known
// private static class SizedSliceTask<S, T> extends AbstractShortCircuitTask<S, T, Node<T>, SizedSliceTask<S, T>> {
// private final int targetOffset, targetSize;
// private final int offset, size;
//
// private SizedSliceTask(ParallelPipelineHelper<S, T> helper, int offset, int size) {
// super(helper);
// targetOffset = offset;
// targetSize = size;
// this.offset = 0;
// this.size = spliterator.getSizeIfKnown();
// }
//
// private SizedSliceTask(SizedSliceTask<S, T> parent, Spliterator<S> spliterator) {
// // Makes assumptions about order in which siblings are created and linked into parent!
// super(parent, spliterator);
// targetOffset = parent.targetOffset;
// targetSize = parent.targetSize;
// int siblingSizes = 0;
// for (SizedSliceTask<S, T> sibling = parent.children; sibling != null; sibling = sibling.nextSibling)
// siblingSizes += sibling.size;
// size = spliterator.getSizeIfKnown();
// offset = parent.offset + siblingSizes;
// }
//
// @Override
// protected SizedSliceTask<S, T> makeChild(Spliterator<S> spliterator) {
// return new SizedSliceTask<>(this, spliterator);
// }
//
// @Override
// protected Node<T> getEmptyResult() {
// return Nodes.emptyNode();
// }
//
// @Override
// public boolean taskCanceled() {
// if (offset > targetOffset+targetSize || offset+size < targetOffset)
// return true;
// else
// return super.taskCanceled();
// }
//
// @Override
// protected Node<T> doLeaf() {
// int skipLeft = Math.max(0, targetOffset - offset);
// int skipRight = Math.max(0, offset + size - (targetOffset + targetSize));
// if (skipLeft == 0 && skipRight == 0)
// return helper.into(Nodes.<T>makeBuilder(spliterator.getSizeIfKnown())).build();
// else {
// // If we're the first or last node that intersects the target range, peel off irrelevant elements
// int truncatedSize = size - skipLeft - skipRight;
// NodeBuilder<T> builder = Nodes.<T>makeBuilder(truncatedSize);
// Sink<S> wrappedSink = helper.wrapSink(builder);
// wrappedSink.begin(truncatedSize);
// Iterator<S> iterator = spliterator.iterator();
// for (int i=0; i<skipLeft; i++)
// iterator.next();
// for (int i=0; i<truncatedSize; i++)
// wrappedSink.apply(iterator.next());
// wrappedSink.end();
// return builder.build();
// }
// }
//
// @Override
// public void onCompletion(CountedCompleter<?> caller) {
// if (!isLeaf()) {
// Node<T> result = null;
// for (SizedSliceTask<S, T> child = children.nextSibling; child != null; child = child.nextSibling) {
// Node<T> childResult = child.getRawResult();
// if (childResult == null)
// continue;
// else if (result == null)
// result = childResult;
// else
// result = Nodes.node(result, childResult);
// }
// setRawResult(result);
// if (offset <= targetOffset && offset+size >= targetOffset+targetSize)
// shortCircuit(result);
// }
// }
// }
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.Comparators;
import java.util.Objects;
import java.util.Spliterator;
import java.util.concurrent.ForkJoinTask;
import java.util.function.IntFunction;
/**
 * Factory methods for transforming streams into sorted streams.
 *
 * @since 1.8
 */
final class SortedOps {

    // Static factory holder; not instantiable.
    private SortedOps() { }

    /**
     * Appends a "sorted" operation to the provided stream, sorting by the
     * natural order of the elements.
     *
     * @param <T> the type of both input and output elements
     * @param upstream a reference stream with element type T
     */
    static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream) {
        return new OfRef<>(upstream);
    }

    /**
     * Appends a "sorted" operation to the provided stream, sorting by the
     * provided comparator.
     *
     * @param <T> the type of both input and output elements
     * @param upstream a reference stream with element type T
     * @param comparator the comparator to order elements by
     */
    static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
                                 Comparator<? super T> comparator) {
        return new OfRef<>(upstream, comparator);
    }

    /**
     * Appends a "sorted" operation to the provided int stream.
     *
     * @param upstream an int stream (the type parameter is unused)
     */
    static <T> IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream) {
        return new OfInt(upstream);
    }

    /**
     * Appends a "sorted" operation to the provided long stream.
     *
     * @param upstream a long stream (the type parameter is unused)
     */
    static <T> LongStream makeLong(AbstractPipeline<?, Long, ?> upstream) {
        return new OfLong(upstream);
    }

    /**
     * Appends a "sorted" operation to the provided double stream.
     *
     * @param upstream a double stream (the type parameter is unused)
     */
    static <T> DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream) {
        return new OfDouble(upstream);
    }

    /**
     * Specialized subtype for sorting reference streams
     */
    private static final class OfRef<T> extends ReferencePipeline.StatefulOp<T, T> {
        /**
         * Whether this op sorts by the natural order of {@code T}.
         */
        private final boolean isNaturalSort;
        /**
         * Comparator used for sorting
         */
        private final Comparator<? super T> comparator;

        /**
         * Sort using natural order of {@literal <T>} which must be
         * {@code Comparable}.
         */
        OfRef(AbstractPipeline<?, T, ?> upstream) {
            super(upstream, StreamShape.REFERENCE,
                  StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
            this.isNaturalSort = true;
            // Will throw CCE when we try to sort if T is not Comparable
            this.comparator = (Comparator<? super T>) Comparators.naturalOrder();
        }

        /**
         * Sort using the provided comparator.
         *
         * @param comparator The comparator to be used to evaluate ordering.
         */
        OfRef(AbstractPipeline<?, T, ?> upstream, Comparator<? super T> comparator) {
            // NOT_SORTED: a comparator sort does not establish natural order.
            super(upstream, StreamShape.REFERENCE,
                  StreamOpFlag.IS_ORDERED | StreamOpFlag.NOT_SORTED);
            this.isNaturalSort = false;
            this.comparator = Objects.requireNonNull(comparator);
        }

        @Override
        public Sink<T> opWrapSink(int flags, Sink sink) {
            Objects.requireNonNull(sink);

            // If the input is already naturally sorted and this operation
            // also naturally sorted then this is a no-op
            if (StreamOpFlag.SORTED.isKnown(flags) && isNaturalSort)
                return sink;
            else if (StreamOpFlag.SIZED.isKnown(flags))
                // Known size: buffer into an exactly-sized array.
                return new SizedRefSortingSink<>(sink, comparator);
            else
                return new RefSortingSink<>(sink, comparator);
        }

        @Override
        public <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
                                                 Spliterator<P_IN> spliterator,
                                                 IntFunction<T[]> generator) {
            // If the input is already naturally sorted and this operation
            // naturally sorts then collect the output
            if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags()) && isNaturalSort) {
                return helper.evaluate(spliterator, false, generator);
            }
            else {
                // @@@ Weak two-pass parallel implementation; parallel collect, parallel sort
                T[] flattenedData = helper.evaluate(spliterator, true, generator).asArray(generator);
                Arrays.parallelSort(flattenedData, comparator);
                return Nodes.node(flattenedData);
            }
        }
    }

    /**
     * Specialized subtype for sorting int streams.
     */
    private static final class OfInt extends IntPipeline.StatefulOp<Integer> {
        OfInt(AbstractPipeline<?, Integer, ?> upstream) {
            super(upstream, StreamShape.INT_VALUE,
                  StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
        }

        @Override
        public Sink<Integer> opWrapSink(int flags, Sink sink) {
            Objects.requireNonNull(sink);

            // Already sorted: no-op.
            if (StreamOpFlag.SORTED.isKnown(flags))
                return sink;
            else if (StreamOpFlag.SIZED.isKnown(flags))
                return new SizedIntSortingSink(sink);
            else
                return new IntSortingSink(sink);
        }

        @Override
        public <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
                                                       Spliterator<P_IN> spliterator,
                                                       IntFunction<Integer[]> generator) {
            if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
                return helper.evaluate(spliterator, false, generator);
            }
            else {
                // Two-pass: parallel collect into a primitive array, parallel sort.
                Node.OfInt n = (Node.OfInt) helper.evaluate(spliterator, true, generator);

                int[] content = n.asIntArray();
                Arrays.parallelSort(content);

                return Nodes.node(content);
            }
        }
    }

    /**
     * Specialized subtype for sorting long streams.
     */
    private static final class OfLong extends LongPipeline.StatefulOp<Long> {
        OfLong(AbstractPipeline<?, Long, ?> upstream) {
            super(upstream, StreamShape.LONG_VALUE,
                  StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
        }

        @Override
        public Sink<Long> opWrapSink(int flags, Sink sink) {
            Objects.requireNonNull(sink);

            // Already sorted: no-op.
            if (StreamOpFlag.SORTED.isKnown(flags))
                return sink;
            else if (StreamOpFlag.SIZED.isKnown(flags))
                return new SizedLongSortingSink(sink);
            else
                return new LongSortingSink(sink);
        }

        @Override
        public <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
                                                    Spliterator<P_IN> spliterator,
                                                    IntFunction<Long[]> generator) {
            if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
                return helper.evaluate(spliterator, false, generator);
            }
            else {
                // Two-pass: parallel collect into a primitive array, parallel sort.
                Node.OfLong n = (Node.OfLong) helper.evaluate(spliterator, true, generator);

                long[] content = n.asLongArray();
                Arrays.parallelSort(content);

                return Nodes.node(content);
            }
        }
    }

    /**
     * Specialized subtype for sorting double streams.
     */
    private static final class OfDouble extends DoublePipeline.StatefulOp<Double> {
        OfDouble(AbstractPipeline<?, Double, ?> upstream) {
            super(upstream, StreamShape.DOUBLE_VALUE,
                  StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
        }

        @Override
        public Sink<Double> opWrapSink(int flags, Sink sink) {
            Objects.requireNonNull(sink);

            // Already sorted: no-op.
            if (StreamOpFlag.SORTED.isKnown(flags))
                return sink;
            else if (StreamOpFlag.SIZED.isKnown(flags))
                return new SizedDoubleSortingSink(sink);
            else
                return new DoubleSortingSink(sink);
        }

        @Override
        public <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
                                                      Spliterator<P_IN> spliterator,
                                                      IntFunction<Double[]> generator) {
            if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
                return helper.evaluate(spliterator, false, generator);
            }
            else {
                // Two-pass: parallel collect into a primitive array, parallel sort.
                Node.OfDouble n = (Node.OfDouble) helper.evaluate(spliterator, true, generator);

                double[] content = n.asDoubleArray();
                Arrays.parallelSort(content);

                return Nodes.node(content);
            }
        }
    }

    /**
     * {@link Sink} for implementing sort on SIZED reference streams.
     * Buffers all elements into an exactly-sized array, then sorts and
     * pushes downstream on {@link #end()}.
     */
    private static final class SizedRefSortingSink<T> extends Sink.ChainedReference<T> {
        private final Comparator<? super T> comparator;
        // Buffer of elements received so far; allocated in begin().
        private T[] array;
        // Number of elements written into 'array'.
        private int offset;

        SizedRefSortingSink(Sink sink, Comparator<? super T> comparator) {
            super(sink);
            this.comparator = comparator;
        }

        @Override
        public void begin(long size) {
            if (size >= Nodes.MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            array = (T[]) new Object[(int) size];
        }

        @Override
        public void end() {
            // Need to use offset rather than array.length since the downstream
            // may be short-circuiting
            // @@@ A better approach is to know if the downstream short-circuits
            //     and check sink.cancellationRequested
            Arrays.sort(array, 0, offset, comparator);
            downstream.begin(offset);
            for (int i = 0; i < offset; i++)
                downstream.accept(array[i]);
            downstream.end();
            array = null;  // allow the buffer to be garbage collected
        }

        @Override
        public void accept(T t) {
            array[offset++] = t;
        }
    }

    /**
     * {@link Sink} for implementing sort on reference streams.
     * Buffers into an {@link ArrayList} when the size is unknown.
     */
    private static final class RefSortingSink<T> extends Sink.ChainedReference<T> {
        private final Comparator<? super T> comparator;
        private ArrayList<T> list;

        RefSortingSink(Sink sink, Comparator<? super T> comparator) {
            super(sink);
            this.comparator = comparator;
        }

        @Override
        public void begin(long size) {
            // Negative size means unknown; use the default initial capacity.
            list = (size >= 0) ? new ArrayList<T>((int) size) : new ArrayList<T>();
        }

        @Override
        public void end() {
            list.sort(comparator);
            downstream.begin(list.size());
            list.forEach(downstream::accept);
            downstream.end();
            list = null;  // allow the buffer to be garbage collected
        }

        @Override
        public void accept(T t) {
            list.add(t);
        }
    }

    /**
     * {@link Sink} for implementing sort on SIZED int streams.
     */
    private static final class SizedIntSortingSink extends Sink.ChainedInt {
        // Exactly-sized buffer; allocated in begin().
        private int[] array;
        // Number of values written into 'array'.
        private int offset;

        SizedIntSortingSink(Sink downstream) {
            super(downstream);
        }

        @Override
        public void begin(long size) {
            if (size >= Nodes.MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            array = new int[(int) size];
        }

        @Override
        public void end() {
            // Sort only the written prefix; upstream may have short-circuited.
            Arrays.sort(array, 0, offset);
            downstream.begin(offset);
            for (int i = 0; i < offset; i++)
                downstream.accept(array[i]);
            downstream.end();
            array = null;
        }

        @Override
        public void accept(int t) {
            array[offset++] = t;
        }
    }

    /**
     * {@link Sink} for implementing sort on int streams.
     */
    private static final class IntSortingSink extends Sink.ChainedInt {
        // Growable primitive buffer used when the size is unknown.
        private SpinedBuffer.OfInt b;

        IntSortingSink(Sink sink) {
            super(sink);
        }

        @Override
        public void begin(long size) {
            // Non-positive size means empty or unknown; use default capacity.
            b = (size > 0) ? new SpinedBuffer.OfInt((int) size) : new SpinedBuffer.OfInt();
        }

        @Override
        public void end() {
            int[] ints = b.asIntArray();
            Arrays.sort(ints);
            downstream.begin(ints.length);
            for (int anInt : ints)
                downstream.accept(anInt);
            downstream.end();
        }

        @Override
        public void accept(int t) {
            b.accept(t);
        }
    }

    /**
     * {@link Sink} for implementing sort on SIZED long streams.
     */
    private static final class SizedLongSortingSink extends Sink.ChainedLong {
        // Exactly-sized buffer; allocated in begin().
        private long[] array;
        // Number of values written into 'array'.
        private int offset;

        SizedLongSortingSink(Sink downstream) {
            super(downstream);
        }

        @Override
        public void begin(long size) {
            if (size >= Nodes.MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            array = new long[(int) size];
        }

        @Override
        public void end() {
            // Sort only the written prefix; upstream may have short-circuited.
            Arrays.sort(array, 0, offset);
            downstream.begin(offset);
            for (int i = 0; i < offset; i++)
                downstream.accept(array[i]);
            downstream.end();
            array = null;
        }

        @Override
        public void accept(long t) {
            array[offset++] = t;
        }
    }

    /**
     * {@link Sink} for implementing sort on long streams.
     */
    private static final class LongSortingSink extends Sink.ChainedLong {
        // Growable primitive buffer used when the size is unknown.
        private SpinedBuffer.OfLong b;

        LongSortingSink(Sink sink) {
            super(sink);
        }

        @Override
        public void begin(long size) {
            // Non-positive size means empty or unknown; use default capacity.
            b = (size > 0) ? new SpinedBuffer.OfLong((int) size) : new SpinedBuffer.OfLong();
        }

        @Override
        public void end() {
            long[] longs = b.asLongArray();
            Arrays.sort(longs);
            downstream.begin(longs.length);
            for (long aLong : longs)
                downstream.accept(aLong);
            downstream.end();
        }

        @Override
        public void accept(long t) {
            b.accept(t);
        }
    }

    /**
     * {@link Sink} for implementing sort on SIZED double streams.
     */
    private static final class SizedDoubleSortingSink extends Sink.ChainedDouble {
        // Exactly-sized buffer; allocated in begin().
        private double[] array;
        // Number of values written into 'array'.
        private int offset;

        SizedDoubleSortingSink(Sink downstream) {
            super(downstream);
        }

        @Override
        public void begin(long size) {
            if (size >= Nodes.MAX_ARRAY_SIZE)
                throw new IllegalArgumentException("Stream size exceeds max array size");
            array = new double[(int) size];
        }

        @Override
        public void end() {
            // Sort only the written prefix; upstream may have short-circuited.
            Arrays.sort(array, 0, offset);
            downstream.begin(offset);
            for (int i = 0; i < offset; i++)
                downstream.accept(array[i]);
            downstream.end();
            array = null;
        }

        @Override
        public void accept(double t) {
            array[offset++] = t;
        }
    }

    /**
     * {@link Sink} for implementing sort on double streams.
     */
    private static final class DoubleSortingSink extends Sink.ChainedDouble {
        // Growable primitive buffer used when the size is unknown.
        private SpinedBuffer.OfDouble b;

        DoubleSortingSink(Sink sink) {
            super(sink);
        }

        @Override
        public void begin(long size) {
            // Non-positive size means empty or unknown; use default capacity.
            b = (size > 0) ? new SpinedBuffer.OfDouble((int) size) : new SpinedBuffer.OfDouble();
        }

        @Override
        public void end() {
            double[] doubles = b.asDoubleArray();
            Arrays.sort(doubles);
            downstream.begin(doubles.length);
            for (double aDouble : doubles)
                downstream.accept(aDouble);
            downstream.end();
        }

        @Override
        public void accept(double t) {
            b.accept(t);
        }
    }
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.PrimitiveIterator;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.IntConsumer;
import java.util.function.IntFunction;
import java.util.function.LongConsumer;
/**
* An ordered collection of elements. Elements can be added, but not removed.
* Goes through a building phase, during which elements can be added, and a
* traversal phase, during which elements can be traversed in order but no
* further modifications are possible.
*
* <p> One or more arrays are used to store elements. The use of a multiple
* arrays has better performance characteristics than a single array used by
* {@link ArrayList}, as when the capacity of the list needs to be increased
* no copying of elements is required. This is usually beneficial in the case
* where the results will be traversed a small number of times.
*
* @param <E> the type of elements in this list
* @since 1.8
*/
class SpinedBuffer<E>
extends AbstractSpinedBuffer
implements Consumer<E>, Iterable<E> {
/*
* We optimistically hope that all the data will fit into the first chunk,
* so we try to avoid inflating the spine[] and priorElementCount[] arrays
* prematurely. So methods must be prepared to deal with these arrays being
* null. If spine is non-null, then spineIndex points to the current chunk
* within the spine, otherwise it is zero. The spine and priorElementCount
* arrays are always the same size, and for any i <= spineIndex,
* priorElementCount[i] is the sum of the sizes of all the prior chunks.
*
* The curChunk pointer is always valid. The elementIndex is the index of
* the next element to be written in curChunk; this may be past the end of
* curChunk so we have to check before writing. When we inflate the spine
* array, curChunk becomes the first element in it. When we clear the
* buffer, we discard all chunks except the first one, which we clear,
* restoring it to the initial single-chunk state.
*/
/**
* Chunk that we're currently writing into; may or may not be aliased with
* the first element of the spine.
*/
protected E[] curChunk;
/**
* All chunks, or null if there is only one chunk.
*/
protected E[][] spine;
/**
* Constructs an empty list with the specified initial capacity.
*
* @param initialCapacity the initial capacity of the list
* @throws IllegalArgumentException if the specified initial capacity
* is negative
*/
SpinedBuffer(int initialCapacity) {
super(initialCapacity);
curChunk = (E[]) new Object[1 << initialChunkPower];
}
/**
 * Constructs an empty list with an initial capacity of sixteen.
 */
SpinedBuffer() {
    super();
    // Unchecked but safe: the array is only ever accessed through E-typed
    // references inside this class.
    curChunk = (E[]) new Object[1 << initialChunkPower];
}
/**
 * Returns the current capacity of the buffer: the total number of elements
 * that the chunks allocated so far can hold.
 */
protected long capacity() {
    // Before the spine is inflated the single chunk is the whole capacity.
    if (spineIndex == 0)
        return curChunk.length;
    return priorElementCount[spineIndex] + spine[spineIndex].length;
}
// Lazily creates the spine and prior-count arrays, installing the current
// chunk as the first spine entry. No-op if the spine already exists.
private void inflateSpine() {
    if (spine == null) {
        spine = (E[][]) new Object[MIN_SPINE_SIZE][];
        priorElementCount = new long[MIN_SPINE_SIZE];
        spine[0] = curChunk;
    }
}
/**
 * Ensure that the buffer has at least capacity to hold the target size.
 * Allocates additional chunks (growing the spine arrays as needed) until
 * the total capacity reaches {@code targetSize}.
 */
protected final void ensureCapacity(long targetSize) {
    long capacity = capacity();
    if (targetSize > capacity) {
        inflateSpine();
        for (int i=spineIndex+1; targetSize > capacity; i++) {
            if (i >= spine.length) {
                // Spine full: double both bookkeeping arrays together.
                int newSpineSize = spine.length * 2;
                spine = Arrays.copyOf(spine, newSpineSize);
                priorElementCount = Arrays.copyOf(priorElementCount, newSpineSize);
            }
            int nextChunkSize = chunkSize(i);
            spine[i] = (E[]) new Object[nextChunkSize];
            // Invariant: priorElementCount[i] is the sum of all earlier chunk sizes.
            priorElementCount[i] = priorElementCount[i-1] + spine[i-1].length;
            capacity += nextChunkSize;
        }
    }
}
/**
 * Force the buffer to increase its capacity, i.e. allocate at least one
 * more chunk beyond the current capacity.
 */
protected void increaseCapacity() {
    ensureCapacity(capacity() + 1);
}
/**
 * Retrieve the element at the specified index.
 *
 * @throws IndexOutOfBoundsException if {@code index} is not less than the
 *         number of elements written so far
 */
public E get(long index) {
    // @@@ can further optimize by caching last seen spineIndex,
    // which is going to be right most of the time
    if (spineIndex == 0) {
        // Single-chunk fast path.
        if (index < elementIndex)
            return curChunk[((int) index)];
        else
            throw new IndexOutOfBoundsException(Long.toString(index));
    }

    if (index >= count())
        throw new IndexOutOfBoundsException(Long.toString(index));

    // Linear scan of the chunks for the one covering 'index'.
    for (int j=0; j <= spineIndex; j++)
        if (index < priorElementCount[j] + spine[j].length)
            return spine[j][((int) (index - priorElementCount[j]))];

    throw new IndexOutOfBoundsException(Long.toString(index));
}
/**
 * Copy the elements, starting at the specified offset, into the specified
 * array.
 *
 * @throws IndexOutOfBoundsException if the elements do not fit in the
 *         destination array (the {@code finalOffset < offset} test also
 *         catches long overflow of {@code offset + count()})
 */
public void copyInto(E[] array, int offset) {
    long finalOffset = offset + count();
    if (finalOffset > array.length || finalOffset < offset) {
        throw new IndexOutOfBoundsException("does not fit");
    }

    if (spineIndex == 0)
        System.arraycopy(curChunk, 0, array, offset, elementIndex);
    else {
        // full chunks
        for (int i=0; i < spineIndex; i++) {
            System.arraycopy(spine[i], 0, array, offset, spine[i].length);
            offset += spine[i].length;
        }
        // Partially-filled current chunk, if anything has been written to it.
        if (elementIndex > 0)
            System.arraycopy(curChunk, 0, array, offset, elementIndex);
    }
}
/**
 * Create a new array using the specified array factory, and copy the
 * elements into it.
 */
public E[] asArray(IntFunction<E[]> arrayFactory) {
    // @@@ will fail for size == MAX_VALUE
    E[] result = arrayFactory.apply((int) count());
    copyInto(result, 0);
    return result;
}
@Override
public void clear() {
    if (spine != null) {
        // Discard all chunks but the first, null out its slots so the
        // elements can be garbage collected, and return to the initial
        // single-chunk state.
        curChunk = spine[0];
        for (int i=0; i<curChunk.length; i++)
            curChunk[i] = null;
        spine = null;
        priorElementCount = null;
    }
    else {
        // Only the written prefix of the single chunk needs clearing.
        for (int i=0; i<elementIndex; i++)
            curChunk[i] = null;
    }
    elementIndex = 0;
    spineIndex = 0;
}
@Override
public Iterator<E> iterator() {
    // Adapt the spliterator; traversal follows insertion order.
    return Spliterators.iteratorFromSpliterator(spliterator());
}
/**
 * Presents each element, in insertion order, to the provided consumer:
 * first every completely-filled chunk, then the written prefix of the
 * current chunk.
 */
@Override
public void forEach(Consumer<? super E> consumer) {
    // Chunks that have been filled completely, if any.
    for (int j = 0; j < spineIndex; j++) {
        E[] chunk = spine[j];
        for (int i = 0; i < chunk.length; i++)
            consumer.accept(chunk[i]);
    }
    // Partially-filled current chunk.
    for (int i = 0; i < elementIndex; i++)
        consumer.accept(curChunk[i]);
}
@Override
public void accept(E e) {
    // Current chunk full: advance to the next chunk, allocating it first
    // if it does not exist yet.
    if (elementIndex == curChunk.length) {
        inflateSpine();
        if (spineIndex+1 >= spine.length || spine[spineIndex+1] == null)
            increaseCapacity();
        elementIndex = 0;
        ++spineIndex;
        curChunk = spine[spineIndex];
    }
    curChunk[elementIndex++] = e;
}
/**
 * Returns a debug representation of the buffer's contents in
 * insertion order.
 */
@Override
public String toString() {
    ArrayList<E> snapshot = new ArrayList<>();
    forEach(snapshot::add);
    return "SpinedBuffer:" + snapshot.toString();
}
// Characteristics of the buffer's spliterator: element count is known
// exactly, traversal follows insertion order, and splits are exactly sized.
private static final int SPLITERATOR_CHARACTERISTICS
        = Spliterator.SIZED | Spliterator.ORDERED | Spliterator.SUBSIZED;

/**
 * Return a {@link Spliterator} describing the contents of the buffer.
 * The spliterator is late-bound to the buffer's state and must not be
 * used while the buffer is still being written to.
 */
public Spliterator<E> spliterator() {
    return new Spliterator<E>() {
        // The current spine index
        int splSpineIndex;

        // The current element index into the current spine
        int splElementIndex;

        // When splSpineIndex >= spineIndex and splElementIndex >= elementIndex then
        // this spliterator is fully traversed
        // tryAdvance can set splSpineIndex > spineIndex if the last spine is full

        // The current spine array
        E[] splChunk = (spine == null) ? curChunk : spine[0];

        @Override
        public long estimateSize() {
            // Exact: total count minus the elements already consumed.
            return (spine == null)
                   ? (elementIndex - splElementIndex)
                   : count() - (priorElementCount[splSpineIndex] + splElementIndex);
        }

        @Override
        public int characteristics() {
            return SPLITERATOR_CHARACTERISTICS;
        }

        @Override
        public boolean tryAdvance(Consumer<? super E> consumer) {
            if (splSpineIndex < spineIndex
                || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
                consumer.accept(splChunk[splElementIndex++]);

                // Chunk exhausted: advance to the next chunk, if present.
                if (splElementIndex == splChunk.length) {
                    splElementIndex = 0;
                    ++splSpineIndex;
                    if (spine != null && splSpineIndex < spine.length)
                        splChunk = spine[splSpineIndex];
                }
                return true;
            }
            return false;
        }

        @Override
        public void forEachRemaining(Consumer<? super E> consumer) {
            if (splSpineIndex < spineIndex
                || (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
                int i = splElementIndex;
                // completed chunks, if any
                for (int sp = splSpineIndex; sp < spineIndex; sp++) {
                    E[] chunk = spine[sp];
                    for (; i < chunk.length; i++) {
                        consumer.accept(chunk[i]);
                    }
                    i = 0;
                }

                // current chunk
                E[] chunk = curChunk;
                int hElementIndex = elementIndex;
                for (; i < hElementIndex; i++) {
                    consumer.accept(chunk[i]);
                }

                // Mark this spliterator as fully traversed.
                splSpineIndex = spineIndex;
                splElementIndex = elementIndex;
            }
        }

        @Override
        public Spliterator<E> trySplit() {
            if (splSpineIndex < spineIndex) {
                // Split off the remainder of the current (completed) chunk.
                Spliterator<E> ret = Arrays.spliterator(spine[splSpineIndex],
                                                        splElementIndex, spine[splSpineIndex].length);
                splChunk = spine[++splSpineIndex];
                splElementIndex = 0;
                return ret;
            }
            else if (splSpineIndex == spineIndex) {
                // On the last chunk: split off half of what remains.
                int t = (elementIndex - splElementIndex) / 2;
                if (t == 0)
                    return null;
                else {
                    Spliterator<E> ret = Arrays.spliterator(curChunk, splElementIndex, splElementIndex + t);
                    splElementIndex += t;
                    return ret;
                }
            }
            else {
                return null;
            }
        }
    };
}
/**
* An ordered collection of primitive values. Elements can be added, but
* not removed. Goes through a building phase, during which elements can be
* added, and a traversal phase, during which elements can be traversed in
* order but no further modifications are possible.
*
* <p> One or more arrays are used to store elements. The use of a multiple
* arrays has better performance characteristics than a single array used by
* {@link ArrayList}, as when the capacity of the list needs to be increased
* no copying of elements is required. This is usually beneficial in the case
* where the results will be traversed a small number of times.
*
* @param <E> the wrapper type for this primitive type
* @param <T_ARR> the array type for this primitive type
* @param <T_CONS> the Consumer type for this primitive type
*/
abstract static class OfPrimitive<E, T_ARR, T_CONS>
extends AbstractSpinedBuffer implements Iterable<E> {
/*
* We optimistically hope that all the data will fit into the first chunk,
* so we try to avoid inflating the spine[] and priorElementCount[] arrays
* prematurely. So methods must be prepared to deal with these arrays being
* null. If spine is non-null, then spineIndex points to the current chunk
* within the spine, otherwise it is zero. The spine and priorElementCount
* arrays are always the same size, and for any i <= spineIndex,
* priorElementCount[i] is the sum of the sizes of all the prior chunks.
*
* The curChunk pointer is always valid. The elementIndex is the index of
* the next element to be written in curChunk; this may be past the end of
* curChunk so we have to check before writing. When we inflate the spine
* array, curChunk becomes the first element in it. When we clear the
* buffer, we discard all chunks except the first one, which we clear,
* restoring it to the initial single-chunk state.
*/
// The chunk we're currently writing into
T_ARR curChunk;
// All chunks, or null if there is only one chunk
T_ARR[] spine;
/**
 * Constructs an empty list with the specified initial capacity.
 *
 * @param initialCapacity the initial capacity of the list
 * @throws IllegalArgumentException if the specified initial capacity
 *         is negative
 */
OfPrimitive(int initialCapacity) {
    super(initialCapacity);
    // Subclass supplies the concrete primitive array type.
    curChunk = newArray(1 << initialChunkPower);
}
/**
 * Constructs an empty list with an initial capacity of sixteen.
 */
OfPrimitive() {
    super();
    // Subclass supplies the concrete primitive array type.
    curChunk = newArray(1 << initialChunkPower);
}
@Override
public abstract Iterator<E> iterator();
@Override
public abstract void forEach(Consumer<? super E> consumer);
/** Create a new array-of-array of the proper type and size */
protected abstract T_ARR[] newArrayArray(int size);
/** Create a new array of the proper type and size */
protected abstract T_ARR newArray(int size);
/** Get the length of an array */
protected abstract int arrayLength(T_ARR array);
/** Iterate an array with the provided consumer */
protected abstract void arrayForEach(T_ARR array, int from, int to,
T_CONS consumer);
protected long capacity() {
return (spineIndex == 0)
? arrayLength(curChunk)
: priorElementCount[spineIndex] + arrayLength(spine[spineIndex]);
}
private void inflateSpine() {
if (spine == null) {
spine = newArrayArray(MIN_SPINE_SIZE);
priorElementCount = new long[MIN_SPINE_SIZE];
spine[0] = curChunk;
}
}
protected final void ensureCapacity(long targetSize) {
long capacity = capacity();
if (targetSize > capacity) {
inflateSpine();
for (int i=spineIndex+1; targetSize > capacity; i++) {
if (i >= spine.length) {
int newSpineSize = spine.length * 2;
spine = Arrays.copyOf(spine, newSpineSize);
priorElementCount = Arrays.copyOf(priorElementCount, newSpineSize);
}
int nextChunkSize = chunkSize(i);
spine[i] = newArray(nextChunkSize);
priorElementCount[i] = priorElementCount[i-1] + arrayLength(spine[i - 1]);
capacity += nextChunkSize;
}
}
}
protected void increaseCapacity() {
ensureCapacity(capacity() + 1);
}
protected int chunkFor(long index) {
if (spineIndex == 0) {
if (index < elementIndex)
return 0;
else
throw new IndexOutOfBoundsException(Long.toString(index));
}
if (index >= count())
throw new IndexOutOfBoundsException(Long.toString(index));
for (int j=0; j <= spineIndex; j++)
if (index < priorElementCount[j] + arrayLength(spine[j]))
return j;
throw new IndexOutOfBoundsException(Long.toString(index));
}
public void copyInto(T_ARR array, int offset) {
long finalOffset = offset + count();
if (finalOffset > arrayLength(array) || finalOffset < offset) {
throw new IndexOutOfBoundsException("does not fit");
}
if (spineIndex == 0)
System.arraycopy(curChunk, 0, array, offset, elementIndex);
else {
// full chunks
for (int i=0; i < spineIndex; i++) {
System.arraycopy(spine[i], 0, array, offset, arrayLength(spine[i]));
offset += arrayLength(spine[i]);
}
if (elementIndex > 0)
System.arraycopy(curChunk, 0, array, offset, elementIndex);
}
}
public T_ARR asPrimitiveArray() {
// @@@ will fail for size == MAX_VALUE
T_ARR result = newArray((int) count());
copyInto(result, 0);
return result;
}
protected void preAccept() {
if (elementIndex == arrayLength(curChunk)) {
inflateSpine();
if (spineIndex+1 >= spine.length || spine[spineIndex+1] == null)
increaseCapacity();
elementIndex = 0;
++spineIndex;
curChunk = spine[spineIndex];
}
}
public void clear() {
if (spine != null) {
curChunk = spine[0];
spine = null;
priorElementCount = null;
}
elementIndex = 0;
spineIndex = 0;
}
public void forEach(T_CONS consumer) {
// completed chunks, if any
for (int j = 0; j < spineIndex; j++)
arrayForEach(spine[j], 0, arrayLength(spine[j]), consumer);
// current chunk
arrayForEach(curChunk, 0, elementIndex, consumer);
}
abstract class BaseSpliterator<T_SPLITER extends Spliterator<E>>
implements Spliterator<E> {
// The current spine index
int splSpineIndex;
// The current element index into the current spine
int splElementIndex;
// When splSpineIndex >= spineIndex and splElementIndex >= elementIndex then
// this spliterator is fully traversed
// tryAdvance can set splSpineIndex > spineIndex if the last spine is full
// The current spine array
T_ARR splChunk = (spine == null) ? curChunk : spine[0];
abstract void arrayForOne(T_ARR array, int index, T_CONS consumer);
abstract T_SPLITER arraySpliterator(T_ARR array, int offset, int len);
@Override
public long estimateSize() {
return (spine == null)
? (elementIndex - splElementIndex)
: count() - (priorElementCount[splSpineIndex] + splElementIndex);
}
@Override
public int characteristics() {
return SPLITERATOR_CHARACTERISTICS;
}
public boolean tryAdvance(T_CONS consumer) {
if (splSpineIndex < spineIndex
|| (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
arrayForOne(splChunk, splElementIndex++, consumer);
if (splElementIndex == arrayLength(splChunk)) {
splElementIndex = 0;
++splSpineIndex;
if (spine != null && splSpineIndex < spine.length)
splChunk = spine[splSpineIndex];
}
return true;
}
return false;
}
public void forEachRemaining(T_CONS consumer) {
if (splSpineIndex < spineIndex
|| (splSpineIndex == spineIndex && splElementIndex < elementIndex)) {
int i = splElementIndex;
// completed chunks, if any
for (int sp = splSpineIndex; sp < spineIndex; sp++) {
T_ARR chunk = spine[sp];
arrayForEach(chunk, i, arrayLength(chunk), consumer);
i = 0;
}
arrayForEach(curChunk, i, elementIndex, consumer);
splSpineIndex = spineIndex;
splElementIndex = elementIndex;
}
}
@Override
public T_SPLITER trySplit() {
if (splSpineIndex < spineIndex) {
T_SPLITER ret = arraySpliterator(spine[splSpineIndex], splElementIndex,
arrayLength(spine[splSpineIndex]) - splElementIndex);
splChunk = spine[++splSpineIndex];
splElementIndex = 0;
return ret;
}
else if (splSpineIndex == spineIndex) {
int t = (elementIndex - splElementIndex) / 2;
if (t == 0)
return null;
else {
T_SPLITER ret = arraySpliterator(curChunk, splElementIndex, t);
splElementIndex += t;
return ret;
}
}
else {
return null;
}
}
}
}
/**
* An ordered collection of {@code int} values.
*/
static class OfInt extends SpinedBuffer.OfPrimitive<Integer, int[], IntConsumer>
implements IntConsumer {
OfInt() { }
OfInt(int initialCapacity) {
super(initialCapacity);
}
@Override
public void forEach(Consumer<? super Integer> consumer) {
if (consumer instanceof IntConsumer) {
forEach((IntConsumer) consumer);
}
else {
if (Tripwire.ENABLED)
Tripwire.trip(getClass(), "{0} calling SpinedBuffer.OfInt.forEach(Consumer)");
spliterator().forEachRemaining(consumer);
}
}
@Override
protected int[][] newArrayArray(int size) {
return new int[size][];
}
@Override
protected int[] newArray(int size) {
return new int[size];
}
@Override
protected int arrayLength(int[] array) {
return array.length;
}
@Override
protected void arrayForEach(int[] array,
int from, int to,
IntConsumer consumer) {
for (int i = from; i < to; i++)
consumer.accept(array[i]);
}
@Override
public void accept(int i) {
preAccept();
curChunk[elementIndex++] = i;
}
public int get(long index) {
int ch = chunkFor(index);
if (spineIndex == 0 && ch == 0)
return curChunk[(int) index];
else
return spine[ch][(int) (index-priorElementCount[ch])];
}
public int[] asIntArray() {
return asPrimitiveArray();
}
@Override
public PrimitiveIterator.OfInt iterator() {
return Spliterators.iteratorFromSpliterator(spliterator());
}
public Spliterator.OfInt spliterator() {
class Splitr extends BaseSpliterator<Spliterator.OfInt>
implements Spliterator.OfInt {
@Override
void arrayForOne(int[] array, int index, IntConsumer consumer) {
consumer.accept(array[index]);
}
@Override
Spliterator.OfInt arraySpliterator(int[] array, int offset, int len) {
return Arrays.spliterator(array, offset, offset+len);
}
};
return new Splitr();
}
@Override
public String toString() {
int[] array = asIntArray();
if (array.length < 200) {
return String.format("%s[length=%d, chunks=%d]%s",
getClass().getSimpleName(), array.length,
spineIndex, Arrays.toString(array));
}
else {
int[] array2 = Arrays.copyOf(array, 200);
return String.format("%s[length=%d, chunks=%d]%s...",
getClass().getSimpleName(), array.length,
spineIndex, Arrays.toString(array2));
}
}
}
/**
* An ordered collection of {@code long} values.
*/
static class OfLong extends SpinedBuffer.OfPrimitive<Long, long[], LongConsumer>
implements LongConsumer {
OfLong() { }
OfLong(int initialCapacity) {
super(initialCapacity);
}
@Override
public void forEach(Consumer<? super Long> consumer) {
if (consumer instanceof LongConsumer) {
forEach((LongConsumer) consumer);
}
else {
if (Tripwire.ENABLED)
Tripwire.trip(getClass(), "{0} calling SpinedBuffer.OfLong.forEach(Consumer)");
spliterator().forEachRemaining(consumer);
}
}
@Override
protected long[][] newArrayArray(int size) {
return new long[size][];
}
@Override
protected long[] newArray(int size) {
return new long[size];
}
@Override
protected int arrayLength(long[] array) {
return array.length;
}
@Override
protected void arrayForEach(long[] array,
int from, int to,
LongConsumer consumer) {
for (int i = from; i < to; i++)
consumer.accept(array[i]);
}
@Override
public void accept(long i) {
preAccept();
curChunk[elementIndex++] = i;
}
public long get(long index) {
int ch = chunkFor(index);
if (spineIndex == 0 && ch == 0)
return curChunk[(int) index];
else
return spine[ch][(int) (index-priorElementCount[ch])];
}
public long[] asLongArray() {
return asPrimitiveArray();
}
@Override
public PrimitiveIterator.OfLong iterator() {
return Spliterators.iteratorFromSpliterator(spliterator());
}
public Spliterator.OfLong spliterator() {
class Splitr extends BaseSpliterator<Spliterator.OfLong>
implements Spliterator.OfLong {
@Override
void arrayForOne(long[] array, int index, LongConsumer consumer) {
consumer.accept(array[index]);
}
@Override
Spliterator.OfLong arraySpliterator(long[] array, int offset, int len) {
return Arrays.spliterator(array, offset, offset+len);
}
};
return new Splitr();
}
@Override
public String toString() {
long[] array = asLongArray();
if (array.length < 200) {
return String.format("%s[length=%d, chunks=%d]%s",
getClass().getSimpleName(), array.length,
spineIndex, Arrays.toString(array));
}
else {
long[] array2 = Arrays.copyOf(array, 200);
return String.format("%s[length=%d, chunks=%d]%s...",
getClass().getSimpleName(), array.length,
spineIndex, Arrays.toString(array2));
}
}
}
/**
* An ordered collection of {@code double} values.
*/
static class OfDouble
extends SpinedBuffer.OfPrimitive<Double, double[], DoubleConsumer>
implements DoubleConsumer {
OfDouble() { }
OfDouble(int initialCapacity) {
super(initialCapacity);
}
@Override
public void forEach(Consumer<? super Double> consumer) {
if (consumer instanceof DoubleConsumer) {
forEach((DoubleConsumer) consumer);
}
else {
if (Tripwire.ENABLED)
Tripwire.trip(getClass(), "{0} calling SpinedBuffer.OfDouble.forEach(Consumer)");
spliterator().forEachRemaining(consumer);
}
}
@Override
protected double[][] newArrayArray(int size) {
return new double[size][];
}
@Override
protected double[] newArray(int size) {
return new double[size];
}
@Override
protected int arrayLength(double[] array) {
return array.length;
}
@Override
protected void arrayForEach(double[] array,
int from, int to,
DoubleConsumer consumer) {
for (int i = from; i < to; i++)
consumer.accept(array[i]);
}
@Override
public void accept(double i) {
preAccept();
curChunk[elementIndex++] = i;
}
public double get(long index) {
int ch = chunkFor(index);
if (spineIndex == 0 && ch == 0)
return curChunk[(int) index];
else
return spine[ch][(int) (index-priorElementCount[ch])];
}
public double[] asDoubleArray() {
return asPrimitiveArray();
}
@Override
public PrimitiveIterator.OfDouble iterator() {
return Spliterators.iteratorFromSpliterator(spliterator());
}
public Spliterator.OfDouble spliterator() {
class Splitr extends BaseSpliterator<Spliterator.OfDouble>
implements Spliterator.OfDouble {
@Override
void arrayForOne(double[] array, int index, DoubleConsumer consumer) {
consumer.accept(array[index]);
}
@Override
Spliterator.OfDouble arraySpliterator(double[] array, int offset, int len) {
return Arrays.spliterator(array, offset, offset+len);
}
}
return new Splitr();
}
@Override
public String toString() {
double[] array = asDoubleArray();
if (array.length < 200) {
return String.format("%s[length=%d, chunks=%d]%s",
getClass().getSimpleName(), array.length,
spineIndex, Arrays.toString(array));
}
else {
double[] array2 = Arrays.copyOf(array, 200);
return String.format("%s[length=%d, chunks=%d]%s...",
getClass().getSimpleName(), array.length,
spineIndex, Arrays.toString(array2));
}
}
}
}
/*
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.Comparator;
import java.util.Spliterator;
import java.util.function.BooleanSupplier;
import java.util.function.Consumer;
import java.util.function.DoubleConsumer;
import java.util.function.IntConsumer;
import java.util.function.LongConsumer;
import java.util.function.Supplier;
/**
* Spliterator implementations for wrapping and delegating spliterators, used
* in the implementation of the {@link Stream#spliterator()} method.
*
* @since 1.8
*/
class StreamSpliterators {
/**
* Abstract wrapping spliterator that binds to the spliterator of a
* pipeline helper on first operation.
*
* <p>This spliterator is not late-binding and will bind to the source
* spliterator when first operated on.
*
* <p>A wrapping spliterator produced from a sequential stream
* cannot be split if there are stateful operations present.
*/
private static abstract class AbstractWrappingSpliterator<P_IN, P_OUT,
T_BUFFER extends AbstractSpinedBuffer>
implements Spliterator<P_OUT> {
// @@@ Detect if stateful operations are present or not
// If not then can split otherwise cannot
/**
* True if this spliterator supports splitting
*/
final boolean isParallel;
final PipelineHelper<P_OUT> ph;
/**
* Supplier for the source spliterator. Client provides either a
* spliterator or a supplier.
*/
private Supplier<Spliterator<P_IN>> spliteratorSupplier;
/**
* Source spliterator. Either provided from client or obtained from
* supplier.
*/
Spliterator<P_IN> spliterator;
/**
* Sink chain for the downstream stages of the pipeline, ultimately
* leading to the buffer. Used during partial traversal.
*/
Sink<P_IN> bufferSink;
/**
* A function that advances one element of the spliterator, pushing
* it to bufferSink. Returns whether any elements were processed.
* Used during partial traversal.
*/
BooleanSupplier pusher;
/** Next element to consume from the buffer, used during partial traversal */
long nextToConsume;
/** Buffer into which elements are pushed. Used during partial traversal. */
T_BUFFER buffer;
/**
* True if full traversal has occurred (with possible cancelation).
* If doing a partial traversal, there may be still elements in buffer.
*/
boolean finished;
/**
* Construct an AbstractWrappingSpliterator from a
* {@code Supplier<Spliterator>}.
*/
AbstractWrappingSpliterator(PipelineHelper<P_OUT> ph,
Supplier<Spliterator<P_IN>> spliteratorSupplier,
boolean parallel) {
this.ph = ph;
this.spliteratorSupplier = spliteratorSupplier;
this.spliterator = null;
this.isParallel = parallel;
}
/**
* Construct an AbstractWrappingSpliterator from a
* {@code Spliterator}.
*/
AbstractWrappingSpliterator(PipelineHelper<P_OUT> ph,
Spliterator<P_IN> spliterator,
boolean parallel) {
this.ph = ph;
this.spliteratorSupplier = null;
this.spliterator = spliterator;
this.isParallel = parallel;
}
/**
* Called before advancing to set up spliterator, if needed.
*/
final void init() {
if (spliterator == null) {
spliterator = spliteratorSupplier.get();
spliteratorSupplier = null;
}
}
/**
* Get an element from the source, pushing it into the sink chain,
* setting up the buffer if needed
* @return whether there are elements to consume from the buffer
*/
final boolean doAdvance() {
if (buffer == null) {
if (finished)
return false;
init();
initPartialTraversalState();
nextToConsume = 0;
bufferSink.begin(spliterator.getExactSizeIfKnown());
return fillBuffer();
}
else {
++nextToConsume;
boolean hasNext = nextToConsume < buffer.count();
if (!hasNext) {
nextToConsume = 0;
buffer.clear();
hasNext = fillBuffer();
}
return hasNext;
}
}
/**
* Invokes the shape-specific constructor with the provided arguments
* and returns the result.
*/
abstract AbstractWrappingSpliterator<P_IN, P_OUT, ?> wrap(Spliterator<P_IN> s);
/**
* Initializes buffer, sink chain, and pusher for a shape-specific
* implementation.
*/
abstract void initPartialTraversalState();
@Override
public Spliterator<P_OUT> trySplit() {
if (isParallel && !finished) {
init();
Spliterator<P_IN> split = spliterator.trySplit();
return (split == null) ? null : wrap(split);
}
else
return null;
}
/**
* If the buffer is empty, push elements into the sink chain until
* the source is empty or cancellation is requested.
* @return whether there are elements to consume from the buffer
*/
private boolean fillBuffer() {
while (buffer.count() == 0) {
if (bufferSink.cancellationRequested() || !pusher.getAsBoolean()) {
if (finished)
return false;
else {
bufferSink.end(); // might trigger more elements
finished = true;
}
}
}
return true;
}
@Override
public final long estimateSize() {
init();
return StreamOpFlag.SIZED.isKnown(ph.getStreamAndOpFlags())
? spliterator.estimateSize()
: Long.MAX_VALUE;
}
@Override
public final long getExactSizeIfKnown() {
init();
return StreamOpFlag.SIZED.isKnown(ph.getStreamAndOpFlags())
? spliterator.getExactSizeIfKnown()
: -1;
}
@Override
public final int characteristics() {
init();
// Get the characteristics from the pipeline
int c = StreamOpFlag.toCharacteristics(StreamOpFlag.toStreamFlags(ph.getStreamAndOpFlags()));
// Mask off the size and uniform characteristics and replace with
// those of the spliterator
// Note that a non-uniform spliterator can change from something
// with an exact size to an estimate for a sub-split, for example
// with HashSet where the size is known at the top level spliterator
// but for sub-splits only an estimate is known
if ((c & Spliterator.SIZED) != 0) {
c &= ~(Spliterator.SIZED | Spliterator.SUBSIZED);
c |= (spliterator.characteristics() & Spliterator.SIZED & Spliterator.SUBSIZED);
}
return c;
}
@Override
public Comparator<? super P_OUT> getComparator() {
if (!hasCharacteristics(SORTED))
throw new IllegalStateException();
return null;
}
@Override
public final String toString() {
return String.format("%s[%s]", getClass().getName(), spliterator);
}
}
static final class WrappingSpliterator<P_IN, P_OUT>
extends AbstractWrappingSpliterator<P_IN, P_OUT, SpinedBuffer<P_OUT>> {
WrappingSpliterator(PipelineHelper<P_OUT> ph,
Supplier<Spliterator<P_IN>> supplier,
boolean parallel) {
super(ph, supplier, parallel);
}
WrappingSpliterator(PipelineHelper<P_OUT> ph,
Spliterator<P_IN> spliterator,
boolean parallel) {
super(ph, spliterator, parallel);
}
@Override
WrappingSpliterator<P_IN, P_OUT> wrap(Spliterator<P_IN> s) {
return new WrappingSpliterator<>(ph, s, isParallel);
}
@Override
void initPartialTraversalState() {
SpinedBuffer<P_OUT> b = new SpinedBuffer<>();
buffer = b;
bufferSink = ph.wrapSink(b::accept);
pusher = () -> spliterator.tryAdvance(bufferSink);
}
@Override
public boolean tryAdvance(Consumer<? super P_OUT> consumer) {
boolean hasNext = doAdvance();
if (hasNext)
consumer.accept(buffer.get(nextToConsume));
return hasNext;
}
@Override
public void forEachRemaining(Consumer<? super P_OUT> consumer) {
if (buffer == null && !finished) {
init();
ph.wrapAndCopyInto((Sink<P_OUT>) consumer::accept, spliterator);
finished = true;
}
else {
while (tryAdvance(consumer)) { }
}
}
}
static final class IntWrappingSpliterator<P_IN>
extends AbstractWrappingSpliterator<P_IN, Integer, SpinedBuffer.OfInt>
implements Spliterator.OfInt {
IntWrappingSpliterator(PipelineHelper<Integer> ph,
Supplier<Spliterator<P_IN>> supplier,
boolean parallel) {
super(ph, supplier, parallel);
}
IntWrappingSpliterator(PipelineHelper<Integer> ph,
Spliterator<P_IN> spliterator,
boolean parallel) {
super(ph, spliterator, parallel);
}
@Override
AbstractWrappingSpliterator<P_IN, Integer, ?> wrap(Spliterator<P_IN> s) {
return new IntWrappingSpliterator<>(ph, s, isParallel);
}
@Override
void initPartialTraversalState() {
SpinedBuffer.OfInt b = new SpinedBuffer.OfInt();
buffer = b;
bufferSink = ph.wrapSink((Sink.OfInt) b::accept);
pusher = () -> spliterator.tryAdvance(bufferSink);
}
@Override
public Spliterator.OfInt trySplit() {
return (Spliterator.OfInt) super.trySplit();
}
@Override
public boolean tryAdvance(IntConsumer consumer) {
boolean hasNext = doAdvance();
if (hasNext)
consumer.accept(buffer.get(nextToConsume));
return hasNext;
}
@Override
public void forEachRemaining(IntConsumer consumer) {
if (buffer == null && !finished) {
init();
ph.wrapAndCopyInto((Sink.OfInt) consumer::accept, spliterator);
finished = true;
}
else {
while (tryAdvance(consumer)) { }
}
}
}
static final class LongWrappingSpliterator<P_IN>
extends AbstractWrappingSpliterator<P_IN, Long, SpinedBuffer.OfLong>
implements Spliterator.OfLong {
LongWrappingSpliterator(PipelineHelper<Long> ph,
Supplier<Spliterator<P_IN>> supplier,
boolean parallel) {
super(ph, supplier, parallel);
}
LongWrappingSpliterator(PipelineHelper<Long> ph,
Spliterator<P_IN> spliterator,
boolean parallel) {
super(ph, spliterator, parallel);
}
@Override
AbstractWrappingSpliterator<P_IN, Long, ?> wrap(Spliterator<P_IN> s) {
return new LongWrappingSpliterator<>(ph, s, isParallel);
}
@Override
void initPartialTraversalState() {
SpinedBuffer.OfLong b = new SpinedBuffer.OfLong();
buffer = b;
bufferSink = ph.wrapSink((Sink.OfLong) b::accept);
pusher = () -> spliterator.tryAdvance(bufferSink);
}
@Override
public Spliterator.OfLong trySplit() {
return (Spliterator.OfLong) super.trySplit();
}
@Override
public boolean tryAdvance(LongConsumer consumer) {
boolean hasNext = doAdvance();
if (hasNext)
consumer.accept(buffer.get(nextToConsume));
return hasNext;
}
@Override
public void forEachRemaining(LongConsumer consumer) {
if (buffer == null && !finished) {
init();
ph.wrapAndCopyInto((Sink.OfLong) consumer::accept, spliterator);
finished = true;
}
else {
while (tryAdvance(consumer)) { }
}
}
}
static final class DoubleWrappingSpliterator<P_IN>
extends AbstractWrappingSpliterator<P_IN, Double, SpinedBuffer.OfDouble>
implements Spliterator.OfDouble {
DoubleWrappingSpliterator(PipelineHelper<Double> ph,
Supplier<Spliterator<P_IN>> supplier,
boolean parallel) {
super(ph, supplier, parallel);
}
DoubleWrappingSpliterator(PipelineHelper<Double> ph,
Spliterator<P_IN> spliterator,
boolean parallel) {
super(ph, spliterator, parallel);
}
@Override
AbstractWrappingSpliterator<P_IN, Double, ?> wrap(Spliterator<P_IN> s) {
return new DoubleWrappingSpliterator<>(ph, s, isParallel);
}
@Override
void initPartialTraversalState() {
SpinedBuffer.OfDouble b = new SpinedBuffer.OfDouble();
buffer = b;
bufferSink = ph.wrapSink((Sink.OfDouble) b::accept);
pusher = () -> spliterator.tryAdvance(bufferSink);
}
@Override
public Spliterator.OfDouble trySplit() {
return (Spliterator.OfDouble) super.trySplit();
}
@Override
public boolean tryAdvance(DoubleConsumer consumer) {
boolean hasNext = doAdvance();
if (hasNext)
consumer.accept(buffer.get(nextToConsume));
return hasNext;
}
@Override
public void forEachRemaining(DoubleConsumer consumer) {
if (buffer == null && !finished) {
init();
ph.wrapAndCopyInto((Sink.OfDouble) consumer::accept, spliterator);
finished = true;
}
else {
while (tryAdvance(consumer)) { }
}
}
}
/**
* Spliterator implementation that delegates to an underlying spliterator,
* acquiring the spliterator from a {@code Supplier<Spliterator>} on the
* first call to any spliterator method.
* @param <T>
*/
static class DelegatingSpliterator<T> implements Spliterator<T> {
private final Supplier<Spliterator<T>> supplier;
private Spliterator<T> s;
@SuppressWarnings("unchecked")
DelegatingSpliterator(Supplier<? extends Spliterator<T>> supplier) {
this.supplier = (Supplier<Spliterator<T>>) supplier;
}
Spliterator<T> get() {
if (s == null) {
s = supplier.get();
}
return s;
}
@Override
public Spliterator<T> trySplit() {
return get().trySplit();
}
@Override
public boolean tryAdvance(Consumer<? super T> consumer) {
return get().tryAdvance(consumer);
}
@Override
public void forEachRemaining(Consumer<? super T> consumer) {
get().forEachRemaining(consumer);
}
@Override
public long estimateSize() {
return get().estimateSize();
}
@Override
public int characteristics() {
return get().characteristics();
}
@Override
public Comparator<? super T> getComparator() {
return get().getComparator();
}
@Override
public long getExactSizeIfKnown() {
return get().getExactSizeIfKnown();
}
@Override
public String toString() {
return getClass().getName() + "[" + get() + "]";
}
static final class OfInt extends DelegatingSpliterator<Integer> implements Spliterator.OfInt {
private Spliterator.OfInt s;
OfInt(Supplier<Spliterator.OfInt> supplier) {
super(supplier);
}
@Override
Spliterator.OfInt get() {
if (s == null) {
s = (Spliterator.OfInt) super.get();
}
return s;
}
@Override
public Spliterator.OfInt trySplit() {
return get().trySplit();
}
@Override
public boolean tryAdvance(IntConsumer consumer) {
return get().tryAdvance(consumer);
}
@Override
public void forEachRemaining(IntConsumer consumer) {
get().forEachRemaining(consumer);
}
}
static final class OfLong extends DelegatingSpliterator<Long> implements Spliterator.OfLong {
private Spliterator.OfLong s;
OfLong(Supplier<Spliterator.OfLong> supplier) {
super(supplier);
}
@Override
Spliterator.OfLong get() {
if (s == null) {
s = (Spliterator.OfLong) super.get();
}
return s;
}
@Override
public Spliterator.OfLong trySplit() {
return get().trySplit();
}
@Override
public boolean tryAdvance(LongConsumer consumer) {
return get().tryAdvance(consumer);
}
@Override
public void forEachRemaining(LongConsumer consumer) {
get().forEachRemaining(consumer);
}
}
static final class OfDouble extends DelegatingSpliterator<Double> implements Spliterator.OfDouble {
private Spliterator.OfDouble s;
OfDouble(Supplier<Spliterator.OfDouble> supplier) {
super(supplier);
}
@Override
Spliterator.OfDouble get() {
if (s == null) {
s = (Spliterator.OfDouble) super.get();
}
return s;
}
@Override
public Spliterator.OfDouble trySplit() {
return get().trySplit();
}
@Override
public boolean tryAdvance(DoubleConsumer consumer) {
return get().tryAdvance(consumer);
}
@Override
public void forEachRemaining(DoubleConsumer consumer) {
get().forEachRemaining(consumer);
}
}
}
}
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util.stream;
import java.util.Objects;
import java.util.Spliterator;
import java.util.function.Supplier;
/**
* Low-level utility methods for creating and manipulating streams.
*
* <p>This class is mostly for library writers presenting stream views
* of their data structures; most static stream methods for end users are in
* {@link Streams}.
*
* <p>Unless otherwise stated, streams are created as sequential
* streams. A sequential stream can be transformed into a parallel stream by
* calling the {@code parallel()} method on the created stream.
*
* @since 1.8
*/
public class StreamSupport {
/**
* Creates a new sequential {@code Stream} from a {@code Spliterator}.
*
* <p>The spliterator is only traversed, split, or queried for estimated
* size after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param <T> the type of stream elements
* @param spliterator a {@code Spliterator} describing the stream elements
* @return a new sequential {@code Stream}
*/
public static <T> Stream<T> stream(Spliterator<T> spliterator) {
Objects.requireNonNull(spliterator);
return new ReferencePipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
false);
}
/**
* Creates a new parallel {@code Stream} from a {@code Spliterator}.
*
* <p>The spliterator is only traversed, split, or queried for estimated
* size after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param <T> the type of stream elements
* @param spliterator a {@code Spliterator} describing the stream elements
* @return a new parallel {@code Stream}
*/
public static <T> Stream<T> parallelStream(Spliterator<T> spliterator) {
Objects.requireNonNull(spliterator);
return new ReferencePipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
true);
}
/**
* Creates a new sequential {@code Stream} from a {@code Supplier} of
* {@code Spliterator}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #stream(java.util.Spliterator)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param <T> the type of stream elements
* @param supplier a {@code Supplier} of a {@code Spliterator}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}.
* @return a new sequential {@code Stream}
* @see #stream(Spliterator)
*/
public static <T> Stream<T> stream(Supplier<? extends Spliterator<T>> supplier,
int characteristics) {
Objects.requireNonNull(supplier);
return new ReferencePipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
false);
}
/**
* Creates a new parallel {@code Stream} from a {@code Supplier} of
* {@code Spliterator}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #stream(Spliterator)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param <T> the type of stream elements
* @param supplier a {@code Supplier} of a {@code Spliterator}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return a new parallel {@code Stream}
* @see #parallelStream(Spliterator)
*/
public static <T> Stream<T> parallelStream(Supplier<? extends Spliterator<T>> supplier,
int characteristics) {
Objects.requireNonNull(supplier);
return new ReferencePipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
true);
}
/**
* Creates a new sequential {@code IntStream} from a {@code Spliterator.OfInt}.
*
* <p>The spliterator is only traversed, split, or queried for estimated size
* after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)}} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param spliterator a {@code Spliterator.OfInt} describing the stream elements
* @return a new sequential {@code IntStream}
*/
public static IntStream intStream(Spliterator.OfInt spliterator) {
return new IntPipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
false);
}
/**
* Creates a new parallel {@code IntStream} from a {@code Spliterator.OfInt}.
*
* <p>he spliterator is only traversed, split, or queried for estimated size
* after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)}} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param spliterator a {@code Spliterator.OfInt} describing the stream elements
* @return a new parallel {@code IntStream}
*/
public static IntStream intParallelStream(Spliterator.OfInt spliterator) {
return new IntPipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
true);
}
/**
* Creates a new sequential {@code IntStream} from a {@code Supplier} of
* {@code Spliterator.OfInt}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #intStream(Spliterator.OfInt)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param supplier a {@code Supplier} of a {@code Spliterator.OfInt}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator.OfInt}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return a new sequential {@code IntStream}
* @see #intStream(Spliterator.OfInt)
*/
public static IntStream intStream(Supplier<? extends Spliterator.OfInt> supplier,
int characteristics) {
return new IntPipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
false);
}
/**
* Creates a new parallel {@code IntStream} from a {@code Supplier} of
* {@code Spliterator.OfInt}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #intStream(Spliterator.OfInt)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param supplier a {@code Supplier} of a {@code Spliterator.OfInt}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator.OfInt}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return a new parallel {@code IntStream}
* @see #intParallelStream(Spliterator.OfInt)
*/
public static IntStream intParallelStream(Supplier<? extends Spliterator.OfInt> supplier,
int characteristics) {
return new IntPipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
true);
}
/**
* Creates a new sequential {@code LongStream} from a {@code Spliterator.OfLong}.
*
* <p>The spliterator is only traversed, split, or queried for estimated
* size after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param spliterator a {@code Spliterator.OfLong} describing the stream
* elements
* @return a new sequential {@code LongStream}
*/
public static LongStream longStream(Spliterator.OfLong spliterator) {
return new LongPipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
false);
}
/**
* Creates a new parallel {@code LongStream} from a {@code Spliterator.OfLong}.
*
* <p>The spliterator is only traversed, split, or queried for estimated
* size after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param spliterator a {@code Spliterator.OfLong} describing the stream elements
* @return a new parallel {@code LongStream}
*/
public static LongStream longParallelStream(Spliterator.OfLong spliterator) {
return new LongPipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
true);
}
/**
* Creates a new sequential {@code LongStream} from a {@code Supplier} of
* {@code Spliterator.OfLong}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #longStream(Spliterator.OfLong)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param supplier a {@code Supplier} of a {@code Spliterator.OfLong}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator.OfLong}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return a new sequential {@code LongStream}
* @see #longStream(Spliterator.OfLong)
*/
public static LongStream longStream(Supplier<? extends Spliterator.OfLong> supplier,
int characteristics) {
return new LongPipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
false);
}
/**
* Creates a new parallel {@code LongStream} from a {@code Supplier} of
* {@code Spliterator.OfLong}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #longStream(Spliterator.OfLong)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param supplier A {@code Supplier} of a {@code Spliterator.OfLong}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator.OfLong}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return A new parallel {@code LongStream}
* @see #longParallelStream(Spliterator.OfLong)
*/
public static LongStream longParallelStream(Supplier<? extends Spliterator.OfLong> supplier,
int characteristics) {
return new LongPipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
true);
}
/**
* Creates a new sequential {@code DoubleStream} from a
* {@code Spliterator.OfDouble}.
*
* <p>The spliterator is only traversed, split, or queried for estimated size
* after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param spliterator A {@code Spliterator.OfDouble} describing the stream elements
* @return A new sequential {@code DoubleStream}
*/
public static DoubleStream doubleStream(Spliterator.OfDouble spliterator) {
return new DoublePipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
false);
}
/**
* Creates a new parallel {@code DoubleStream} from a
* {@code Spliterator.OfDouble}.
*
* <p>The spliterator is only traversed, split, or queried for estimated size
* after the terminal operation of the stream pipeline commences.
*
* <p>It is strongly recommended the spliterator report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
* <a href="Spliterator.html#binding">late-binding</a>. Otherwise,
* {@link #stream(Supplier, int)} should be used to
* reduce the scope of potential interference with the source. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param spliterator A {@code Spliterator.OfDouble} describing the stream elements
* @return A new parallel {@code DoubleStream}
*/
public static DoubleStream doubleParallelStream(Spliterator.OfDouble spliterator) {
return new DoublePipeline.Head<>(spliterator,
StreamOpFlag.fromCharacteristics(spliterator),
true);
}
/**
* Creates a new sequential {@code DoubleStream} from a {@code Supplier} of
* {@code Spliterator.OfDouble}.
* <p>
* The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
* <p>
* For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #doubleStream(Spliterator.OfDouble)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param supplier A {@code Supplier} of a {@code Spliterator.OfDouble}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator.OfDouble}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return A new sequential {@code DoubleStream}
* @see #doubleStream(Spliterator.OfDouble)
*/
public static DoubleStream doubleStream(Supplier<? extends Spliterator.OfDouble> supplier,
int characteristics) {
return new DoublePipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
false);
}
/**
* Creates a new parallel {@code DoubleStream} from a {@code Supplier} of
* {@code Spliterator.OfDouble}.
*
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
* more than once, and after the terminal operation of the stream pipeline
* commences.
*
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
* or {@code CONCURRENT}, or that are
* <a href="Spliterator.html#binding">late-binding</a>, it is likely
* more efficient to use {@link #doubleStream(Spliterator.OfDouble)} instead.
* The use of a {@code Supplier} in this form provides a level of
* indirection that reduces the scope of potential interference with the
* source. Since the supplier is only invoked after the terminal operation
* commences, any modifications to the source up to the start of the
* terminal operation are reflected in the stream result. See
* <a href="package-summary.html#Non-Interference">Non-Interference</a> for
* more details.
*
* @param supplier a {@code Supplier} of a {@code Spliterator.OfDouble}
* @param characteristics Spliterator characteristics of the supplied
* {@code Spliterator.OfDouble}. The characteristics must be equal to
* {@code source.get().getCharacteristics()}
* @return a new parallel {@code DoubleStream}
* @see #doubleParallelStream(Spliterator.OfDouble)
*/
public static DoubleStream doubleParallelStream(Supplier<? extends Spliterator.OfDouble> supplier,
int characteristics) {
return new DoublePipeline.Head<>(supplier,
StreamOpFlag.fromCharacteristics(characteristics),
true);
}
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册