feat(jdk8): move files to a new folder to avoid the resources being compiled.
jdkSrc/jdk8/java/util/stream/AbstractPipeline.java (new file, 707 lines)
@@ -0,0 +1,707 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
* Abstract base class for "pipeline" classes, which are the core
* implementations of the Stream interface and its primitive specializations.
* Manages construction and evaluation of stream pipelines.
*
* <p>An {@code AbstractPipeline} represents an initial portion of a stream
* pipeline, encapsulating a stream source and zero or more intermediate
* operations. The individual {@code AbstractPipeline} objects are often
* referred to as <em>stages</em>, where each stage describes either the stream
* source or an intermediate operation.
*
* <p>A concrete intermediate stage is generally built from an
* {@code AbstractPipeline}, a shape-specific pipeline class which extends it
* (e.g., {@code IntPipeline}) which is also abstract, and an operation-specific
* concrete class which extends that. {@code AbstractPipeline} contains most of
* the mechanics of evaluating the pipeline, and implements methods that will be
* used by the operation; the shape-specific classes add helper methods for
* dealing with collection of results into the appropriate shape-specific
* containers.
*
* <p>After chaining a new intermediate operation, or executing a terminal
* operation, the stream is considered to be consumed, and no more intermediate
* or terminal operations are permitted on this stream instance.
*
* @implNote
* <p>For sequential streams, and parallel streams without
* <a href="package-summary.html#StreamOps">stateful intermediate
* operations</a>, pipeline evaluation is done in a single
* pass that "jams" all the operations together. For parallel streams with
* stateful operations, execution is divided into segments, where each
* stateful operation marks the end of a segment, and each segment is
* evaluated separately and the result used as the input to the next
* segment. In all cases, the source data is not consumed until a terminal
* operation begins.
*
* @param <E_IN> type of input elements
* @param <E_OUT> type of output elements
* @param <S> type of the subclass implementing {@code BaseStream}
* @since 1.8
*/
abstract class AbstractPipeline<E_IN, E_OUT, S extends BaseStream<E_OUT, S>>
|
||||
extends PipelineHelper<E_OUT> implements BaseStream<E_OUT, S> {
|
||||
private static final String MSG_STREAM_LINKED = "stream has already been operated upon or closed";
|
||||
private static final String MSG_CONSUMED = "source already consumed or closed";
|
||||
|
||||
/**
|
||||
* Backlink to the head of the pipeline chain (self if this is the source
|
||||
* stage).
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
private final AbstractPipeline sourceStage;
|
||||
|
||||
/**
|
||||
* The "upstream" pipeline, or null if this is the source stage.
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
private final AbstractPipeline previousStage;
|
||||
|
||||
/**
|
||||
* The operation flags for the intermediate operation represented by this
|
||||
* pipeline object.
|
||||
*/
|
||||
protected final int sourceOrOpFlags;
|
||||
|
||||
/**
|
||||
* The next stage in the pipeline, or null if this is the last stage.
|
||||
* Effectively final at the point of linking to the next pipeline.
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
private AbstractPipeline nextStage;
|
||||
|
||||
/**
|
||||
* The number of intermediate operations between this pipeline object
|
||||
* and the stream source if sequential, or the previous stateful if parallel.
|
||||
* Valid at the point of pipeline preparation for evaluation.
|
||||
*/
|
||||
private int depth;
|
||||
|
||||
/**
|
||||
* The combined source and operation flags for the source and all operations
|
||||
* up to and including the operation represented by this pipeline object.
|
||||
* Valid at the point of pipeline preparation for evaluation.
|
||||
*/
|
||||
private int combinedFlags;
|
||||
|
||||
/**
|
||||
* The source spliterator. Only valid for the head pipeline.
|
||||
* Before the pipeline is consumed, if this field is non-null then
* {@code sourceSupplier} must be null. After the pipeline is consumed,
* if non-null then it is set to null.
|
||||
*/
|
||||
private Spliterator<?> sourceSpliterator;
|
||||
|
||||
/**
|
||||
* The source supplier. Only valid for the head pipeline. Before the
* pipeline is consumed, if non-null then {@code sourceSpliterator} must be
* null. After the pipeline is consumed, if non-null then it is set to null.
|
||||
*/
|
||||
private Supplier<? extends Spliterator<?>> sourceSupplier;
|
||||
|
||||
/**
|
||||
* True if this pipeline has been linked or consumed
|
||||
*/
|
||||
private boolean linkedOrConsumed;
|
||||
|
||||
/**
|
||||
* True if there are any stateful ops in the pipeline; only valid for the
|
||||
* source stage.
|
||||
*/
|
||||
private boolean sourceAnyStateful;
|
||||
|
||||
private Runnable sourceCloseAction;
|
||||
|
||||
/**
|
||||
* True if pipeline is parallel, otherwise the pipeline is sequential; only
|
||||
* valid for the source stage.
|
||||
*/
|
||||
private boolean parallel;
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream source
|
||||
* @param sourceFlags The source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel True if the pipeline is parallel
|
||||
*/
|
||||
AbstractPipeline(Supplier<? extends Spliterator<?>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
this.previousStage = null;
|
||||
this.sourceSupplier = source;
|
||||
this.sourceStage = this;
|
||||
this.sourceOrOpFlags = sourceFlags & StreamOpFlag.STREAM_MASK;
|
||||
// The following is an optimization of:
|
||||
// StreamOpFlag.combineOpFlags(sourceOrOpFlags, StreamOpFlag.INITIAL_OPS_VALUE);
|
||||
this.combinedFlags = (~(sourceOrOpFlags << 1)) & StreamOpFlag.INITIAL_OPS_VALUE;
|
||||
this.depth = 0;
|
||||
this.parallel = parallel;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
AbstractPipeline(Spliterator<?> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
this.previousStage = null;
|
||||
this.sourceSpliterator = source;
|
||||
this.sourceStage = this;
|
||||
this.sourceOrOpFlags = sourceFlags & StreamOpFlag.STREAM_MASK;
|
||||
// The following is an optimization of:
|
||||
// StreamOpFlag.combineOpFlags(sourceOrOpFlags, StreamOpFlag.INITIAL_OPS_VALUE);
|
||||
this.combinedFlags = (~(sourceOrOpFlags << 1)) & StreamOpFlag.INITIAL_OPS_VALUE;
|
||||
this.depth = 0;
|
||||
this.parallel = parallel;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for appending an intermediate operation stage onto an
|
||||
* existing pipeline.
|
||||
*
|
||||
* @param previousStage the upstream pipeline stage
|
||||
* @param opFlags the operation flags for the new stage, described in
|
||||
* {@link StreamOpFlag}
|
||||
*/
|
||||
AbstractPipeline(AbstractPipeline<?, E_IN, ?> previousStage, int opFlags) {
|
||||
if (previousStage.linkedOrConsumed)
|
||||
throw new IllegalStateException(MSG_STREAM_LINKED);
|
||||
previousStage.linkedOrConsumed = true;
|
||||
previousStage.nextStage = this;
|
||||
|
||||
this.previousStage = previousStage;
|
||||
this.sourceOrOpFlags = opFlags & StreamOpFlag.OP_MASK;
|
||||
this.combinedFlags = StreamOpFlag.combineOpFlags(opFlags, previousStage.combinedFlags);
|
||||
this.sourceStage = previousStage.sourceStage;
|
||||
if (opIsStateful())
|
||||
sourceStage.sourceAnyStateful = true;
|
||||
this.depth = previousStage.depth + 1;
|
||||
}
|
||||
|
||||
|
||||
// Terminal evaluation methods
|
||||
|
||||
/**
|
||||
* Evaluate the pipeline with a terminal operation to produce a result.
|
||||
*
|
||||
* @param <R> the type of result
|
||||
* @param terminalOp the terminal operation to be applied to the pipeline.
|
||||
* @return the result
|
||||
*/
|
||||
final <R> R evaluate(TerminalOp<E_OUT, R> terminalOp) {
|
||||
assert getOutputShape() == terminalOp.inputShape();
|
||||
if (linkedOrConsumed)
|
||||
throw new IllegalStateException(MSG_STREAM_LINKED);
|
||||
linkedOrConsumed = true;
|
||||
|
||||
return isParallel()
|
||||
? terminalOp.evaluateParallel(this, sourceSpliterator(terminalOp.getOpFlags()))
|
||||
: terminalOp.evaluateSequential(this, sourceSpliterator(terminalOp.getOpFlags()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Collect the elements output from the pipeline stage.
|
||||
*
|
||||
* @param generator the array generator to be used to create array instances
|
||||
* @return a flat array-backed Node that holds the collected output elements
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
final Node<E_OUT> evaluateToArrayNode(IntFunction<E_OUT[]> generator) {
|
||||
if (linkedOrConsumed)
|
||||
throw new IllegalStateException(MSG_STREAM_LINKED);
|
||||
linkedOrConsumed = true;
|
||||
|
||||
// If the last intermediate operation is stateful then
|
||||
// evaluate directly to avoid an extra collection step
|
||||
if (isParallel() && previousStage != null && opIsStateful()) {
|
||||
// Set the depth of this, last, pipeline stage to zero to slice the
|
||||
// pipeline such that this operation will not be included in the
|
||||
// upstream slice and upstream operations will not be included
|
||||
// in this slice
|
||||
depth = 0;
|
||||
return opEvaluateParallel(previousStage, previousStage.sourceSpliterator(0), generator);
|
||||
}
|
||||
else {
|
||||
return evaluate(sourceSpliterator(0), true, generator);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the source stage spliterator if this pipeline stage is the source
|
||||
* stage. The pipeline is consumed after this method is called and
|
||||
* returns successfully.
|
||||
*
|
||||
* @return the source stage spliterator
|
||||
* @throws IllegalStateException if this pipeline stage is not the source
|
||||
* stage.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
final Spliterator<E_OUT> sourceStageSpliterator() {
|
||||
if (this != sourceStage)
|
||||
throw new IllegalStateException();
|
||||
|
||||
if (linkedOrConsumed)
|
||||
throw new IllegalStateException(MSG_STREAM_LINKED);
|
||||
linkedOrConsumed = true;
|
||||
|
||||
if (sourceStage.sourceSpliterator != null) {
|
||||
@SuppressWarnings("unchecked")
|
||||
Spliterator<E_OUT> s = sourceStage.sourceSpliterator;
|
||||
sourceStage.sourceSpliterator = null;
|
||||
return s;
|
||||
}
|
||||
else if (sourceStage.sourceSupplier != null) {
|
||||
@SuppressWarnings("unchecked")
|
||||
Spliterator<E_OUT> s = (Spliterator<E_OUT>) sourceStage.sourceSupplier.get();
|
||||
sourceStage.sourceSupplier = null;
|
||||
return s;
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(MSG_CONSUMED);
|
||||
}
|
||||
}
|
||||
|
||||
// BaseStream
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public final S sequential() {
|
||||
sourceStage.parallel = false;
|
||||
return (S) this;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public final S parallel() {
|
||||
sourceStage.parallel = true;
|
||||
return (S) this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
linkedOrConsumed = true;
|
||||
sourceSupplier = null;
|
||||
sourceSpliterator = null;
|
||||
if (sourceStage.sourceCloseAction != null) {
|
||||
Runnable closeAction = sourceStage.sourceCloseAction;
|
||||
sourceStage.sourceCloseAction = null;
|
||||
closeAction.run();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public S onClose(Runnable closeHandler) {
|
||||
Objects.requireNonNull(closeHandler);
|
||||
Runnable existingHandler = sourceStage.sourceCloseAction;
|
||||
sourceStage.sourceCloseAction =
|
||||
(existingHandler == null)
|
||||
? closeHandler
|
||||
: Streams.composeWithExceptions(existingHandler, closeHandler);
|
||||
return (S) this;
|
||||
}
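// Usage sketch (illustrative only; 's' is an assumed Stream, not part of this
// file): handlers registered with onClose() are composed in registration order
// and run exactly once when close() is called, e.g.
//   s.onClose(() -> System.out.println("first"))
//    .onClose(() -> System.out.println("second"))
//    .close();                        // prints "first", then "second"
// Streams.composeWithExceptions is designed to run the later handler even if an
// earlier one throws, attaching the later exception as a suppressed exception.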
|
||||
|
||||
// Primitive specializations use co-variant overrides, hence this method is not final
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public Spliterator<E_OUT> spliterator() {
|
||||
if (linkedOrConsumed)
|
||||
throw new IllegalStateException(MSG_STREAM_LINKED);
|
||||
linkedOrConsumed = true;
|
||||
|
||||
if (this == sourceStage) {
|
||||
if (sourceStage.sourceSpliterator != null) {
|
||||
@SuppressWarnings("unchecked")
|
||||
Spliterator<E_OUT> s = (Spliterator<E_OUT>) sourceStage.sourceSpliterator;
|
||||
sourceStage.sourceSpliterator = null;
|
||||
return s;
|
||||
}
|
||||
else if (sourceStage.sourceSupplier != null) {
|
||||
@SuppressWarnings("unchecked")
|
||||
Supplier<Spliterator<E_OUT>> s = (Supplier<Spliterator<E_OUT>>) sourceStage.sourceSupplier;
|
||||
sourceStage.sourceSupplier = null;
|
||||
return lazySpliterator(s);
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(MSG_CONSUMED);
|
||||
}
|
||||
}
|
||||
else {
|
||||
return wrap(this, () -> sourceSpliterator(0), isParallel());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean isParallel() {
|
||||
return sourceStage.parallel;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns the composition of stream flags of the stream source and all
|
||||
* intermediate operations.
|
||||
*
|
||||
* @return the composition of stream flags of the stream source and all
|
||||
* intermediate operations
|
||||
* @see StreamOpFlag
|
||||
*/
|
||||
final int getStreamFlags() {
|
||||
return StreamOpFlag.toStreamFlags(combinedFlags);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the source spliterator for this pipeline stage. For a sequential or
|
||||
* stateless parallel pipeline, this is the source spliterator. For a
|
||||
* stateful parallel pipeline, this is a spliterator describing the results
|
||||
* of all computations up to and including the most recent stateful
|
||||
* operation.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
private Spliterator<?> sourceSpliterator(int terminalFlags) {
|
||||
// Get the source spliterator of the pipeline
|
||||
Spliterator<?> spliterator = null;
|
||||
if (sourceStage.sourceSpliterator != null) {
|
||||
spliterator = sourceStage.sourceSpliterator;
|
||||
sourceStage.sourceSpliterator = null;
|
||||
}
|
||||
else if (sourceStage.sourceSupplier != null) {
|
||||
spliterator = (Spliterator<?>) sourceStage.sourceSupplier.get();
|
||||
sourceStage.sourceSupplier = null;
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException(MSG_CONSUMED);
|
||||
}
|
||||
|
||||
if (isParallel() && sourceStage.sourceAnyStateful) {
|
||||
// Adapt the source spliterator, evaluating each stateful op
|
||||
// in the pipeline up to and including this pipeline stage.
|
||||
// The depth and flags of each pipeline stage are adjusted accordingly.
|
||||
int depth = 1;
|
||||
for (@SuppressWarnings("rawtypes") AbstractPipeline u = sourceStage, p = sourceStage.nextStage, e = this;
|
||||
u != e;
|
||||
u = p, p = p.nextStage) {
|
||||
|
||||
int thisOpFlags = p.sourceOrOpFlags;
|
||||
if (p.opIsStateful()) {
|
||||
depth = 0;
|
||||
|
||||
if (StreamOpFlag.SHORT_CIRCUIT.isKnown(thisOpFlags)) {
|
||||
// Clear the short circuit flag for next pipeline stage
|
||||
// This stage encapsulates short-circuiting, the next
|
||||
// stage may not have any short-circuit operations, and
|
||||
// if so spliterator.forEachRemaining should be used
|
||||
// for traversal
|
||||
thisOpFlags = thisOpFlags & ~StreamOpFlag.IS_SHORT_CIRCUIT;
|
||||
}
|
||||
|
||||
spliterator = p.opEvaluateParallelLazy(u, spliterator);
|
||||
|
||||
// Inject or clear SIZED on the source pipeline stage
|
||||
// based on the stage's spliterator
|
||||
thisOpFlags = spliterator.hasCharacteristics(Spliterator.SIZED)
|
||||
? (thisOpFlags & ~StreamOpFlag.NOT_SIZED) | StreamOpFlag.IS_SIZED
|
||||
: (thisOpFlags & ~StreamOpFlag.IS_SIZED) | StreamOpFlag.NOT_SIZED;
|
||||
}
|
||||
p.depth = depth++;
|
||||
p.combinedFlags = StreamOpFlag.combineOpFlags(thisOpFlags, u.combinedFlags);
|
||||
}
|
||||
}
|
||||
|
||||
if (terminalFlags != 0) {
|
||||
// Apply flags from the terminal operation to last pipeline stage
|
||||
combinedFlags = StreamOpFlag.combineOpFlags(terminalFlags, combinedFlags);
|
||||
}
|
||||
|
||||
return spliterator;
|
||||
}
|
||||
|
||||
// PipelineHelper
|
||||
|
||||
@Override
|
||||
final StreamShape getSourceShape() {
|
||||
@SuppressWarnings("rawtypes")
|
||||
AbstractPipeline p = AbstractPipeline.this;
|
||||
while (p.depth > 0) {
|
||||
p = p.previousStage;
|
||||
}
|
||||
return p.getOutputShape();
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> long exactOutputSizeIfKnown(Spliterator<P_IN> spliterator) {
|
||||
return StreamOpFlag.SIZED.isKnown(getStreamAndOpFlags()) ? spliterator.getExactSizeIfKnown() : -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN, S extends Sink<E_OUT>> S wrapAndCopyInto(S sink, Spliterator<P_IN> spliterator) {
|
||||
copyInto(wrapSink(Objects.requireNonNull(sink)), spliterator);
|
||||
return sink;
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> void copyInto(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator) {
|
||||
Objects.requireNonNull(wrappedSink);
|
||||
|
||||
if (!StreamOpFlag.SHORT_CIRCUIT.isKnown(getStreamAndOpFlags())) {
|
||||
wrappedSink.begin(spliterator.getExactSizeIfKnown());
|
||||
spliterator.forEachRemaining(wrappedSink);
|
||||
wrappedSink.end();
|
||||
}
|
||||
else {
|
||||
copyIntoWithCancel(wrappedSink, spliterator);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final <P_IN> void copyIntoWithCancel(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator) {
|
||||
@SuppressWarnings({"rawtypes","unchecked"})
|
||||
AbstractPipeline p = AbstractPipeline.this;
|
||||
while (p.depth > 0) {
|
||||
p = p.previousStage;
|
||||
}
|
||||
wrappedSink.begin(spliterator.getExactSizeIfKnown());
|
||||
p.forEachWithCancel(spliterator, wrappedSink);
|
||||
wrappedSink.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
final int getStreamAndOpFlags() {
|
||||
return combinedFlags;
|
||||
}
|
||||
|
||||
final boolean isOrdered() {
|
||||
return StreamOpFlag.ORDERED.isKnown(combinedFlags);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final <P_IN> Sink<P_IN> wrapSink(Sink<E_OUT> sink) {
|
||||
Objects.requireNonNull(sink);
|
||||
|
||||
for ( @SuppressWarnings("rawtypes") AbstractPipeline p=AbstractPipeline.this; p.depth > 0; p=p.previousStage) {
|
||||
sink = p.opWrapSink(p.previousStage.combinedFlags, sink);
|
||||
}
|
||||
return (Sink<P_IN>) sink;
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final <P_IN> Spliterator<E_OUT> wrapSpliterator(Spliterator<P_IN> sourceSpliterator) {
|
||||
if (depth == 0) {
|
||||
return (Spliterator<E_OUT>) sourceSpliterator;
|
||||
}
|
||||
else {
|
||||
return wrap(this, () -> sourceSpliterator, isParallel());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final <P_IN> Node<E_OUT> evaluate(Spliterator<P_IN> spliterator,
|
||||
boolean flatten,
|
||||
IntFunction<E_OUT[]> generator) {
|
||||
if (isParallel()) {
|
||||
// @@@ Optimize if op of this pipeline stage is a stateful op
|
||||
return evaluateToNode(this, spliterator, flatten, generator);
|
||||
}
|
||||
else {
|
||||
Node.Builder<E_OUT> nb = makeNodeBuilder(
|
||||
exactOutputSizeIfKnown(spliterator), generator);
|
||||
return wrapAndCopyInto(nb, spliterator).build();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Shape-specific abstract methods, implemented by XxxPipeline classes
|
||||
|
||||
/**
|
||||
* Get the output shape of the pipeline. If the pipeline is the head,
|
||||
* then its output shape corresponds to the shape of the source.
* Otherwise, its output shape corresponds to the output shape of the
|
||||
* associated operation.
|
||||
*
|
||||
* @return the output shape
|
||||
*/
|
||||
abstract StreamShape getOutputShape();
|
||||
|
||||
/**
|
||||
* Collect elements output from a pipeline into a Node that holds elements
|
||||
* of this shape.
|
||||
*
|
||||
* @param helper the pipeline helper describing the pipeline stages
|
||||
* @param spliterator the source spliterator
|
||||
* @param flattenTree true if the returned node should be flattened
|
||||
* @param generator the array generator
|
||||
* @return a Node holding the output of the pipeline
|
||||
*/
|
||||
abstract <P_IN> Node<E_OUT> evaluateToNode(PipelineHelper<E_OUT> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
boolean flattenTree,
|
||||
IntFunction<E_OUT[]> generator);
|
||||
|
||||
/**
|
||||
* Create a spliterator that wraps a source spliterator, compatible with
|
||||
* this stream shape, and operations associated with a {@link
|
||||
* PipelineHelper}.
|
||||
*
|
||||
* @param ph the pipeline helper describing the pipeline stages
|
||||
* @param supplier the supplier of a spliterator
|
||||
* @return a wrapping spliterator compatible with this shape
|
||||
*/
|
||||
abstract <P_IN> Spliterator<E_OUT> wrap(PipelineHelper<E_OUT> ph,
|
||||
Supplier<Spliterator<P_IN>> supplier,
|
||||
boolean isParallel);
|
||||
|
||||
/**
|
||||
* Create a lazy spliterator that wraps and obtains the supplied
|
||||
* spliterator when a method is invoked on the lazy spliterator.
|
||||
* @param supplier the supplier of a spliterator
|
||||
*/
|
||||
abstract Spliterator<E_OUT> lazySpliterator(Supplier<? extends Spliterator<E_OUT>> supplier);
|
||||
|
||||
/**
|
||||
* Traverse the elements of a spliterator compatible with this stream shape,
|
||||
* pushing those elements into a sink. If the sink requests cancellation,
|
||||
* no further elements will be pulled or pushed.
|
||||
*
|
||||
* @param spliterator the spliterator to pull elements from
|
||||
* @param sink the sink to push elements to
|
||||
*/
|
||||
abstract void forEachWithCancel(Spliterator<E_OUT> spliterator, Sink<E_OUT> sink);
|
||||
|
||||
/**
|
||||
* Make a node builder compatible with this stream shape.
|
||||
*
|
||||
* @param exactSizeIfKnown if {@literal >=0}, then a node builder will be
|
||||
* created that has a fixed capacity of at most exactSizeIfKnown elements. If
|
||||
* {@literal < 0}, then the node builder has an unfixed capacity. A fixed
|
||||
* capacity node builder will throw exceptions if an element is added after
|
||||
* builder has reached capacity, or is built before the builder has reached
|
||||
* capacity.
|
||||
*
|
||||
* @param generator the array generator to be used to create instances of a
|
||||
* T[] array. For implementations supporting primitive nodes, this parameter
|
||||
* may be ignored.
|
||||
* @return a node builder
|
||||
*/
|
||||
@Override
|
||||
abstract Node.Builder<E_OUT> makeNodeBuilder(long exactSizeIfKnown,
|
||||
IntFunction<E_OUT[]> generator);
|
||||
|
||||
|
||||
// Op-specific abstract methods, implemented by the operation class
|
||||
|
||||
/**
|
||||
* Returns whether this operation is stateful or not. If it is stateful,
|
||||
* then the method
|
||||
* {@link #opEvaluateParallel(PipelineHelper, java.util.Spliterator, java.util.function.IntFunction)}
|
||||
* must be overridden.
|
||||
*
|
||||
* @return {@code true} if this operation is stateful
|
||||
*/
|
||||
abstract boolean opIsStateful();
|
||||
|
||||
/**
|
||||
* Accepts a {@code Sink} which will receive the results of this operation,
|
||||
* and return a {@code Sink} which accepts elements of the input type of
|
||||
* this operation and which performs the operation, passing the results to
|
||||
* the provided {@code Sink}.
|
||||
*
|
||||
* @apiNote
|
||||
* The implementation may use the {@code flags} parameter to optimize the
|
||||
* sink wrapping. For example, if the input is already {@code DISTINCT},
|
||||
* the implementation for the {@code Stream#distinct()} method could just
|
||||
* return the sink it was passed.
|
||||
*
|
||||
* @param flags The combined stream and operation flags up to, but not
|
||||
* including, this operation
|
||||
* @param sink sink to which elements should be sent after processing
|
||||
* @return a sink which accepts elements, performs the operation upon
|
||||
* each element, and passes the results (if any) to the provided
|
||||
* {@code Sink}.
|
||||
*/
|
||||
abstract Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink);
|
||||
|
||||
/**
|
||||
* Performs a parallel evaluation of the operation using the specified
|
||||
* {@code PipelineHelper} which describes the upstream intermediate
|
||||
* operations. Only called on stateful operations. If {@link
|
||||
* #opIsStateful()} returns true then implementations must override the
|
||||
* default implementation.
|
||||
*
|
||||
* @implSpec The default implementation always throws
|
||||
* {@code UnsupportedOperationException}.
|
||||
*
|
||||
* @param helper the pipeline helper describing the pipeline stages
|
||||
* @param spliterator the source {@code Spliterator}
|
||||
* @param generator the array generator
|
||||
* @return a {@code Node} describing the result of the evaluation
|
||||
*/
|
||||
<P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<E_OUT[]> generator) {
|
||||
throw new UnsupportedOperationException("Parallel evaluation is not supported");
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a {@code Spliterator} describing a parallel evaluation of the
|
||||
* operation, using the specified {@code PipelineHelper} which describes the
|
||||
* upstream intermediate operations. Only called on stateful operations.
|
||||
* It is not necessary (though acceptable) to do a full computation of the
|
||||
* result here; it is preferable, if possible, to describe the result via a
|
||||
* lazily evaluated spliterator.
|
||||
*
|
||||
* @implSpec The default implementation behaves as if:
|
||||
* <pre>{@code
|
||||
* return evaluateParallel(helper, i -> (E_OUT[]) new
|
||||
* Object[i]).spliterator();
|
||||
* }</pre>
|
||||
* and is suitable for implementations that cannot do better than a full
|
||||
* synchronous evaluation.
|
||||
*
|
||||
* @param helper the pipeline helper
|
||||
* @param spliterator the source {@code Spliterator}
|
||||
* @return a {@code Spliterator} describing the result of the evaluation
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
<P_IN> Spliterator<E_OUT> opEvaluateParallelLazy(PipelineHelper<E_OUT> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
return opEvaluateParallel(helper, spliterator, i -> (E_OUT[]) new Object[i]).spliterator();
|
||||
}
|
||||
}
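For context, a minimal user-level sketch (not part of the committed file, assuming only the standard java.util.stream API) of the segmented evaluation described in the class comment above: the stateless filter() is jammed into the first segment, the stateful sorted() ends that segment, and the downstream map() runs in the next one.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class SegmentedEvaluationExample {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("delta", "alpha", "charlie", "bravo", "echo");
        List<String> out = words.parallelStream()
                .filter(w -> w.length() > 4)    // stateless: fused into the first segment
                .sorted()                       // stateful: marks the end of the first segment
                .map(String::toUpperCase)       // evaluated in the following segment
                .collect(Collectors.toList());
        System.out.println(out);                // [ALPHA, BRAVO, CHARLIE, DELTA]
    }
}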
jdkSrc/jdk8/java/util/stream/AbstractShortCircuitTask.java (new file, 234 lines)
@@ -0,0 +1,234 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
* Abstract class for fork-join tasks used to implement short-circuiting
* stream ops, which can produce a result without processing all elements of the
* stream.
*
* @param <P_IN> type of input elements to the pipeline
* @param <P_OUT> type of output elements from the pipeline
* @param <R> type of intermediate result, may be different from operation
* result type
* @param <K> type of child and sibling tasks
* @since 1.8
*/
@SuppressWarnings("serial")
|
||||
abstract class AbstractShortCircuitTask<P_IN, P_OUT, R,
|
||||
K extends AbstractShortCircuitTask<P_IN, P_OUT, R, K>>
|
||||
extends AbstractTask<P_IN, P_OUT, R, K> {
|
||||
/**
|
||||
* The result for this computation; this is shared among all tasks and set
|
||||
* exactly once
|
||||
*/
|
||||
protected final AtomicReference<R> sharedResult;
|
||||
|
||||
/**
|
||||
* Indicates whether this task has been canceled. Tasks may cancel other
|
||||
* tasks in the computation under various conditions, such as in a
|
||||
* find-first operation, where a task that finds a value will cancel all tasks
|
||||
* that are later in the encounter order.
|
||||
*/
|
||||
protected volatile boolean canceled;
|
||||
|
||||
/**
|
||||
* Constructor for root tasks.
|
||||
*
|
||||
* @param helper the {@code PipelineHelper} describing the stream pipeline
|
||||
* up to this operation
|
||||
* @param spliterator the {@code Spliterator} describing the source for this
|
||||
* pipeline
|
||||
*/
|
||||
protected AbstractShortCircuitTask(PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(helper, spliterator);
|
||||
sharedResult = new AtomicReference<>(null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for non-root nodes.
|
||||
*
|
||||
* @param parent parent task in the computation tree
|
||||
* @param spliterator the {@code Spliterator} for the portion of the
|
||||
* computation tree described by this task
|
||||
*/
|
||||
protected AbstractShortCircuitTask(K parent,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(parent, spliterator);
|
||||
sharedResult = parent.sharedResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value indicating the computation completed with no task
|
||||
* finding a short-circuitable result. For example, for a "find" operation,
|
||||
* this might be null or an empty {@code Optional}.
|
||||
*
|
||||
* @return the result to return when no task finds a result
|
||||
*/
|
||||
protected abstract R getEmptyResult();
|
||||
|
||||
/**
|
||||
* Overrides AbstractTask version to include checks for early
|
||||
* exits while splitting or computing.
|
||||
*/
|
||||
@Override
|
||||
public void compute() {
|
||||
Spliterator<P_IN> rs = spliterator, ls;
|
||||
long sizeEstimate = rs.estimateSize();
|
||||
long sizeThreshold = getTargetSize(sizeEstimate);
|
||||
boolean forkRight = false;
|
||||
@SuppressWarnings("unchecked") K task = (K) this;
|
||||
AtomicReference<R> sr = sharedResult;
|
||||
R result;
|
||||
while ((result = sr.get()) == null) {
|
||||
if (task.taskCanceled()) {
|
||||
result = task.getEmptyResult();
|
||||
break;
|
||||
}
|
||||
if (sizeEstimate <= sizeThreshold || (ls = rs.trySplit()) == null) {
|
||||
result = task.doLeaf();
|
||||
break;
|
||||
}
|
||||
K leftChild, rightChild, taskToFork;
|
||||
task.leftChild = leftChild = task.makeChild(ls);
|
||||
task.rightChild = rightChild = task.makeChild(rs);
|
||||
task.setPendingCount(1);
|
||||
if (forkRight) {
|
||||
forkRight = false;
|
||||
rs = ls;
|
||||
task = leftChild;
|
||||
taskToFork = rightChild;
|
||||
}
|
||||
else {
|
||||
forkRight = true;
|
||||
task = rightChild;
|
||||
taskToFork = leftChild;
|
||||
}
|
||||
taskToFork.fork();
|
||||
sizeEstimate = rs.estimateSize();
|
||||
}
|
||||
task.setLocalResult(result);
|
||||
task.tryComplete();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Declares that a globally valid result has been found. If another task has
|
||||
* not already found the answer, the result is installed in
|
||||
* {@code sharedResult}. The {@code compute()} method will check
|
||||
* {@code sharedResult} before proceeding with computation, so this causes
|
||||
* the computation to terminate early.
|
||||
*
|
||||
* @param result the result found
|
||||
*/
|
||||
protected void shortCircuit(R result) {
|
||||
if (result != null)
|
||||
sharedResult.compareAndSet(null, result);
|
||||
}
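// Illustrative sketch (hypothetical subclass code, not in this file): a
// find-first style task would typically report a hit from doLeaf() roughly as
//
//   P_OUT found = searchLeaf(spliterator);   // hypothetical helper
//   if (found != null) {
//       shortCircuit(toResult(found));       // publish the global answer
//       cancelLaterNodes();                  // stop tasks later in encounter order
//   }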
|
||||
|
||||
/**
|
||||
* Sets a local result for this task. If this task is the root, set the
|
||||
* shared result instead (if not already set).
|
||||
*
|
||||
* @param localResult The result to set for this task
|
||||
*/
|
||||
@Override
|
||||
protected void setLocalResult(R localResult) {
|
||||
if (isRoot()) {
|
||||
if (localResult != null)
|
||||
sharedResult.compareAndSet(null, localResult);
|
||||
}
|
||||
else
|
||||
super.setLocalResult(localResult);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the local result for this task
|
||||
*/
|
||||
@Override
|
||||
public R getRawResult() {
|
||||
return getLocalResult();
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the local result for this task. If this task is the root,
|
||||
* retrieves the shared result instead.
|
||||
*/
|
||||
@Override
|
||||
public R getLocalResult() {
|
||||
if (isRoot()) {
|
||||
R answer = sharedResult.get();
|
||||
return (answer == null) ? getEmptyResult() : answer;
|
||||
}
|
||||
else
|
||||
return super.getLocalResult();
|
||||
}
|
||||
|
||||
/**
|
||||
* Mark this task as canceled
|
||||
*/
|
||||
protected void cancel() {
|
||||
canceled = true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Queries whether this task is canceled. A task is considered canceled if
|
||||
* it or any of its parents have been canceled.
|
||||
*
|
||||
* @return {@code true} if this task or any parent is canceled.
|
||||
*/
|
||||
protected boolean taskCanceled() {
|
||||
boolean cancel = canceled;
|
||||
if (!cancel) {
|
||||
for (K parent = getParent(); !cancel && parent != null; parent = parent.getParent())
|
||||
cancel = parent.canceled;
|
||||
}
|
||||
|
||||
return cancel;
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancels all tasks which succeed this one in the encounter order. This
|
||||
* includes canceling the current task's right sibling, as well as the
|
||||
* later right siblings of all its parents.
|
||||
*/
|
||||
protected void cancelLaterNodes() {
|
||||
// Go up the tree, cancel right siblings of this node and all parents
|
||||
for (@SuppressWarnings("unchecked") K parent = getParent(), node = (K) this;
|
||||
parent != null;
|
||||
node = parent, parent = parent.getParent()) {
|
||||
// If node is a left child of parent, then it has a right sibling
|
||||
if (parent.leftChild == node) {
|
||||
K rightSibling = parent.rightChild;
|
||||
if (!rightSibling.canceled)
|
||||
rightSibling.cancel();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
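A user-level sketch (illustrative, not part of the committed file) of the short-circuiting behaviour these tasks implement: findFirst() lets one leaf publish a result into sharedResult, after which tasks later in the encounter order are canceled rather than fully traversed.

import java.util.OptionalInt;
import java.util.stream.IntStream;

class ShortCircuitExample {
    public static void main(String[] args) {
        // findFirst() is a short-circuiting terminal operation.
        OptionalInt first = IntStream.range(1, 1_000_000)
                .parallel()
                .filter(i -> i % 97_003 == 0)
                .findFirst();
        System.out.println(first);   // OptionalInt[97003]
    }
}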
jdkSrc/jdk8/java/util/stream/AbstractSpinedBuffer.java (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
/**
* Base class for a data structure for gathering elements into a buffer and then
* iterating them. Maintains an array of increasingly sized arrays, so there is
* no copying cost associated with growing the data structure.
* @since 1.8
*/
abstract class AbstractSpinedBuffer {
|
||||
/**
|
||||
* Minimum power-of-two for the first chunk.
|
||||
*/
|
||||
public static final int MIN_CHUNK_POWER = 4;
|
||||
|
||||
/**
|
||||
* Minimum size for the first chunk.
|
||||
*/
|
||||
public static final int MIN_CHUNK_SIZE = 1 << MIN_CHUNK_POWER;
|
||||
|
||||
/**
|
||||
* Max power-of-two for chunks.
|
||||
*/
|
||||
public static final int MAX_CHUNK_POWER = 30;
|
||||
|
||||
/**
|
||||
* Minimum array size for array-of-chunks.
|
||||
*/
|
||||
public static final int MIN_SPINE_SIZE = 8;
|
||||
|
||||
|
||||
/**
|
||||
* log2 of the size of the first chunk.
|
||||
*/
|
||||
protected final int initialChunkPower;
|
||||
|
||||
/**
|
||||
* Index of the *next* element to write; may point into, or just outside of,
|
||||
* the current chunk.
|
||||
*/
|
||||
protected int elementIndex;
|
||||
|
||||
/**
|
||||
* Index of the *current* chunk in the spine array, if the spine array is
|
||||
* non-null.
|
||||
*/
|
||||
protected int spineIndex;
|
||||
|
||||
/**
|
||||
* Count of elements in all prior chunks.
|
||||
*/
|
||||
protected long[] priorElementCount;
|
||||
|
||||
/**
|
||||
* Construct with an initial capacity of 16.
|
||||
*/
|
||||
protected AbstractSpinedBuffer() {
|
||||
this.initialChunkPower = MIN_CHUNK_POWER;
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct with a specified initial capacity.
|
||||
*
|
||||
* @param initialCapacity The minimum expected number of elements
|
||||
*/
|
||||
protected AbstractSpinedBuffer(int initialCapacity) {
|
||||
if (initialCapacity < 0)
|
||||
throw new IllegalArgumentException("Illegal Capacity: "+ initialCapacity);
|
||||
|
||||
this.initialChunkPower = Math.max(MIN_CHUNK_POWER,
|
||||
Integer.SIZE - Integer.numberOfLeadingZeros(initialCapacity - 1));
|
||||
}
|
||||
|
||||
/**
|
||||
* Is the buffer currently empty?
|
||||
*/
|
||||
public boolean isEmpty() {
|
||||
return (spineIndex == 0) && (elementIndex == 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* How many elements are currently in the buffer?
|
||||
*/
|
||||
public long count() {
|
||||
return (spineIndex == 0)
|
||||
? elementIndex
|
||||
: priorElementCount[spineIndex] + elementIndex;
|
||||
}
|
||||
|
||||
/**
|
||||
* How big should the nth chunk be?
|
||||
*/
|
||||
protected int chunkSize(int n) {
|
||||
int power = (n == 0 || n == 1)
|
||||
? initialChunkPower
|
||||
: Math.min(initialChunkPower + n - 1, AbstractSpinedBuffer.MAX_CHUNK_POWER);
|
||||
return 1 << power;
|
||||
}
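// Worked example (illustrative comment, not in the original source): with the
// default initialChunkPower of 4, successive chunks grow as
//   chunkSize(0) = 1 << 4 = 16
//   chunkSize(1) = 1 << 4 = 16
//   chunkSize(2) = 1 << 5 = 32
//   chunkSize(3) = 1 << 6 = 64
//   chunkSize(4) = 1 << 7 = 128
// doubling thereafter until the 1 << MAX_CHUNK_POWER (2^30) cap is reached.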
|
||||
|
||||
/**
|
||||
* Remove all data from the buffer
|
||||
*/
|
||||
public abstract void clear();
|
||||
}
jdkSrc/jdk8/java/util/stream/AbstractTask.java (new file, 363 lines)
@@ -0,0 +1,363 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.CountedCompleter;
|
||||
import java.util.concurrent.ForkJoinPool;
|
||||
import java.util.concurrent.ForkJoinWorkerThread;
|
||||
|
||||
/**
* Abstract base class for most fork-join tasks used to implement stream ops.
* Manages splitting logic, tracking of child tasks, and intermediate results.
* Each task is associated with a {@link Spliterator} that describes the portion
* of the input associated with the subtree rooted at this task.
* Tasks may be leaf nodes (which will traverse the elements of
* the {@code Spliterator}) or internal nodes (which split the
* {@code Spliterator} into multiple child tasks).
*
* @implNote
* <p>This class is based on {@link CountedCompleter}, a form of fork-join task
* where each task has a semaphore-like count of uncompleted children, and the
* task is implicitly completed and notified when its last child completes.
* Internal node tasks will likely override the {@code onCompletion} method from
* {@code CountedCompleter} to merge the results from child tasks into the
* current task's result.
*
* <p>Splitting and setting up the child task links is done by {@code compute()}
* for internal nodes. At {@code compute()} time for leaf nodes, it is
* guaranteed that the parent's child-related fields (including sibling links
* for the parent's children) will be set up for all children.
*
* <p>For example, a task that performs a reduce would override {@code doLeaf()}
* to perform a reduction on that leaf node's chunk using the
* {@code Spliterator}, and override {@code onCompletion()} to merge the results
* of the child tasks for internal nodes:
*
* <pre>{@code
*     protected S doLeaf() {
*         spliterator.forEach(...);
*         return localReductionResult;
*     }
*
*     public void onCompletion(CountedCompleter caller) {
*         if (!isLeaf()) {
*             ReduceTask<P_IN, P_OUT, T, R> child = children;
*             R result = child.getLocalResult();
*             child = child.nextSibling;
*             for (; child != null; child = child.nextSibling)
*                 result = combine(result, child.getLocalResult());
*             setLocalResult(result);
*         }
*     }
* }</pre>
*
* <p>Serialization is not supported as there is no intention to serialize
* tasks managed by stream ops.
*
* @param <P_IN> Type of elements input to the pipeline
* @param <P_OUT> Type of elements output from the pipeline
* @param <R> Type of intermediate result, which may be different from operation
* result type
* @param <K> Type of parent, child and sibling tasks
* @since 1.8
*/
@SuppressWarnings("serial")
|
||||
abstract class AbstractTask<P_IN, P_OUT, R,
|
||||
K extends AbstractTask<P_IN, P_OUT, R, K>>
|
||||
extends CountedCompleter<R> {
|
||||
|
||||
private static final int LEAF_TARGET = ForkJoinPool.getCommonPoolParallelism() << 2;
|
||||
|
||||
/** The pipeline helper, common to all tasks in a computation */
|
||||
protected final PipelineHelper<P_OUT> helper;
|
||||
|
||||
/**
|
||||
* The spliterator for the portion of the input associated with the subtree
|
||||
* rooted at this task
|
||||
*/
|
||||
protected Spliterator<P_IN> spliterator;
|
||||
|
||||
/** Target leaf size, common to all tasks in a computation */
|
||||
protected long targetSize; // may be lazily initialized
|
||||
|
||||
/**
|
||||
* The left child.
|
||||
* null if no children
|
||||
* if non-null rightChild is non-null
|
||||
*/
|
||||
protected K leftChild;
|
||||
|
||||
/**
|
||||
* The right child.
|
||||
* null if no children
|
||||
* if non-null leftChild is non-null
|
||||
*/
|
||||
protected K rightChild;
|
||||
|
||||
/** The result of this node, if completed */
|
||||
private R localResult;
|
||||
|
||||
/**
|
||||
* Constructor for root nodes.
|
||||
*
|
||||
* @param helper The {@code PipelineHelper} describing the stream pipeline
|
||||
* up to this operation
|
||||
* @param spliterator The {@code Spliterator} describing the source for this
|
||||
* pipeline
|
||||
*/
|
||||
protected AbstractTask(PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(null);
|
||||
this.helper = helper;
|
||||
this.spliterator = spliterator;
|
||||
this.targetSize = 0L;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for non-root nodes.
|
||||
*
|
||||
* @param parent this node's parent task
|
||||
* @param spliterator {@code Spliterator} describing the subtree rooted at
|
||||
* this node, obtained by splitting the parent {@code Spliterator}
|
||||
*/
|
||||
protected AbstractTask(K parent,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(parent);
|
||||
this.spliterator = spliterator;
|
||||
this.helper = parent.helper;
|
||||
this.targetSize = parent.targetSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* Default target of leaf tasks for parallel decomposition.
|
||||
* To allow load balancing, we over-partition, currently to approximately
|
||||
* four tasks per processor, which enables others to help out
|
||||
* if leaf tasks are uneven or some processors are otherwise busy.
|
||||
*/
|
||||
public static int getLeafTarget() {
|
||||
Thread t = Thread.currentThread();
|
||||
if (t instanceof ForkJoinWorkerThread) {
|
||||
return ((ForkJoinWorkerThread) t).getPool().getParallelism() << 2;
|
||||
}
|
||||
else {
|
||||
return LEAF_TARGET;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new node of type T whose parent is the receiver; must call
|
||||
* the AbstractTask(T, Spliterator) constructor with the receiver and the
|
||||
* provided Spliterator.
|
||||
*
|
||||
* @param spliterator {@code Spliterator} describing the subtree rooted at
|
||||
* this node, obtained by splitting the parent {@code Spliterator}
|
||||
* @return newly constructed child node
|
||||
*/
|
||||
protected abstract K makeChild(Spliterator<P_IN> spliterator);
|
||||
|
||||
/**
|
||||
* Computes the result associated with a leaf node. Will be called by
|
||||
* {@code compute()} and the result passed to {@code setLocalResult()}
|
||||
*
|
||||
* @return the computed result of a leaf node
|
||||
*/
|
||||
protected abstract R doLeaf();
|
||||
|
||||
/**
|
||||
* Returns a suggested target leaf size based on the initial size estimate.
|
||||
*
|
||||
* @return suggested target leaf size
|
||||
*/
|
||||
public static long suggestTargetSize(long sizeEstimate) {
|
||||
long est = sizeEstimate / getLeafTarget();
|
||||
return est > 0L ? est : 1L;
|
||||
}
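// Worked example (illustrative comment, not in the original source): on a machine
// where the common pool parallelism is 8, getLeafTarget() is 8 << 2 = 32, so a
// source with an estimated size of 1,000,000 elements yields a target leaf size of
// 1,000,000 / 32 = 31,250; estimates smaller than 32 fall back to the minimum of 1.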
|
||||
|
||||
/**
|
||||
* Returns the targetSize, initializing it via the supplied
|
||||
* size estimate if not already initialized.
|
||||
*/
|
||||
protected final long getTargetSize(long sizeEstimate) {
|
||||
long s;
|
||||
return ((s = targetSize) != 0 ? s :
|
||||
(targetSize = suggestTargetSize(sizeEstimate)));
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the local result, if any. Subclasses should use
|
||||
* {@link #setLocalResult(Object)} and {@link #getLocalResult()} to manage
|
||||
* results. This returns the local result so that calls from within the
|
||||
* fork-join framework will return the correct result.
|
||||
*
|
||||
* @return local result for this node previously stored with
|
||||
* {@link #setLocalResult}
|
||||
*/
|
||||
@Override
|
||||
public R getRawResult() {
|
||||
return localResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Does nothing; instead, subclasses should use
|
||||
* {@link #setLocalResult(Object)} to manage results.
|
||||
*
|
||||
* @param result must be null, or an exception is thrown (this is a safety
|
||||
* tripwire to detect when {@code setRawResult()} is being used
|
||||
* instead of {@code setLocalResult()})
|
||||
*/
|
||||
@Override
|
||||
protected void setRawResult(R result) {
|
||||
if (result != null)
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves a result previously stored with {@link #setLocalResult}
|
||||
*
|
||||
* @return local result for this node previously stored with
|
||||
* {@link #setLocalResult}
|
||||
*/
|
||||
protected R getLocalResult() {
|
||||
return localResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Associates the result with the task, can be retrieved with
|
||||
* {@link #getLocalResult}
|
||||
*
|
||||
* @param localResult local result for this node
|
||||
*/
|
||||
protected void setLocalResult(R localResult) {
|
||||
this.localResult = localResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether this task is a leaf node. (Only valid after
|
||||
* {@link #compute} has been called on this node). If the node is not a
|
||||
* leaf node, then children will be non-null and numChildren will be
|
||||
* positive.
|
||||
*
|
||||
* @return {@code true} if this task is a leaf node
|
||||
*/
|
||||
protected boolean isLeaf() {
|
||||
return leftChild == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether this task is the root node
|
||||
*
|
||||
* @return {@code true} if this task is the root node.
|
||||
*/
|
||||
protected boolean isRoot() {
|
||||
return getParent() == null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the parent of this task, or null if this task is the root
|
||||
*
|
||||
* @return the parent of this task, or null if this task is the root
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
protected K getParent() {
|
||||
return (K) getCompleter();
|
||||
}
|
||||
|
||||
/**
|
||||
* Decides whether or not to split a task further or compute it
|
||||
* directly. If computing directly, calls {@code doLeaf} and passes
|
||||
* the result to {@code setRawResult}. Otherwise splits off
|
||||
* subtasks, forking one and continuing as the other.
|
||||
*
|
||||
* <p> The method is structured to conserve resources across a
|
||||
* range of uses. The loop continues with one of the child tasks
|
||||
* when split, to avoid deep recursion. To cope with spliterators
|
||||
* that may be systematically biased toward left-heavy or
|
||||
* right-heavy splits, we alternate which child is forked versus
|
||||
* continued in the loop.
|
||||
*/
|
||||
@Override
|
||||
public void compute() {
|
||||
Spliterator<P_IN> rs = spliterator, ls; // right, left spliterators
|
||||
long sizeEstimate = rs.estimateSize();
|
||||
long sizeThreshold = getTargetSize(sizeEstimate);
|
||||
boolean forkRight = false;
|
||||
@SuppressWarnings("unchecked") K task = (K) this;
|
||||
while (sizeEstimate > sizeThreshold && (ls = rs.trySplit()) != null) {
|
||||
K leftChild, rightChild, taskToFork;
|
||||
task.leftChild = leftChild = task.makeChild(ls);
|
||||
task.rightChild = rightChild = task.makeChild(rs);
|
||||
task.setPendingCount(1);
|
||||
if (forkRight) {
|
||||
forkRight = false;
|
||||
rs = ls;
|
||||
task = leftChild;
|
||||
taskToFork = rightChild;
|
||||
}
|
||||
else {
|
||||
forkRight = true;
|
||||
task = rightChild;
|
||||
taskToFork = leftChild;
|
||||
}
|
||||
taskToFork.fork();
|
||||
sizeEstimate = rs.estimateSize();
|
||||
}
|
||||
task.setLocalResult(task.doLeaf());
|
||||
task.tryComplete();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implNote
|
||||
* Clears spliterator and children fields. Overriders MUST call
|
||||
* {@code super.onCompletion} as the last thing they do if they want these
|
||||
* cleared.
|
||||
*/
|
||||
@Override
|
||||
public void onCompletion(CountedCompleter<?> caller) {
|
||||
spliterator = null;
|
||||
leftChild = rightChild = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns whether this node is a "leftmost" node -- whether the path from
|
||||
* the root to this node involves only traversing leftmost child links. For
|
||||
* a leaf node, this means it is the first leaf node in the encounter order.
|
||||
*
|
||||
* @return {@code true} if this node is a "leftmost" node
|
||||
*/
|
||||
protected boolean isLeftmostNode() {
|
||||
@SuppressWarnings("unchecked")
|
||||
K node = (K) this;
|
||||
while (node != null) {
|
||||
K parent = node.getParent();
|
||||
if (parent != null && parent.leftChild != node)
|
||||
return false;
|
||||
node = parent;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
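The @implNote above describes compute()'s strategy: split while the size estimate exceeds the target, fork one child, continue iteratively as the other, and alternate which side is forked. A self-contained sketch of that pattern (illustrative only, not part of the committed file) applied to summing an array with a CountedCompleter:

import java.util.concurrent.CountedCompleter;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;

class SumTask extends CountedCompleter<Void> {
    static final int THRESHOLD = 1_000;
    final long[] data;
    final int lo, hi;
    final AtomicLong total;

    SumTask(CountedCompleter<?> parent, long[] data, int lo, int hi, AtomicLong total) {
        super(parent);
        this.data = data; this.lo = lo; this.hi = hi; this.total = total;
    }

    @Override
    public void compute() {
        int l = lo, h = hi;
        SumTask task = this;
        boolean forkRight = false;
        while (h - l > THRESHOLD) {
            int mid = (l + h) >>> 1;
            SumTask left = new SumTask(task, data, l, mid, total);
            SumTask right = new SumTask(task, data, mid, h, total);
            task.setPendingCount(1);                  // one child will complete the parent
            if (forkRight) {
                right.fork(); task = left; h = mid;   // continue as the left child
            } else {
                left.fork();  task = right; l = mid;  // continue as the right child
            }
            forkRight = !forkRight;                   // alternate which side is forked
        }
        long sum = 0;                                 // leaf work, analogous to doLeaf()
        for (int i = l; i < h; i++) sum += data[i];
        total.addAndGet(sum);
        task.tryComplete();
    }

    public static void main(String[] args) {
        long[] data = new long[10_000];
        for (int i = 0; i < data.length; i++) data[i] = i;
        AtomicLong total = new AtomicLong();
        ForkJoinPool.commonPool().invoke(new SumTask(null, data, 0, data.length, total));
        System.out.println(total.get());              // 49995000
    }
}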
jdkSrc/jdk8/java/util/stream/BaseStream.java (new file, 160 lines)
@@ -0,0 +1,160 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* Base interface for streams, which are sequences of elements supporting
|
||||
* sequential and parallel aggregate operations. The following example
|
||||
* illustrates an aggregate operation using the stream types {@link Stream}
|
||||
* and {@link IntStream}, computing the sum of the weights of the red widgets:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int sum = widgets.stream()
|
||||
* .filter(w -> w.getColor() == RED)
|
||||
* .mapToInt(w -> w.getWeight())
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* See the class documentation for {@link Stream} and the package documentation
|
||||
* for <a href="package-summary.html">java.util.stream</a> for additional
|
||||
* specification of streams, stream operations, stream pipelines, and
|
||||
* parallelism, which governs the behavior of all stream types.
|
||||
*
|
||||
* @param <T> the type of the stream elements
|
||||
* @param <S> the type of the stream implementing {@code BaseStream}
|
||||
* @since 1.8
|
||||
* @see Stream
|
||||
* @see IntStream
|
||||
* @see LongStream
|
||||
* @see DoubleStream
|
||||
* @see <a href="package-summary.html">java.util.stream</a>
|
||||
*/
|
||||
public interface BaseStream<T, S extends BaseStream<T, S>>
|
||||
extends AutoCloseable {
|
||||
/**
|
||||
* Returns an iterator for the elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return the element iterator for this stream
|
||||
*/
|
||||
Iterator<T> iterator();
|
||||
|
||||
/**
|
||||
* Returns a spliterator for the elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return the element spliterator for this stream
|
||||
*/
|
||||
Spliterator<T> spliterator();
|
||||
|
||||
/**
|
||||
* Returns whether this stream, if a terminal operation were to be executed,
|
||||
* would execute in parallel. Calling this method after invoking a
|
||||
* terminal stream operation method may yield unpredictable results.
|
||||
*
|
||||
* @return {@code true} if this stream would execute in parallel if executed
|
||||
*/
|
||||
boolean isParallel();
|
||||
|
||||
/**
|
||||
* Returns an equivalent stream that is sequential. May return
|
||||
* itself, either because the stream was already sequential, or because
|
||||
* the underlying stream state was modified to be sequential.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a sequential stream
|
||||
*/
|
||||
S sequential();
|
||||
|
||||
/**
|
||||
* Returns an equivalent stream that is parallel. May return
|
||||
* itself, either because the stream was already parallel, or because
|
||||
* the underlying stream state was modified to be parallel.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a parallel stream
|
||||
*/
|
||||
S parallel();
|
||||
|
||||
/**
|
||||
* Returns an equivalent stream that is
|
||||
* <a href="package-summary.html#Ordering">unordered</a>. May return
|
||||
* itself, either because the stream was already unordered, or because
|
||||
* the underlying stream state was modified to be unordered.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an unordered stream
|
||||
*/
|
||||
S unordered();
|
||||
|
||||
/**
|
||||
* Returns an equivalent stream with an additional close handler. Close
|
||||
* handlers are run when the {@link #close()} method
|
||||
* is called on the stream, and are executed in the order they were
|
||||
* added. All close handlers are run, even if earlier close handlers throw
|
||||
* exceptions. If any close handler throws an exception, the first
|
||||
* exception thrown will be relayed to the caller of {@code close()}, with
|
||||
* any remaining exceptions added to that exception as suppressed exceptions
|
||||
* (unless one of the remaining exceptions is the same exception as the
|
||||
* first exception, since an exception cannot suppress itself.) May
|
||||
* return itself.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param closeHandler A task to execute when the stream is closed
|
||||
* @return a stream with a handler that is run if the stream is closed
|
||||
*/
|
||||
S onClose(Runnable closeHandler);
|
||||
|
||||
/**
|
||||
* Closes this stream, causing all close handlers for this stream pipeline
|
||||
* to be called.
|
||||
*
|
||||
* @see AutoCloseable#close()
|
||||
*/
|
||||
@Override
|
||||
void close();
|
||||
}
|
||||
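As a brief usage sketch of the behavior documented above (close handlers run in registration order when close() is called; parallel()/sequential() switch the mode of the whole pipeline), using IntStream, one of the BaseStream specializations:

import java.util.stream.IntStream;

public class BaseStreamDemo {
    public static void main(String[] args) {
        // try-with-resources calls close(), which runs both handlers in the
        // order they were registered.
        try (IntStream s = IntStream.rangeClosed(1, 10)
                .onClose(() -> System.out.println("first close handler"))
                .onClose(() -> System.out.println("second close handler"))) {
            // parallel() marks the whole pipeline as parallel; isParallel()
            // then reports true until the mode is changed again.
            long evens = s.parallel().filter(i -> i % 2 == 0).count();
            System.out.println("even count = " + evens);   // prints 5
        }
    }
}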
341
jdkSrc/jdk8/java/util/stream/Collector.java
Normal file
@@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BinaryOperator;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* A <a href="package-summary.html#Reduction">mutable reduction operation</a> that
|
||||
* accumulates input elements into a mutable result container, optionally transforming
|
||||
* the accumulated result into a final representation after all input elements
|
||||
* have been processed. Reduction operations can be performed either sequentially
|
||||
* or in parallel.
|
||||
*
|
||||
* <p>Examples of mutable reduction operations include:
|
||||
* accumulating elements into a {@code Collection}; concatenating
|
||||
* strings using a {@code StringBuilder}; computing summary information about
|
||||
* elements such as sum, min, max, or average; computing "pivot table" summaries
|
||||
* such as "maximum valued transaction by seller", etc. The class {@link Collectors}
|
||||
* provides implementations of many common mutable reductions.
|
||||
*
|
||||
* <p>A {@code Collector} is specified by four functions that work together to
|
||||
* accumulate entries into a mutable result container, and optionally perform
|
||||
* a final transform on the result. They are: <ul>
|
||||
* <li>creation of a new result container ({@link #supplier()})</li>
|
||||
* <li>incorporating a new data element into a result container ({@link #accumulator()})</li>
|
||||
* <li>combining two result containers into one ({@link #combiner()})</li>
|
||||
* <li>performing an optional final transform on the container ({@link #finisher()})</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>Collectors also have a set of characteristics, such as
|
||||
* {@link Characteristics#CONCURRENT}, that provide hints that can be used by a
|
||||
* reduction implementation to provide better performance.
|
||||
*
|
||||
* <p>A sequential implementation of a reduction using a collector would
|
||||
* create a single result container using the supplier function, and invoke the
|
||||
* accumulator function once for each input element. A parallel implementation
|
||||
* would partition the input, create a result container for each partition,
|
||||
* accumulate the contents of each partition into a subresult for that partition,
|
||||
* and then use the combiner function to merge the subresults into a combined
|
||||
* result.
|
||||
*
|
||||
* <p>To ensure that sequential and parallel executions produce equivalent
|
||||
* results, the collector functions must satisfy <em>identity</em> and
|
||||
* <a href="package-summary.html#Associativity">associativity</a> constraints.
|
||||
*
|
||||
* <p>The identity constraint says that for any partially accumulated result,
|
||||
* combining it with an empty result container must produce an equivalent
|
||||
* result. That is, for a partially accumulated result {@code a} that is the
|
||||
* result of any series of accumulator and combiner invocations, {@code a} must
|
||||
* be equivalent to {@code combiner.apply(a, supplier.get())}.
|
||||
*
|
||||
* <p>The associativity constraint says that splitting the computation must
|
||||
* produce an equivalent result. That is, for any input elements {@code t1}
|
||||
* and {@code t2}, the results {@code r1} and {@code r2} in the computation
|
||||
* below must be equivalent:
|
||||
* <pre>{@code
|
||||
* A a1 = supplier.get();
|
||||
* accumulator.accept(a1, t1);
|
||||
* accumulator.accept(a1, t2);
|
||||
* R r1 = finisher.apply(a1); // result without splitting
|
||||
*
|
||||
* A a2 = supplier.get();
|
||||
* accumulator.accept(a2, t1);
|
||||
* A a3 = supplier.get();
|
||||
* accumulator.accept(a3, t2);
|
||||
* R r2 = finisher.apply(combiner.apply(a2, a3)); // result with splitting
|
||||
* } </pre>
|
||||
*
|
||||
* <p>For collectors that do not have the {@code UNORDERED} characteristic,
|
||||
* two accumulated results {@code a1} and {@code a2} are equivalent if
|
||||
* {@code finisher.apply(a1).equals(finisher.apply(a2))}. For unordered
|
||||
* collectors, equivalence is relaxed to allow for non-equality related to
|
||||
* differences in order. (For example, an unordered collector that accumulated
|
||||
* elements to a {@code List} would consider two lists equivalent if they
|
||||
* contained the same elements, ignoring order.)
|
||||
*
|
||||
* <p>Libraries that implement reduction based on {@code Collector}, such as
|
||||
* {@link Stream#collect(Collector)}, must adhere to the following constraints:
|
||||
* <ul>
|
||||
* <li>The first argument passed to the accumulator function, both
|
||||
* arguments passed to the combiner function, and the argument passed to the
|
||||
* finisher function must be the result of a previous invocation of the
|
||||
* result supplier, accumulator, or combiner functions.</li>
|
||||
* <li>The implementation should not do anything with the result of any of
|
||||
* the result supplier, accumulator, or combiner functions other than to
|
||||
* pass them again to the accumulator, combiner, or finisher functions,
|
||||
* or return them to the caller of the reduction operation.</li>
|
||||
* <li>If a result is passed to the combiner or finisher
|
||||
* function, and the same object is not returned from that function, it is
|
||||
* never used again.</li>
|
||||
* <li>Once a result is passed to the combiner or finisher function, it
|
||||
* is never passed to the accumulator function again.</li>
|
||||
* <li>For non-concurrent collectors, any result returned from the result
|
||||
* supplier, accumulator, or combiner functions must be serially
|
||||
* thread-confined. This enables collection to occur in parallel without
|
||||
* the {@code Collector} needing to implement any additional synchronization.
|
||||
* The reduction implementation must ensure that the input is properly
|
||||
* partitioned, that partitions are processed in isolation, and combining
|
||||
* happens only after accumulation is complete.</li>
|
||||
* <li>For concurrent collectors, an implementation is free to (but not
|
||||
* required to) implement reduction concurrently. A concurrent reduction
|
||||
* is one where the accumulator function is called concurrently from
|
||||
* multiple threads, using the same concurrently-modifiable result container,
|
||||
* rather than keeping the result isolated during accumulation.
|
||||
* A concurrent reduction should only be applied if the collector has the
|
||||
* {@link Characteristics#UNORDERED} characteristics or if the
|
||||
* originating data is unordered.</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>In addition to the predefined implementations in {@link Collectors}, the
|
||||
* static factory methods {@link #of(Supplier, BiConsumer, BinaryOperator, Characteristics...)}
|
||||
* can be used to construct collectors. For example, you could create a collector
|
||||
* that accumulates widgets into a {@code TreeSet} with:
|
||||
*
|
||||
* <pre>{@code
|
||||
* Collector<Widget, ?, TreeSet<Widget>> intoSet =
|
||||
* Collector.of(TreeSet::new, TreeSet::add,
|
||||
* (left, right) -> { left.addAll(right); return left; });
|
||||
* }</pre>
|
||||
*
|
||||
* (This behavior is also implemented by the predefined collector
|
||||
* {@link Collectors#toCollection(Supplier)}).
|
||||
*
|
||||
* @apiNote
|
||||
* Performing a reduction operation with a {@code Collector} should produce a
|
||||
* result equivalent to:
|
||||
* <pre>{@code
|
||||
* R container = collector.supplier().get();
|
||||
* for (T t : data)
|
||||
* collector.accumulator().accept(container, t);
|
||||
* return collector.finisher().apply(container);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>However, the library is free to partition the input, perform the reduction
|
||||
* on the partitions, and then use the combiner function to combine the partial
|
||||
* results to achieve a parallel reduction. (Depending on the specific reduction
|
||||
* operation, this may perform better or worse, depending on the relative cost
|
||||
* of the accumulator and combiner functions.)
|
||||
*
|
||||
* <p>Collectors are designed to be <em>composed</em>; many of the methods
|
||||
* in {@link Collectors} are functions that take a collector and produce
|
||||
* a new collector. For example, given the following collector that computes
|
||||
* the sum of the salaries of a stream of employees:
|
||||
*
|
||||
* <pre>{@code
|
||||
* Collector<Employee, ?, Integer> summingSalaries
|
||||
*     = Collectors.summingInt(Employee::getSalary)
|
||||
* }</pre>
|
||||
*
|
||||
* If we wanted to create a collector to tabulate the sum of salaries by
|
||||
* department, we could reuse the "sum of salaries" logic using
|
||||
* {@link Collectors#groupingBy(Function, Collector)}:
|
||||
*
|
||||
* <pre>{@code
|
||||
* Collector<Employee, ?, Map<Department, Integer>> summingSalariesByDept
|
||||
* = Collectors.groupingBy(Employee::getDepartment, summingSalaries);
|
||||
* }</pre>
|
||||
*
|
||||
* @see Stream#collect(Collector)
|
||||
* @see Collectors
|
||||
*
|
||||
* @param <T> the type of input elements to the reduction operation
|
||||
* @param <A> the mutable accumulation type of the reduction operation (often
|
||||
* hidden as an implementation detail)
|
||||
* @param <R> the result type of the reduction operation
|
||||
* @since 1.8
|
||||
*/
|
||||
public interface Collector<T, A, R> {
|
||||
/**
|
||||
* A function that creates and returns a new mutable result container.
|
||||
*
|
||||
* @return a function which returns a new, mutable result container
|
||||
*/
|
||||
Supplier<A> supplier();
|
||||
|
||||
/**
|
||||
* A function that folds a value into a mutable result container.
|
||||
*
|
||||
* @return a function which folds a value into a mutable result container
|
||||
*/
|
||||
BiConsumer<A, T> accumulator();
|
||||
|
||||
/**
|
||||
* A function that accepts two partial results and merges them. The
|
||||
* combiner function may fold state from one argument into the other and
|
||||
* return that, or may return a new result container.
|
||||
*
|
||||
* @return a function which combines two partial results into a combined
|
||||
* result
|
||||
*/
|
||||
BinaryOperator<A> combiner();
|
||||
|
||||
/**
|
||||
* Perform the final transformation from the intermediate accumulation type
|
||||
* {@code A} to the final result type {@code R}.
|
||||
*
|
||||
* <p>If the characteristic {@code IDENTITY_TRANSFORM} is
|
||||
* set, this function may be presumed to be an identity transform with an
|
||||
* unchecked cast from {@code A} to {@code R}.
|
||||
*
|
||||
* @return a function which transforms the intermediate result to the final
|
||||
* result
|
||||
*/
|
||||
Function<A, R> finisher();
|
||||
|
||||
/**
|
||||
* Returns a {@code Set} of {@code Collector.Characteristics} indicating
|
||||
* the characteristics of this Collector. This set should be immutable.
|
||||
*
|
||||
* @return an immutable set of collector characteristics
|
||||
*/
|
||||
Set<Characteristics> characteristics();
|
||||
|
||||
/**
|
||||
* Returns a new {@code Collector} described by the given {@code supplier},
|
||||
* {@code accumulator}, and {@code combiner} functions. The resulting
|
||||
* {@code Collector} has the {@code Collector.Characteristics.IDENTITY_FINISH}
|
||||
* characteristic.
|
||||
*
|
||||
* @param supplier The supplier function for the new collector
|
||||
* @param accumulator The accumulator function for the new collector
|
||||
* @param combiner The combiner function for the new collector
|
||||
* @param characteristics The collector characteristics for the new
|
||||
* collector
|
||||
* @param <T> The type of input elements for the new collector
|
||||
* @param <R> The type of intermediate accumulation result, and final result,
|
||||
* for the new collector
|
||||
* @throws NullPointerException if any argument is null
|
||||
* @return the new {@code Collector}
|
||||
*/
|
||||
public static<T, R> Collector<T, R, R> of(Supplier<R> supplier,
|
||||
BiConsumer<R, T> accumulator,
|
||||
BinaryOperator<R> combiner,
|
||||
Characteristics... characteristics) {
|
||||
Objects.requireNonNull(supplier);
|
||||
Objects.requireNonNull(accumulator);
|
||||
Objects.requireNonNull(combiner);
|
||||
Objects.requireNonNull(characteristics);
|
||||
Set<Characteristics> cs = (characteristics.length == 0)
|
||||
? Collectors.CH_ID
|
||||
: Collections.unmodifiableSet(EnumSet.of(Collector.Characteristics.IDENTITY_FINISH,
|
||||
characteristics));
|
||||
return new Collectors.CollectorImpl<>(supplier, accumulator, combiner, cs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@code Collector} described by the given {@code supplier},
|
||||
* {@code accumulator}, {@code combiner}, and {@code finisher} functions.
|
||||
*
|
||||
* @param supplier The supplier function for the new collector
|
||||
* @param accumulator The accumulator function for the new collector
|
||||
* @param combiner The combiner function for the new collector
|
||||
* @param finisher The finisher function for the new collector
|
||||
* @param characteristics The collector characteristics for the new
|
||||
* collector
|
||||
* @param <T> The type of input elements for the new collector
|
||||
* @param <A> The intermediate accumulation type of the new collector
|
||||
* @param <R> The final result type of the new collector
|
||||
* @throws NullPointerException if any argument is null
|
||||
* @return the new {@code Collector}
|
||||
*/
|
||||
public static<T, A, R> Collector<T, A, R> of(Supplier<A> supplier,
|
||||
BiConsumer<A, T> accumulator,
|
||||
BinaryOperator<A> combiner,
|
||||
Function<A, R> finisher,
|
||||
Characteristics... characteristics) {
|
||||
Objects.requireNonNull(supplier);
|
||||
Objects.requireNonNull(accumulator);
|
||||
Objects.requireNonNull(combiner);
|
||||
Objects.requireNonNull(finisher);
|
||||
Objects.requireNonNull(characteristics);
|
||||
Set<Characteristics> cs = Collectors.CH_NOID;
|
||||
if (characteristics.length > 0) {
|
||||
cs = EnumSet.noneOf(Characteristics.class);
|
||||
Collections.addAll(cs, characteristics);
|
||||
cs = Collections.unmodifiableSet(cs);
|
||||
}
|
||||
return new Collectors.CollectorImpl<>(supplier, accumulator, combiner, finisher, cs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Characteristics indicating properties of a {@code Collector}, which can
|
||||
* be used to optimize reduction implementations.
|
||||
*/
|
||||
enum Characteristics {
|
||||
/**
|
||||
* Indicates that this collector is <em>concurrent</em>, meaning that
|
||||
* the result container can support the accumulator function being
|
||||
* called concurrently with the same result container from multiple
|
||||
* threads.
|
||||
*
|
||||
* <p>If a {@code CONCURRENT} collector is not also {@code UNORDERED},
|
||||
* then it should only be evaluated concurrently if applied to an
|
||||
* unordered data source.
|
||||
*/
|
||||
CONCURRENT,
|
||||
|
||||
/**
|
||||
* Indicates that the collection operation does not commit to preserving
|
||||
* the encounter order of input elements. (This might be true if the
|
||||
* result container has no intrinsic order, such as a {@link Set}.)
|
||||
*/
|
||||
UNORDERED,
|
||||
|
||||
/**
|
||||
* Indicates that the finisher function is the identity function and
|
||||
* can be elided. If set, it must be the case that an unchecked cast
|
||||
* from A to R will succeed.
|
||||
*/
|
||||
IDENTITY_FINISH
|
||||
}
|
||||
}
|
||||
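A minimal sketch of the four-function model and the five-argument of(...) factory described above: a collector that accumulates strings into a StringJoiner and finishes by producing the joined String. The variable names are illustrative.

import java.util.StringJoiner;
import java.util.stream.Collector;
import java.util.stream.Stream;

public class JoiningCollectorDemo {
    public static void main(String[] args) {
        // supplier: new container; accumulator: fold one element in;
        // combiner: merge two partial containers; finisher: final A -> R transform.
        Collector<String, StringJoiner, String> joining =
            Collector.of(
                () -> new StringJoiner(", "),   // supplier
                StringJoiner::add,              // accumulator
                StringJoiner::merge,            // combiner
                StringJoiner::toString);        // finisher

        String s = Stream.of("a", "b", "c").collect(joining);
        System.out.println(s);   // a, b, c
    }
}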
1568
jdkSrc/jdk8/java/util/stream/Collectors.java
Normal file
File diff suppressed because it is too large
183
jdkSrc/jdk8/java/util/stream/DistinctOps.java
Normal file
@@ -0,0 +1,183 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.function.IntFunction;
|
||||
|
||||
/**
|
||||
* Factory methods for transforming streams into duplicate-free streams, using
|
||||
* {@link Object#equals(Object)} to determine equality.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class DistinctOps {
|
||||
|
||||
private DistinctOps() { }
|
||||
|
||||
/**
|
||||
* Appends a "distinct" operation to the provided stream, and returns the
|
||||
* new stream.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
* @return the new stream
|
||||
*/
|
||||
static <T> ReferencePipeline<T, T> makeRef(AbstractPipeline<?, T, ?> upstream) {
|
||||
return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE,
|
||||
StreamOpFlag.IS_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
|
||||
<P_IN> Node<T> reduce(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
|
||||
// If the stream is SORTED then it should also be ORDERED so the following will also
|
||||
// preserve the sort order
|
||||
TerminalOp<T, LinkedHashSet<T>> reduceOp
|
||||
= ReduceOps.<T, LinkedHashSet<T>>makeRef(LinkedHashSet::new, LinkedHashSet::add,
|
||||
LinkedHashSet::addAll);
|
||||
return Nodes.node(reduceOp.evaluateParallel(helper, spliterator));
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<T[]> generator) {
|
||||
if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
|
||||
// No-op
|
||||
return helper.evaluate(spliterator, false, generator);
|
||||
}
|
||||
else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return reduce(helper, spliterator);
|
||||
}
|
||||
else {
|
||||
// Holder of null state since ConcurrentHashMap does not support null values
|
||||
AtomicBoolean seenNull = new AtomicBoolean(false);
|
||||
ConcurrentHashMap<T, Boolean> map = new ConcurrentHashMap<>();
|
||||
TerminalOp<T, Void> forEachOp = ForEachOps.makeRef(t -> {
|
||||
if (t == null)
|
||||
seenNull.set(true);
|
||||
else
|
||||
map.putIfAbsent(t, Boolean.TRUE);
|
||||
}, false);
|
||||
forEachOp.evaluateParallel(helper, spliterator);
|
||||
|
||||
// If null has been seen then copy the key set into a HashSet that supports null values
|
||||
// and add null
|
||||
Set<T> keys = map.keySet();
|
||||
if (seenNull.get()) {
|
||||
// TODO Implement a more efficient set-union view, rather than copying
|
||||
keys = new HashSet<>(keys);
|
||||
keys.add(null);
|
||||
}
|
||||
return Nodes.node(keys);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Spliterator<T> opEvaluateParallelLazy(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
|
||||
if (StreamOpFlag.DISTINCT.isKnown(helper.getStreamAndOpFlags())) {
|
||||
// No-op
|
||||
return helper.wrapSpliterator(spliterator);
|
||||
}
|
||||
else if (StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
// Not lazy, barrier required to preserve order
|
||||
return reduce(helper, spliterator).spliterator();
|
||||
}
|
||||
else {
|
||||
// Lazy
|
||||
return new StreamSpliterators.DistinctSpliterator<>(helper.wrapSpliterator(spliterator));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Sink<T> opWrapSink(int flags, Sink<T> sink) {
|
||||
Objects.requireNonNull(sink);
|
||||
|
||||
if (StreamOpFlag.DISTINCT.isKnown(flags)) {
|
||||
return sink;
|
||||
} else if (StreamOpFlag.SORTED.isKnown(flags)) {
|
||||
return new Sink.ChainedReference<T, T>(sink) {
|
||||
boolean seenNull;
|
||||
T lastSeen;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
seenNull = false;
|
||||
lastSeen = null;
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
seenNull = false;
|
||||
lastSeen = null;
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
if (t == null) {
|
||||
if (!seenNull) {
|
||||
seenNull = true;
|
||||
downstream.accept(lastSeen = null);
|
||||
}
|
||||
} else if (lastSeen == null || !t.equals(lastSeen)) {
|
||||
downstream.accept(lastSeen = t);
|
||||
}
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return new Sink.ChainedReference<T, T>(sink) {
|
||||
Set<T> seen;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
seen = new HashSet<>();
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
seen = null;
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
if (!seen.contains(t)) {
|
||||
seen.add(t);
|
||||
downstream.accept(t);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
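The two sink strategies above can be sketched as plain methods under the same contract: for a SORTED (hence grouped) input only the last emitted element needs to be remembered, while an arbitrary input needs a set of everything seen. The class and method names below are illustrative, not the JDK sink classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Objects;
import java.util.Set;

public class DistinctSketch {
    // Sorted input: duplicates are adjacent, so remembering only the last
    // emitted element suffices (mirrors the SORTED branch of opWrapSink).
    static List<Integer> distinctSorted(List<Integer> sorted) {
        List<Integer> out = new ArrayList<>();
        Integer lastSeen = null;
        boolean first = true;
        for (Integer t : sorted) {
            if (first || !Objects.equals(t, lastSeen)) {
                out.add(t);
                lastSeen = t;
                first = false;
            }
        }
        return out;
    }

    // Unsorted input: fall back to a set of everything seen so far
    // (mirrors the default branch of opWrapSink).
    static List<Integer> distinctUnsorted(List<Integer> any) {
        Set<Integer> seen = new HashSet<>();
        List<Integer> out = new ArrayList<>();
        for (Integer t : any)
            if (seen.add(t))
                out.add(t);
        return out;
    }

    public static void main(String[] args) {
        System.out.println(distinctSorted(Arrays.asList(1, 1, 2, 2, 3)));   // [1, 2, 3]
        System.out.println(distinctUnsorted(Arrays.asList(3, 1, 3, 2, 1))); // [3, 1, 2]
    }
}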
663
jdkSrc/jdk8/java/util/stream/DoublePipeline.java
Normal file
@@ -0,0 +1,663 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.DoubleSummaryStatistics;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.PrimitiveIterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BinaryOperator;
|
||||
import java.util.function.DoubleBinaryOperator;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.DoubleFunction;
|
||||
import java.util.function.DoublePredicate;
|
||||
import java.util.function.DoubleToIntFunction;
|
||||
import java.util.function.DoubleToLongFunction;
|
||||
import java.util.function.DoubleUnaryOperator;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.ObjDoubleConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Abstract base class for an intermediate pipeline stage or pipeline source
|
||||
* stage whose elements are of type {@code double}.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract class DoublePipeline<E_IN>
|
||||
extends AbstractPipeline<E_IN, Double, DoubleStream>
|
||||
implements DoubleStream {
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
*/
|
||||
DoublePipeline(Supplier<? extends Spliterator<Double>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
*/
|
||||
DoublePipeline(Spliterator<Double> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for appending an intermediate operation onto an existing
|
||||
* pipeline.
|
||||
*
|
||||
* @param upstream the upstream element source.
|
||||
* @param opFlags the operation flags
|
||||
*/
|
||||
DoublePipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapt a {@code Sink<Double>} to a {@code DoubleConsumer}, ideally simply
|
||||
* by casting.
|
||||
*/
|
||||
private static DoubleConsumer adapt(Sink<Double> sink) {
|
||||
if (sink instanceof DoubleConsumer) {
|
||||
return (DoubleConsumer) sink;
|
||||
} else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(AbstractPipeline.class,
|
||||
"using DoubleStream.adapt(Sink<Double> s)");
|
||||
return sink::accept;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapt a {@code Spliterator<Double>} to a {@code Spliterator.OfDouble}.
|
||||
*
|
||||
* @implNote
|
||||
* The implementation attempts to cast to a Spliterator.OfDouble, and throws
|
||||
* an exception if this cast is not possible.
|
||||
*/
|
||||
private static Spliterator.OfDouble adapt(Spliterator<Double> s) {
|
||||
if (s instanceof Spliterator.OfDouble) {
|
||||
return (Spliterator.OfDouble) s;
|
||||
} else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(AbstractPipeline.class,
|
||||
"using DoubleStream.adapt(Spliterator<Double> s)");
|
||||
throw new UnsupportedOperationException("DoubleStream.adapt(Spliterator<Double> s)");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Shape-specific methods
|
||||
|
||||
@Override
|
||||
final StreamShape getOutputShape() {
|
||||
return StreamShape.DOUBLE_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Node<Double> evaluateToNode(PipelineHelper<Double> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
boolean flattenTree,
|
||||
IntFunction<Double[]> generator) {
|
||||
return Nodes.collectDouble(helper, spliterator, flattenTree);
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Spliterator<Double> wrap(PipelineHelper<Double> ph,
|
||||
Supplier<Spliterator<P_IN>> supplier,
|
||||
boolean isParallel) {
|
||||
return new StreamSpliterators.DoubleWrappingSpliterator<>(ph, supplier, isParallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final Spliterator.OfDouble lazySpliterator(Supplier<? extends Spliterator<Double>> supplier) {
|
||||
return new StreamSpliterators.DelegatingSpliterator.OfDouble((Supplier<Spliterator.OfDouble>) supplier);
|
||||
}
|
||||
|
||||
@Override
|
||||
final void forEachWithCancel(Spliterator<Double> spliterator, Sink<Double> sink) {
|
||||
Spliterator.OfDouble spl = adapt(spliterator);
|
||||
DoubleConsumer adaptedSink = adapt(sink);
|
||||
do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
|
||||
}
|
||||
|
||||
@Override
|
||||
final Node.Builder<Double> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Double[]> generator) {
|
||||
return Nodes.doubleBuilder(exactSizeIfKnown);
|
||||
}
|
||||
|
||||
|
||||
// DoubleStream
|
||||
|
||||
@Override
|
||||
public final PrimitiveIterator.OfDouble iterator() {
|
||||
return Spliterators.iterator(spliterator());
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Spliterator.OfDouble spliterator() {
|
||||
return adapt(super.spliterator());
|
||||
}
|
||||
|
||||
// Stateless intermediate ops from DoubleStream
|
||||
|
||||
@Override
|
||||
public final Stream<Double> boxed() {
|
||||
return mapToObj(Double::valueOf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream map(DoubleUnaryOperator mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedDouble<Double>(sink) {
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
downstream.accept(mapper.applyAsDouble(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <U> Stream<U> mapToObj(DoubleFunction<? extends U> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new ReferencePipeline.StatelessOp<Double, U>(this, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<U> sink) {
|
||||
return new Sink.ChainedDouble<U>(sink) {
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
downstream.accept(mapper.apply(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream mapToInt(DoubleToIntFunction mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new IntPipeline.StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedDouble<Integer>(sink) {
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
downstream.accept(mapper.applyAsInt(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream mapToLong(DoubleToLongFunction mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new LongPipeline.StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedDouble<Long>(sink) {
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
downstream.accept(mapper.applyAsLong(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream flatMap(DoubleFunction<? extends DoubleStream> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedDouble<Double>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
// cache the consumer to avoid creation on every accepted element
|
||||
DoubleConsumer downstreamAsDouble = downstream::accept;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
try (DoubleStream result = mapper.apply(t)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstreamAsDouble);
|
||||
}
|
||||
else {
|
||||
Spliterator.OfDouble s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstreamAsDouble));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
// If this method is called then an operation within the stream
|
||||
// pipeline is short-circuiting (see AbstractPipeline.copyInto).
|
||||
// Note that we cannot differentiate between an upstream or
|
||||
// downstream operation
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public DoubleStream unordered() {
|
||||
if (!isOrdered())
|
||||
return this;
|
||||
return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE, StreamOpFlag.NOT_ORDERED) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return sink;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream filter(DoublePredicate predicate) {
|
||||
Objects.requireNonNull(predicate);
|
||||
return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedDouble<Double>(sink) {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
if (predicate.test(t))
|
||||
downstream.accept(t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream peek(DoubleConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
return new StatelessOp<Double>(this, StreamShape.DOUBLE_VALUE,
|
||||
0) {
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedDouble<Double>(sink) {
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
action.accept(t);
|
||||
downstream.accept(t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Stateful intermediate ops from DoubleStream
|
||||
|
||||
@Override
|
||||
public final DoubleStream limit(long maxSize) {
|
||||
if (maxSize < 0)
|
||||
throw new IllegalArgumentException(Long.toString(maxSize));
|
||||
return SliceOps.makeDouble(this, (long) 0, maxSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream skip(long n) {
|
||||
if (n < 0)
|
||||
throw new IllegalArgumentException(Long.toString(n));
|
||||
if (n == 0)
|
||||
return this;
|
||||
else {
|
||||
long limit = -1;
|
||||
return SliceOps.makeDouble(this, n, limit);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream sorted() {
|
||||
return SortedOps.makeDouble(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream distinct() {
|
||||
// While functional and quick to implement, this approach is not very efficient.
|
||||
// An efficient version requires a double-specific map/set implementation.
|
||||
return boxed().distinct().mapToDouble(i -> (double) i);
|
||||
}
|
||||
|
||||
// Terminal ops from DoubleStream
|
||||
|
||||
@Override
|
||||
public void forEach(DoubleConsumer consumer) {
|
||||
evaluate(ForEachOps.makeDouble(consumer, false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(DoubleConsumer consumer) {
|
||||
evaluate(ForEachOps.makeDouble(consumer, true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final double sum() {
|
||||
/*
|
||||
* In the arrays allocated for the collect operation, index 0
|
||||
* holds the high-order bits of the running sum, index 1 holds
|
||||
* the low-order bits of the sum computed via compensated
|
||||
* summation, and index 2 holds the simple sum used to compute
|
||||
* the proper result if the stream contains infinite values of
|
||||
* the same sign.
|
||||
*/
|
||||
double[] summation = collect(() -> new double[3],
|
||||
(ll, d) -> {
|
||||
Collectors.sumWithCompensation(ll, d);
|
||||
ll[2] += d;
|
||||
},
|
||||
(ll, rr) -> {
|
||||
Collectors.sumWithCompensation(ll, rr[0]);
|
||||
Collectors.sumWithCompensation(ll, rr[1]);
|
||||
ll[2] += rr[2];
|
||||
});
|
||||
|
||||
return Collectors.computeFinalSum(summation);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalDouble min() {
|
||||
return reduce(Math::min);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalDouble max() {
|
||||
return reduce(Math::max);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implNote The {@code double} format can represent all
|
||||
* consecutive integers in the range -2<sup>53</sup> to
|
||||
* 2<sup>53</sup>. If the pipeline has more than 2<sup>53</sup>
|
||||
* values, the divisor in the average computation will saturate at
|
||||
* 2<sup>53</sup>, leading to additional numerical errors.
|
||||
*/
|
||||
@Override
|
||||
public final OptionalDouble average() {
|
||||
/*
|
||||
* In the arrays allocated for the collect operation, index 0
|
||||
* holds the high-order bits of the running sum, index 1 holds
|
||||
* the low-order bits of the sum computed via compensated
|
||||
* summation, index 2 holds the number of values seen, index 3
|
||||
* holds the simple sum.
|
||||
*/
|
||||
double[] avg = collect(() -> new double[4],
|
||||
(ll, d) -> {
|
||||
ll[2]++;
|
||||
Collectors.sumWithCompensation(ll, d);
|
||||
ll[3] += d;
|
||||
},
|
||||
(ll, rr) -> {
|
||||
Collectors.sumWithCompensation(ll, rr[0]);
|
||||
Collectors.sumWithCompensation(ll, rr[1]);
|
||||
ll[2] += rr[2];
|
||||
ll[3] += rr[3];
|
||||
});
|
||||
return avg[2] > 0
|
||||
? OptionalDouble.of(Collectors.computeFinalSum(avg) / avg[2])
|
||||
: OptionalDouble.empty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long count() {
|
||||
return mapToLong(e -> 1L).sum();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleSummaryStatistics summaryStatistics() {
|
||||
return collect(DoubleSummaryStatistics::new, DoubleSummaryStatistics::accept,
|
||||
DoubleSummaryStatistics::combine);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final double reduce(double identity, DoubleBinaryOperator op) {
|
||||
return evaluate(ReduceOps.makeDouble(identity, op));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalDouble reduce(DoubleBinaryOperator op) {
|
||||
return evaluate(ReduceOps.makeDouble(op));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <R> R collect(Supplier<R> supplier,
|
||||
ObjDoubleConsumer<R> accumulator,
|
||||
BiConsumer<R, R> combiner) {
|
||||
Objects.requireNonNull(combiner);
|
||||
BinaryOperator<R> operator = (left, right) -> {
|
||||
combiner.accept(left, right);
|
||||
return left;
|
||||
};
|
||||
return evaluate(ReduceOps.makeDouble(supplier, accumulator, operator));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean anyMatch(DoublePredicate predicate) {
|
||||
return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.ANY));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean allMatch(DoublePredicate predicate) {
|
||||
return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.ALL));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean noneMatch(DoublePredicate predicate) {
|
||||
return evaluate(MatchOps.makeDouble(predicate, MatchOps.MatchKind.NONE));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalDouble findFirst() {
|
||||
return evaluate(FindOps.makeDouble(true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalDouble findAny() {
|
||||
return evaluate(FindOps.makeDouble(false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final double[] toArray() {
|
||||
return Nodes.flattenDouble((Node.OfDouble) evaluateToArrayNode(Double[]::new))
|
||||
.asPrimitiveArray();
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/**
|
||||
* Source stage of a DoubleStream
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
*/
|
||||
static class Head<E_IN> extends DoublePipeline<E_IN> {
|
||||
/**
|
||||
* Constructor for the source stage of a DoubleStream.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream
|
||||
* source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
Head(Supplier<? extends Spliterator<Double>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the source stage of a DoubleStream.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
Head(Spliterator<Double> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
final Sink<E_IN> opWrapSink(int flags, Sink<Double> sink) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
// Optimized sequential terminal operations for the head of the pipeline
|
||||
|
||||
@Override
|
||||
public void forEach(DoubleConsumer consumer) {
|
||||
if (!isParallel()) {
|
||||
adapt(sourceStageSpliterator()).forEachRemaining(consumer);
|
||||
}
|
||||
else {
|
||||
super.forEach(consumer);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(DoubleConsumer consumer) {
|
||||
if (!isParallel()) {
|
||||
adapt(sourceStageSpliterator()).forEachRemaining(consumer);
|
||||
}
|
||||
else {
|
||||
super.forEachOrdered(consumer);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateless intermediate stage of a DoubleStream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatelessOp<E_IN> extends DoublePipeline<E_IN> {
|
||||
/**
|
||||
* Construct a new DoubleStream by appending a stateless intermediate
|
||||
* operation to an existing stream.
|
||||
*
|
||||
* @param upstream the upstream pipeline stage
|
||||
* @param inputShape the stream shape for the upstream pipeline stage
|
||||
* @param opFlags operation flags for the new stage
|
||||
*/
|
||||
StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateful intermediate stage of a DoubleStream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatefulOp<E_IN> extends DoublePipeline<E_IN> {
|
||||
/**
|
||||
* Construct a new DoubleStream by appending a stateful intermediate
|
||||
* operation to an existing stream.
|
||||
*
|
||||
* @param upstream the upstream pipeline stage
|
||||
* @param inputShape the stream shape for the upstream pipeline stage
|
||||
* @param opFlags operation flags for the new stage
|
||||
*/
|
||||
StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
abstract <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Double[]> generator);
|
||||
}
|
||||
}
|
||||
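The double[3]/double[4] state used by sum() and average() above is compensated (Kahan-style) summation plus a plain running sum kept to recover an infinite result. Below is a standalone sketch of the same idea, with illustrative names rather than the package-private Collectors helpers; it is the general technique, not the exact JDK code.

public class CompensatedSumSketch {
    // state[0] = running high-order sum, state[1] = carried compensation,
    // state[2] = naive simple sum (may overflow to +/- infinity).
    static void accept(double[] state, double value) {
        double y = value - state[1];     // correct the incoming value by the carried error
        double t = state[0] + y;
        state[1] = (t - state[0]) - y;   // rounding error introduced by this addition
        state[0] = t;
        state[2] += value;               // plain sum, kept for the infinity fix-up
    }

    static double finish(double[] state) {
        // If the naive sum overflowed to an infinity while the compensated sum
        // degenerated to NaN (infinity minus infinity during compensation),
        // report the infinity; otherwise report the compensated sum.
        if (Double.isInfinite(state[2]) && Double.isNaN(state[0]))
            return state[2];
        return state[0];
    }

    public static void main(String[] args) {
        double[] s = new double[3];
        accept(s, 1e16);
        for (int i = 0; i < 10_000; i++) accept(s, 1.0);
        // The compensated result stays close to the exact 1.0e16 + 10000,
        // while the plain running sum in s[2] loses the small additions.
        System.out.println("compensated = " + finish(s) + ", naive = " + s[2]);
    }
}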
894
jdkSrc/jdk8/java/util/stream/DoubleStream.java
Normal file
@@ -0,0 +1,894 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.DoubleSummaryStatistics;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.PrimitiveIterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.DoubleBinaryOperator;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.DoubleFunction;
|
||||
import java.util.function.DoublePredicate;
|
||||
import java.util.function.DoubleSupplier;
|
||||
import java.util.function.DoubleToIntFunction;
|
||||
import java.util.function.DoubleToLongFunction;
|
||||
import java.util.function.DoubleUnaryOperator;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.ObjDoubleConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* A sequence of primitive double-valued elements supporting sequential and parallel
|
||||
* aggregate operations. This is the {@code double} primitive specialization of
|
||||
* {@link Stream}.
|
||||
*
|
||||
* <p>The following example illustrates an aggregate operation using
|
||||
* {@link Stream} and {@link DoubleStream}, computing the sum of the weights of the
|
||||
* red widgets:
|
||||
*
|
||||
* <pre>{@code
|
||||
* double sum = widgets.stream()
|
||||
* .filter(w -> w.getColor() == RED)
|
||||
* .mapToDouble(w -> w.getWeight())
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* See the class documentation for {@link Stream} and the package documentation
|
||||
* for <a href="package-summary.html">java.util.stream</a> for additional
|
||||
* specification of streams, stream operations, stream pipelines, and
|
||||
* parallelism.
|
||||
*
|
||||
* @since 1.8
|
||||
* @see Stream
|
||||
* @see <a href="package-summary.html">java.util.stream</a>
|
||||
*/
|
||||
public interface DoubleStream extends BaseStream<Double, DoubleStream> {
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream that match
|
||||
* the given predicate.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to each element to determine if it
|
||||
* should be included
|
||||
* @return the new stream
|
||||
*/
|
||||
DoubleStream filter(DoublePredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the results of applying the given
|
||||
* function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
DoubleStream map(DoubleUnaryOperator mapper);
|
||||
|
||||
/**
|
||||
* Returns an object-valued {@code Stream} consisting of the results of
|
||||
* applying the given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @param <U> the element type of the new stream
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
<U> Stream<U> mapToObj(DoubleFunction<? extends U> mapper);
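/*
 * A minimal usage sketch: mapping each double to a formatted String.
 *
 *   Stream<String> labels = DoubleStream.of(1.5, 2.5)
 *       .mapToObj(d -> String.format("%.1f", d));
 */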
|
||||
|
||||
/**
|
||||
* Returns an {@code IntStream} consisting of the results of applying the
|
||||
* given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream mapToInt(DoubleToIntFunction mapper);
|
||||
|
||||
/**
|
||||
* Returns a {@code LongStream} consisting of the results of applying the
|
||||
* given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream mapToLong(DoubleToLongFunction mapper);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the results of replacing each element of
|
||||
* this stream with the contents of a mapped stream produced by applying
|
||||
* the provided mapping function to each element. Each mapped stream is
|
||||
* {@link java.util.stream.BaseStream#close() closed} after its contents
|
||||
* have been placed into this stream. (If a mapped stream is {@code null}
|
||||
* an empty stream is used, instead.)
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element which produces a
|
||||
* {@code DoubleStream} of new values
|
||||
* @return the new stream
|
||||
* @see Stream#flatMap(Function)
|
||||
*/
|
||||
DoubleStream flatMap(DoubleFunction<? extends DoubleStream> mapper);
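/*
 * A minimal usage sketch: replacing each value v with the pair {v, -v}.
 *
 *   DoubleStream.of(1.0, 2.0)
 *       .flatMap(v -> DoubleStream.of(v, -v))
 *       .forEach(System.out::println);   // 1.0, -1.0, 2.0, -2.0
 */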
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the distinct elements of this stream. The
|
||||
* elements are compared for equality according to
|
||||
* {@link java.lang.Double#compare(double, double)}.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @return the result stream
|
||||
*/
|
||||
DoubleStream distinct();
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream in sorted
|
||||
* order. The elements are compared according to
|
||||
* {@link java.lang.Double#compare(double, double)}.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @return the result stream
|
||||
*/
|
||||
DoubleStream sorted();
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream, additionally
|
||||
* performing the provided action on each element as elements are consumed
|
||||
* from the resulting stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* <p>For parallel stream pipelines, the action may be called at
|
||||
* whatever time and in whatever thread the element is made available by the
|
||||
* upstream operation. If the action modifies shared state,
|
||||
* it is responsible for providing the required synchronization.
|
||||
*
|
||||
* @apiNote This method exists mainly to support debugging, where you want
|
||||
* to see the elements as they flow past a certain point in a pipeline:
|
||||
* <pre>{@code
|
||||
* DoubleStream.of(1, 2, 3, 4)
|
||||
* .filter(e -> e > 2)
|
||||
* .peek(e -> System.out.println("Filtered value: " + e))
|
||||
* .map(e -> e * e)
|
||||
* .peek(e -> System.out.println("Mapped value: " + e))
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements as
|
||||
* they are consumed from the stream
|
||||
* @return the new stream
|
||||
*/
|
||||
DoubleStream peek(DoubleConsumer action);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream, truncated
|
||||
* to be no longer than {@code maxSize} in length.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* stateful intermediate operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* While {@code limit()} is generally a cheap operation on sequential
|
||||
* stream pipelines, it can be quite expensive on ordered parallel pipelines,
|
||||
* especially for large values of {@code maxSize}, since {@code limit(n)}
|
||||
* is constrained to return not just any <em>n</em> elements, but the
|
||||
* <em>first n</em> elements in the encounter order. Using an unordered
|
||||
* stream source (such as {@link #generate(DoubleSupplier)}) or removing the
|
||||
* ordering constraint with {@link #unordered()} may result in significant
|
||||
* speedups of {@code limit()} in parallel pipelines, if the semantics of
|
||||
* your situation permit. If consistency with encounter order is required,
|
||||
* and you are experiencing poor performance or memory utilization with
|
||||
* {@code limit()} in parallel pipelines, switching to sequential execution
|
||||
* with {@link #sequential()} may improve performance.
|
||||
*
|
||||
* @param maxSize the number of elements the stream should be limited to
|
||||
* @return the new stream
|
||||
* @throws IllegalArgumentException if {@code maxSize} is negative
|
||||
*/
|
||||
DoubleStream limit(long maxSize);
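/*
 * A sketch of the unordered-limit idiom described in the note above,
 * assuming the caller does not need the first elements in encounter order.
 *
 *   double[] sample = DoubleStream.generate(Math::random)
 *       .parallel()
 *       .unordered()
 *       .limit(1_000)
 *       .toArray();
 */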
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the remaining elements of this stream
|
||||
* after discarding the first {@code n} elements of the stream.
|
||||
* If this stream contains fewer than {@code n} elements then an
|
||||
* empty stream will be returned.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* While {@code skip()} is generally a cheap operation on sequential
|
||||
* stream pipelines, it can be quite expensive on ordered parallel pipelines,
|
||||
* especially for large values of {@code n}, since {@code skip(n)}
|
||||
* is constrained to skip not just any <em>n</em> elements, but the
|
||||
* <em>first n</em> elements in the encounter order. Using an unordered
|
||||
* stream source (such as {@link #generate(DoubleSupplier)}) or removing the
|
||||
* ordering constraint with {@link #unordered()} may result in significant
|
||||
* speedups of {@code skip()} in parallel pipelines, if the semantics of
|
||||
* your situation permit. If consistency with encounter order is required,
|
||||
* and you are experiencing poor performance or memory utilization with
|
||||
* {@code skip()} in parallel pipelines, switching to sequential execution
|
||||
* with {@link #sequential()} may improve performance.
|
||||
*
|
||||
* @param n the number of leading elements to skip
|
||||
* @return the new stream
|
||||
* @throws IllegalArgumentException if {@code n} is negative
|
||||
*/
|
||||
DoubleStream skip(long n);
|
||||
|
||||
/**
|
||||
* Performs an action for each element of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* <p>For parallel stream pipelines, this operation does <em>not</em>
|
||||
* guarantee to respect the encounter order of the stream, as doing so
|
||||
* would sacrifice the benefit of parallelism. For any given element, the
|
||||
* action may be performed at whatever time and in whatever thread the
|
||||
* library chooses. If the action accesses shared state, it is
|
||||
* responsible for providing the required synchronization.
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements
|
||||
*/
|
||||
void forEach(DoubleConsumer action);
|
||||
|
||||
/**
|
||||
* Performs an action for each element of this stream, guaranteeing that
|
||||
* each element is processed in encounter order for streams that have a
|
||||
* defined encounter order.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements
|
||||
* @see #forEach(DoubleConsumer)
|
||||
*/
|
||||
void forEachOrdered(DoubleConsumer action);
|
||||
|
||||
/**
|
||||
* Returns an array containing the elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an array containing the elements of this stream
|
||||
*/
|
||||
double[] toArray();
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#Reduction">reduction</a> on the
|
||||
* elements of this stream, using the provided identity value and an
|
||||
* <a href="package-summary.html#Associativity">associative</a>
|
||||
* accumulation function, and returns the reduced value. This is equivalent
|
||||
* to:
|
||||
* <pre>{@code
|
||||
* double result = identity;
|
||||
* for (double element : this stream)
|
||||
* result = accumulator.applyAsDouble(result, element)
|
||||
* return result;
|
||||
* }</pre>
|
||||
*
|
||||
* but is not constrained to execute sequentially.
|
||||
*
|
||||
* <p>The {@code identity} value must be an identity for the accumulator
|
||||
* function. This means that for all {@code x},
|
||||
* {@code accumulator.applyAsDouble(identity, x)} is equal to {@code x}.
|
||||
* The {@code accumulator} function must be an
|
||||
* <a href="package-summary.html#Associativity">associative</a> function.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @apiNote Sum, min, max, and average are all special cases of reduction.
|
||||
* Summing a stream of numbers can be expressed as:
|
||||
|
||||
* <pre>{@code
|
||||
* double sum = numbers.reduce(0, (a, b) -> a+b);
|
||||
* }</pre>
|
||||
*
|
||||
* or more compactly:
|
||||
*
|
||||
* <pre>{@code
|
||||
* double sum = numbers.reduce(0, Double::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>While this may seem a more roundabout way to perform an aggregation
|
||||
* compared to simply mutating a running total in a loop, reduction
|
||||
* operations parallelize more gracefully, without needing additional
|
||||
* synchronization and with greatly reduced risk of data races.
|
||||
*
|
||||
* @param identity the identity value for the accumulating function
|
||||
* @param op an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values
|
||||
* @return the result of the reduction
|
||||
* @see #sum()
|
||||
* @see #min()
|
||||
* @see #max()
|
||||
* @see #average()
|
||||
*/
|
||||
double reduce(double identity, DoubleBinaryOperator op);
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#Reduction">reduction</a> on the
|
||||
* elements of this stream, using an
|
||||
* <a href="package-summary.html#Associativity">associative</a> accumulation
|
||||
* function, and returns an {@code OptionalDouble} describing the reduced
|
||||
* value, if any. This is equivalent to:
|
||||
* <pre>{@code
|
||||
* boolean foundAny = false;
|
||||
* double result = null;
|
||||
* for (double element : this stream) {
|
||||
* if (!foundAny) {
|
||||
* foundAny = true;
|
||||
* result = element;
|
||||
* }
|
||||
* else
|
||||
* result = accumulator.applyAsDouble(result, element);
|
||||
* }
|
||||
* return foundAny ? OptionalDouble.of(result) : OptionalDouble.empty();
|
||||
* }</pre>
|
||||
*
|
||||
* but is not constrained to execute sequentially.
|
||||
*
|
||||
* <p>The {@code accumulator} function must be an
|
||||
* <a href="package-summary.html#Associativity">associative</a> function.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param op an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values
|
||||
* @return the result of the reduction
|
||||
* @see #reduce(double, DoubleBinaryOperator)
|
||||
*/
|
||||
OptionalDouble reduce(DoubleBinaryOperator op);
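/*
 * A minimal usage sketch: the product of the elements, or an empty
 * OptionalDouble if the stream has no elements.
 *
 *   OptionalDouble product = DoubleStream.of(2.0, 3.0, 4.0)
 *       .reduce((a, b) -> a * b);   // OptionalDouble[24.0]
 */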
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#MutableReduction">mutable
|
||||
* reduction</a> operation on the elements of this stream. A mutable
|
||||
* reduction is one in which the reduced value is a mutable result container,
|
||||
* such as an {@code ArrayList}, and elements are incorporated by updating
|
||||
* the state of the result rather than by replacing the result. This
|
||||
* produces a result equivalent to:
|
||||
* <pre>{@code
|
||||
* R result = supplier.get();
|
||||
* for (double element : this stream)
|
||||
* accumulator.accept(result, element);
|
||||
* return result;
|
||||
* }</pre>
|
||||
*
|
||||
* <p>Like {@link #reduce(double, DoubleBinaryOperator)}, {@code collect}
|
||||
* operations can be parallelized without requiring additional
|
||||
* synchronization.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param <R> type of the result
|
||||
* @param supplier a function that creates a new result container. For a
|
||||
* parallel execution, this function may be called
|
||||
* multiple times and must return a fresh value each time.
|
||||
* @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for incorporating an additional element into a result
|
||||
* @param combiner an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values, which must be
|
||||
* compatible with the accumulator function
|
||||
* @return the result of the reduction
|
||||
* @see Stream#collect(Supplier, BiConsumer, BiConsumer)
|
||||
*/
|
||||
<R> R collect(Supplier<R> supplier,
|
||||
ObjDoubleConsumer<R> accumulator,
|
||||
BiConsumer<R, R> combiner);
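/*
 * A minimal usage sketch: collecting the elements into an ArrayList<Double>
 * (java.util.ArrayList and java.util.List would need to be imported by the
 * caller). The supplier/accumulator/combiner triple mirrors
 * Stream.collect(Supplier, BiConsumer, BiConsumer).
 *
 *   List<Double> boxed = DoubleStream.of(1.0, 2.0, 3.0)
 *       .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
 */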
|
||||
|
||||
/**
|
||||
* Returns the sum of elements in this stream.
|
||||
*
|
||||
* Summation is a special case of a <a
|
||||
* href="package-summary.html#Reduction">reduction</a>. If
|
||||
* floating-point summation were exact, this method would be
|
||||
* equivalent to:
|
||||
*
|
||||
* <pre>{@code
|
||||
* return reduce(0, Double::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* However, since floating-point summation is not exact, the above
|
||||
* code is not necessarily equivalent to the summation computation
|
||||
* done by this method.
|
||||
*
|
||||
* <p>If any stream element is a NaN or the sum is at any point a NaN
|
||||
* then the sum will be NaN.
|
||||
*
|
||||
* The value of a floating-point sum is a function both
|
||||
* of the input values as well as the order of addition
|
||||
* operations. The order of addition operations of this method is
|
||||
* intentionally not defined to allow for implementation
|
||||
* flexibility to improve the speed and accuracy of the computed
|
||||
* result.
|
||||
*
|
||||
* In particular, this method may be implemented using compensated
|
||||
* summation or other technique to reduce the error bound in the
|
||||
* numerical sum compared to a simple summation of {@code double}
|
||||
* values.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @apiNote Elements sorted by increasing absolute magnitude tend
|
||||
* to yield more accurate results.
|
||||
*
|
||||
* @return the sum of elements in this stream
|
||||
*/
|
||||
double sum();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalDouble} describing the minimum element of this
|
||||
* stream, or an empty OptionalDouble if this stream is empty. The minimum
|
||||
* element will be {@code Double.NaN} if any stream element was NaN. Unlike
|
||||
* the numerical comparison operators, this method considers negative zero
|
||||
* to be strictly smaller than positive zero. This is a special case of a
|
||||
* <a href="package-summary.html#Reduction">reduction</a> and is
|
||||
* equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(Double::min);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalDouble} containing the minimum element of this
|
||||
* stream, or an empty optional if the stream is empty
|
||||
*/
|
||||
OptionalDouble min();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalDouble} describing the maximum element of this
|
||||
* stream, or an empty OptionalDouble if this stream is empty. The maximum
|
||||
* element will be {@code Double.NaN} if any stream element was NaN. Unlike
|
||||
* the numerical comparison operators, this method considers negative zero
|
||||
* to be strictly smaller than positive zero. This is a
|
||||
* special case of a
|
||||
* <a href="package-summary.html#Reduction">reduction</a> and is
|
||||
* equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(Double::max);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalDouble} containing the maximum element of this
|
||||
* stream, or an empty optional if the stream is empty
|
||||
*/
|
||||
OptionalDouble max();
|
||||
|
||||
/**
|
||||
* Returns the count of elements in this stream. This is a special case of
|
||||
* a <a href="package-summary.html#Reduction">reduction</a> and is
|
||||
* equivalent to:
|
||||
* <pre>{@code
|
||||
* return mapToLong(e -> 1L).sum();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
|
||||
*
|
||||
* @return the count of elements in this stream
|
||||
*/
|
||||
long count();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalDouble} describing the arithmetic
|
||||
* mean of elements of this stream, or an empty optional if this
|
||||
* stream is empty.
|
||||
*
|
||||
* If any recorded value is a NaN or the sum is at any point a NaN
|
||||
* then the average will be NaN.
|
||||
*
|
||||
* <p>The average returned can vary depending upon the order in
|
||||
* which values are recorded.
|
||||
*
|
||||
* This method may be implemented using compensated summation or
|
||||
* other technique to reduce the error bound in the {@link #sum
|
||||
* numerical sum} used to compute the average.
|
||||
*
|
||||
* <p>The average is a special case of a <a
|
||||
* href="package-summary.html#Reduction">reduction</a>.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @apiNote Elements sorted by increasing absolute magnitude tend
|
||||
* to yield more accurate results.
|
||||
*
|
||||
* @return an {@code OptionalDouble} containing the average element of this
|
||||
* stream, or an empty optional if the stream is empty
|
||||
*/
|
||||
OptionalDouble average();
|
||||
|
||||
/**
|
||||
* Returns a {@code DoubleSummaryStatistics} describing various summary data
|
||||
* about the elements of this stream. This is a special
|
||||
* case of a <a href="package-summary.html#Reduction">reduction</a>.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code DoubleSummaryStatistics} describing various summary data
|
||||
* about the elements of this stream
|
||||
*/
|
||||
DoubleSummaryStatistics summaryStatistics();
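/*
 * A minimal usage sketch: count, sum, min, average and max in one pass.
 *
 *   DoubleSummaryStatistics stats = DoubleStream.of(1.0, 2.0, 3.0)
 *       .summaryStatistics();
 *   double mean = stats.getAverage();   // 2.0
 *   double max  = stats.getMax();       // 3.0
 */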
|
||||
|
||||
/**
|
||||
* Returns whether any elements of this stream match the provided
|
||||
* predicate. May not evaluate the predicate on all elements if not
|
||||
* necessary for determining the result. If the stream is empty then
|
||||
* {@code false} is returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>existential quantification</em> of the
|
||||
* predicate over the elements of the stream (for some x P(x)).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if any elements of the stream match the provided
|
||||
* predicate, otherwise {@code false}
|
||||
*/
|
||||
boolean anyMatch(DoublePredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns whether all elements of this stream match the provided predicate.
|
||||
* May not evaluate the predicate on all elements if not necessary for
|
||||
* determining the result. If the stream is empty then {@code true} is
|
||||
* returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>universal quantification</em> of the
|
||||
* predicate over the elements of the stream (for all x P(x)). If the
|
||||
* stream is empty, the quantification is said to be <em>vacuously
|
||||
* satisfied</em> and is always {@code true} (regardless of P(x)).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if either all elements of the stream match the
|
||||
* provided predicate or the stream is empty, otherwise {@code false}
|
||||
*/
|
||||
boolean allMatch(DoublePredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns whether no elements of this stream match the provided predicate.
|
||||
* May not evaluate the predicate on all elements if not necessary for
|
||||
* determining the result. If the stream is empty then {@code true} is
|
||||
* returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>universal quantification</em> of the
|
||||
* negated predicate over the elements of the stream (for all x ~P(x)). If
|
||||
* the stream is empty, the quantification is said to be vacuously satisfied
|
||||
* and is always {@code true}, regardless of P(x).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if either no elements of the stream match the
|
||||
* provided predicate or the stream is empty, otherwise {@code false}
|
||||
*/
|
||||
boolean noneMatch(DoublePredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns an {@link OptionalDouble} describing the first element of this
|
||||
* stream, or an empty {@code OptionalDouble} if the stream is empty. If
|
||||
* the stream has no encounter order, then any element may be returned.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalDouble} describing the first element of this
|
||||
* stream, or an empty {@code OptionalDouble} if the stream is empty
|
||||
*/
|
||||
OptionalDouble findFirst();
|
||||
|
||||
/**
|
||||
* Returns an {@link OptionalDouble} describing some element of the stream,
|
||||
* or an empty {@code OptionalDouble} if the stream is empty.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* <p>The behavior of this operation is explicitly nondeterministic; it is
|
||||
* free to select any element in the stream. This is to allow for maximal
|
||||
* performance in parallel operations; the cost is that multiple invocations
|
||||
* on the same source may not return the same result. (If a stable result
|
||||
* is desired, use {@link #findFirst()} instead.)
|
||||
*
|
||||
* @return an {@code OptionalDouble} describing some element of this stream,
|
||||
* or an empty {@code OptionalDouble} if the stream is empty
|
||||
* @see #findFirst()
|
||||
*/
|
||||
OptionalDouble findAny();
|
||||
|
||||
/**
|
||||
* Returns a {@code Stream} consisting of the elements of this stream,
|
||||
* boxed to {@code Double}.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code Stream} consisting of the elements of this stream,
|
||||
* each boxed to a {@code Double}
|
||||
*/
|
||||
Stream<Double> boxed();
|
||||
|
||||
@Override
|
||||
DoubleStream sequential();
|
||||
|
||||
@Override
|
||||
DoubleStream parallel();
|
||||
|
||||
@Override
|
||||
PrimitiveIterator.OfDouble iterator();
|
||||
|
||||
@Override
|
||||
Spliterator.OfDouble spliterator();
|
||||
|
||||
|
||||
// Static factories
|
||||
|
||||
/**
|
||||
* Returns a builder for a {@code DoubleStream}.
|
||||
*
|
||||
* @return a stream builder
|
||||
*/
|
||||
public static Builder builder() {
|
||||
return new Streams.DoubleStreamBuilderImpl();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an empty sequential {@code DoubleStream}.
|
||||
*
|
||||
* @return an empty sequential stream
|
||||
*/
|
||||
public static DoubleStream empty() {
|
||||
return StreamSupport.doubleStream(Spliterators.emptyDoubleSpliterator(), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential {@code DoubleStream} containing a single element.
|
||||
*
|
||||
* @param t the single element
|
||||
* @return a singleton sequential stream
|
||||
*/
|
||||
public static DoubleStream of(double t) {
|
||||
return StreamSupport.doubleStream(new Streams.DoubleStreamBuilderImpl(t), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential ordered stream whose elements are the specified values.
|
||||
*
|
||||
* @param values the elements of the new stream
|
||||
* @return the new stream
|
||||
*/
|
||||
public static DoubleStream of(double... values) {
|
||||
return Arrays.stream(values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an infinite sequential ordered {@code DoubleStream} produced by iterative
|
||||
* application of a function {@code f} to an initial element {@code seed},
|
||||
* producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
|
||||
* {@code f(f(seed))}, etc.
|
||||
*
|
||||
* <p>The first element (position {@code 0}) in the {@code DoubleStream}
|
||||
* will be the provided {@code seed}. For {@code n > 0}, the element at
|
||||
* position {@code n}, will be the result of applying the function {@code f}
|
||||
* to the element at position {@code n - 1}.
|
||||
*
|
||||
* @param seed the initial element
|
||||
* @param f a function to be applied to the previous element to produce
|
||||
* a new element
|
||||
* @return a new sequential {@code DoubleStream}
|
||||
*/
|
||||
public static DoubleStream iterate(final double seed, final DoubleUnaryOperator f) {
|
||||
Objects.requireNonNull(f);
|
||||
final PrimitiveIterator.OfDouble iterator = new PrimitiveIterator.OfDouble() {
|
||||
double t = seed;
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public double nextDouble() {
|
||||
double v = t;
|
||||
t = f.applyAsDouble(t);
|
||||
return v;
|
||||
}
|
||||
};
|
||||
return StreamSupport.doubleStream(Spliterators.spliteratorUnknownSize(
|
||||
iterator,
|
||||
Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
|
||||
}
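/*
 * A minimal usage sketch: the first five powers of two, as doubles.
 *
 *   DoubleStream.iterate(1.0, x -> x * 2)
 *       .limit(5)
 *       .forEach(System.out::println);   // 1.0, 2.0, 4.0, 8.0, 16.0
 */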
|
||||
|
||||
/**
|
||||
* Returns an infinite sequential unordered stream where each element is
|
||||
* generated by the provided {@code DoubleSupplier}. This is suitable for
|
||||
* generating constant streams, streams of random elements, etc.
|
||||
*
|
||||
* @param s the {@code DoubleSupplier} for generated elements
|
||||
* @return a new infinite sequential unordered {@code DoubleStream}
|
||||
*/
|
||||
public static DoubleStream generate(DoubleSupplier s) {
|
||||
Objects.requireNonNull(s);
|
||||
return StreamSupport.doubleStream(
|
||||
new StreamSpliterators.InfiniteSupplyingSpliterator.OfDouble(Long.MAX_VALUE, s), false);
|
||||
}
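/*
 * A minimal usage sketch: a constant stream truncated to four elements.
 *
 *   double total = DoubleStream.generate(() -> 1.0)
 *       .limit(4)
 *       .sum();   // 4.0
 */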
|
||||
|
||||
/**
|
||||
* Creates a lazily concatenated stream whose elements are all the
|
||||
* elements of the first stream followed by all the elements of the
|
||||
* second stream. The resulting stream is ordered if both
|
||||
* of the input streams are ordered, and parallel if either of the input
|
||||
* streams is parallel. When the resulting stream is closed, the close
|
||||
* handlers for both input streams are invoked.
|
||||
*
|
||||
* @implNote
|
||||
* Use caution when constructing streams from repeated concatenation.
|
||||
* Accessing an element of a deeply concatenated stream can result in deep
|
||||
* call chains, or even {@code StackOverflowError}.
|
||||
*
|
||||
* @param a the first stream
|
||||
* @param b the second stream
|
||||
* @return the concatenation of the two input streams
|
||||
*/
|
||||
public static DoubleStream concat(DoubleStream a, DoubleStream b) {
|
||||
Objects.requireNonNull(a);
|
||||
Objects.requireNonNull(b);
|
||||
|
||||
Spliterator.OfDouble split = new Streams.ConcatSpliterator.OfDouble(
|
||||
a.spliterator(), b.spliterator());
|
||||
DoubleStream stream = StreamSupport.doubleStream(split, a.isParallel() || b.isParallel());
|
||||
return stream.onClose(Streams.composedClose(a, b));
|
||||
}
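/*
 * A minimal usage sketch: concatenation preserves the order of the inputs.
 *
 *   DoubleStream.concat(DoubleStream.of(1.0, 2.0), DoubleStream.of(3.0))
 *       .forEach(System.out::println);   // 1.0, 2.0, 3.0
 */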
|
||||
|
||||
/**
|
||||
* A mutable builder for a {@code DoubleStream}.
|
||||
*
|
||||
* <p>A stream builder has a lifecycle, which starts in a building
|
||||
* phase, during which elements can be added, and then transitions to a built
|
||||
* phase, after which elements may not be added. The built phase
|
||||
* begins when the {@link #build()} method is called, which creates an
|
||||
* ordered stream whose elements are the elements that were added to the
|
||||
* stream builder, in the order they were added.
|
||||
*
|
||||
* @see DoubleStream#builder()
|
||||
* @since 1.8
|
||||
*/
|
||||
public interface Builder extends DoubleConsumer {
|
||||
|
||||
/**
|
||||
* Adds an element to the stream being built.
|
||||
*
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
@Override
|
||||
void accept(double t);
|
||||
|
||||
/**
|
||||
* Adds an element to the stream being built.
|
||||
*
|
||||
* @implSpec
|
||||
* The default implementation behaves as if:
|
||||
* <pre>{@code
|
||||
* accept(t)
|
||||
* return this;
|
||||
* }</pre>
|
||||
*
|
||||
* @param t the element to add
|
||||
* @return {@code this} builder
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
default Builder add(double t) {
|
||||
accept(t);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the stream, transitioning this builder to the built state.
|
||||
* An {@code IllegalStateException} is thrown if there are further
|
||||
* attempts to operate on the builder after it has entered the built
|
||||
* state.
|
||||
*
|
||||
* @return the built stream
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
DoubleStream build();
|
||||
}
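/*
 * A minimal usage sketch of the builder lifecycle: add elements while in
 * the building phase, then call build() exactly once.
 *
 *   DoubleStream.Builder b = DoubleStream.builder();
 *   b.add(1.0).add(2.0).add(3.0);
 *   double total = b.build().sum();   // 6.0
 */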
|
||||
}
|
||||
318
jdkSrc/jdk8/java/util/stream/FindOps.java
Normal file
318
jdkSrc/jdk8/java/util/stream/FindOps.java
Normal file
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Optional;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.OptionalInt;
|
||||
import java.util.OptionalLong;
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.CountedCompleter;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Factory for instances of a short-circuiting {@code TerminalOp} that searches
|
||||
* for an element in a stream pipeline, and terminates when it finds one.
|
||||
* Supported variants include find-first (find the first element in the
|
||||
* encounter order) and find-any (find any element, may not be the first in
|
||||
* encounter order.)
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class FindOps {
|
||||
|
||||
private FindOps() { }
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} for streams of objects.
|
||||
*
|
||||
* @param <T> the type of elements of the stream
|
||||
* @param mustFindFirst whether the {@code TerminalOp} must produce the
|
||||
* first element in the encounter order
|
||||
* @return a {@code TerminalOp} implementing the find operation
|
||||
*/
|
||||
public static <T> TerminalOp<T, Optional<T>> makeRef(boolean mustFindFirst) {
|
||||
return new FindOp<>(mustFindFirst, StreamShape.REFERENCE, Optional.empty(),
|
||||
Optional::isPresent, FindSink.OfRef::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} for streams of ints.
|
||||
*
|
||||
* @param mustFindFirst whether the {@code TerminalOp} must produce the
|
||||
* first element in the encounter order
|
||||
* @return a {@code TerminalOp} implementing the find operation
|
||||
*/
|
||||
public static TerminalOp<Integer, OptionalInt> makeInt(boolean mustFindFirst) {
|
||||
return new FindOp<>(mustFindFirst, StreamShape.INT_VALUE, OptionalInt.empty(),
|
||||
OptionalInt::isPresent, FindSink.OfInt::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} for streams of longs.
|
||||
*
|
||||
* @param mustFindFirst whether the {@code TerminalOp} must produce the
|
||||
* first element in the encounter order
|
||||
* @return a {@code TerminalOp} implementing the find operation
|
||||
*/
|
||||
public static TerminalOp<Long, OptionalLong> makeLong(boolean mustFindFirst) {
|
||||
return new FindOp<>(mustFindFirst, StreamShape.LONG_VALUE, OptionalLong.empty(),
|
||||
OptionalLong::isPresent, FindSink.OfLong::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code FindOp} for streams of doubles.
|
||||
*
|
||||
* @param mustFindFirst whether the {@code TerminalOp} must produce the
|
||||
* first element in the encounter order
|
||||
* @return a {@code TerminalOp} implementing the find operation
|
||||
*/
|
||||
public static TerminalOp<Double, OptionalDouble> makeDouble(boolean mustFindFirst) {
|
||||
return new FindOp<>(mustFindFirst, StreamShape.DOUBLE_VALUE, OptionalDouble.empty(),
|
||||
OptionalDouble::isPresent, FindSink.OfDouble::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* A short-circuiting {@code TerminalOp} that searches for an element in a
|
||||
* stream pipeline, and terminates when it finds one. Implements both
|
||||
* find-first (find the first element in the encounter order) and find-any
|
||||
* (find any element, may not be the first in encounter order.)
|
||||
*
|
||||
* @param <T> the output type of the stream pipeline
|
||||
* @param <O> the result type of the find operation, typically an optional
|
||||
* type
|
||||
*/
|
||||
private static final class FindOp<T, O> implements TerminalOp<T, O> {
|
||||
private final StreamShape shape;
|
||||
final boolean mustFindFirst;
|
||||
final O emptyValue;
|
||||
final Predicate<O> presentPredicate;
|
||||
final Supplier<TerminalSink<T, O>> sinkSupplier;
|
||||
|
||||
/**
|
||||
* Constructs a {@code FindOp}.
|
||||
*
|
||||
* @param mustFindFirst if true, must find the first element in
|
||||
* encounter order, otherwise can find any element
|
||||
* @param shape stream shape of elements to search
|
||||
* @param emptyValue result value corresponding to "found nothing"
|
||||
* @param presentPredicate {@code Predicate} on result value
|
||||
* corresponding to "found something"
|
||||
* @param sinkSupplier supplier for a {@code TerminalSink} implementing
|
||||
* the matching functionality
|
||||
*/
|
||||
FindOp(boolean mustFindFirst,
|
||||
StreamShape shape,
|
||||
O emptyValue,
|
||||
Predicate<O> presentPredicate,
|
||||
Supplier<TerminalSink<T, O>> sinkSupplier) {
|
||||
this.mustFindFirst = mustFindFirst;
|
||||
this.shape = shape;
|
||||
this.emptyValue = emptyValue;
|
||||
this.presentPredicate = presentPredicate;
|
||||
this.sinkSupplier = sinkSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getOpFlags() {
|
||||
return StreamOpFlag.IS_SHORT_CIRCUIT | (mustFindFirst ? 0 : StreamOpFlag.NOT_ORDERED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamShape inputShape() {
|
||||
return shape;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <S> O evaluateSequential(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator) {
|
||||
O result = helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).get();
|
||||
return result != null ? result : emptyValue;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> O evaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
return new FindTask<>(this, helper, spliterator).invoke();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Implementation of {@code TerminalSink} that implements the find
|
||||
* functionality, requesting cancellation when something has been found.
|
||||
*
|
||||
* @param <T> The type of input element
|
||||
* @param <O> The result type, typically an optional type
|
||||
*/
|
||||
private static abstract class FindSink<T, O> implements TerminalSink<T, O> {
|
||||
boolean hasValue;
|
||||
T value;
|
||||
|
||||
FindSink() {} // Avoid creation of special accessor
|
||||
|
||||
@Override
|
||||
public void accept(T value) {
|
||||
if (!hasValue) {
|
||||
hasValue = true;
|
||||
this.value = value;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return hasValue;
|
||||
}
|
||||
|
||||
/** Specialization of {@code FindSink} for reference streams */
|
||||
static final class OfRef<T> extends FindSink<T, Optional<T>> {
|
||||
@Override
|
||||
public Optional<T> get() {
|
||||
return hasValue ? Optional.of(value) : null;
|
||||
}
|
||||
}
|
||||
|
||||
/** Specialization of {@code FindSink} for int streams */
|
||||
static final class OfInt extends FindSink<Integer, OptionalInt>
|
||||
implements Sink.OfInt {
|
||||
@Override
|
||||
public void accept(int value) {
|
||||
// Boxing is OK here, since few values will actually flow into the sink
|
||||
accept((Integer) value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptionalInt get() {
|
||||
return hasValue ? OptionalInt.of(value) : null;
|
||||
}
|
||||
}
|
||||
|
||||
/** Specialization of {@code FindSink} for long streams */
|
||||
static final class OfLong extends FindSink<Long, OptionalLong>
|
||||
implements Sink.OfLong {
|
||||
@Override
|
||||
public void accept(long value) {
|
||||
// Boxing is OK here, since few values will actually flow into the sink
|
||||
accept((Long) value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptionalLong get() {
|
||||
return hasValue ? OptionalLong.of(value) : null;
|
||||
}
|
||||
}
|
||||
|
||||
/** Specialization of {@code FindSink} for double streams */
|
||||
static final class OfDouble extends FindSink<Double, OptionalDouble>
|
||||
implements Sink.OfDouble {
|
||||
@Override
|
||||
public void accept(double value) {
|
||||
// Boxing is OK here, since few values will actually flow into the sink
|
||||
accept((Double) value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptionalDouble get() {
|
||||
return hasValue ? OptionalDouble.of(value) : null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@code ForkJoinTask} implementing parallel short-circuiting search
|
||||
* @param <P_IN> Input element type to the stream pipeline
|
||||
* @param <P_OUT> Output element type from the stream pipeline
|
||||
* @param <O> Result type from the find operation
|
||||
*/
|
||||
@SuppressWarnings("serial")
|
||||
private static final class FindTask<P_IN, P_OUT, O>
|
||||
extends AbstractShortCircuitTask<P_IN, P_OUT, O, FindTask<P_IN, P_OUT, O>> {
|
||||
private final FindOp<P_OUT, O> op;
|
||||
|
||||
FindTask(FindOp<P_OUT, O> op,
|
||||
PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(helper, spliterator);
|
||||
this.op = op;
|
||||
}
|
||||
|
||||
FindTask(FindTask<P_IN, P_OUT, O> parent, Spliterator<P_IN> spliterator) {
|
||||
super(parent, spliterator);
|
||||
this.op = parent.op;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected FindTask<P_IN, P_OUT, O> makeChild(Spliterator<P_IN> spliterator) {
|
||||
return new FindTask<>(this, spliterator);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected O getEmptyResult() {
|
||||
return op.emptyValue;
|
||||
}
|
||||
|
||||
private void foundResult(O answer) {
|
||||
if (isLeftmostNode())
|
||||
shortCircuit(answer);
|
||||
else
|
||||
cancelLaterNodes();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected O doLeaf() {
|
||||
O result = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).get();
|
||||
if (!op.mustFindFirst) {
|
||||
if (result != null)
|
||||
shortCircuit(result);
|
||||
return null;
|
||||
}
|
||||
else {
|
||||
if (result != null) {
|
||||
foundResult(result);
|
||||
return result;
|
||||
}
|
||||
else
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onCompletion(CountedCompleter<?> caller) {
|
||||
if (op.mustFindFirst) {
|
||||
for (FindTask<P_IN, P_OUT, O> child = leftChild, p = null; child != p;
|
||||
p = child, child = rightChild) {
|
||||
O result = child.getLocalResult();
|
||||
if (result != null && op.presentPredicate.test(result)) {
|
||||
setLocalResult(result);
|
||||
foundResult(result);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
super.onCompletion(caller);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
507
jdkSrc/jdk8/java/util/stream/ForEachOps.java
Normal file
507
jdkSrc/jdk8/java/util/stream/ForEachOps.java
Normal file
@@ -0,0 +1,507 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CountedCompleter;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.LongConsumer;
|
||||
|
||||
/**
|
||||
* Factory for creating instances of {@code TerminalOp} that perform an
|
||||
* action for every element of a stream. Supported variants include unordered
|
||||
* traversal (elements are provided to the {@code Consumer} as soon as they are
|
||||
* available), and ordered traversal (elements are provided to the
|
||||
* {@code Consumer} in encounter order.)
|
||||
*
|
||||
* <p>Elements are provided to the {@code Consumer} on whatever thread and
|
||||
* in whatever order they become available. For ordered traversals, it is
|
||||
* guaranteed that processing an element <em>happens-before</em> processing
|
||||
* subsequent elements in the encounter order.
|
||||
*
|
||||
* <p>Exceptions occurring as a result of sending an element to the
|
||||
* {@code Consumer} will be relayed to the caller and traversal will be
|
||||
* prematurely terminated.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class ForEachOps {
|
||||
|
||||
private ForEachOps() { }
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that performs an action for every element
|
||||
* of a stream.
|
||||
*
|
||||
* @param action the {@code Consumer} that receives all elements of a
|
||||
* stream
|
||||
* @param ordered whether an ordered traversal is requested
|
||||
* @param <T> the type of the stream elements
|
||||
* @return the {@code TerminalOp} instance
|
||||
*/
|
||||
public static <T> TerminalOp<T, Void> makeRef(Consumer<? super T> action,
|
||||
boolean ordered) {
|
||||
Objects.requireNonNull(action);
|
||||
return new ForEachOp.OfRef<>(action, ordered);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that performs an action for every element
|
||||
* of an {@code IntStream}.
|
||||
*
|
||||
* @param action the {@code IntConsumer} that receives all elements of a
|
||||
* stream
|
||||
* @param ordered whether an ordered traversal is requested
|
||||
* @return the {@code TerminalOp} instance
|
||||
*/
|
||||
public static TerminalOp<Integer, Void> makeInt(IntConsumer action,
|
||||
boolean ordered) {
|
||||
Objects.requireNonNull(action);
|
||||
return new ForEachOp.OfInt(action, ordered);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that performs an action for every element
|
||||
* of a {@code LongStream}.
|
||||
*
|
||||
* @param action the {@code LongConsumer} that receives all elements of a
|
||||
* stream
|
||||
* @param ordered whether an ordered traversal is requested
|
||||
* @return the {@code TerminalOp} instance
|
||||
*/
|
||||
public static TerminalOp<Long, Void> makeLong(LongConsumer action,
|
||||
boolean ordered) {
|
||||
Objects.requireNonNull(action);
|
||||
return new ForEachOp.OfLong(action, ordered);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that performs an action for every element
|
||||
* of a {@code DoubleStream}.
|
||||
*
|
||||
* @param action the {@code DoubleConsumer} that receives all elements of
|
||||
* a stream
|
||||
* @param ordered whether an ordered traversal is requested
|
||||
* @return the {@code TerminalOp} instance
|
||||
*/
|
||||
public static TerminalOp<Double, Void> makeDouble(DoubleConsumer action,
|
||||
boolean ordered) {
|
||||
Objects.requireNonNull(action);
|
||||
return new ForEachOp.OfDouble(action, ordered);
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@code TerminalOp} that evaluates a stream pipeline and sends the
|
||||
* output to itself as a {@code TerminalSink}. Elements will be sent in
|
||||
* whatever thread they become available. If the traversal is unordered,
|
||||
* they will be sent independent of the stream's encounter order.
|
||||
*
|
||||
* <p>This terminal operation is stateless. For parallel evaluation, each
|
||||
* leaf instance of a {@code ForEachTask} will send elements to the same
|
||||
* {@code TerminalSink} reference that is an instance of this class.
|
||||
*
|
||||
* @param <T> the output type of the stream pipeline
|
||||
*/
|
||||
static abstract class ForEachOp<T>
|
||||
implements TerminalOp<T, Void>, TerminalSink<T, Void> {
|
||||
private final boolean ordered;
|
||||
|
||||
protected ForEachOp(boolean ordered) {
|
||||
this.ordered = ordered;
|
||||
}
|
||||
|
||||
// TerminalOp
|
||||
|
||||
@Override
|
||||
public int getOpFlags() {
|
||||
return ordered ? 0 : StreamOpFlag.NOT_ORDERED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <S> Void evaluateSequential(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator) {
|
||||
return helper.wrapAndCopyInto(this, spliterator).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <S> Void evaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator) {
|
||||
if (ordered)
|
||||
new ForEachOrderedTask<>(helper, spliterator, this).invoke();
|
||||
else
|
||||
new ForEachTask<>(helper, spliterator, helper.wrapSink(this)).invoke();
|
||||
return null;
|
||||
}
|
||||
|
||||
// TerminalSink
|
||||
|
||||
@Override
|
||||
public Void get() {
|
||||
return null;
|
||||
}
|
||||
|
||||
// Implementations
|
||||
|
||||
/** Implementation class for reference streams */
|
||||
static final class OfRef<T> extends ForEachOp<T> {
|
||||
final Consumer<? super T> consumer;
|
||||
|
||||
OfRef(Consumer<? super T> consumer, boolean ordered) {
|
||||
super(ordered);
|
||||
this.consumer = consumer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
consumer.accept(t);
|
||||
}
|
||||
}
|
||||
|
||||
/** Implementation class for {@code IntStream} */
|
||||
static final class OfInt extends ForEachOp<Integer>
|
||||
implements Sink.OfInt {
|
||||
final IntConsumer consumer;
|
||||
|
||||
OfInt(IntConsumer consumer, boolean ordered) {
|
||||
super(ordered);
|
||||
this.consumer = consumer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamShape inputShape() {
|
||||
return StreamShape.INT_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
consumer.accept(t);
|
||||
}
|
||||
}
|
||||
|
||||
/** Implementation class for {@code LongStream} */
|
||||
static final class OfLong extends ForEachOp<Long>
|
||||
implements Sink.OfLong {
|
||||
final LongConsumer consumer;
|
||||
|
||||
OfLong(LongConsumer consumer, boolean ordered) {
|
||||
super(ordered);
|
||||
this.consumer = consumer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamShape inputShape() {
|
||||
return StreamShape.LONG_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
consumer.accept(t);
|
||||
}
|
||||
}
|
||||
|
||||
/** Implementation class for {@code DoubleStream} */
|
||||
static final class OfDouble extends ForEachOp<Double>
|
||||
implements Sink.OfDouble {
|
||||
final DoubleConsumer consumer;
|
||||
|
||||
OfDouble(DoubleConsumer consumer, boolean ordered) {
|
||||
super(ordered);
|
||||
this.consumer = consumer;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamShape inputShape() {
|
||||
return StreamShape.DOUBLE_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
consumer.accept(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** A {@code ForkJoinTask} for performing a parallel for-each operation */
|
||||
@SuppressWarnings("serial")
|
||||
static final class ForEachTask<S, T> extends CountedCompleter<Void> {
|
||||
private Spliterator<S> spliterator;
|
||||
private final Sink<S> sink;
|
||||
private final PipelineHelper<T> helper;
|
||||
private long targetSize;
|
||||
|
||||
ForEachTask(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator,
|
||||
Sink<S> sink) {
|
||||
super(null);
|
||||
this.sink = sink;
|
||||
this.helper = helper;
|
||||
this.spliterator = spliterator;
|
||||
this.targetSize = 0L;
|
||||
}
|
||||
|
||||
ForEachTask(ForEachTask<S, T> parent, Spliterator<S> spliterator) {
|
||||
super(parent);
|
||||
this.spliterator = spliterator;
|
||||
this.sink = parent.sink;
|
||||
this.targetSize = parent.targetSize;
|
||||
this.helper = parent.helper;
|
||||
}
|
||||
|
||||
// Similar to AbstractTask but doesn't need to track child tasks
|
||||
public void compute() {
|
||||
Spliterator<S> rightSplit = spliterator, leftSplit;
|
||||
long sizeEstimate = rightSplit.estimateSize(), sizeThreshold;
|
||||
if ((sizeThreshold = targetSize) == 0L)
|
||||
targetSize = sizeThreshold = AbstractTask.suggestTargetSize(sizeEstimate);
|
||||
boolean isShortCircuit = StreamOpFlag.SHORT_CIRCUIT.isKnown(helper.getStreamAndOpFlags());
|
||||
boolean forkRight = false;
|
||||
Sink<S> taskSink = sink;
|
||||
ForEachTask<S, T> task = this;
|
||||
while (!isShortCircuit || !taskSink.cancellationRequested()) {
|
||||
if (sizeEstimate <= sizeThreshold ||
|
||||
(leftSplit = rightSplit.trySplit()) == null) {
|
||||
task.helper.copyInto(taskSink, rightSplit);
|
||||
break;
|
||||
}
|
||||
ForEachTask<S, T> leftTask = new ForEachTask<>(task, leftSplit);
|
||||
task.addToPendingCount(1);
|
||||
ForEachTask<S, T> taskToFork;
|
||||
if (forkRight) {
|
||||
forkRight = false;
|
||||
rightSplit = leftSplit;
|
||||
taskToFork = task;
|
||||
task = leftTask;
|
||||
}
|
||||
else {
|
||||
forkRight = true;
|
||||
taskToFork = leftTask;
|
||||
}
|
||||
taskToFork.fork();
|
||||
sizeEstimate = rightSplit.estimateSize();
|
||||
}
|
||||
task.spliterator = null;
|
||||
task.propagateCompletion();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@code ForkJoinTask} for performing a parallel for-each operation
|
||||
* which visits the elements in encounter order
|
||||
*/
|
||||
@SuppressWarnings("serial")
|
||||
static final class ForEachOrderedTask<S, T> extends CountedCompleter<Void> {
|
||||
/*
|
||||
* Our goal is to ensure that the elements associated with a task are
|
||||
* processed according to an in-order traversal of the computation tree.
|
||||
* We use completion counts for representing these dependencies, so that
|
||||
* a task does not complete until all the tasks preceding it in this
|
||||
* order complete. We use the "completion map" to associate the next
|
||||
* task in this order for any left child. We increase the pending count
|
||||
* of any node on the right side of such a mapping by one to indicate
|
||||
* its dependency, and when a node on the left side of such a mapping
|
||||
* completes, it decrements the pending count of its corresponding right
|
||||
* side. As the computation tree is expanded by splitting, we must
|
||||
* atomically update the mappings to maintain the invariant that the
|
||||
* completion map maps left children to the next node in the in-order
|
||||
* traversal.
|
||||
*
|
||||
* Take, for example, the following computation tree of tasks:
|
||||
*
|
||||
* a
|
||||
* / \
|
||||
* b c
|
||||
* / \ / \
|
||||
* d e f g
|
||||
*
|
||||
* The completion map will contain (not necessarily all at the same time)
|
||||
* the following associations:
|
||||
*
|
||||
* d -> e
|
||||
* b -> f
|
||||
* f -> g
|
||||
*
|
||||
* Tasks e, f, g will have their pending counts increased by 1.
|
||||
*
|
||||
* The following relationships hold:
|
||||
*
|
||||
* - completion of d "happens-before" e;
|
||||
* - completion of d and e "happens-before" b;
|
||||
* - completion of b "happens-before" f; and
|
||||
* - completion of f "happens-before" g
|
||||
*
|
||||
* Thus overall the "happens-before" relationship holds for the
|
||||
* reporting of elements, covered by tasks d, e, f and g, as specified
|
||||
* by the forEachOrdered operation.
|
||||
*/
|
||||
|
||||
private final PipelineHelper<T> helper;
|
||||
private Spliterator<S> spliterator;
|
||||
private final long targetSize;
|
||||
private final ConcurrentHashMap<ForEachOrderedTask<S, T>, ForEachOrderedTask<S, T>> completionMap;
|
||||
private final Sink<T> action;
|
||||
private final ForEachOrderedTask<S, T> leftPredecessor;
|
||||
private Node<T> node;
|
||||
|
||||
protected ForEachOrderedTask(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator,
|
||||
Sink<T> action) {
|
||||
super(null);
|
||||
this.helper = helper;
|
||||
this.spliterator = spliterator;
|
||||
this.targetSize = AbstractTask.suggestTargetSize(spliterator.estimateSize());
|
||||
// Size map to avoid concurrent re-sizes
|
||||
this.completionMap = new ConcurrentHashMap<>(Math.max(16, AbstractTask.getLeafTarget() << 1));
|
||||
this.action = action;
|
||||
this.leftPredecessor = null;
|
||||
}
|
||||
|
||||
ForEachOrderedTask(ForEachOrderedTask<S, T> parent,
|
||||
Spliterator<S> spliterator,
|
||||
ForEachOrderedTask<S, T> leftPredecessor) {
|
||||
super(parent);
|
||||
this.helper = parent.helper;
|
||||
this.spliterator = spliterator;
|
||||
this.targetSize = parent.targetSize;
|
||||
this.completionMap = parent.completionMap;
|
||||
this.action = parent.action;
|
||||
this.leftPredecessor = leftPredecessor;
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void compute() {
|
||||
doCompute(this);
|
||||
}
|
||||
|
||||
private static <S, T> void doCompute(ForEachOrderedTask<S, T> task) {
|
||||
Spliterator<S> rightSplit = task.spliterator, leftSplit;
|
||||
long sizeThreshold = task.targetSize;
|
||||
boolean forkRight = false;
|
||||
while (rightSplit.estimateSize() > sizeThreshold &&
|
||||
(leftSplit = rightSplit.trySplit()) != null) {
|
||||
ForEachOrderedTask<S, T> leftChild =
|
||||
new ForEachOrderedTask<>(task, leftSplit, task.leftPredecessor);
|
||||
ForEachOrderedTask<S, T> rightChild =
|
||||
new ForEachOrderedTask<>(task, rightSplit, leftChild);
|
||||
|
||||
// Fork the parent task
|
||||
// Completion of the left and right children "happens-before"
|
||||
// completion of the parent
|
||||
task.addToPendingCount(1);
|
||||
// Completion of the left child "happens-before" completion of
|
||||
// the right child
|
||||
rightChild.addToPendingCount(1);
|
||||
task.completionMap.put(leftChild, rightChild);
|
||||
|
||||
// If task is not on the left spine
|
||||
if (task.leftPredecessor != null) {
|
||||
/*
|
||||
* Completion of left-predecessor, or left subtree,
|
||||
* "happens-before" completion of left-most leaf node of
|
||||
* right subtree.
|
||||
* The left child's pending count needs to be updated before
|
||||
* it is associated in the completion map, otherwise the
|
||||
* left child can complete prematurely and violate the
|
||||
* "happens-before" constraint.
|
||||
*/
|
||||
leftChild.addToPendingCount(1);
|
||||
// Update association of left-predecessor to left-most
|
||||
// leaf node of right subtree
|
||||
if (task.completionMap.replace(task.leftPredecessor, task, leftChild)) {
|
||||
// If replaced, adjust the pending count of the parent
|
||||
// to complete when its children complete
|
||||
task.addToPendingCount(-1);
|
||||
} else {
|
||||
// Left-predecessor has already completed, parent's
|
||||
// pending count is adjusted by left-predecessor;
|
||||
// left child is ready to complete
|
||||
leftChild.addToPendingCount(-1);
|
||||
}
|
||||
}
|
||||
|
||||
ForEachOrderedTask<S, T> taskToFork;
|
||||
if (forkRight) {
|
||||
forkRight = false;
|
||||
rightSplit = leftSplit;
|
||||
task = leftChild;
|
||||
taskToFork = rightChild;
|
||||
}
|
||||
else {
|
||||
forkRight = true;
|
||||
task = rightChild;
|
||||
taskToFork = leftChild;
|
||||
}
|
||||
taskToFork.fork();
|
||||
}
|
||||
|
||||
/*
|
||||
* Task's pending count is either 0 or 1. If 1 then the completion
|
||||
* map will contain a value that is task, and two calls to
|
||||
* tryComplete are required for completion, one below and one
|
||||
* triggered by the completion of task's left-predecessor in
|
||||
* onCompletion. Therefore there is no data race within the if
|
||||
* block.
|
||||
*/
|
||||
if (task.getPendingCount() > 0) {
|
||||
// Cannot complete just yet so buffer elements into a Node
|
||||
// for use when completion occurs
|
||||
@SuppressWarnings("unchecked")
|
||||
IntFunction<T[]> generator = size -> (T[]) new Object[size];
|
||||
Node.Builder<T> nb = task.helper.makeNodeBuilder(
|
||||
task.helper.exactOutputSizeIfKnown(rightSplit),
|
||||
generator);
|
||||
task.node = task.helper.wrapAndCopyInto(nb, rightSplit).build();
|
||||
task.spliterator = null;
|
||||
}
|
||||
task.tryComplete();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onCompletion(CountedCompleter<?> caller) {
|
||||
if (node != null) {
|
||||
// Dump buffered elements from this leaf into the sink
|
||||
node.forEach(action);
|
||||
node = null;
|
||||
}
|
||||
else if (spliterator != null) {
|
||||
// Dump elements output from this leaf's pipeline into the sink
|
||||
helper.wrapAndCopyInto(action, spliterator);
|
||||
spliterator = null;
|
||||
}
|
||||
|
||||
// The completion of this task *and* the dumping of elements
|
||||
// "happens-before" completion of the associated left-most leaf task
|
||||
// of right subtree (if any, which can be this task's right sibling)
|
||||
//
|
||||
ForEachOrderedTask<S, T> leftDescendant = completionMap.remove(this);
|
||||
if (leftDescendant != null)
|
||||
leftDescendant.tryComplete();
|
||||
}
|
||||
}
|
||||
}
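As a usage-level illustration of the ordered and unordered for-each machinery above, here is a minimal sketch. The demo class is not part of the JDK sources and its name is arbitrary; only the IntStream calls are real API.

import java.util.stream.IntStream;

public class ForEachOrderDemo {
    public static void main(String[] args) {
        // forEach on a parallel stream may invoke the action in any order
        // and on any worker thread (driven by ForEachTask above).
        IntStream.rangeClosed(1, 8).parallel()
                 .forEach(i -> System.out.print(i + " "));
        System.out.println();

        // forEachOrdered routes through ForEachOrderedTask above: leaves
        // buffer their output until all predecessors in encounter order
        // have completed, so this always prints 1 2 3 4 5 6 7 8.
        IntStream.rangeClosed(1, 8).parallel()
                 .forEachOrdered(i -> System.out.print(i + " "));
        System.out.println();
    }
}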
|
||||
657
jdkSrc/jdk8/java/util/stream/IntPipeline.java
Normal file
@@ -0,0 +1,657 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.IntSummaryStatistics;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.OptionalInt;
|
||||
import java.util.PrimitiveIterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BinaryOperator;
|
||||
import java.util.function.IntBinaryOperator;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.IntPredicate;
|
||||
import java.util.function.IntToDoubleFunction;
|
||||
import java.util.function.IntToLongFunction;
|
||||
import java.util.function.IntUnaryOperator;
|
||||
import java.util.function.ObjIntConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Abstract base class for an intermediate pipeline stage or pipeline source
|
||||
* stage whose elements are of type {@code int}.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract class IntPipeline<E_IN>
|
||||
extends AbstractPipeline<E_IN, Integer, IntStream>
|
||||
implements IntStream {
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream source
|
||||
* @param sourceFlags The source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
IntPipeline(Supplier<? extends Spliterator<Integer>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags The source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
IntPipeline(Spliterator<Integer> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for appending an intermediate operation onto an existing
|
||||
* pipeline.
|
||||
*
|
||||
* @param upstream the upstream element source
|
||||
* @param opFlags the operation flags for the new operation
|
||||
*/
|
||||
IntPipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapt a {@code Sink<Integer>} to an {@code IntConsumer}, ideally simply
|
||||
* by casting.
|
||||
*/
|
||||
private static IntConsumer adapt(Sink<Integer> sink) {
|
||||
if (sink instanceof IntConsumer) {
|
||||
return (IntConsumer) sink;
|
||||
}
|
||||
else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(AbstractPipeline.class,
|
||||
"using IntStream.adapt(Sink<Integer> s)");
|
||||
return sink::accept;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapt a {@code Spliterator<Integer>} to a {@code Spliterator.OfInt}.
|
||||
*
|
||||
* @implNote
|
||||
* The implementation attempts to cast to a Spliterator.OfInt, and throws an
|
||||
* exception if this cast is not possible.
|
||||
*/
|
||||
private static Spliterator.OfInt adapt(Spliterator<Integer> s) {
|
||||
if (s instanceof Spliterator.OfInt) {
|
||||
return (Spliterator.OfInt) s;
|
||||
}
|
||||
else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(AbstractPipeline.class,
|
||||
"using IntStream.adapt(Spliterator<Integer> s)");
|
||||
throw new UnsupportedOperationException("IntStream.adapt(Spliterator<Integer> s)");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Shape-specific methods
|
||||
|
||||
@Override
|
||||
final StreamShape getOutputShape() {
|
||||
return StreamShape.INT_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Node<Integer> evaluateToNode(PipelineHelper<Integer> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
boolean flattenTree,
|
||||
IntFunction<Integer[]> generator) {
|
||||
return Nodes.collectInt(helper, spliterator, flattenTree);
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Spliterator<Integer> wrap(PipelineHelper<Integer> ph,
|
||||
Supplier<Spliterator<P_IN>> supplier,
|
||||
boolean isParallel) {
|
||||
return new StreamSpliterators.IntWrappingSpliterator<>(ph, supplier, isParallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final Spliterator.OfInt lazySpliterator(Supplier<? extends Spliterator<Integer>> supplier) {
|
||||
return new StreamSpliterators.DelegatingSpliterator.OfInt((Supplier<Spliterator.OfInt>) supplier);
|
||||
}
|
||||
|
||||
@Override
|
||||
final void forEachWithCancel(Spliterator<Integer> spliterator, Sink<Integer> sink) {
|
||||
Spliterator.OfInt spl = adapt(spliterator);
|
||||
IntConsumer adaptedSink = adapt(sink);
|
||||
do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
|
||||
}
|
||||
|
||||
@Override
|
||||
final Node.Builder<Integer> makeNodeBuilder(long exactSizeIfKnown,
|
||||
IntFunction<Integer[]> generator) {
|
||||
return Nodes.intBuilder(exactSizeIfKnown);
|
||||
}
|
||||
|
||||
|
||||
// IntStream
|
||||
|
||||
@Override
|
||||
public final PrimitiveIterator.OfInt iterator() {
|
||||
return Spliterators.iterator(spliterator());
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Spliterator.OfInt spliterator() {
|
||||
return adapt(super.spliterator());
|
||||
}
|
||||
|
||||
// Stateless intermediate ops from IntStream
|
||||
|
||||
@Override
|
||||
public final LongStream asLongStream() {
|
||||
return new LongPipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedInt<Long>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
downstream.accept((long) t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream asDoubleStream() {
|
||||
return new DoublePipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedInt<Double>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
downstream.accept((double) t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<Integer> boxed() {
|
||||
return mapToObj(Integer::valueOf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream map(IntUnaryOperator mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedInt<Integer>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
downstream.accept(mapper.applyAsInt(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <U> Stream<U> mapToObj(IntFunction<? extends U> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new ReferencePipeline.StatelessOp<Integer, U>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<U> sink) {
|
||||
return new Sink.ChainedInt<U>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
downstream.accept(mapper.apply(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream mapToLong(IntToLongFunction mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new LongPipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedInt<Long>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
downstream.accept(mapper.applyAsLong(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream mapToDouble(IntToDoubleFunction mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new DoublePipeline.StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedInt<Double>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
downstream.accept(mapper.applyAsDouble(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream flatMap(IntFunction<? extends IntStream> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedInt<Integer>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
// cache the consumer to avoid creation on every accepted element
|
||||
IntConsumer downstreamAsInt = downstream::accept;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
try (IntStream result = mapper.apply(t)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstreamAsInt);
|
||||
}
|
||||
else {
|
||||
Spliterator.OfInt s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstreamAsInt));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
// If this method is called then an operation within the stream
|
||||
// pipeline is short-circuiting (see AbstractPipeline.copyInto).
|
||||
// Note that we cannot differentiate between an upstream or
|
||||
// downstream operation
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public IntStream unordered() {
|
||||
if (!isOrdered())
|
||||
return this;
|
||||
return new StatelessOp<Integer>(this, StreamShape.INT_VALUE, StreamOpFlag.NOT_ORDERED) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return sink;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream filter(IntPredicate predicate) {
|
||||
Objects.requireNonNull(predicate);
|
||||
return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedInt<Integer>(sink) {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
if (predicate.test(t))
|
||||
downstream.accept(t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream peek(IntConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
return new StatelessOp<Integer>(this, StreamShape.INT_VALUE,
|
||||
0) {
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedInt<Integer>(sink) {
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
action.accept(t);
|
||||
downstream.accept(t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Stateful intermediate ops from IntStream
|
||||
|
||||
@Override
|
||||
public final IntStream limit(long maxSize) {
|
||||
if (maxSize < 0)
|
||||
throw new IllegalArgumentException(Long.toString(maxSize));
|
||||
return SliceOps.makeInt(this, 0, maxSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream skip(long n) {
|
||||
if (n < 0)
|
||||
throw new IllegalArgumentException(Long.toString(n));
|
||||
if (n == 0)
|
||||
return this;
|
||||
else
|
||||
return SliceOps.makeInt(this, n, -1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream sorted() {
|
||||
return SortedOps.makeInt(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream distinct() {
|
||||
// While functional and quick to implement, this approach is not very efficient.
|
||||
// An efficient version requires an int-specific map/set implementation.
|
||||
return boxed().distinct().mapToInt(i -> i);
|
||||
}
|
||||
|
||||
// Terminal ops from IntStream
|
||||
|
||||
@Override
|
||||
public void forEach(IntConsumer action) {
|
||||
evaluate(ForEachOps.makeInt(action, false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(IntConsumer action) {
|
||||
evaluate(ForEachOps.makeInt(action, true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int sum() {
|
||||
return reduce(0, Integer::sum);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalInt min() {
|
||||
return reduce(Math::min);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalInt max() {
|
||||
return reduce(Math::max);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long count() {
|
||||
return mapToLong(e -> 1L).sum();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalDouble average() {
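// avg[0] holds the running element count, avg[1] the running sum;
// the average is derived from the two once evaluation completes.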
|
||||
long[] avg = collect(() -> new long[2],
|
||||
(ll, i) -> {
|
||||
ll[0]++;
|
||||
ll[1] += i;
|
||||
},
|
||||
(ll, rr) -> {
|
||||
ll[0] += rr[0];
|
||||
ll[1] += rr[1];
|
||||
});
|
||||
return avg[0] > 0
|
||||
? OptionalDouble.of((double) avg[1] / avg[0])
|
||||
: OptionalDouble.empty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntSummaryStatistics summaryStatistics() {
|
||||
return collect(IntSummaryStatistics::new, IntSummaryStatistics::accept,
|
||||
IntSummaryStatistics::combine);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int reduce(int identity, IntBinaryOperator op) {
|
||||
return evaluate(ReduceOps.makeInt(identity, op));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalInt reduce(IntBinaryOperator op) {
|
||||
return evaluate(ReduceOps.makeInt(op));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <R> R collect(Supplier<R> supplier,
|
||||
ObjIntConsumer<R> accumulator,
|
||||
BiConsumer<R, R> combiner) {
|
||||
Objects.requireNonNull(combiner);
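// Adapt the BiConsumer combiner into a BinaryOperator that folds the
// right container into the left one and returns the mutated left.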
|
||||
BinaryOperator<R> operator = (left, right) -> {
|
||||
combiner.accept(left, right);
|
||||
return left;
|
||||
};
|
||||
return evaluate(ReduceOps.makeInt(supplier, accumulator, operator));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean anyMatch(IntPredicate predicate) {
|
||||
return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.ANY));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean allMatch(IntPredicate predicate) {
|
||||
return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.ALL));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean noneMatch(IntPredicate predicate) {
|
||||
return evaluate(MatchOps.makeInt(predicate, MatchOps.MatchKind.NONE));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalInt findFirst() {
|
||||
return evaluate(FindOps.makeInt(true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalInt findAny() {
|
||||
return evaluate(FindOps.makeInt(false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final int[] toArray() {
|
||||
return Nodes.flattenInt((Node.OfInt) evaluateToArrayNode(Integer[]::new))
|
||||
.asPrimitiveArray();
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/**
|
||||
* Source stage of an IntStream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
static class Head<E_IN> extends IntPipeline<E_IN> {
|
||||
/**
|
||||
* Constructor for the source stage of an IntStream.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream
|
||||
* source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
Head(Supplier<? extends Spliterator<Integer>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the source stage of an IntStream.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
Head(Spliterator<Integer> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
final Sink<E_IN> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
// Optimized sequential terminal operations for the head of the pipeline
|
||||
|
||||
@Override
|
||||
public void forEach(IntConsumer action) {
|
||||
if (!isParallel()) {
|
||||
adapt(sourceStageSpliterator()).forEachRemaining(action);
|
||||
}
|
||||
else {
|
||||
super.forEach(action);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(IntConsumer action) {
|
||||
if (!isParallel()) {
|
||||
adapt(sourceStageSpliterator()).forEachRemaining(action);
|
||||
}
|
||||
else {
|
||||
super.forEachOrdered(action);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateless intermediate stage of an IntStream
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatelessOp<E_IN> extends IntPipeline<E_IN> {
|
||||
/**
|
||||
* Construct a new IntStream by appending a stateless intermediate
|
||||
* operation to an existing stream.
|
||||
* @param upstream The upstream pipeline stage
|
||||
* @param inputShape The stream shape for the upstream pipeline stage
|
||||
* @param opFlags Operation flags for the new stage
|
||||
*/
|
||||
StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateful intermediate stage of an IntStream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatefulOp<E_IN> extends IntPipeline<E_IN> {
|
||||
/**
|
||||
* Construct a new IntStream by appending a stateful intermediate
|
||||
* operation to an existing stream.
|
||||
* @param upstream The upstream pipeline stage
|
||||
* @param inputShape The stream shape for the upstream pipeline stage
|
||||
* @param opFlags Operation flags for the new stage
|
||||
*/
|
||||
StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
abstract <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Integer[]> generator);
|
||||
}
|
||||
}
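To make the stage classes above concrete, here is a hedged usage sketch (not part of the JDK sources; the class name is arbitrary). A typical pipeline composes a Head stage, stateless intermediate stages, and a terminal reduction:

import java.util.stream.IntStream;

public class IntPipelineUsage {
    public static void main(String[] args) {
        // rangeClosed(...) creates a Head stage; filter and map each append
        // a StatelessOp; sum() evaluates the whole chain as a reduction.
        int sumOfEvenSquares = IntStream.rangeClosed(1, 10)
                                        .filter(n -> n % 2 == 0)
                                        .map(n -> n * n)
                                        .sum();
        System.out.println(sumOfEvenSquares); // 4 + 16 + 36 + 64 + 100 = 220
    }
}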
|
||||
912
jdkSrc/jdk8/java/util/stream/IntStream.java
Normal file
@@ -0,0 +1,912 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.IntSummaryStatistics;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.OptionalInt;
|
||||
import java.util.PrimitiveIterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.IntBinaryOperator;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.IntPredicate;
|
||||
import java.util.function.IntSupplier;
|
||||
import java.util.function.IntToDoubleFunction;
|
||||
import java.util.function.IntToLongFunction;
|
||||
import java.util.function.IntUnaryOperator;
|
||||
import java.util.function.ObjIntConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* A sequence of primitive int-valued elements supporting sequential and parallel
|
||||
* aggregate operations. This is the {@code int} primitive specialization of
|
||||
* {@link Stream}.
|
||||
*
|
||||
* <p>The following example illustrates an aggregate operation using
|
||||
* {@link Stream} and {@link IntStream}, computing the sum of the weights of the
|
||||
* red widgets:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int sum = widgets.stream()
|
||||
* .filter(w -> w.getColor() == RED)
|
||||
* .mapToInt(w -> w.getWeight())
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* See the class documentation for {@link Stream} and the package documentation
|
||||
* for <a href="package-summary.html">java.util.stream</a> for additional
|
||||
* specification of streams, stream operations, stream pipelines, and
|
||||
* parallelism.
|
||||
*
|
||||
* @since 1.8
|
||||
* @see Stream
|
||||
* @see <a href="package-summary.html">java.util.stream</a>
|
||||
*/
|
||||
public interface IntStream extends BaseStream<Integer, IntStream> {
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream that match
|
||||
* the given predicate.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to each element to determine if it
|
||||
* should be included
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream filter(IntPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the results of applying the given
|
||||
* function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream map(IntUnaryOperator mapper);
|
||||
|
||||
/**
|
||||
* Returns an object-valued {@code Stream} consisting of the results of
|
||||
* applying the given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @param <U> the element type of the new stream
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
<U> Stream<U> mapToObj(IntFunction<? extends U> mapper);
|
||||
|
||||
/**
|
||||
* Returns a {@code LongStream} consisting of the results of applying the
|
||||
* given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream mapToLong(IntToLongFunction mapper);
|
||||
|
||||
/**
|
||||
* Returns a {@code DoubleStream} consisting of the results of applying the
|
||||
* given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
DoubleStream mapToDouble(IntToDoubleFunction mapper);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the results of replacing each element of
|
||||
* this stream with the contents of a mapped stream produced by applying
|
||||
* the provided mapping function to each element. Each mapped stream is
|
||||
* {@link java.util.stream.BaseStream#close() closed} after its contents
|
||||
* have been placed into this stream. (If a mapped stream is {@code null}
|
||||
* an empty stream is used, instead.)
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element which produces an
|
||||
* {@code IntStream} of new values
|
||||
* @return the new stream
|
||||
* @see Stream#flatMap(Function)
|
||||
*/
|
||||
IntStream flatMap(IntFunction<? extends IntStream> mapper);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the distinct elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream distinct();
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream in sorted
|
||||
* order.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream sorted();
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream, additionally
|
||||
* performing the provided action on each element as elements are consumed
|
||||
* from the resulting stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* <p>For parallel stream pipelines, the action may be called at
|
||||
* whatever time and in whatever thread the element is made available by the
|
||||
* upstream operation. If the action modifies shared state,
|
||||
* it is responsible for providing the required synchronization.
|
||||
*
|
||||
* @apiNote This method exists mainly to support debugging, where you want
|
||||
* to see the elements as they flow past a certain point in a pipeline:
|
||||
* <pre>{@code
|
||||
* IntStream.of(1, 2, 3, 4)
|
||||
* .filter(e -> e > 2)
|
||||
* .peek(e -> System.out.println("Filtered value: " + e))
|
||||
* .map(e -> e * e)
|
||||
* .peek(e -> System.out.println("Mapped value: " + e))
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements as
|
||||
* they are consumed from the stream
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream peek(IntConsumer action);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream, truncated
|
||||
* to be no longer than {@code maxSize} in length.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* stateful intermediate operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* While {@code limit()} is generally a cheap operation on sequential
|
||||
* stream pipelines, it can be quite expensive on ordered parallel pipelines,
|
||||
* especially for large values of {@code maxSize}, since {@code limit(n)}
|
||||
* is constrained to return not just any <em>n</em> elements, but the
|
||||
* <em>first n</em> elements in the encounter order. Using an unordered
|
||||
* stream source (such as {@link #generate(IntSupplier)}) or removing the
|
||||
* ordering constraint with {@link #unordered()} may result in significant
|
||||
* speedups of {@code limit()} in parallel pipelines, if the semantics of
|
||||
* your situation permit. If consistency with encounter order is required,
|
||||
* and you are experiencing poor performance or memory utilization with
|
||||
* {@code limit()} in parallel pipelines, switching to sequential execution
|
||||
* with {@link #sequential()} may improve performance.
|
||||
*
|
||||
* @param maxSize the number of elements the stream should be limited to
|
||||
* @return the new stream
|
||||
* @throws IllegalArgumentException if {@code maxSize} is negative
|
||||
*/
|
||||
IntStream limit(long maxSize);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the remaining elements of this stream
|
||||
* after discarding the first {@code n} elements of the stream.
|
||||
* If this stream contains fewer than {@code n} elements then an
|
||||
* empty stream will be returned.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* While {@code skip()} is generally a cheap operation on sequential
|
||||
* stream pipelines, it can be quite expensive on ordered parallel pipelines,
|
||||
* especially for large values of {@code n}, since {@code skip(n)}
|
||||
* is constrained to skip not just any <em>n</em> elements, but the
|
||||
* <em>first n</em> elements in the encounter order. Using an unordered
|
||||
* stream source (such as {@link #generate(IntSupplier)}) or removing the
|
||||
* ordering constraint with {@link #unordered()} may result in significant
|
||||
* speedups of {@code skip()} in parallel pipelines, if the semantics of
|
||||
* your situation permit. If consistency with encounter order is required,
|
||||
* and you are experiencing poor performance or memory utilization with
|
||||
* {@code skip()} in parallel pipelines, switching to sequential execution
|
||||
* with {@link #sequential()} may improve performance.
|
||||
*
|
||||
* @param n the number of leading elements to skip
|
||||
* @return the new stream
|
||||
* @throws IllegalArgumentException if {@code n} is negative
|
||||
*/
|
||||
IntStream skip(long n);
|
||||
|
||||
/**
|
||||
* Performs an action for each element of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* <p>For parallel stream pipelines, this operation does <em>not</em>
|
||||
* guarantee to respect the encounter order of the stream, as doing so
|
||||
* would sacrifice the benefit of parallelism. For any given element, the
|
||||
* action may be performed at whatever time and in whatever thread the
|
||||
* library chooses. If the action accesses shared state, it is
|
||||
* responsible for providing the required synchronization.
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements
|
||||
*/
|
||||
void forEach(IntConsumer action);
|
||||
|
||||
/**
|
||||
* Performs an action for each element of this stream, guaranteeing that
|
||||
* each element is processed in encounter order for streams that have a
|
||||
* defined encounter order.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements
|
||||
* @see #forEach(IntConsumer)
|
||||
*/
|
||||
void forEachOrdered(IntConsumer action);
|
||||
|
||||
/**
|
||||
* Returns an array containing the elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an array containing the elements of this stream
|
||||
*/
|
||||
int[] toArray();
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#Reduction">reduction</a> on the
|
||||
* elements of this stream, using the provided identity value and an
|
||||
* <a href="package-summary.html#Associativity">associative</a>
|
||||
* accumulation function, and returns the reduced value. This is equivalent
|
||||
* to:
|
||||
* <pre>{@code
|
||||
* int result = identity;
|
||||
* for (int element : this stream)
|
||||
* result = accumulator.applyAsInt(result, element)
|
||||
* return result;
|
||||
* }</pre>
|
||||
*
|
||||
* but is not constrained to execute sequentially.
|
||||
*
|
||||
* <p>The {@code identity} value must be an identity for the accumulator
|
||||
* function. This means that for all {@code x},
|
||||
* {@code accumulator.apply(identity, x)} is equal to {@code x}.
|
||||
* The {@code accumulator} function must be an
|
||||
* <a href="package-summary.html#Associativity">associative</a> function.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @apiNote Sum, min, max, and average are all special cases of reduction.
|
||||
* Summing a stream of numbers can be expressed as:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int sum = integers.reduce(0, (a, b) -> a+b);
|
||||
* }</pre>
|
||||
*
|
||||
* or more compactly:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int sum = integers.reduce(0, Integer::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>While this may seem a more roundabout way to perform an aggregation
|
||||
* compared to simply mutating a running total in a loop, reduction
|
||||
* operations parallelize more gracefully, without needing additional
|
||||
* synchronization and with greatly reduced risk of data races.
|
||||
*
|
||||
* @param identity the identity value for the accumulating function
|
||||
* @param op an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values
|
||||
* @return the result of the reduction
|
||||
* @see #sum()
|
||||
* @see #min()
|
||||
* @see #max()
|
||||
* @see #average()
|
||||
*/
|
||||
int reduce(int identity, IntBinaryOperator op);
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#Reduction">reduction</a> on the
|
||||
* elements of this stream, using an
|
||||
* <a href="package-summary.html#Associativity">associative</a> accumulation
|
||||
* function, and returns an {@code OptionalInt} describing the reduced value,
|
||||
* if any. This is equivalent to:
|
||||
* <pre>{@code
|
||||
* boolean foundAny = false;
|
||||
* int result = null;
|
||||
* for (int element : this stream) {
|
||||
* if (!foundAny) {
|
||||
* foundAny = true;
|
||||
* result = element;
|
||||
* }
|
||||
* else
|
||||
* result = accumulator.applyAsInt(result, element);
|
||||
* }
|
||||
* return foundAny ? OptionalInt.of(result) : OptionalInt.empty();
|
||||
* }</pre>
|
||||
*
|
||||
* but is not constrained to execute sequentially.
|
||||
*
|
||||
* <p>The {@code accumulator} function must be an
|
||||
* <a href="package-summary.html#Associativity">associative</a> function.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param op an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values
|
||||
* @return the result of the reduction
|
||||
* @see #reduce(int, IntBinaryOperator)
|
||||
*/
|
||||
OptionalInt reduce(IntBinaryOperator op);
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#MutableReduction">mutable
|
||||
* reduction</a> operation on the elements of this stream. A mutable
|
||||
* reduction is one in which the reduced value is a mutable result container,
|
||||
* such as an {@code ArrayList}, and elements are incorporated by updating
|
||||
* the state of the result rather than by replacing the result. This
|
||||
* produces a result equivalent to:
|
||||
* <pre>{@code
|
||||
* R result = supplier.get();
|
||||
* for (int element : this stream)
|
||||
* accumulator.accept(result, element);
|
||||
* return result;
|
||||
* }</pre>
|
||||
*
|
||||
* <p>Like {@link #reduce(int, IntBinaryOperator)}, {@code collect} operations
|
||||
* can be parallelized without requiring additional synchronization.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param <R> type of the result
|
||||
* @param supplier a function that creates a new result container. For a
|
||||
* parallel execution, this function may be called
|
||||
* multiple times and must return a fresh value each time.
|
||||
* @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for incorporating an additional element into a result
|
||||
* @param combiner an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values, which must be
|
||||
* compatible with the accumulator function
|
||||
* @return the result of the reduction
|
||||
* @see Stream#collect(Supplier, BiConsumer, BiConsumer)
|
||||
*/
|
||||
<R> R collect(Supplier<R> supplier,
|
||||
ObjIntConsumer<R> accumulator,
|
||||
BiConsumer<R, R> combiner);
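/*
 * Illustrative usage of this three-argument collect (a sketch only, not
 * part of this interface; the variable names are arbitrary):
 *
 *     ArrayList<Integer> boxed = IntStream.range(0, 5)
 *         .collect(ArrayList::new,            // supplier: new container
 *                  (list, i) -> list.add(i),  // accumulator: fold in an int
 *                  ArrayList::addAll);        // combiner: merge partial results
 *     // boxed now contains [0, 1, 2, 3, 4]
 */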
|
||||
|
||||
/**
|
||||
* Returns the sum of elements in this stream. This is a special case
|
||||
* of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* and is equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(0, Integer::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return the sum of elements in this stream
|
||||
*/
|
||||
int sum();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalInt} describing the minimum element of this
|
||||
* stream, or an empty optional if this stream is empty. This is a special
|
||||
* case of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* and is equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(Integer::min);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalInt} containing the minimum element of this
|
||||
* stream, or an empty {@code OptionalInt} if the stream is empty
|
||||
*/
|
||||
OptionalInt min();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalInt} describing the maximum element of this
|
||||
* stream, or an empty optional if this stream is empty. This is a special
|
||||
* case of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* and is equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(Integer::max);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalInt} containing the maximum element of this
|
||||
* stream, or an empty {@code OptionalInt} if the stream is empty
|
||||
*/
|
||||
OptionalInt max();
|
||||
|
||||
/**
|
||||
* Returns the count of elements in this stream. This is a special case of
|
||||
* a <a href="package-summary.html#Reduction">reduction</a> and is
|
||||
* equivalent to:
|
||||
* <pre>{@code
|
||||
* return mapToLong(e -> 1L).sum();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
|
||||
*
|
||||
* @return the count of elements in this stream
|
||||
*/
|
||||
long count();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalDouble} describing the arithmetic mean of elements of
|
||||
* this stream, or an empty optional if this stream is empty. This is a
|
||||
* special case of a
|
||||
* <a href="package-summary.html#Reduction">reduction</a>.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalDouble} containing the average element of this
|
||||
* stream, or an empty optional if the stream is empty
|
||||
*/
|
||||
OptionalDouble average();
|
||||
|
||||
/**
|
||||
* Returns an {@code IntSummaryStatistics} describing various
|
||||
* summary data about the elements of this stream. This is a special
|
||||
* case of a <a href="package-summary.html#Reduction">reduction</a>.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code IntSummaryStatistics} describing various summary data
|
||||
* about the elements of this stream
|
||||
*/
|
||||
IntSummaryStatistics summaryStatistics();
|
||||
|
||||
/**
|
||||
* Returns whether any elements of this stream match the provided
|
||||
* predicate. May not evaluate the predicate on all elements if not
|
||||
* necessary for determining the result. If the stream is empty then
|
||||
* {@code false} is returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>existential quantification</em> of the
|
||||
* predicate over the elements of the stream (for some x P(x)).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if any elements of the stream match the provided
|
||||
* predicate, otherwise {@code false}
|
||||
*/
|
||||
boolean anyMatch(IntPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns whether all elements of this stream match the provided predicate.
|
||||
* May not evaluate the predicate on all elements if not necessary for
|
||||
* determining the result. If the stream is empty then {@code true} is
|
||||
* returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>universal quantification</em> of the
|
||||
* predicate over the elements of the stream (for all x P(x)). If the
|
||||
* stream is empty, the quantification is said to be <em>vacuously
|
||||
* satisfied</em> and is always {@code true} (regardless of P(x)).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if either all elements of the stream match the
|
||||
* provided predicate or the stream is empty, otherwise {@code false}
|
||||
*/
|
||||
boolean allMatch(IntPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns whether no elements of this stream match the provided predicate.
|
||||
* May not evaluate the predicate on all elements if not necessary for
|
||||
* determining the result. If the stream is empty then {@code true} is
|
||||
* returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>universal quantification</em> of the
|
||||
* negated predicate over the elements of the stream (for all x ~P(x)). If
|
||||
* the stream is empty, the quantification is said to be vacuously satisfied
|
||||
* and is always {@code true}, regardless of P(x).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if either no elements of the stream match the
|
||||
* provided predicate or the stream is empty, otherwise {@code false}
|
||||
*/
|
||||
boolean noneMatch(IntPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns an {@link OptionalInt} describing the first element of this
|
||||
* stream, or an empty {@code OptionalInt} if the stream is empty. If the
|
||||
* stream has no encounter order, then any element may be returned.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalInt} describing the first element of this stream,
|
||||
* or an empty {@code OptionalInt} if the stream is empty
|
||||
*/
|
||||
OptionalInt findFirst();
|
||||
|
||||
/**
|
||||
* Returns an {@link OptionalInt} describing some element of the stream, or
|
||||
* an empty {@code OptionalInt} if the stream is empty.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* <p>The behavior of this operation is explicitly nondeterministic; it is
|
||||
* free to select any element in the stream. This is to allow for maximal
|
||||
* performance in parallel operations; the cost is that multiple invocations
|
||||
* on the same source may not return the same result. (If a stable result
|
||||
* is desired, use {@link #findFirst()} instead.)
|
||||
*
|
||||
* @return an {@code OptionalInt} describing some element of this stream, or
|
||||
* an empty {@code OptionalInt} if the stream is empty
|
||||
* @see #findFirst()
|
||||
*/
|
||||
OptionalInt findAny();
|
||||
|
||||
/**
|
||||
* Returns a {@code LongStream} consisting of the elements of this stream,
|
||||
* converted to {@code long}.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code LongStream} consisting of the elements of this stream,
|
||||
* converted to {@code long}
|
||||
*/
|
||||
LongStream asLongStream();
|
||||
|
||||
/**
|
||||
* Returns a {@code DoubleStream} consisting of the elements of this stream,
|
||||
* converted to {@code double}.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code DoubleStream} consisting of the elements of this stream,
|
||||
* converted to {@code double}
|
||||
*/
|
||||
DoubleStream asDoubleStream();
/**
|
||||
* Returns a {@code Stream} consisting of the elements of this stream,
|
||||
* each boxed to an {@code Integer}.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code Stream} consisting of the elements of this stream,
|
||||
* each boxed to an {@code Integer}
|
||||
*/
|
||||
Stream<Integer> boxed();
|
||||
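    // Illustrative sketch: boxing lets the object-stream collectors be used
    // (assumes java.util.List and java.util.stream.Collectors are imported):
    //   List<Integer> squares = IntStream.rangeClosed(1, 3)
    //                                    .map(i -> i * i)
    //                                    .boxed()
    //                                    .collect(Collectors.toList());   // [1, 4, 9]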
|
||||
@Override
|
||||
IntStream sequential();
@Override
|
||||
IntStream parallel();
@Override
|
||||
PrimitiveIterator.OfInt iterator();
@Override
|
||||
Spliterator.OfInt spliterator();
// Static factories
/**
|
||||
* Returns a builder for an {@code IntStream}.
|
||||
*
|
||||
* @return a stream builder
|
||||
*/
|
||||
public static Builder builder() {
|
||||
return new Streams.IntStreamBuilderImpl();
|
||||
}
|
||||
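    // Illustrative sketch of the builder lifecycle: elements are added during the
    // building phase and build() transitions the builder to the built phase.
    //   IntStream s = IntStream.builder().add(1).add(2).add(3).build();
    //   s.forEach(System.out::println);   // prints 1, 2, 3 in insertion order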
|
||||
/**
|
||||
* Returns an empty sequential {@code IntStream}.
|
||||
*
|
||||
* @return an empty sequential stream
|
||||
*/
|
||||
public static IntStream empty() {
|
||||
return StreamSupport.intStream(Spliterators.emptyIntSpliterator(), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential {@code IntStream} containing a single element.
|
||||
*
|
||||
* @param t the single element
|
||||
* @return a singleton sequential stream
|
||||
*/
|
||||
public static IntStream of(int t) {
|
||||
return StreamSupport.intStream(new Streams.IntStreamBuilderImpl(t), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential ordered stream whose elements are the specified values.
|
||||
*
|
||||
* @param values the elements of the new stream
|
||||
* @return the new stream
|
||||
*/
|
||||
public static IntStream of(int... values) {
|
||||
return Arrays.stream(values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an infinite sequential ordered {@code IntStream} produced by iterative
|
||||
* application of a function {@code f} to an initial element {@code seed},
|
||||
* producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
|
||||
* {@code f(f(seed))}, etc.
|
||||
*
|
||||
* <p>The first element (position {@code 0}) in the {@code IntStream} will be
|
||||
* the provided {@code seed}. For {@code n > 0}, the element at position
|
||||
* {@code n} will be the result of applying the function {@code f} to the
|
||||
* element at position {@code n - 1}.
|
||||
*
|
||||
* @param seed the initial element
|
||||
* @param f a function to be applied to the previous element to produce
|
||||
* a new element
|
||||
* @return a new sequential {@code IntStream}
|
||||
*/
|
||||
public static IntStream iterate(final int seed, final IntUnaryOperator f) {
|
||||
Objects.requireNonNull(f);
|
||||
final PrimitiveIterator.OfInt iterator = new PrimitiveIterator.OfInt() {
|
||||
int t = seed;
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nextInt() {
|
||||
int v = t;
|
||||
t = f.applyAsInt(t);
|
||||
return v;
|
||||
}
|
||||
};
|
||||
return StreamSupport.intStream(Spliterators.spliteratorUnknownSize(
|
||||
iterator,
|
||||
Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
|
||||
}
|
||||
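    // Illustrative sketch: the resulting stream is seed, f(seed), f(f(seed)), ...
    //   IntStream.iterate(1, i -> i * 2)
    //            .limit(5)
    //            .forEach(System.out::println);   // 1, 2, 4, 8, 16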
|
||||
/**
|
||||
* Returns an infinite sequential unordered stream where each element is
|
||||
* generated by the provided {@code IntSupplier}. This is suitable for
|
||||
* generating constant streams, streams of random elements, etc.
|
||||
*
|
||||
* @param s the {@code IntSupplier} for generated elements
|
||||
* @return a new infinite sequential unordered {@code IntStream}
|
||||
*/
|
||||
public static IntStream generate(IntSupplier s) {
|
||||
Objects.requireNonNull(s);
|
||||
return StreamSupport.intStream(
|
||||
new StreamSpliterators.InfiniteSupplyingSpliterator.OfInt(Long.MAX_VALUE, s), false);
|
||||
}
|
||||
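    // Illustrative sketch: generate() draws each element from the supplier, so the
    // stream must be bounded by a short-circuiting operation such as limit().
    //   IntStream.generate(() -> 7).limit(3).forEach(System.out::println);   // 7, 7, 7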
|
||||
/**
|
||||
* Returns a sequential ordered {@code IntStream} from {@code startInclusive}
|
||||
* (inclusive) to {@code endExclusive} (exclusive) by an incremental step of
|
||||
* {@code 1}.
|
||||
*
|
||||
* @apiNote
|
||||
* <p>An equivalent sequence of increasing values can be produced
|
||||
* sequentially using a {@code for} loop as follows:
|
||||
* <pre>{@code
|
||||
* for (int i = startInclusive; i < endExclusive ; i++) { ... }
|
||||
* }</pre>
|
||||
*
|
||||
* @param startInclusive the (inclusive) initial value
|
||||
* @param endExclusive the exclusive upper bound
|
||||
* @return a sequential {@code IntStream} for the range of {@code int}
|
||||
* elements
|
||||
*/
|
||||
public static IntStream range(int startInclusive, int endExclusive) {
|
||||
if (startInclusive >= endExclusive) {
|
||||
return empty();
|
||||
} else {
|
||||
return StreamSupport.intStream(
|
||||
new Streams.RangeIntSpliterator(startInclusive, endExclusive, false), false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential ordered {@code IntStream} from {@code startInclusive}
|
||||
* (inclusive) to {@code endInclusive} (inclusive) by an incremental step of
|
||||
* {@code 1}.
|
||||
*
|
||||
* @apiNote
|
||||
* <p>An equivalent sequence of increasing values can be produced
|
||||
* sequentially using a {@code for} loop as follows:
|
||||
* <pre>{@code
|
||||
* for (int i = startInclusive; i <= endInclusive ; i++) { ... }
|
||||
* }</pre>
|
||||
*
|
||||
* @param startInclusive the (inclusive) initial value
|
||||
* @param endInclusive the inclusive upper bound
|
||||
* @return a sequential {@code IntStream} for the range of {@code int}
|
||||
* elements
|
||||
*/
|
||||
public static IntStream rangeClosed(int startInclusive, int endInclusive) {
|
||||
if (startInclusive > endInclusive) {
|
||||
return empty();
|
||||
} else {
|
||||
return StreamSupport.intStream(
|
||||
new Streams.RangeIntSpliterator(startInclusive, endInclusive, true), false);
|
||||
}
|
||||
}
|
||||
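    // Illustrative sketch of the two range factories:
    //   IntStream.range(0, 3).forEach(System.out::println);         // 0, 1, 2
    //   IntStream.rangeClosed(0, 3).forEach(System.out::println);   // 0, 1, 2, 3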
|
||||
/**
|
||||
* Creates a lazily concatenated stream whose elements are all the
|
||||
* elements of the first stream followed by all the elements of the
|
||||
* second stream. The resulting stream is ordered if both
|
||||
* of the input streams are ordered, and parallel if either of the input
|
||||
* streams is parallel. When the resulting stream is closed, the close
|
||||
* handlers for both input streams are invoked.
|
||||
*
|
||||
* @implNote
|
||||
* Use caution when constructing streams from repeated concatenation.
|
||||
* Accessing an element of a deeply concatenated stream can result in deep
|
||||
* call chains, or even {@code StackOverflowError}.
|
||||
*
|
||||
* @param a the first stream
|
||||
* @param b the second stream
|
||||
* @return the concatenation of the two input streams
|
||||
*/
|
||||
public static IntStream concat(IntStream a, IntStream b) {
|
||||
Objects.requireNonNull(a);
|
||||
Objects.requireNonNull(b);
|
||||
|
||||
Spliterator.OfInt split = new Streams.ConcatSpliterator.OfInt(
|
||||
a.spliterator(), b.spliterator());
|
||||
IntStream stream = StreamSupport.intStream(split, a.isParallel() || b.isParallel());
|
||||
return stream.onClose(Streams.composedClose(a, b));
|
||||
}
|
||||
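    // Illustrative sketch: concatenation preserves the order of both inputs.
    //   IntStream.concat(IntStream.of(1, 2), IntStream.of(3, 4))
    //            .forEach(System.out::println);   // 1, 2, 3, 4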
|
||||
/**
|
||||
* A mutable builder for an {@code IntStream}.
|
||||
*
|
||||
* <p>A stream builder has a lifecycle, which starts in a building
|
||||
* phase, during which elements can be added, and then transitions to a built
|
||||
* phase, after which elements may not be added. The built phase
|
||||
* begins when the {@link #build()} method is called, which creates an
|
||||
* ordered stream whose elements are the elements that were added to the
|
||||
* stream builder, in the order they were added.
|
||||
*
|
||||
* @see IntStream#builder()
|
||||
* @since 1.8
|
||||
*/
|
||||
public interface Builder extends IntConsumer {
|
||||
|
||||
/**
|
||||
* Adds an element to the stream being built.
|
||||
*
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
@Override
|
||||
void accept(int t);
|
||||
|
||||
/**
|
||||
* Adds an element to the stream being built.
|
||||
*
|
||||
* @implSpec
|
||||
* The default implementation behaves as if:
|
||||
* <pre>{@code
|
||||
*     accept(t);
|
||||
* return this;
|
||||
* }</pre>
|
||||
*
|
||||
* @param t the element to add
|
||||
* @return {@code this} builder
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
default Builder add(int t) {
|
||||
accept(t);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the stream, transitioning this builder to the built state.
|
||||
* An {@code IllegalStateException} is thrown if there are further
|
||||
* attempts to operate on the builder after it has entered the built
|
||||
* state.
|
||||
*
|
||||
* @return the built stream
|
||||
* @throws IllegalStateException if the builder has already transitioned to
|
||||
* the built state
|
||||
*/
|
||||
IntStream build();
|
||||
}
|
||||
}
|
||||
637
jdkSrc/jdk8/java/util/stream/LongPipeline.java
Normal file
@@ -0,0 +1,637 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.LongSummaryStatistics;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.OptionalLong;
|
||||
import java.util.PrimitiveIterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BinaryOperator;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.LongBinaryOperator;
|
||||
import java.util.function.LongConsumer;
|
||||
import java.util.function.LongFunction;
|
||||
import java.util.function.LongPredicate;
|
||||
import java.util.function.LongToDoubleFunction;
|
||||
import java.util.function.LongToIntFunction;
|
||||
import java.util.function.LongUnaryOperator;
|
||||
import java.util.function.ObjLongConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Abstract base class for an intermediate pipeline stage or pipeline source
|
||||
* stage whose elements are of type {@code long}.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract class LongPipeline<E_IN>
|
||||
extends AbstractPipeline<E_IN, Long, LongStream>
|
||||
implements LongStream {
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
LongPipeline(Supplier<? extends Spliterator<Long>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
LongPipeline(Spliterator<Long> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for appending an intermediate operation onto an existing pipeline.
|
||||
*
|
||||
* @param upstream the upstream element source.
|
||||
* @param opFlags the operation flags
|
||||
*/
|
||||
LongPipeline(AbstractPipeline<?, E_IN, ?> upstream, int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapt a {@code Sink<Long>} to a {@code LongConsumer}, ideally simply
|
||||
* by casting.
|
||||
*/
|
||||
private static LongConsumer adapt(Sink<Long> sink) {
|
||||
if (sink instanceof LongConsumer) {
|
||||
return (LongConsumer) sink;
|
||||
} else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(AbstractPipeline.class,
|
||||
"using LongStream.adapt(Sink<Long> s)");
|
||||
return sink::accept;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Adapt a {@code Spliterator<Long>} to a {@code Spliterator.OfLong}.
|
||||
*
|
||||
* @implNote
|
||||
* The implementation attempts to cast to a Spliterator.OfLong, and throws
|
||||
* an exception if this cast is not possible.
|
||||
*/
|
||||
private static Spliterator.OfLong adapt(Spliterator<Long> s) {
|
||||
if (s instanceof Spliterator.OfLong) {
|
||||
return (Spliterator.OfLong) s;
|
||||
} else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(AbstractPipeline.class,
|
||||
"using LongStream.adapt(Spliterator<Long> s)");
|
||||
throw new UnsupportedOperationException("LongStream.adapt(Spliterator<Long> s)");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Shape-specific methods
|
||||
|
||||
@Override
|
||||
final StreamShape getOutputShape() {
|
||||
return StreamShape.LONG_VALUE;
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Node<Long> evaluateToNode(PipelineHelper<Long> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
boolean flattenTree,
|
||||
IntFunction<Long[]> generator) {
|
||||
return Nodes.collectLong(helper, spliterator, flattenTree);
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Spliterator<Long> wrap(PipelineHelper<Long> ph,
|
||||
Supplier<Spliterator<P_IN>> supplier,
|
||||
boolean isParallel) {
|
||||
return new StreamSpliterators.LongWrappingSpliterator<>(ph, supplier, isParallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
final Spliterator.OfLong lazySpliterator(Supplier<? extends Spliterator<Long>> supplier) {
|
||||
return new StreamSpliterators.DelegatingSpliterator.OfLong((Supplier<Spliterator.OfLong>) supplier);
|
||||
}
|
||||
|
||||
@Override
|
||||
final void forEachWithCancel(Spliterator<Long> spliterator, Sink<Long> sink) {
|
||||
Spliterator.OfLong spl = adapt(spliterator);
|
||||
LongConsumer adaptedSink = adapt(sink);
|
||||
do { } while (!sink.cancellationRequested() && spl.tryAdvance(adaptedSink));
|
||||
}
|
||||
|
||||
@Override
|
||||
final Node.Builder<Long> makeNodeBuilder(long exactSizeIfKnown, IntFunction<Long[]> generator) {
|
||||
return Nodes.longBuilder(exactSizeIfKnown);
|
||||
}
|
||||
|
||||
|
||||
// LongStream
|
||||
|
||||
@Override
|
||||
public final PrimitiveIterator.OfLong iterator() {
|
||||
return Spliterators.iterator(spliterator());
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Spliterator.OfLong spliterator() {
|
||||
return adapt(super.spliterator());
|
||||
}
|
||||
|
||||
// Stateless intermediate ops from LongStream
|
||||
|
||||
@Override
|
||||
public final DoubleStream asDoubleStream() {
|
||||
return new DoublePipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedLong<Double>(sink) {
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
downstream.accept((double) t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<Long> boxed() {
|
||||
return mapToObj(Long::valueOf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream map(LongUnaryOperator mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedLong<Long>(sink) {
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
downstream.accept(mapper.applyAsLong(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <U> Stream<U> mapToObj(LongFunction<? extends U> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new ReferencePipeline.StatelessOp<Long, U>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<U> sink) {
|
||||
return new Sink.ChainedLong<U>(sink) {
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
downstream.accept(mapper.apply(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final IntStream mapToInt(LongToIntFunction mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new IntPipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedLong<Integer>(sink) {
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
downstream.accept(mapper.applyAsInt(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream mapToDouble(LongToDoubleFunction mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new DoublePipeline.StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedLong<Double>(sink) {
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
downstream.accept(mapper.applyAsDouble(t));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream flatMap(LongFunction<? extends LongStream> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedLong<Long>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
// cache the consumer to avoid creation on every accepted element
|
||||
LongConsumer downstreamAsLong = downstream::accept;
|
||||
|
||||
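                    // The sizes of the mapped streams are unknown up front, so the
                    // downstream size is reported as -1 (unknown) rather than
                    // forwarding the upstream size.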
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
try (LongStream result = mapper.apply(t)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstreamAsLong);
|
||||
}
|
||||
else {
|
||||
Spliterator.OfLong s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstreamAsLong));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
// If this method is called then an operation within the stream
|
||||
// pipeline is short-circuiting (see AbstractPipeline.copyInto).
|
||||
// Note that we cannot differentiate between an upstream or
|
||||
// downstream operation
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public LongStream unordered() {
|
||||
if (!isOrdered())
|
||||
return this;
|
||||
return new StatelessOp<Long>(this, StreamShape.LONG_VALUE, StreamOpFlag.NOT_ORDERED) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return sink;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream filter(LongPredicate predicate) {
|
||||
Objects.requireNonNull(predicate);
|
||||
return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedLong<Long>(sink) {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
if (predicate.test(t))
|
||||
downstream.accept(t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream peek(LongConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
return new StatelessOp<Long>(this, StreamShape.LONG_VALUE,
|
||||
0) {
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedLong<Long>(sink) {
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
action.accept(t);
|
||||
downstream.accept(t);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Stateful intermediate ops from LongStream
|
||||
|
||||
@Override
|
||||
public final LongStream limit(long maxSize) {
|
||||
if (maxSize < 0)
|
||||
throw new IllegalArgumentException(Long.toString(maxSize));
|
||||
return SliceOps.makeLong(this, 0, maxSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream skip(long n) {
|
||||
if (n < 0)
|
||||
throw new IllegalArgumentException(Long.toString(n));
|
||||
if (n == 0)
|
||||
return this;
|
||||
else
|
||||
return SliceOps.makeLong(this, n, -1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream sorted() {
|
||||
return SortedOps.makeLong(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream distinct() {
|
||||
// While functional and quick to implement, this approach is not very efficient.
|
||||
// An efficient version requires a long-specific map/set implementation.
|
||||
return boxed().distinct().mapToLong(i -> (long) i);
|
||||
}
// Terminal ops from LongStream
|
||||
|
||||
@Override
|
||||
public void forEach(LongConsumer action) {
|
||||
evaluate(ForEachOps.makeLong(action, false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(LongConsumer action) {
|
||||
evaluate(ForEachOps.makeLong(action, true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long sum() {
|
||||
// use better algorithm to compensate for intermediate overflow?
|
||||
return reduce(0, Long::sum);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalLong min() {
|
||||
return reduce(Math::min);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalLong max() {
|
||||
return reduce(Math::max);
|
||||
}
|
||||
|
||||
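    // average() accumulates into a long[2]: index 0 holds the element count and
    // index 1 the running sum; the combiner merges the partial counts and sums
    // produced by parallel leaf tasks before the final division below.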
@Override
|
||||
public final OptionalDouble average() {
|
||||
long[] avg = collect(() -> new long[2],
|
||||
(ll, i) -> {
|
||||
ll[0]++;
|
||||
ll[1] += i;
|
||||
},
|
||||
(ll, rr) -> {
|
||||
ll[0] += rr[0];
|
||||
ll[1] += rr[1];
|
||||
});
|
||||
return avg[0] > 0
|
||||
? OptionalDouble.of((double) avg[1] / avg[0])
|
||||
: OptionalDouble.empty();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long count() {
|
||||
return map(e -> 1L).sum();
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongSummaryStatistics summaryStatistics() {
|
||||
return collect(LongSummaryStatistics::new, LongSummaryStatistics::accept,
|
||||
LongSummaryStatistics::combine);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long reduce(long identity, LongBinaryOperator op) {
|
||||
return evaluate(ReduceOps.makeLong(identity, op));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalLong reduce(LongBinaryOperator op) {
|
||||
return evaluate(ReduceOps.makeLong(op));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <R> R collect(Supplier<R> supplier,
|
||||
ObjLongConsumer<R> accumulator,
|
||||
BiConsumer<R, R> combiner) {
|
||||
Objects.requireNonNull(combiner);
|
||||
BinaryOperator<R> operator = (left, right) -> {
|
||||
combiner.accept(left, right);
|
||||
return left;
|
||||
};
|
||||
return evaluate(ReduceOps.makeLong(supplier, accumulator, operator));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean anyMatch(LongPredicate predicate) {
|
||||
return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.ANY));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean allMatch(LongPredicate predicate) {
|
||||
return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.ALL));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean noneMatch(LongPredicate predicate) {
|
||||
return evaluate(MatchOps.makeLong(predicate, MatchOps.MatchKind.NONE));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalLong findFirst() {
|
||||
return evaluate(FindOps.makeLong(true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final OptionalLong findAny() {
|
||||
return evaluate(FindOps.makeLong(false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long[] toArray() {
|
||||
return Nodes.flattenLong((Node.OfLong) evaluateToArrayNode(Long[]::new))
|
||||
.asPrimitiveArray();
|
||||
}
|
||||
|
||||
|
||||
//
|
||||
|
||||
/**
|
||||
* Source stage of a LongPipeline.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
static class Head<E_IN> extends LongPipeline<E_IN> {
|
||||
/**
|
||||
* Constructor for the source stage of a LongStream.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream
|
||||
* source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
Head(Supplier<? extends Spliterator<Long>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the source stage of a LongStream.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
Head(Spliterator<Long> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
final Sink<E_IN> opWrapSink(int flags, Sink<Long> sink) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
// Optimized sequential terminal operations for the head of the pipeline
|
||||
|
||||
@Override
|
||||
public void forEach(LongConsumer action) {
|
||||
if (!isParallel()) {
|
||||
adapt(sourceStageSpliterator()).forEachRemaining(action);
|
||||
} else {
|
||||
super.forEach(action);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(LongConsumer action) {
|
||||
if (!isParallel()) {
|
||||
adapt(sourceStageSpliterator()).forEachRemaining(action);
|
||||
} else {
|
||||
super.forEachOrdered(action);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
* Base class for a stateless intermediate stage of a LongStream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatelessOp<E_IN> extends LongPipeline<E_IN> {
|
||||
/**
|
||||
* Construct a new LongStream by appending a stateless intermediate
|
||||
* operation to an existing stream.
|
||||
* @param upstream The upstream pipeline stage
|
||||
* @param inputShape The stream shape for the upstream pipeline stage
|
||||
* @param opFlags Operation flags for the new stage
|
||||
*/
|
||||
StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateful intermediate stage of a LongStream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatefulOp<E_IN> extends LongPipeline<E_IN> {
|
||||
/**
|
||||
* Construct a new LongStream by appending a stateful intermediate
|
||||
* operation to an existing stream.
|
||||
* @param upstream The upstream pipeline stage
|
||||
* @param inputShape The stream shape for the upstream pipeline stage
|
||||
* @param opFlags Operation flags for the new stage
|
||||
*/
|
||||
StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
abstract <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Long[]> generator);
|
||||
}
|
||||
}
|
||||
920
jdkSrc/jdk8/java/util/stream/LongStream.java
Normal file
@@ -0,0 +1,920 @@
|
||||
/*
|
||||
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.nio.charset.Charset;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.LongSummaryStatistics;
|
||||
import java.util.Objects;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.OptionalLong;
|
||||
import java.util.PrimitiveIterator;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.LongBinaryOperator;
|
||||
import java.util.function.LongConsumer;
|
||||
import java.util.function.LongFunction;
|
||||
import java.util.function.LongPredicate;
|
||||
import java.util.function.LongSupplier;
|
||||
import java.util.function.LongToDoubleFunction;
|
||||
import java.util.function.LongToIntFunction;
|
||||
import java.util.function.LongUnaryOperator;
|
||||
import java.util.function.ObjLongConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* A sequence of primitive long-valued elements supporting sequential and parallel
|
||||
* aggregate operations. This is the {@code long} primitive specialization of
|
||||
* {@link Stream}.
|
||||
*
|
||||
* <p>The following example illustrates an aggregate operation using
|
||||
* {@link Stream} and {@link LongStream}, computing the sum of the weights of the
|
||||
* red widgets:
|
||||
*
|
||||
* <pre>{@code
|
||||
* long sum = widgets.stream()
|
||||
* .filter(w -> w.getColor() == RED)
|
||||
* .mapToLong(w -> w.getWeight())
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* See the class documentation for {@link Stream} and the package documentation
|
||||
* for <a href="package-summary.html">java.util.stream</a> for additional
|
||||
* specification of streams, stream operations, stream pipelines, and
|
||||
* parallelism.
|
||||
*
|
||||
* @since 1.8
|
||||
* @see Stream
|
||||
* @see <a href="package-summary.html">java.util.stream</a>
|
||||
*/
|
||||
public interface LongStream extends BaseStream<Long, LongStream> {
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream that match
|
||||
* the given predicate.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to each element to determine if it
|
||||
* should be included
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream filter(LongPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the results of applying the given
|
||||
* function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream map(LongUnaryOperator mapper);
|
||||
|
||||
/**
|
||||
* Returns an object-valued {@code Stream} consisting of the results of
|
||||
* applying the given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @param <U> the element type of the new stream
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
<U> Stream<U> mapToObj(LongFunction<? extends U> mapper);
|
||||
|
||||
/**
|
||||
* Returns an {@code IntStream} consisting of the results of applying the
|
||||
* given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
IntStream mapToInt(LongToIntFunction mapper);
|
||||
|
||||
/**
|
||||
* Returns a {@code DoubleStream} consisting of the results of applying the
|
||||
* given function to the elements of this stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element
|
||||
* @return the new stream
|
||||
*/
|
||||
DoubleStream mapToDouble(LongToDoubleFunction mapper);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the results of replacing each element of
|
||||
* this stream with the contents of a mapped stream produced by applying
|
||||
* the provided mapping function to each element. Each mapped stream is
|
||||
* {@link java.util.stream.BaseStream#close() closed} after its contents
|
||||
* have been placed into this stream. (If a mapped stream is {@code null}
|
||||
* an empty stream is used instead.)
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @param mapper a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function to apply to each element which produces a
|
||||
* {@code LongStream} of new values
|
||||
* @return the new stream
|
||||
* @see Stream#flatMap(Function)
|
||||
*/
|
||||
LongStream flatMap(LongFunction<? extends LongStream> mapper);
|
||||
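    // Illustrative sketch: each element is replaced by the contents of the
    // mapped stream, e.g. expanding n into (n, n * 10):
    //   LongStream.of(1, 2)
    //             .flatMap(n -> LongStream.of(n, n * 10))
    //             .forEach(System.out::println);   // 1, 10, 2, 20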
|
||||
/**
|
||||
* Returns a stream consisting of the distinct elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream distinct();
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream in sorted
|
||||
* order.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream sorted();
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream, additionally
|
||||
* performing the provided action on each element as elements are consumed
|
||||
* from the resulting stream.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* <p>For parallel stream pipelines, the action may be called at
|
||||
* whatever time and in whatever thread the element is made available by the
|
||||
* upstream operation. If the action modifies shared state,
|
||||
* it is responsible for providing the required synchronization.
|
||||
*
|
||||
* @apiNote This method exists mainly to support debugging, where you want
|
||||
* to see the elements as they flow past a certain point in a pipeline:
|
||||
* <pre>{@code
|
||||
* LongStream.of(1, 2, 3, 4)
|
||||
* .filter(e -> e > 2)
|
||||
* .peek(e -> System.out.println("Filtered value: " + e))
|
||||
* .map(e -> e * e)
|
||||
* .peek(e -> System.out.println("Mapped value: " + e))
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements as
|
||||
* they are consumed from the stream
|
||||
* @return the new stream
|
||||
*/
|
||||
LongStream peek(LongConsumer action);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the elements of this stream, truncated
|
||||
* to be no longer than {@code maxSize} in length.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* stateful intermediate operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* While {@code limit()} is generally a cheap operation on sequential
|
||||
* stream pipelines, it can be quite expensive on ordered parallel pipelines,
|
||||
* especially for large values of {@code maxSize}, since {@code limit(n)}
|
||||
* is constrained to return not just any <em>n</em> elements, but the
|
||||
* <em>first n</em> elements in the encounter order. Using an unordered
|
||||
* stream source (such as {@link #generate(LongSupplier)}) or removing the
|
||||
* ordering constraint with {@link #unordered()} may result in significant
|
||||
* speedups of {@code limit()} in parallel pipelines, if the semantics of
|
||||
* your situation permit. If consistency with encounter order is required,
|
||||
* and you are experiencing poor performance or memory utilization with
|
||||
* {@code limit()} in parallel pipelines, switching to sequential execution
|
||||
* with {@link #sequential()} may improve performance.
|
||||
*
|
||||
* @param maxSize the number of elements the stream should be limited to
|
||||
* @return the new stream
|
||||
* @throws IllegalArgumentException if {@code maxSize} is negative
|
||||
*/
|
||||
LongStream limit(long maxSize);
|
||||
|
||||
/**
|
||||
* Returns a stream consisting of the remaining elements of this stream
|
||||
* after discarding the first {@code n} elements of the stream.
|
||||
* If this stream contains fewer than {@code n} elements then an
|
||||
* empty stream will be returned.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">stateful
|
||||
* intermediate operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* While {@code skip()} is generally a cheap operation on sequential
|
||||
* stream pipelines, it can be quite expensive on ordered parallel pipelines,
|
||||
* especially for large values of {@code n}, since {@code skip(n)}
|
||||
* is constrained to skip not just any <em>n</em> elements, but the
|
||||
* <em>first n</em> elements in the encounter order. Using an unordered
|
||||
* stream source (such as {@link #generate(LongSupplier)}) or removing the
|
||||
* ordering constraint with {@link #unordered()} may result in significant
|
||||
* speedups of {@code skip()} in parallel pipelines, if the semantics of
|
||||
* your situation permit. If consistency with encounter order is required,
|
||||
* and you are experiencing poor performance or memory utilization with
|
||||
* {@code skip()} in parallel pipelines, switching to sequential execution
|
||||
* with {@link #sequential()} may improve performance.
|
||||
*
|
||||
* @param n the number of leading elements to skip
|
||||
* @return the new stream
|
||||
* @throws IllegalArgumentException if {@code n} is negative
|
||||
*/
|
||||
LongStream skip(long n);
|
||||
|
||||
/**
|
||||
* Performs an action for each element of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* <p>For parallel stream pipelines, this operation does <em>not</em>
|
||||
* guarantee to respect the encounter order of the stream, as doing so
|
||||
* would sacrifice the benefit of parallelism. For any given element, the
|
||||
* action may be performed at whatever time and in whatever thread the
|
||||
* library chooses. If the action accesses shared state, it is
|
||||
* responsible for providing the required synchronization.
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements
|
||||
*/
|
||||
void forEach(LongConsumer action);
|
||||
|
||||
/**
|
||||
* Performs an action for each element of this stream, guaranteeing that
|
||||
* each element is processed in encounter order for streams that have a
|
||||
* defined encounter order.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param action a <a href="package-summary.html#NonInterference">
|
||||
* non-interfering</a> action to perform on the elements
|
||||
* @see #forEach(LongConsumer)
|
||||
*/
|
||||
void forEachOrdered(LongConsumer action);
|
||||
|
||||
/**
|
||||
* Returns an array containing the elements of this stream.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an array containing the elements of this stream
|
||||
*/
|
||||
long[] toArray();
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#Reduction">reduction</a> on the
|
||||
* elements of this stream, using the provided identity value and an
|
||||
* <a href="package-summary.html#Associativity">associative</a>
|
||||
* accumulation function, and returns the reduced value. This is equivalent
|
||||
* to:
|
||||
* <pre>{@code
|
||||
* long result = identity;
|
||||
* for (long element : this stream)
|
||||
* result = accumulator.applyAsLong(result, element)
|
||||
* return result;
|
||||
* }</pre>
|
||||
*
|
||||
* but is not constrained to execute sequentially.
|
||||
*
|
||||
* <p>The {@code identity} value must be an identity for the accumulator
|
||||
* function. This means that for all {@code x},
|
||||
* {@code accumulator.apply(identity, x)} is equal to {@code x}.
|
||||
* The {@code accumulator} function must be an
|
||||
* <a href="package-summary.html#Associativity">associative</a> function.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @apiNote Sum, min, max, and average are all special cases of reduction.
|
||||
* Summing a stream of numbers can be expressed as:
|
||||
*
|
||||
* <pre>{@code
|
||||
* long sum = integers.reduce(0, (a, b) -> a+b);
|
||||
* }</pre>
|
||||
*
|
||||
* or more compactly:
|
||||
*
|
||||
* <pre>{@code
|
||||
* long sum = integers.reduce(0, Long::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>While this may seem a more roundabout way to perform an aggregation
|
||||
* compared to simply mutating a running total in a loop, reduction
|
||||
* operations parallelize more gracefully, without needing additional
|
||||
* synchronization and with greatly reduced risk of data races.
|
||||
*
|
||||
* @param identity the identity value for the accumulating function
|
||||
* @param op an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values
|
||||
* @return the result of the reduction
|
||||
* @see #sum()
|
||||
* @see #min()
|
||||
* @see #max()
|
||||
* @see #average()
|
||||
*/
|
||||
long reduce(long identity, LongBinaryOperator op);
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#Reduction">reduction</a> on the
|
||||
* elements of this stream, using an
|
||||
* <a href="package-summary.html#Associativity">associative</a> accumulation
|
||||
* function, and returns an {@code OptionalLong} describing the reduced value,
|
||||
* if any. This is equivalent to:
|
||||
* <pre>{@code
|
||||
* boolean foundAny = false;
|
||||
* long result = null;
|
||||
* for (long element : this stream) {
|
||||
* if (!foundAny) {
|
||||
* foundAny = true;
|
||||
* result = element;
|
||||
* }
|
||||
* else
|
||||
* result = accumulator.applyAsLong(result, element);
|
||||
* }
|
||||
* return foundAny ? OptionalLong.of(result) : OptionalLong.empty();
|
||||
* }</pre>
|
||||
*
|
||||
* but is not constrained to execute sequentially.
|
||||
*
|
||||
* <p>The {@code accumulator} function must be an
|
||||
* <a href="package-summary.html#Associativity">associative</a> function.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param op an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values
|
||||
* @return the result of the reduction
|
||||
* @see #reduce(long, LongBinaryOperator)
|
||||
*/
|
||||
OptionalLong reduce(LongBinaryOperator op);
|
||||
|
||||
/**
|
||||
* Performs a <a href="package-summary.html#MutableReduction">mutable
|
||||
* reduction</a> operation on the elements of this stream. A mutable
|
||||
* reduction is one in which the reduced value is a mutable result container,
|
||||
* such as an {@code ArrayList}, and elements are incorporated by updating
|
||||
* the state of the result rather than by replacing the result. This
|
||||
* produces a result equivalent to:
|
||||
* <pre>{@code
|
||||
* R result = supplier.get();
|
||||
* for (long element : this stream)
|
||||
* accumulator.accept(result, element);
|
||||
* return result;
|
||||
* }</pre>
|
||||
*
|
||||
* <p>Like {@link #reduce(long, LongBinaryOperator)}, {@code collect} operations
|
||||
* can be parallelized without requiring additional synchronization.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @param <R> type of the result
|
||||
* @param supplier a function that creates a new result container. For a
|
||||
* parallel execution, this function may be called
|
||||
* multiple times and must return a fresh value each time.
|
||||
* @param accumulator an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for incorporating an additional element into a result
|
||||
* @param combiner an <a href="package-summary.html#Associativity">associative</a>,
|
||||
* <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* function for combining two values, which must be
|
||||
* compatible with the accumulator function
|
||||
* @return the result of the reduction
|
||||
* @see Stream#collect(Supplier, BiConsumer, BiConsumer)
|
||||
*/
|
||||
<R> R collect(Supplier<R> supplier,
|
||||
ObjLongConsumer<R> accumulator,
|
||||
BiConsumer<R, R> combiner);
|
||||
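    // Illustrative sketch: collecting the (boxed) elements into a list with the
    // three-argument form, given some LongStream named longs (a hypothetical
    // variable used only for illustration; assumes java.util.List and
    // java.util.ArrayList are imported):
    //   List<Long> list = longs.collect(ArrayList::new, ArrayList::add, ArrayList::addAll);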
|
||||
/**
|
||||
* Returns the sum of elements in this stream. This is a special case
|
||||
* of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* and is equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(0, Long::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return the sum of elements in this stream
|
||||
*/
|
||||
long sum();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalLong} describing the minimum element of this
|
||||
* stream, or an empty optional if this stream is empty. This is a special
|
||||
* case of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* and is equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(Long::min);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalLong} containing the minimum element of this
|
||||
* stream, or an empty {@code OptionalLong} if the stream is empty
|
||||
*/
|
||||
OptionalLong min();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalLong} describing the maximum element of this
|
||||
* stream, or an empty optional if this stream is empty. This is a special
|
||||
* case of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* and is equivalent to:
|
||||
* <pre>{@code
|
||||
* return reduce(Long::max);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalLong} containing the maximum element of this
|
||||
* stream, or an empty {@code OptionalLong} if the stream is empty
|
||||
*/
|
||||
OptionalLong max();
|
||||
|
||||
/**
|
||||
* Returns the count of elements in this stream. This is a special case of
|
||||
* a <a href="package-summary.html#Reduction">reduction</a> and is
|
||||
* equivalent to:
|
||||
* <pre>{@code
|
||||
* return map(e -> 1L).sum();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal operation</a>.
|
||||
*
|
||||
* @return the count of elements in this stream
|
||||
*/
|
||||
long count();
|
||||
|
||||
/**
|
||||
* Returns an {@code OptionalDouble} describing the arithmetic mean of elements of
|
||||
* this stream, or an empty optional if this stream is empty. This is a
|
||||
* special case of a
|
||||
* <a href="package-summary.html#Reduction">reduction</a>.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalDouble} containing the average element of this
|
||||
* stream, or an empty optional if the stream is empty
|
||||
*/
|
||||
OptionalDouble average();
|
||||
|
||||
/**
|
||||
* Returns a {@code LongSummaryStatistics} describing various summary data
|
||||
* about the elements of this stream. This is a special case of a
|
||||
* <a href="package-summary.html#Reduction">reduction</a>.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">terminal
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code LongSummaryStatistics} describing various summary data
|
||||
* about the elements of this stream
|
||||
*/
|
||||
LongSummaryStatistics summaryStatistics();
|
||||
|
||||
/**
|
||||
* Returns whether any elements of this stream match the provided
|
||||
* predicate. May not evaluate the predicate on all elements if not
|
||||
* necessary for determining the result. If the stream is empty then
|
||||
* {@code false} is returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>existential quantification</em> of the
|
||||
* predicate over the elements of the stream (for some x P(x)).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if any elements of the stream match the provided
|
||||
* predicate, otherwise {@code false}
|
||||
*/
|
||||
boolean anyMatch(LongPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns whether all elements of this stream match the provided predicate.
|
||||
* May not evaluate the predicate on all elements if not necessary for
|
||||
* determining the result. If the stream is empty then {@code true} is
|
||||
* returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>universal quantification</em> of the
|
||||
* predicate over the elements of the stream (for all x P(x)). If the
|
||||
* stream is empty, the quantification is said to be <em>vacuously
|
||||
* satisfied</em> and is always {@code true} (regardless of P(x)).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if either all elements of the stream match the
|
||||
* provided predicate or the stream is empty, otherwise {@code false}
|
||||
*/
|
||||
boolean allMatch(LongPredicate predicate);
|
||||
|
||||
/**
|
||||
* Returns whether no elements of this stream match the provided predicate.
|
||||
* May not evaluate the predicate on all elements if not necessary for
|
||||
* determining the result. If the stream is empty then {@code true} is
|
||||
* returned and the predicate is not evaluated.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @apiNote
|
||||
* This method evaluates the <em>universal quantification</em> of the
|
||||
* negated predicate over the elements of the stream (for all x ~P(x)). If
|
||||
* the stream is empty, the quantification is said to be vacuously satisfied
|
||||
* and is always {@code true}, regardless of P(x).
|
||||
*
|
||||
* @param predicate a <a href="package-summary.html#NonInterference">non-interfering</a>,
|
||||
* <a href="package-summary.html#Statelessness">stateless</a>
|
||||
* predicate to apply to elements of this stream
|
||||
* @return {@code true} if either no elements of the stream match the
|
||||
* provided predicate or the stream is empty, otherwise {@code false}
|
||||
*/
|
||||
boolean noneMatch(LongPredicate predicate);
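
    // A minimal usage sketch of the three quantified-match operations above
    // (illustrative values, not part of the original API documentation):
    //
    //   LongStream.of(1L, 2L, 3L).anyMatch(x -> x > 2L);    // true  (3 matches)
    //   LongStream.of(1L, 2L, 3L).allMatch(x -> x > 0L);    // true
    //   LongStream.of(1L, 2L, 3L).noneMatch(x -> x > 3L);   // true
    //   LongStream.empty().allMatch(x -> false);             // true, vacuously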
|
||||
|
||||
/**
|
||||
* Returns an {@link OptionalLong} describing the first element of this
|
||||
* stream, or an empty {@code OptionalLong} if the stream is empty. If the
|
||||
* stream has no encounter order, then any element may be returned.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* @return an {@code OptionalLong} describing the first element of this
|
||||
* stream, or an empty {@code OptionalLong} if the stream is empty
|
||||
*/
|
||||
OptionalLong findFirst();
|
||||
|
||||
/**
|
||||
* Returns an {@link OptionalLong} describing some element of the stream, or
|
||||
* an empty {@code OptionalLong} if the stream is empty.
|
||||
*
|
||||
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
|
||||
* terminal operation</a>.
|
||||
*
|
||||
* <p>The behavior of this operation is explicitly nondeterministic; it is
|
||||
* free to select any element in the stream. This is to allow for maximal
|
||||
* performance in parallel operations; the cost is that multiple invocations
|
||||
* on the same source may not return the same result. (If a stable result
|
||||
* is desired, use {@link #findFirst()} instead.)
|
||||
*
|
||||
* @return an {@code OptionalLong} describing some element of this stream,
|
||||
* or an empty {@code OptionalLong} if the stream is empty
|
||||
* @see #findFirst()
|
||||
*/
|
||||
OptionalLong findAny();
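
    // Sketch of the findFirst()/findAny() distinction described above
    // (assumed example values; findAny() on a parallel stream may return a
    // different element on each invocation):
    //
    //   LongStream.of(5L, 6L, 7L).findFirst();              // OptionalLong[5]
    //   LongStream.of(5L, 6L, 7L).parallel().findAny();     // any of 5, 6 or 7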
|
||||
|
||||
/**
|
||||
* Returns a {@code DoubleStream} consisting of the elements of this stream,
|
||||
* converted to {@code double}.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
* @return a {@code DoubleStream} consisting of the elements of this stream,
|
||||
* converted to {@code double}
|
||||
*/
|
||||
DoubleStream asDoubleStream();
|
||||
|
||||
/**
|
||||
* Returns a {@code Stream} consisting of the elements of this stream,
|
||||
* each boxed to a {@code Long}.
|
||||
*
|
||||
* <p>This is an <a href="package-summary.html#StreamOps">intermediate
|
||||
* operation</a>.
|
||||
*
|
||||
     * @return a {@code Stream} consisting of the elements of this stream,
|
||||
* each boxed to {@code Long}
|
||||
*/
|
||||
Stream<Long> boxed();
|
||||
|
||||
@Override
|
||||
LongStream sequential();
|
||||
|
||||
@Override
|
||||
LongStream parallel();
|
||||
|
||||
@Override
|
||||
PrimitiveIterator.OfLong iterator();
|
||||
|
||||
@Override
|
||||
Spliterator.OfLong spliterator();
|
||||
|
||||
// Static factories
|
||||
|
||||
/**
|
||||
* Returns a builder for a {@code LongStream}.
|
||||
*
|
||||
* @return a stream builder
|
||||
*/
|
||||
public static Builder builder() {
|
||||
return new Streams.LongStreamBuilderImpl();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an empty sequential {@code LongStream}.
|
||||
*
|
||||
* @return an empty sequential stream
|
||||
*/
|
||||
public static LongStream empty() {
|
||||
return StreamSupport.longStream(Spliterators.emptyLongSpliterator(), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential {@code LongStream} containing a single element.
|
||||
*
|
||||
* @param t the single element
|
||||
* @return a singleton sequential stream
|
||||
*/
|
||||
public static LongStream of(long t) {
|
||||
return StreamSupport.longStream(new Streams.LongStreamBuilderImpl(t), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a sequential ordered stream whose elements are the specified values.
|
||||
*
|
||||
* @param values the elements of the new stream
|
||||
* @return the new stream
|
||||
*/
|
||||
public static LongStream of(long... values) {
|
||||
return Arrays.stream(values);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an infinite sequential ordered {@code LongStream} produced by iterative
|
||||
* application of a function {@code f} to an initial element {@code seed},
|
||||
* producing a {@code Stream} consisting of {@code seed}, {@code f(seed)},
|
||||
* {@code f(f(seed))}, etc.
|
||||
*
|
||||
* <p>The first element (position {@code 0}) in the {@code LongStream} will
|
||||
* be the provided {@code seed}. For {@code n > 0}, the element at position
|
||||
* {@code n}, will be the result of applying the function {@code f} to the
|
||||
* element at position {@code n - 1}.
|
||||
*
|
||||
* @param seed the initial element
|
||||
* @param f a function to be applied to the previous element to produce
|
||||
* a new element
|
||||
* @return a new sequential {@code LongStream}
|
||||
*/
|
||||
public static LongStream iterate(final long seed, final LongUnaryOperator f) {
|
||||
Objects.requireNonNull(f);
|
||||
final PrimitiveIterator.OfLong iterator = new PrimitiveIterator.OfLong() {
|
||||
long t = seed;
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long nextLong() {
|
||||
long v = t;
|
||||
t = f.applyAsLong(t);
|
||||
return v;
|
||||
}
|
||||
};
|
||||
return StreamSupport.longStream(Spliterators.spliteratorUnknownSize(
|
||||
iterator,
|
||||
Spliterator.ORDERED | Spliterator.IMMUTABLE | Spliterator.NONNULL), false);
|
||||
}
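
    // Illustrative sketch of iterate(): the resulting stream is infinite, so a
    // limiting operation is assumed before materializing it.
    //
    //   LongStream.iterate(1L, x -> x * 2).limit(5).toArray();   // {1, 2, 4, 8, 16}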
|
||||
|
||||
/**
|
||||
* Returns an infinite sequential unordered stream where each element is
|
||||
* generated by the provided {@code LongSupplier}. This is suitable for
|
||||
* generating constant streams, streams of random elements, etc.
|
||||
*
|
||||
* @param s the {@code LongSupplier} for generated elements
|
||||
* @return a new infinite sequential unordered {@code LongStream}
|
||||
*/
|
||||
public static LongStream generate(LongSupplier s) {
|
||||
Objects.requireNonNull(s);
|
||||
return StreamSupport.longStream(
|
||||
new StreamSpliterators.InfiniteSupplyingSpliterator.OfLong(Long.MAX_VALUE, s), false);
|
||||
}
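
    // Illustrative sketch of generate(): also infinite, so a limit is assumed.
    // (ThreadLocalRandom is from java.util.concurrent.)
    //
    //   LongStream.generate(() -> 42L).limit(3).toArray();            // {42, 42, 42}
    //   LongStream.generate(ThreadLocalRandom.current()::nextLong)
    //             .limit(3);                                          // 3 random longs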
|
||||
|
||||
/**
|
||||
* Returns a sequential ordered {@code LongStream} from {@code startInclusive}
|
||||
* (inclusive) to {@code endExclusive} (exclusive) by an incremental step of
|
||||
* {@code 1}.
|
||||
*
|
||||
* @apiNote
|
||||
* <p>An equivalent sequence of increasing values can be produced
|
||||
* sequentially using a {@code for} loop as follows:
|
||||
* <pre>{@code
|
||||
* for (long i = startInclusive; i < endExclusive ; i++) { ... }
|
||||
* }</pre>
|
||||
*
|
||||
* @param startInclusive the (inclusive) initial value
|
||||
* @param endExclusive the exclusive upper bound
|
||||
* @return a sequential {@code LongStream} for the range of {@code long}
|
||||
* elements
|
||||
*/
|
||||
public static LongStream range(long startInclusive, final long endExclusive) {
|
||||
if (startInclusive >= endExclusive) {
|
||||
return empty();
|
||||
} else if (endExclusive - startInclusive < 0) {
|
||||
// Size of range > Long.MAX_VALUE
|
||||
// Split the range in two and concatenate
|
||||
// Note: if the range is [Long.MIN_VALUE, Long.MAX_VALUE) then
|
||||
// the lower range, [Long.MIN_VALUE, 0) will be further split in two
|
||||
long m = startInclusive + Long.divideUnsigned(endExclusive - startInclusive, 2) + 1;
|
||||
return concat(range(startInclusive, m), range(m, endExclusive));
|
||||
} else {
|
||||
return StreamSupport.longStream(
|
||||
new Streams.RangeLongSpliterator(startInclusive, endExclusive, false), false);
|
||||
}
|
||||
}
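
    // Sketch of range() semantics, including the overflow case handled above
    // (illustrative values):
    //
    //   LongStream.range(1L, 5L).toArray();     // {1, 2, 3, 4}
    //   LongStream.range(5L, 5L).count();       // 0 (empty range)
    //
    //   // range(Long.MIN_VALUE, Long.MAX_VALUE) spans more than Long.MAX_VALUE
    //   // elements, so it is evaluated as two concatenated sub-ranges.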
|
||||
|
||||
/**
|
||||
* Returns a sequential ordered {@code LongStream} from {@code startInclusive}
|
||||
* (inclusive) to {@code endInclusive} (inclusive) by an incremental step of
|
||||
* {@code 1}.
|
||||
*
|
||||
* @apiNote
|
||||
* <p>An equivalent sequence of increasing values can be produced
|
||||
* sequentially using a {@code for} loop as follows:
|
||||
* <pre>{@code
|
||||
* for (long i = startInclusive; i <= endInclusive ; i++) { ... }
|
||||
* }</pre>
|
||||
*
|
||||
* @param startInclusive the (inclusive) initial value
|
||||
* @param endInclusive the inclusive upper bound
|
||||
* @return a sequential {@code LongStream} for the range of {@code long}
|
||||
* elements
|
||||
*/
|
||||
public static LongStream rangeClosed(long startInclusive, final long endInclusive) {
|
||||
if (startInclusive > endInclusive) {
|
||||
return empty();
|
||||
} else if (endInclusive - startInclusive + 1 <= 0) {
|
||||
// Size of range > Long.MAX_VALUE
|
||||
// Split the range in two and concatenate
|
||||
// Note: if the range is [Long.MIN_VALUE, Long.MAX_VALUE] then
|
||||
// the lower range, [Long.MIN_VALUE, 0), and upper range,
|
||||
// [0, Long.MAX_VALUE], will both be further split in two
|
||||
long m = startInclusive + Long.divideUnsigned(endInclusive - startInclusive, 2) + 1;
|
||||
return concat(range(startInclusive, m), rangeClosed(m, endInclusive));
|
||||
} else {
|
||||
return StreamSupport.longStream(
|
||||
new Streams.RangeLongSpliterator(startInclusive, endInclusive, true), false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a lazily concatenated stream whose elements are all the
|
||||
* elements of the first stream followed by all the elements of the
|
||||
* second stream. The resulting stream is ordered if both
|
||||
* of the input streams are ordered, and parallel if either of the input
|
||||
* streams is parallel. When the resulting stream is closed, the close
|
||||
* handlers for both input streams are invoked.
|
||||
*
|
||||
* @implNote
|
||||
* Use caution when constructing streams from repeated concatenation.
|
||||
* Accessing an element of a deeply concatenated stream can result in deep
|
||||
     * call chains, or even {@code StackOverflowError}.
|
||||
*
|
||||
* @param a the first stream
|
||||
* @param b the second stream
|
||||
* @return the concatenation of the two input streams
|
||||
*/
|
||||
public static LongStream concat(LongStream a, LongStream b) {
|
||||
Objects.requireNonNull(a);
|
||||
Objects.requireNonNull(b);
|
||||
|
||||
Spliterator.OfLong split = new Streams.ConcatSpliterator.OfLong(
|
||||
a.spliterator(), b.spliterator());
|
||||
LongStream stream = StreamSupport.longStream(split, a.isParallel() || b.isParallel());
|
||||
return stream.onClose(Streams.composedClose(a, b));
|
||||
}
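
    // Sketch of concat(): ordering and parallelism are inherited from the inputs
    // as described above (illustrative values):
    //
    //   LongStream.concat(LongStream.of(1L, 2L), LongStream.of(3L)).toArray();
    //   // -> {1, 2, 3}; the result is parallel if either input is parallel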
|
||||
|
||||
/**
|
||||
* A mutable builder for a {@code LongStream}.
|
||||
*
|
||||
* <p>A stream builder has a lifecycle, which starts in a building
|
||||
* phase, during which elements can be added, and then transitions to a built
|
||||
* phase, after which elements may not be added. The built phase begins
|
||||
     * when the {@link #build()} method is called, which creates an
|
||||
* ordered stream whose elements are the elements that were added to the
|
||||
* stream builder, in the order they were added.
|
||||
*
|
||||
* @see LongStream#builder()
|
||||
* @since 1.8
|
||||
*/
|
||||
public interface Builder extends LongConsumer {
|
||||
|
||||
/**
|
||||
* Adds an element to the stream being built.
|
||||
*
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
@Override
|
||||
void accept(long t);
|
||||
|
||||
/**
|
||||
* Adds an element to the stream being built.
|
||||
*
|
||||
* @implSpec
|
||||
* The default implementation behaves as if:
|
||||
* <pre>{@code
|
||||
     * accept(t);
|
||||
* return this;
|
||||
* }</pre>
|
||||
*
|
||||
* @param t the element to add
|
||||
* @return {@code this} builder
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
default Builder add(long t) {
|
||||
accept(t);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Builds the stream, transitioning this builder to the built state.
|
||||
* An {@code IllegalStateException} is thrown if there are further
|
||||
* attempts to operate on the builder after it has entered the built
|
||||
* state.
|
||||
*
|
||||
* @return the built stream
|
||||
* @throws IllegalStateException if the builder has already transitioned
|
||||
* to the built state
|
||||
*/
|
||||
LongStream build();
|
||||
}
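
    // Sketch of the builder lifecycle described above (illustrative):
    //
    //   LongStream s = LongStream.builder().add(1L).add(2L).add(3L).build();
    //   // further add()/accept() calls on that builder now throw IllegalStateException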
|
||||
}
|
||||
318
jdkSrc/jdk8/java/util/stream/MatchOps.java
Normal file
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.DoublePredicate;
|
||||
import java.util.function.IntPredicate;
|
||||
import java.util.function.LongPredicate;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Factory for instances of a short-circuiting {@code TerminalOp} that implement
|
||||
* quantified predicate matching on the elements of a stream. Supported variants
|
||||
* include match-all, match-any, and match-none.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class MatchOps {
|
||||
|
||||
private MatchOps() { }
|
||||
|
||||
/**
|
||||
* Enum describing quantified match options -- all match, any match, none
|
||||
* match.
|
||||
*/
|
||||
enum MatchKind {
|
||||
        /** Do any elements match the predicate? */
|
||||
ANY(true, true),
|
||||
|
||||
        /** Do all elements match the predicate? */
|
||||
ALL(false, false),
|
||||
|
||||
/** Do no elements match the predicate? */
|
||||
NONE(true, false);
|
||||
|
||||
private final boolean stopOnPredicateMatches;
|
||||
private final boolean shortCircuitResult;
|
||||
|
||||
private MatchKind(boolean stopOnPredicateMatches,
|
||||
boolean shortCircuitResult) {
|
||||
this.stopOnPredicateMatches = stopOnPredicateMatches;
|
||||
this.shortCircuitResult = shortCircuitResult;
|
||||
}
|
||||
}
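
    // How the two flags encode the three match kinds (a summary of the logic in
    // the sinks below):
    //
    //   anyMatch  : stop when the predicate matches,        short-circuit result true
    //   allMatch  : stop when the predicate does not match, short-circuit result false
    //   noneMatch : stop when the predicate matches,        short-circuit result false
    //
    // If no element triggers a stop, the final answer is !shortCircuitResult.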
|
||||
|
||||
/**
|
||||
* Constructs a quantified predicate matcher for a Stream.
|
||||
*
|
||||
* @param <T> the type of stream elements
|
||||
* @param predicate the {@code Predicate} to apply to stream elements
|
||||
* @param matchKind the kind of quantified match (all, any, none)
|
||||
* @return a {@code TerminalOp} implementing the desired quantified match
|
||||
* criteria
|
||||
*/
|
||||
public static <T> TerminalOp<T, Boolean> makeRef(Predicate<? super T> predicate,
|
||||
MatchKind matchKind) {
|
||||
Objects.requireNonNull(predicate);
|
||||
Objects.requireNonNull(matchKind);
|
||||
class MatchSink extends BooleanTerminalSink<T> {
|
||||
MatchSink() {
|
||||
super(matchKind);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
|
||||
stop = true;
|
||||
value = matchKind.shortCircuitResult;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new MatchOp<>(StreamShape.REFERENCE, matchKind, MatchSink::new);
|
||||
}
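
    // Sketch of how such an op is obtained; this is the form a Stream's terminal
    // anyMatch evaluation uses (illustrative lambda):
    //
    //   TerminalOp<String, Boolean> op =
    //       MatchOps.makeRef(s -> s.isEmpty(), MatchKind.ANY);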
|
||||
|
||||
/**
|
||||
* Constructs a quantified predicate matcher for an {@code IntStream}.
|
||||
*
|
||||
* @param predicate the {@code Predicate} to apply to stream elements
|
||||
* @param matchKind the kind of quantified match (all, any, none)
|
||||
* @return a {@code TerminalOp} implementing the desired quantified match
|
||||
* criteria
|
||||
*/
|
||||
public static TerminalOp<Integer, Boolean> makeInt(IntPredicate predicate,
|
||||
MatchKind matchKind) {
|
||||
Objects.requireNonNull(predicate);
|
||||
Objects.requireNonNull(matchKind);
|
||||
class MatchSink extends BooleanTerminalSink<Integer> implements Sink.OfInt {
|
||||
MatchSink() {
|
||||
super(matchKind);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
|
||||
stop = true;
|
||||
value = matchKind.shortCircuitResult;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new MatchOp<>(StreamShape.INT_VALUE, matchKind, MatchSink::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a quantified predicate matcher for a {@code LongStream}.
|
||||
*
|
||||
* @param predicate the {@code Predicate} to apply to stream elements
|
||||
* @param matchKind the kind of quantified match (all, any, none)
|
||||
* @return a {@code TerminalOp} implementing the desired quantified match
|
||||
* criteria
|
||||
*/
|
||||
public static TerminalOp<Long, Boolean> makeLong(LongPredicate predicate,
|
||||
MatchKind matchKind) {
|
||||
Objects.requireNonNull(predicate);
|
||||
Objects.requireNonNull(matchKind);
|
||||
class MatchSink extends BooleanTerminalSink<Long> implements Sink.OfLong {
|
||||
|
||||
MatchSink() {
|
||||
super(matchKind);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
|
||||
stop = true;
|
||||
value = matchKind.shortCircuitResult;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new MatchOp<>(StreamShape.LONG_VALUE, matchKind, MatchSink::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a quantified predicate matcher for a {@code DoubleStream}.
|
||||
*
|
||||
* @param predicate the {@code Predicate} to apply to stream elements
|
||||
* @param matchKind the kind of quantified match (all, any, none)
|
||||
* @return a {@code TerminalOp} implementing the desired quantified match
|
||||
* criteria
|
||||
*/
|
||||
public static TerminalOp<Double, Boolean> makeDouble(DoublePredicate predicate,
|
||||
MatchKind matchKind) {
|
||||
Objects.requireNonNull(predicate);
|
||||
Objects.requireNonNull(matchKind);
|
||||
class MatchSink extends BooleanTerminalSink<Double> implements Sink.OfDouble {
|
||||
|
||||
MatchSink() {
|
||||
super(matchKind);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
if (!stop && predicate.test(t) == matchKind.stopOnPredicateMatches) {
|
||||
stop = true;
|
||||
value = matchKind.shortCircuitResult;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new MatchOp<>(StreamShape.DOUBLE_VALUE, matchKind, MatchSink::new);
|
||||
}
|
||||
|
||||
/**
|
||||
* A short-circuiting {@code TerminalOp} that evaluates a predicate on the
|
||||
* elements of a stream and determines whether all, any or none of those
|
||||
* elements match the predicate.
|
||||
*
|
||||
* @param <T> the output type of the stream pipeline
|
||||
*/
|
||||
private static final class MatchOp<T> implements TerminalOp<T, Boolean> {
|
||||
private final StreamShape inputShape;
|
||||
final MatchKind matchKind;
|
||||
final Supplier<BooleanTerminalSink<T>> sinkSupplier;
|
||||
|
||||
/**
|
||||
* Constructs a {@code MatchOp}.
|
||||
*
|
||||
* @param shape the output shape of the stream pipeline
|
||||
* @param matchKind the kind of quantified match (all, any, none)
|
||||
* @param sinkSupplier {@code Supplier} for a {@code Sink} of the
|
||||
* appropriate shape which implements the matching operation
|
||||
*/
|
||||
MatchOp(StreamShape shape,
|
||||
MatchKind matchKind,
|
||||
Supplier<BooleanTerminalSink<T>> sinkSupplier) {
|
||||
this.inputShape = shape;
|
||||
this.matchKind = matchKind;
|
||||
this.sinkSupplier = sinkSupplier;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getOpFlags() {
|
||||
return StreamOpFlag.IS_SHORT_CIRCUIT | StreamOpFlag.NOT_ORDERED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public StreamShape inputShape() {
|
||||
return inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <S> Boolean evaluateSequential(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator) {
|
||||
return helper.wrapAndCopyInto(sinkSupplier.get(), spliterator).getAndClearState();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <S> Boolean evaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<S> spliterator) {
|
||||
// Approach for parallel implementation:
|
||||
// - Decompose as per usual
|
||||
// - run match on leaf chunks, call result "b"
|
||||
            // - if b == matchKind.shortCircuitResult, complete early and return b
            // - else if we complete normally, return !shortCircuitResult
|
||||
|
||||
return new MatchTask<>(this, helper, spliterator).invoke();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Boolean specific terminal sink to avoid the boxing costs when returning
|
||||
* results. Subclasses implement the shape-specific functionality.
|
||||
*
|
||||
* @param <T> The output type of the stream pipeline
|
||||
*/
|
||||
private static abstract class BooleanTerminalSink<T> implements Sink<T> {
|
||||
boolean stop;
|
||||
boolean value;
|
||||
|
||||
BooleanTerminalSink(MatchKind matchKind) {
|
||||
value = !matchKind.shortCircuitResult;
|
||||
}
|
||||
|
||||
public boolean getAndClearState() {
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return stop;
|
||||
}
|
||||
}
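
    // Note on the state encoding above: value starts as !shortCircuitResult and
    // is only flipped to shortCircuitResult when an element triggers a stop, so
    // getAndClearState() yields the correct answer whether or not the sink
    // short-circuited.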
|
||||
|
||||
/**
|
||||
     * ForkJoinTask implementation that performs a parallel short-circuiting
|
||||
* quantified match
|
||||
*
|
||||
* @param <P_IN> the type of source elements for the pipeline
|
||||
* @param <P_OUT> the type of output elements for the pipeline
|
||||
*/
|
||||
@SuppressWarnings("serial")
|
||||
private static final class MatchTask<P_IN, P_OUT>
|
||||
extends AbstractShortCircuitTask<P_IN, P_OUT, Boolean, MatchTask<P_IN, P_OUT>> {
|
||||
private final MatchOp<P_OUT> op;
|
||||
|
||||
/**
|
||||
* Constructor for root node
|
||||
*/
|
||||
MatchTask(MatchOp<P_OUT> op, PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(helper, spliterator);
|
||||
this.op = op;
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for non-root node
|
||||
*/
|
||||
MatchTask(MatchTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
|
||||
super(parent, spliterator);
|
||||
this.op = parent.op;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected MatchTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
|
||||
return new MatchTask<>(this, spliterator);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Boolean doLeaf() {
|
||||
boolean b = helper.wrapAndCopyInto(op.sinkSupplier.get(), spliterator).getAndClearState();
|
||||
if (b == op.matchKind.shortCircuitResult)
|
||||
shortCircuit(b);
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Boolean getEmptyResult() {
|
||||
return !op.matchKind.shortCircuitResult;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
531
jdkSrc/jdk8/java/util/stream/Node.java
Normal file
@@ -0,0 +1,531 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.LongConsumer;
|
||||
|
||||
/**
|
||||
* An immutable container for describing an ordered sequence of elements of some
|
||||
* type {@code T}.
|
||||
*
|
||||
* <p>A {@code Node} contains a fixed number of elements, which can be accessed
|
||||
* via the {@link #count}, {@link #spliterator}, {@link #forEach},
|
||||
* {@link #asArray}, or {@link #copyInto} methods. A {@code Node} may have zero
|
||||
* or more child {@code Node}s; if it has no children (accessed via
|
||||
 * {@link #getChildCount} and {@link #getChild(int)}), it is considered <em>flat
|
||||
* </em> or a <em>leaf</em>; if it has children, it is considered an
|
||||
* <em>internal</em> node. The size of an internal node is the sum of sizes of
|
||||
* its children.
|
||||
*
|
||||
* @apiNote
|
||||
* <p>A {@code Node} typically does not store the elements directly, but instead
|
||||
* mediates access to one or more existing (effectively immutable) data
|
||||
* structures such as a {@code Collection}, array, or a set of other
|
||||
* {@code Node}s. Commonly {@code Node}s are formed into a tree whose shape
|
||||
* corresponds to the computation tree that produced the elements that are
|
||||
* contained in the leaf nodes. The use of {@code Node} within the stream
|
||||
* framework is largely to avoid copying data unnecessarily during parallel
|
||||
* operations.
|
||||
*
|
||||
* @param <T> the type of elements.
|
||||
* @since 1.8
|
||||
*/
|
||||
interface Node<T> {
|
||||
|
||||
/**
|
||||
* Returns a {@link Spliterator} describing the elements contained in this
|
||||
* {@code Node}.
|
||||
*
|
||||
* @return a {@code Spliterator} describing the elements contained in this
|
||||
* {@code Node}
|
||||
*/
|
||||
Spliterator<T> spliterator();
|
||||
|
||||
/**
|
||||
     * Traverses the elements of this node, and invokes the provided
|
||||
* {@code Consumer} with each element. Elements are provided in encounter
|
||||
* order if the source for the {@code Node} has a defined encounter order.
|
||||
*
|
||||
* @param consumer a {@code Consumer} that is to be invoked with each
|
||||
* element in this {@code Node}
|
||||
*/
|
||||
void forEach(Consumer<? super T> consumer);
|
||||
|
||||
/**
|
||||
* Returns the number of child nodes of this node.
|
||||
*
|
||||
* @implSpec The default implementation returns zero.
|
||||
*
|
||||
* @return the number of child nodes
|
||||
*/
|
||||
default int getChildCount() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the child {@code Node} at a given index.
|
||||
*
|
||||
* @implSpec The default implementation always throws
|
||||
* {@code IndexOutOfBoundsException}.
|
||||
*
|
||||
* @param i the index to the child node
|
||||
* @return the child node
|
||||
* @throws IndexOutOfBoundsException if the index is less than 0 or greater
|
||||
* than or equal to the number of child nodes
|
||||
*/
|
||||
default Node<T> getChild(int i) {
|
||||
throw new IndexOutOfBoundsException();
|
||||
}
|
||||
|
||||
/**
|
||||
     * Returns a node describing a subsequence of the elements of this node,
|
||||
* starting at the given inclusive start offset and ending at the given
|
||||
* exclusive end offset.
|
||||
*
|
||||
* @param from The (inclusive) starting offset of elements to include, must
|
||||
* be in range 0..count().
|
||||
* @param to The (exclusive) end offset of elements to include, must be
|
||||
* in range 0..count().
|
||||
* @param generator A function to be used to create a new array, if needed,
|
||||
* for reference nodes.
|
||||
* @return the truncated node
|
||||
*/
|
||||
default Node<T> truncate(long from, long to, IntFunction<T[]> generator) {
|
||||
if (from == 0 && to == count())
|
||||
return this;
|
||||
Spliterator<T> spliterator = spliterator();
|
||||
long size = to - from;
|
||||
Node.Builder<T> nodeBuilder = Nodes.builder(size, generator);
|
||||
nodeBuilder.begin(size);
|
||||
for (int i = 0; i < from && spliterator.tryAdvance(e -> { }); i++) { }
|
||||
for (int i = 0; (i < size) && spliterator.tryAdvance(nodeBuilder); i++) { }
|
||||
nodeBuilder.end();
|
||||
return nodeBuilder.build();
|
||||
}
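
    // For example (illustrative): truncate(2, 5, generator) on a node holding
    // ten elements advances past the first two elements, copies the next three
    // into a fresh builder, and returns the resulting flat three-element node.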
|
||||
|
||||
/**
|
||||
* Provides an array view of the contents of this node.
|
||||
*
|
||||
* <p>Depending on the underlying implementation, this may return a
|
||||
* reference to an internal array rather than a copy. Since the returned
|
||||
* array may be shared, the returned array should not be modified. The
|
||||
* {@code generator} function may be consulted to create the array if a new
|
||||
* array needs to be created.
|
||||
*
|
||||
* @param generator a factory function which takes an integer parameter and
|
||||
* returns a new, empty array of that size and of the appropriate
|
||||
* array type
|
||||
* @return an array containing the contents of this {@code Node}
|
||||
*/
|
||||
T[] asArray(IntFunction<T[]> generator);
|
||||
|
||||
/**
|
||||
* Copies the content of this {@code Node} into an array, starting at a
|
||||
* given offset into the array. It is the caller's responsibility to ensure
|
||||
     * there is sufficient room in the array; unspecified behaviour will occur
     * if the array length is less than the number of elements contained in
     * this node.
|
||||
*
|
||||
* @param array the array into which to copy the contents of this
|
||||
* {@code Node}
|
||||
* @param offset the starting offset within the array
|
||||
* @throws IndexOutOfBoundsException if copying would cause access of data
|
||||
* outside array bounds
|
||||
* @throws NullPointerException if {@code array} is {@code null}
|
||||
*/
|
||||
void copyInto(T[] array, int offset);
|
||||
|
||||
/**
|
||||
* Gets the {@code StreamShape} associated with this {@code Node}.
|
||||
*
|
||||
* @implSpec The default in {@code Node} returns
|
||||
* {@code StreamShape.REFERENCE}
|
||||
*
|
||||
* @return the stream shape associated with this node
|
||||
*/
|
||||
default StreamShape getShape() {
|
||||
return StreamShape.REFERENCE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the number of elements contained in this node.
|
||||
*
|
||||
* @return the number of elements contained in this node
|
||||
*/
|
||||
long count();
|
||||
|
||||
/**
|
||||
* A mutable builder for a {@code Node} that implements {@link Sink}, which
|
||||
* builds a flat node containing the elements that have been pushed to it.
|
||||
*/
|
||||
interface Builder<T> extends Sink<T> {
|
||||
|
||||
/**
|
||||
* Builds the node. Should be called after all elements have been
|
||||
* pushed and signalled with an invocation of {@link Sink#end()}.
|
||||
*
|
||||
* @return the resulting {@code Node}
|
||||
*/
|
||||
Node<T> build();
|
||||
|
||||
/**
|
||||
         * Specialized {@code Node.Builder} for int elements
|
||||
*/
|
||||
interface OfInt extends Node.Builder<Integer>, Sink.OfInt {
|
||||
@Override
|
||||
Node.OfInt build();
|
||||
}
|
||||
|
||||
/**
|
||||
         * Specialized {@code Node.Builder} for long elements
|
||||
*/
|
||||
interface OfLong extends Node.Builder<Long>, Sink.OfLong {
|
||||
@Override
|
||||
Node.OfLong build();
|
||||
}
|
||||
|
||||
/**
|
||||
         * Specialized {@code Node.Builder} for double elements
|
||||
*/
|
||||
interface OfDouble extends Node.Builder<Double>, Sink.OfDouble {
|
||||
@Override
|
||||
Node.OfDouble build();
|
||||
}
|
||||
}
|
||||
|
||||
public interface OfPrimitive<T, T_CONS, T_ARR,
|
||||
T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>,
|
||||
T_NODE extends OfPrimitive<T, T_CONS, T_ARR, T_SPLITR, T_NODE>>
|
||||
extends Node<T> {
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @return a {@link Spliterator.OfPrimitive} describing the elements of
|
||||
* this node
|
||||
*/
|
||||
@Override
|
||||
T_SPLITR spliterator();
|
||||
|
||||
/**
|
||||
         * Traverses the elements of this node, and invokes the provided
|
||||
* {@code action} with each element.
|
||||
*
|
||||
* @param action a consumer that is to be invoked with each
|
||||
* element in this {@code Node.OfPrimitive}
|
||||
*/
|
||||
@SuppressWarnings("overloads")
|
||||
void forEach(T_CONS action);
|
||||
|
||||
@Override
|
||||
default T_NODE getChild(int i) {
|
||||
throw new IndexOutOfBoundsException();
|
||||
}
|
||||
|
||||
T_NODE truncate(long from, long to, IntFunction<T[]> generator);
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implSpec the default implementation invokes the generator to create
|
||||
* an instance of a boxed primitive array with a length of
|
||||
* {@link #count()} and then invokes {@link #copyInto(T[], int)} with
|
||||
* that array at an offset of 0.
|
||||
*/
|
||||
@Override
|
||||
default T[] asArray(IntFunction<T[]> generator) {
|
||||
if (java.util.stream.Tripwire.ENABLED)
|
||||
java.util.stream.Tripwire.trip(getClass(), "{0} calling Node.OfPrimitive.asArray");
|
||||
|
||||
long size = count();
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
T[] boxed = generator.apply((int) count());
|
||||
copyInto(boxed, 0);
|
||||
return boxed;
|
||||
}
|
||||
|
||||
/**
|
||||
* Views this node as a primitive array.
|
||||
*
|
||||
* <p>Depending on the underlying implementation this may return a
|
||||
         * reference to an internal array rather than a copy. It is the caller's
|
||||
* responsibility to decide if either this node or the array is utilized
|
||||
* as the primary reference for the data.</p>
|
||||
*
|
||||
* @return an array containing the contents of this {@code Node}
|
||||
*/
|
||||
T_ARR asPrimitiveArray();
|
||||
|
||||
/**
|
||||
* Creates a new primitive array.
|
||||
*
|
||||
* @param count the length of the primitive array.
|
||||
* @return the new primitive array.
|
||||
*/
|
||||
T_ARR newArray(int count);
|
||||
|
||||
/**
|
||||
* Copies the content of this {@code Node} into a primitive array,
|
||||
* starting at a given offset into the array. It is the caller's
|
||||
* responsibility to ensure there is sufficient room in the array.
|
||||
*
|
||||
* @param array the array into which to copy the contents of this
|
||||
* {@code Node}
|
||||
* @param offset the starting offset within the array
|
||||
* @throws IndexOutOfBoundsException if copying would cause access of
|
||||
* data outside array bounds
|
||||
* @throws NullPointerException if {@code array} is {@code null}
|
||||
*/
|
||||
void copyInto(T_ARR array, int offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized {@code Node} for int elements
|
||||
*/
|
||||
interface OfInt extends OfPrimitive<Integer, IntConsumer, int[], Spliterator.OfInt, OfInt> {
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @param consumer a {@code Consumer} that is to be invoked with each
|
||||
* element in this {@code Node}. If this is an
|
||||
* {@code IntConsumer}, it is cast to {@code IntConsumer} so the
|
||||
* elements may be processed without boxing.
|
||||
*/
|
||||
@Override
|
||||
default void forEach(Consumer<? super Integer> consumer) {
|
||||
if (consumer instanceof IntConsumer) {
|
||||
forEach((IntConsumer) consumer);
|
||||
}
|
||||
else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Node.OfInt.forEachRemaining(Consumer)");
|
||||
spliterator().forEachRemaining(consumer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implSpec the default implementation invokes {@link #asPrimitiveArray()} to
|
||||
         * obtain an int[] array and then copies the elements from that int[]
|
||||
* array into the boxed Integer[] array. This is not efficient and it
|
||||
* is recommended to invoke {@link #copyInto(Object, int)}.
|
||||
*/
|
||||
@Override
|
||||
default void copyInto(Integer[] boxed, int offset) {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Node.OfInt.copyInto(Integer[], int)");
|
||||
|
||||
int[] array = asPrimitiveArray();
|
||||
for (int i = 0; i < array.length; i++) {
|
||||
boxed[offset + i] = array[i];
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
default Node.OfInt truncate(long from, long to, IntFunction<Integer[]> generator) {
|
||||
if (from == 0 && to == count())
|
||||
return this;
|
||||
long size = to - from;
|
||||
Spliterator.OfInt spliterator = spliterator();
|
||||
Node.Builder.OfInt nodeBuilder = Nodes.intBuilder(size);
|
||||
nodeBuilder.begin(size);
|
||||
for (int i = 0; i < from && spliterator.tryAdvance((IntConsumer) e -> { }); i++) { }
|
||||
for (int i = 0; (i < size) && spliterator.tryAdvance((IntConsumer) nodeBuilder); i++) { }
|
||||
nodeBuilder.end();
|
||||
return nodeBuilder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
default int[] newArray(int count) {
|
||||
return new int[count];
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* @implSpec The default in {@code Node.OfInt} returns
|
||||
* {@code StreamShape.INT_VALUE}
|
||||
*/
|
||||
default StreamShape getShape() {
|
||||
return StreamShape.INT_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized {@code Node} for long elements
|
||||
*/
|
||||
interface OfLong extends OfPrimitive<Long, LongConsumer, long[], Spliterator.OfLong, OfLong> {
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @param consumer A {@code Consumer} that is to be invoked with each
|
||||
* element in this {@code Node}. If this is an
|
||||
* {@code LongConsumer}, it is cast to {@code LongConsumer} so
|
||||
* the elements may be processed without boxing.
|
||||
*/
|
||||
@Override
|
||||
default void forEach(Consumer<? super Long> consumer) {
|
||||
if (consumer instanceof LongConsumer) {
|
||||
forEach((LongConsumer) consumer);
|
||||
}
|
||||
else {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Node.OfLong.forEachRemaining(Consumer)");
|
||||
spliterator().forEachRemaining(consumer);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implSpec the default implementation invokes {@link #asPrimitiveArray()}
|
||||
         * to obtain a long[] array and then copies the elements from that
|
||||
* long[] array into the boxed Long[] array. This is not efficient and
|
||||
* it is recommended to invoke {@link #copyInto(Object, int)}.
|
||||
*/
|
||||
@Override
|
||||
default void copyInto(Long[] boxed, int offset) {
|
||||
if (Tripwire.ENABLED)
|
||||
                Tripwire.trip(getClass(), "{0} calling Node.OfLong.copyInto(Long[], int)");
|
||||
|
||||
long[] array = asPrimitiveArray();
|
||||
for (int i = 0; i < array.length; i++) {
|
||||
boxed[offset + i] = array[i];
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
default Node.OfLong truncate(long from, long to, IntFunction<Long[]> generator) {
|
||||
if (from == 0 && to == count())
|
||||
return this;
|
||||
long size = to - from;
|
||||
Spliterator.OfLong spliterator = spliterator();
|
||||
Node.Builder.OfLong nodeBuilder = Nodes.longBuilder(size);
|
||||
nodeBuilder.begin(size);
|
||||
for (int i = 0; i < from && spliterator.tryAdvance((LongConsumer) e -> { }); i++) { }
|
||||
for (int i = 0; (i < size) && spliterator.tryAdvance((LongConsumer) nodeBuilder); i++) { }
|
||||
nodeBuilder.end();
|
||||
return nodeBuilder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
default long[] newArray(int count) {
|
||||
return new long[count];
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
* @implSpec The default in {@code Node.OfLong} returns
|
||||
* {@code StreamShape.LONG_VALUE}
|
||||
*/
|
||||
default StreamShape getShape() {
|
||||
return StreamShape.LONG_VALUE;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized {@code Node} for double elements
|
||||
*/
|
||||
interface OfDouble extends OfPrimitive<Double, DoubleConsumer, double[], Spliterator.OfDouble, OfDouble> {
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @param consumer A {@code Consumer} that is to be invoked with each
|
||||
* element in this {@code Node}. If this is an
|
||||
* {@code DoubleConsumer}, it is cast to {@code DoubleConsumer}
|
||||
* so the elements may be processed without boxing.
|
||||
*/
|
||||
@Override
|
||||
default void forEach(Consumer<? super Double> consumer) {
|
||||
if (consumer instanceof DoubleConsumer) {
|
||||
forEach((DoubleConsumer) consumer);
|
||||
}
|
||||
else {
|
||||
if (Tripwire.ENABLED)
|
||||
                    Tripwire.trip(getClass(), "{0} calling Node.OfDouble.forEachRemaining(Consumer)");
|
||||
spliterator().forEachRemaining(consumer);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implSpec the default implementation invokes {@link #asPrimitiveArray()}
|
||||
         * to obtain a double[] array and then copies the elements from that
|
||||
* double[] array into the boxed Double[] array. This is not efficient
|
||||
* and it is recommended to invoke {@link #copyInto(Object, int)}.
|
||||
*/
|
||||
@Override
|
||||
default void copyInto(Double[] boxed, int offset) {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Node.OfDouble.copyInto(Double[], int)");
|
||||
|
||||
double[] array = asPrimitiveArray();
|
||||
for (int i = 0; i < array.length; i++) {
|
||||
boxed[offset + i] = array[i];
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
default Node.OfDouble truncate(long from, long to, IntFunction<Double[]> generator) {
|
||||
if (from == 0 && to == count())
|
||||
return this;
|
||||
long size = to - from;
|
||||
Spliterator.OfDouble spliterator = spliterator();
|
||||
Node.Builder.OfDouble nodeBuilder = Nodes.doubleBuilder(size);
|
||||
nodeBuilder.begin(size);
|
||||
for (int i = 0; i < from && spliterator.tryAdvance((DoubleConsumer) e -> { }); i++) { }
|
||||
for (int i = 0; (i < size) && spliterator.tryAdvance((DoubleConsumer) nodeBuilder); i++) { }
|
||||
nodeBuilder.end();
|
||||
return nodeBuilder.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
default double[] newArray(int count) {
|
||||
return new double[count];
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*
|
||||
* @implSpec The default in {@code Node.OfDouble} returns
|
||||
* {@code StreamShape.DOUBLE_VALUE}
|
||||
*/
|
||||
default StreamShape getShape() {
|
||||
return StreamShape.DOUBLE_VALUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
2227
jdkSrc/jdk8/java/util/stream/Nodes.java
Normal file
File diff suppressed because it is too large
203
jdkSrc/jdk8/java/util/stream/PipelineHelper.java
Normal file
@@ -0,0 +1,203 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.IntFunction;
|
||||
|
||||
/**
|
||||
* Helper class for executing <a href="package-summary.html#StreamOps">
|
||||
* stream pipelines</a>, capturing all of the information about a stream
|
||||
* pipeline (output shape, intermediate operations, stream flags, parallelism,
|
||||
* etc) in one place.
|
||||
*
|
||||
* <p>
|
||||
* A {@code PipelineHelper} describes the initial segment of a stream pipeline,
|
||||
 * including its source and intermediate operations, and may additionally
|
||||
* incorporate information about the terminal (or stateful) operation which
|
||||
* follows the last intermediate operation described by this
|
||||
* {@code PipelineHelper}. The {@code PipelineHelper} is passed to the
|
||||
* {@link TerminalOp#evaluateParallel(PipelineHelper, java.util.Spliterator)},
|
||||
* {@link TerminalOp#evaluateSequential(PipelineHelper, java.util.Spliterator)},
|
||||
* and {@link AbstractPipeline#opEvaluateParallel(PipelineHelper, java.util.Spliterator,
|
||||
 * java.util.function.IntFunction)} methods, which can use the
|
||||
* {@code PipelineHelper} to access information about the pipeline such as
|
||||
* head shape, stream flags, and size, and use the helper methods
|
||||
* such as {@link #wrapAndCopyInto(Sink, Spliterator)},
|
||||
* {@link #copyInto(Sink, Spliterator)}, and {@link #wrapSink(Sink)} to execute
|
||||
* pipeline operations.
|
||||
*
|
||||
* @param <P_OUT> type of output elements from the pipeline
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract class PipelineHelper<P_OUT> {
|
||||
|
||||
/**
|
||||
* Gets the stream shape for the source of the pipeline segment.
|
||||
*
|
||||
* @return the stream shape for the source of the pipeline segment.
|
||||
*/
|
||||
abstract StreamShape getSourceShape();
|
||||
|
||||
/**
|
||||
* Gets the combined stream and operation flags for the output of the described
|
||||
* pipeline. This will incorporate stream flags from the stream source, all
|
||||
* the intermediate operations and the terminal operation.
|
||||
*
|
||||
* @return the combined stream and operation flags
|
||||
* @see StreamOpFlag
|
||||
*/
|
||||
abstract int getStreamAndOpFlags();
|
||||
|
||||
/**
|
||||
* Returns the exact output size of the portion of the output resulting from
|
||||
* applying the pipeline stages described by this {@code PipelineHelper} to
|
||||
* the portion of the input described by the provided
|
||||
* {@code Spliterator}, if known. If not known or known infinite, will
|
||||
* return {@code -1}.
|
||||
*
|
||||
* @apiNote
|
||||
* The exact output size is known if the {@code Spliterator} has the
|
||||
     * {@code SIZED} characteristic, and the operation flag
     * {@link StreamOpFlag#SIZED} is set on the combined stream and operation
     * flags.
|
||||
*
|
||||
* @param spliterator the spliterator describing the relevant portion of the
|
||||
* source data
|
||||
* @return the exact size if known, or -1 if infinite or unknown
|
||||
*/
|
||||
abstract<P_IN> long exactOutputSizeIfKnown(Spliterator<P_IN> spliterator);
|
||||
|
||||
/**
|
||||
* Applies the pipeline stages described by this {@code PipelineHelper} to
|
||||
* the provided {@code Spliterator} and send the results to the provided
|
||||
* {@code Sink}.
|
||||
*
|
||||
* @implSpec
|
||||
* The implementation behaves as if:
|
||||
* <pre>{@code
|
||||
     *     copyInto(wrapSink(sink), spliterator);
|
||||
* }</pre>
|
||||
*
|
||||
* @param sink the {@code Sink} to receive the results
|
||||
* @param spliterator the spliterator describing the source input to process
|
||||
*/
|
||||
abstract<P_IN, S extends Sink<P_OUT>> S wrapAndCopyInto(S sink, Spliterator<P_IN> spliterator);
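
    // Typical terminal-op usage (sketch, assuming a sink type that exposes its
    // result via get(), as the ReduceOps sinks later in this commit do):
    //
    //   ReducingSink sink = helper.wrapAndCopyInto(makeSink(), spliterator);
    //   return sink.get();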
|
||||
|
||||
/**
|
||||
* Pushes elements obtained from the {@code Spliterator} into the provided
|
||||
* {@code Sink}. If the stream pipeline is known to have short-circuiting
|
||||
* stages in it (see {@link StreamOpFlag#SHORT_CIRCUIT}), the
|
||||
* {@link Sink#cancellationRequested()} is checked after each
|
||||
* element, stopping if cancellation is requested.
|
||||
*
|
||||
* @implSpec
|
||||
* This method conforms to the {@code Sink} protocol of calling
|
||||
* {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and
|
||||
* calling {@code Sink.end} after all elements have been pushed.
|
||||
*
|
||||
* @param wrappedSink the destination {@code Sink}
|
||||
* @param spliterator the source {@code Spliterator}
|
||||
*/
|
||||
abstract<P_IN> void copyInto(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator);
|
||||
|
||||
/**
|
||||
* Pushes elements obtained from the {@code Spliterator} into the provided
|
||||
* {@code Sink}, checking {@link Sink#cancellationRequested()} after each
|
||||
* element, and stopping if cancellation is requested.
|
||||
*
|
||||
* @implSpec
|
||||
* This method conforms to the {@code Sink} protocol of calling
|
||||
* {@code Sink.begin} before pushing elements, via {@code Sink.accept}, and
|
||||
* calling {@code Sink.end} after all elements have been pushed or if
|
||||
* cancellation is requested.
|
||||
*
|
||||
* @param wrappedSink the destination {@code Sink}
|
||||
* @param spliterator the source {@code Spliterator}
|
||||
*/
|
||||
abstract <P_IN> void copyIntoWithCancel(Sink<P_IN> wrappedSink, Spliterator<P_IN> spliterator);
|
||||
|
||||
/**
|
||||
* Takes a {@code Sink} that accepts elements of the output type of the
|
||||
* {@code PipelineHelper}, and wrap it with a {@code Sink} that accepts
|
||||
* elements of the input type and implements all the intermediate operations
|
||||
* described by this {@code PipelineHelper}, delivering the result into the
|
||||
* provided {@code Sink}.
|
||||
*
|
||||
* @param sink the {@code Sink} to receive the results
|
||||
* @return a {@code Sink} that implements the pipeline stages and sends
|
||||
* results to the provided {@code Sink}
|
||||
*/
|
||||
abstract<P_IN> Sink<P_IN> wrapSink(Sink<P_OUT> sink);
|
||||
|
||||
/**
|
||||
     * Wraps the provided {@code Spliterator} so that traversal of the returned
     * {@code Spliterator} applies the pipeline stages described by this
     * {@code PipelineHelper} to the elements of the source.
     *
     * @param spliterator the source {@code Spliterator}
     * @param <P_IN> the type of elements of the source {@code Spliterator}
     * @return a {@code Spliterator} describing the output of applying the
     *         pipeline stages to the source elements
|
||||
*/
|
||||
abstract<P_IN> Spliterator<P_OUT> wrapSpliterator(Spliterator<P_IN> spliterator);
|
||||
|
||||
/**
|
||||
* Constructs a @{link Node.Builder} compatible with the output shape of
|
||||
* this {@code PipelineHelper}.
|
||||
*
|
||||
* @param exactSizeIfKnown if >=0 then a builder will be created that has a
|
||||
     *        fixed capacity of exactly {@code exactSizeIfKnown} elements; if < 0 then the
|
||||
* builder has variable capacity. A fixed capacity builder will fail
|
||||
* if an element is added after the builder has reached capacity.
|
||||
* @param generator a factory function for array instances
|
||||
* @return a {@code Node.Builder} compatible with the output shape of this
|
||||
* {@code PipelineHelper}
|
||||
*/
|
||||
abstract Node.Builder<P_OUT> makeNodeBuilder(long exactSizeIfKnown,
|
||||
IntFunction<P_OUT[]> generator);
|
||||
|
||||
/**
|
||||
* Collects all output elements resulting from applying the pipeline stages
|
||||
* to the source {@code Spliterator} into a {@code Node}.
|
||||
*
|
||||
* @implNote
|
||||
* If the pipeline has no intermediate operations and the source is backed
|
||||
* by a {@code Node} then that {@code Node} will be returned (or flattened
|
||||
* and then returned). This reduces copying for a pipeline consisting of a
|
||||
* stateful operation followed by a terminal operation that returns an
|
||||
* array, such as:
|
||||
* <pre>{@code
|
||||
* stream.sorted().toArray();
|
||||
* }</pre>
|
||||
*
|
||||
* @param spliterator the source {@code Spliterator}
|
||||
* @param flatten if true and the pipeline is a parallel pipeline then the
|
||||
* {@code Node} returned will contain no children, otherwise the
|
||||
* {@code Node} may represent the root in a tree that reflects the
|
||||
* shape of the computation tree.
|
||||
* @param generator a factory function for array instances
|
||||
* @return the {@code Node} containing all output elements
|
||||
*/
|
||||
abstract<P_IN> Node<P_OUT> evaluate(Spliterator<P_IN> spliterator,
|
||||
boolean flatten,
|
||||
IntFunction<P_OUT[]> generator);
|
||||
}
|
||||
761
jdkSrc/jdk8/java/util/stream/ReduceOps.java
Normal file
@@ -0,0 +1,761 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.OptionalDouble;
|
||||
import java.util.OptionalInt;
|
||||
import java.util.OptionalLong;
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.CountedCompleter;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.BinaryOperator;
|
||||
import java.util.function.DoubleBinaryOperator;
|
||||
import java.util.function.IntBinaryOperator;
|
||||
import java.util.function.LongBinaryOperator;
|
||||
import java.util.function.ObjDoubleConsumer;
|
||||
import java.util.function.ObjIntConsumer;
|
||||
import java.util.function.ObjLongConsumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Factory for creating instances of {@code TerminalOp} that implement
|
||||
* reductions.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class ReduceOps {
|
||||
|
||||
private ReduceOps() { }
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* reference values.
|
||||
*
|
||||
* @param <T> the type of the input elements
|
||||
* @param <U> the type of the result
|
||||
* @param seed the identity element for the reduction
|
||||
* @param reducer the accumulating function that incorporates an additional
|
||||
* input element into the result
|
||||
* @param combiner the combining function that combines two intermediate
|
||||
* results
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <T, U> TerminalOp<T, U>
|
||||
makeRef(U seed, BiFunction<U, ? super T, U> reducer, BinaryOperator<U> combiner) {
|
||||
Objects.requireNonNull(reducer);
|
||||
Objects.requireNonNull(combiner);
|
||||
class ReducingSink extends Box<U> implements AccumulatingSink<T, U, ReducingSink> {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = seed;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
state = reducer.apply(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
state = combiner.apply(state, other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<T, U, ReducingSink>(StreamShape.REFERENCE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
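A minimal usage sketch (illustrative only, not part of this file; the demo class name is hypothetical): the three-argument Stream.reduce ends up evaluating the TerminalOp built by this factory, with the seed as the identity, the reducer folding each element into a partial result, and the combiner merging partial results from parallel subtasks.

import java.util.Arrays;
import java.util.List;

class ReduceRefSeedDemo {
    public static void main(String[] args) {
        List<String> words = Arrays.asList("a", "bb", "ccc");
        // seed = 0; the reducer folds each string's length into the running sum;
        // the combiner merges partial sums produced by parallel subtasks
        int totalLength = words.parallelStream()
                .reduce(0, (sum, s) -> sum + s.length(), Integer::sum);
        System.out.println(totalLength); // 6
    }
}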
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* reference values producing an optional reference result.
|
||||
*
|
||||
* @param <T> The type of the input elements, and the type of the result
|
||||
* @param operator The reducing function
|
||||
* @return A {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <T> TerminalOp<T, Optional<T>>
|
||||
makeRef(BinaryOperator<T> operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<T, Optional<T>, ReducingSink> {
|
||||
private boolean empty;
|
||||
private T state;
|
||||
|
||||
public void begin(long size) {
|
||||
empty = true;
|
||||
state = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
if (empty) {
|
||||
empty = false;
|
||||
state = t;
|
||||
} else {
|
||||
state = operator.apply(state, t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Optional<T> get() {
|
||||
return empty ? Optional.empty() : Optional.of(state);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
if (!other.empty)
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<T, Optional<T>, ReducingSink>(StreamShape.REFERENCE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
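A quick sketch of the overload this backs (illustrative only; hypothetical class name): Stream.reduce(BinaryOperator) returns an Optional, and the empty/state pair in the ReducingSink above is what distinguishes "no elements seen" from a real result.

import java.util.Optional;
import java.util.stream.Stream;

class ReduceRefOptionalDemo {
    public static void main(String[] args) {
        Optional<String> longest = Stream.of("a", "bb", "ccc")
                .reduce((x, y) -> x.length() >= y.length() ? x : y);
        System.out.println(longest.get()); // ccc

        // An empty stream never clears the sink's 'empty' flag, so the result is empty
        Optional<String> none = Stream.<String>empty().reduce((x, y) -> x + y);
        System.out.println(none.isPresent()); // false
    }
}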
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a mutable reduce on
|
||||
* reference values.
|
||||
*
|
||||
* @param <T> the type of the input elements
|
||||
* @param <I> the type of the intermediate reduction result
|
||||
* @param collector a {@code Collector} defining the reduction
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <T, I> TerminalOp<T, I>
|
||||
makeRef(Collector<? super T, I, ?> collector) {
|
||||
Supplier<I> supplier = Objects.requireNonNull(collector).supplier();
|
||||
BiConsumer<I, ? super T> accumulator = collector.accumulator();
|
||||
BinaryOperator<I> combiner = collector.combiner();
|
||||
class ReducingSink extends Box<I>
|
||||
implements AccumulatingSink<T, I, ReducingSink> {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = supplier.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
accumulator.accept(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
state = combiner.apply(state, other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<T, I, ReducingSink>(StreamShape.REFERENCE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getOpFlags() {
|
||||
return collector.characteristics().contains(Collector.Characteristics.UNORDERED)
|
||||
? StreamOpFlag.NOT_ORDERED
|
||||
: 0;
|
||||
}
|
||||
};
|
||||
}
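For orientation, a hedged sketch of the Stream.collect(Collector) path this factory serves (not part of this file; names are illustrative). Only an UNORDERED collector lets getOpFlags() above relax ordering; groupingBy is not UNORDERED, while Collectors.toSet() is.

import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ReduceRefCollectorDemo {
    public static void main(String[] args) {
        // The Collector's supplier/accumulator/combiner map directly onto
        // begin/accept/combine in the ReducingSink above
        Map<Integer, Long> byLength = Stream.of("a", "bb", "cc", "ddd")
                .collect(Collectors.groupingBy(String::length, Collectors.counting()));
        System.out.println(byLength); // {1=1, 2=2, 3=1}
    }
}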
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a mutable reduce on
|
||||
* reference values.
|
||||
*
|
||||
* @param <T> the type of the input elements
|
||||
* @param <R> the type of the result
|
||||
* @param seedFactory a factory to produce a new base accumulator
|
||||
* @param accumulator a function to incorporate an element into an
|
||||
* accumulator
|
||||
* @param reducer a function to combine an accumulator into another
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <T, R> TerminalOp<T, R>
|
||||
makeRef(Supplier<R> seedFactory,
|
||||
BiConsumer<R, ? super T> accumulator,
|
||||
BiConsumer<R,R> reducer) {
|
||||
Objects.requireNonNull(seedFactory);
|
||||
Objects.requireNonNull(accumulator);
|
||||
Objects.requireNonNull(reducer);
|
||||
class ReducingSink extends Box<R>
|
||||
implements AccumulatingSink<T, R, ReducingSink> {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = seedFactory.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
accumulator.accept(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
reducer.accept(state, other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<T, R, ReducingSink>(StreamShape.REFERENCE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
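A short sketch of the explicit mutable-reduce form this supports (illustrative only): Stream.collect(Supplier, BiConsumer, BiConsumer), where the third argument folds the right-hand container into the left-hand one in place, exactly as combine() does above.

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Stream;

class ReduceRefMutableDemo {
    public static void main(String[] args) {
        // seedFactory creates a fresh container per subtask, the accumulator adds
        // one element, and the BiConsumer merges the right container into the left
        List<String> upper = Stream.of("a", "b", "c")
                .parallel()
                .collect(ArrayList::new,
                         (list, s) -> list.add(s.toUpperCase()),
                         ArrayList::addAll);
        System.out.println(upper); // [A, B, C]
    }
}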
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* {@code int} values.
|
||||
*
|
||||
* @param identity the identity for the combining function
|
||||
* @param operator the combining function
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static TerminalOp<Integer, Integer>
|
||||
makeInt(int identity, IntBinaryOperator operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<Integer, Integer, ReducingSink>, Sink.OfInt {
|
||||
private int state;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = identity;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
state = operator.applyAsInt(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Integer get() {
|
||||
return state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Integer, Integer, ReducingSink>(StreamShape.INT_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
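A small sketch of the primitive path (illustrative only): IntStream.reduce(identity, op), and sum() which is defined in terms of it, evaluate a TerminalOp from this factory, keeping the running state as an unboxed int.

import java.util.stream.IntStream;

class ReduceIntDemo {
    public static void main(String[] args) {
        // Both forms use an identity of 0 and an associative int-combining function
        int explicit = IntStream.rangeClosed(1, 5).reduce(0, (a, b) -> a + b);
        int viaSum   = IntStream.rangeClosed(1, 5).sum();
        System.out.println(explicit + " " + viaSum); // 15 15
    }
}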
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* {@code int} values, producing an optional integer result.
|
||||
*
|
||||
* @param operator the combining function
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static TerminalOp<Integer, OptionalInt>
|
||||
makeInt(IntBinaryOperator operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<Integer, OptionalInt, ReducingSink>, Sink.OfInt {
|
||||
private boolean empty;
|
||||
private int state;
|
||||
|
||||
public void begin(long size) {
|
||||
empty = true;
|
||||
state = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
if (empty) {
|
||||
empty = false;
|
||||
state = t;
|
||||
}
|
||||
else {
|
||||
state = operator.applyAsInt(state, t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptionalInt get() {
|
||||
return empty ? OptionalInt.empty() : OptionalInt.of(state);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
if (!other.empty)
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Integer, OptionalInt, ReducingSink>(StreamShape.INT_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a mutable reduce on
|
||||
* {@code int} values.
|
||||
*
|
||||
* @param <R> The type of the result
|
||||
* @param supplier a factory to produce a new accumulator of the result type
|
||||
* @param accumulator a function to incorporate an int into an
|
||||
* accumulator
|
||||
* @param combiner a function to combine an accumulator into another
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <R> TerminalOp<Integer, R>
|
||||
makeInt(Supplier<R> supplier,
|
||||
ObjIntConsumer<R> accumulator,
|
||||
BinaryOperator<R> combiner) {
|
||||
Objects.requireNonNull(supplier);
|
||||
Objects.requireNonNull(accumulator);
|
||||
Objects.requireNonNull(combiner);
|
||||
class ReducingSink extends Box<R>
|
||||
implements AccumulatingSink<Integer, R, ReducingSink>, Sink.OfInt {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = supplier.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
accumulator.accept(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
state = combiner.apply(state, other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Integer, R, ReducingSink>(StreamShape.INT_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* {@code long} values.
|
||||
*
|
||||
* @param identity the identity for the combining function
|
||||
* @param operator the combining function
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static TerminalOp<Long, Long>
|
||||
makeLong(long identity, LongBinaryOperator operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<Long, Long, ReducingSink>, Sink.OfLong {
|
||||
private long state;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = identity;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
state = operator.applyAsLong(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Long get() {
|
||||
return state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Long, Long, ReducingSink>(StreamShape.LONG_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* {@code long} values, producing an optional long result.
|
||||
*
|
||||
* @param operator the combining function
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static TerminalOp<Long, OptionalLong>
|
||||
makeLong(LongBinaryOperator operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<Long, OptionalLong, ReducingSink>, Sink.OfLong {
|
||||
private boolean empty;
|
||||
private long state;
|
||||
|
||||
public void begin(long size) {
|
||||
empty = true;
|
||||
state = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
if (empty) {
|
||||
empty = false;
|
||||
state = t;
|
||||
}
|
||||
else {
|
||||
state = operator.applyAsLong(state, t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptionalLong get() {
|
||||
return empty ? OptionalLong.empty() : OptionalLong.of(state);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
if (!other.empty)
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Long, OptionalLong, ReducingSink>(StreamShape.LONG_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a mutable reduce on
|
||||
* {@code long} values.
|
||||
*
|
||||
* @param <R> the type of the result
|
||||
* @param supplier a factory to produce a new accumulator of the result type
|
||||
* @param accumulator a function to incorporate a {@code long} into an
|
||||
* accumulator
|
||||
* @param combiner a function to combine an accumulator into another
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <R> TerminalOp<Long, R>
|
||||
makeLong(Supplier<R> supplier,
|
||||
ObjLongConsumer<R> accumulator,
|
||||
BinaryOperator<R> combiner) {
|
||||
Objects.requireNonNull(supplier);
|
||||
Objects.requireNonNull(accumulator);
|
||||
Objects.requireNonNull(combiner);
|
||||
class ReducingSink extends Box<R>
|
||||
implements AccumulatingSink<Long, R, ReducingSink>, Sink.OfLong {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = supplier.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
accumulator.accept(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
state = combiner.apply(state, other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Long, R, ReducingSink>(StreamShape.LONG_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* {@code double} values.
|
||||
*
|
||||
* @param identity the identity for the combining function
|
||||
* @param operator the combining function
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static TerminalOp<Double, Double>
|
||||
makeDouble(double identity, DoubleBinaryOperator operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<Double, Double, ReducingSink>, Sink.OfDouble {
|
||||
private double state;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = identity;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
state = operator.applyAsDouble(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double get() {
|
||||
return state;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Double, Double, ReducingSink>(StreamShape.DOUBLE_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a functional reduce on
|
||||
* {@code double} values, producing an optional double result.
|
||||
*
|
||||
* @param operator the combining function
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static TerminalOp<Double, OptionalDouble>
|
||||
makeDouble(DoubleBinaryOperator operator) {
|
||||
Objects.requireNonNull(operator);
|
||||
class ReducingSink
|
||||
implements AccumulatingSink<Double, OptionalDouble, ReducingSink>, Sink.OfDouble {
|
||||
private boolean empty;
|
||||
private double state;
|
||||
|
||||
public void begin(long size) {
|
||||
empty = true;
|
||||
state = 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
if (empty) {
|
||||
empty = false;
|
||||
state = t;
|
||||
}
|
||||
else {
|
||||
state = operator.applyAsDouble(state, t);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public OptionalDouble get() {
|
||||
return empty ? OptionalDouble.empty() : OptionalDouble.of(state);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
if (!other.empty)
|
||||
accept(other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Double, OptionalDouble, ReducingSink>(StreamShape.DOUBLE_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a {@code TerminalOp} that implements a mutable reduce on
|
||||
* {@code double} values.
|
||||
*
|
||||
* @param <R> the type of the result
|
||||
* @param supplier a factory to produce a new accumulator of the result type
|
||||
* @param accumulator a function to incorporate a {@code double} into an
|
||||
* accumulator
|
||||
* @param combiner a function to combine an accumulator into another
|
||||
* @return a {@code TerminalOp} implementing the reduction
|
||||
*/
|
||||
public static <R> TerminalOp<Double, R>
|
||||
makeDouble(Supplier<R> supplier,
|
||||
ObjDoubleConsumer<R> accumulator,
|
||||
BinaryOperator<R> combiner) {
|
||||
Objects.requireNonNull(supplier);
|
||||
Objects.requireNonNull(accumulator);
|
||||
Objects.requireNonNull(combiner);
|
||||
class ReducingSink extends Box<R>
|
||||
implements AccumulatingSink<Double, R, ReducingSink>, Sink.OfDouble {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
state = supplier.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
accumulator.accept(state, t);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void combine(ReducingSink other) {
|
||||
state = combiner.apply(state, other.state);
|
||||
}
|
||||
}
|
||||
return new ReduceOp<Double, R, ReducingSink>(StreamShape.DOUBLE_VALUE) {
|
||||
@Override
|
||||
public ReducingSink makeSink() {
|
||||
return new ReducingSink();
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* A type of {@code TerminalSink} that implements an associative reducing
|
||||
* operation on elements of type {@code T} and producing a result of type
|
||||
* {@code R}.
|
||||
*
|
||||
* @param <T> the type of input element to the combining operation
|
||||
* @param <R> the result type
|
||||
* @param <K> the type of the {@code AccumulatingSink}.
|
||||
*/
|
||||
private interface AccumulatingSink<T, R, K extends AccumulatingSink<T, R, K>>
|
||||
extends TerminalSink<T, R> {
|
||||
public void combine(K other);
|
||||
}
|
||||
|
||||
/**
|
||||
* State box for a single state element, used as a base class for
|
||||
* {@code AccumulatingSink} instances
|
||||
*
|
||||
* @param <U> The type of the state element
|
||||
*/
|
||||
private static abstract class Box<U> {
|
||||
U state;
|
||||
|
||||
Box() {} // Avoid creation of special accessor
|
||||
|
||||
public U get() {
|
||||
return state;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@code TerminalOp} that evaluates a stream pipeline and sends the
|
||||
* output into an {@code AccumulatingSink}, which performs a reduce
|
||||
* operation. The {@code AccumulatingSink} must represent an associative
|
||||
* reducing operation.
|
||||
*
|
||||
* @param <T> the output type of the stream pipeline
|
||||
* @param <R> the result type of the reducing operation
|
||||
* @param <S> the type of the {@code AccumulatingSink}
|
||||
*/
|
||||
private static abstract class ReduceOp<T, R, S extends AccumulatingSink<T, R, S>>
|
||||
implements TerminalOp<T, R> {
|
||||
private final StreamShape inputShape;
|
||||
|
||||
/**
|
||||
* Create a {@code ReduceOp} of the specified stream shape which uses
|
||||
* the specified {@code Supplier} to create accumulating sinks.
|
||||
*
|
||||
* @param shape The shape of the stream pipeline
|
||||
*/
|
||||
ReduceOp(StreamShape shape) {
|
||||
inputShape = shape;
|
||||
}
|
||||
|
||||
public abstract S makeSink();
|
||||
|
||||
@Override
|
||||
public StreamShape inputShape() {
|
||||
return inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> R evaluateSequential(PipelineHelper<T> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
return helper.wrapAndCopyInto(makeSink(), spliterator).get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> R evaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
return new ReduceTask<>(this, helper, spliterator).invoke().get();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A {@code ForkJoinTask} for performing a parallel reduce operation.
|
||||
*/
|
||||
@SuppressWarnings("serial")
|
||||
private static final class ReduceTask<P_IN, P_OUT, R,
|
||||
S extends AccumulatingSink<P_OUT, R, S>>
|
||||
extends AbstractTask<P_IN, P_OUT, S, ReduceTask<P_IN, P_OUT, R, S>> {
|
||||
private final ReduceOp<P_OUT, R, S> op;
|
||||
|
||||
ReduceTask(ReduceOp<P_OUT, R, S> op,
|
||||
PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(helper, spliterator);
|
||||
this.op = op;
|
||||
}
|
||||
|
||||
ReduceTask(ReduceTask<P_IN, P_OUT, R, S> parent,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
super(parent, spliterator);
|
||||
this.op = parent.op;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ReduceTask<P_IN, P_OUT, R, S> makeChild(Spliterator<P_IN> spliterator) {
|
||||
return new ReduceTask<>(this, spliterator);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected S doLeaf() {
|
||||
return helper.wrapAndCopyInto(op.makeSink(), spliterator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onCompletion(CountedCompleter<?> caller) {
|
||||
if (!isLeaf()) {
|
||||
S leftResult = leftChild.getLocalResult();
|
||||
leftResult.combine(rightChild.getLocalResult());
|
||||
setLocalResult(leftResult);
|
||||
}
|
||||
// GC spliterator, left and right child
|
||||
super.onCompletion(caller);
|
||||
}
|
||||
}
|
||||
}
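Since ReduceTask.onCompletion merges child results with combine(), parallel evaluation is only equivalent to sequential evaluation when the operation is associative. A hedged sketch (hypothetical class name) of what goes wrong otherwise:

import java.util.stream.IntStream;

class AssociativityDemo {
    public static void main(String[] args) {
        // Subtraction is not associative, so splitting the input into leaf tasks
        // and combining their results changes the answer
        int sequential = IntStream.rangeClosed(1, 8).reduce(0, (a, b) -> a - b);
        int parallel   = IntStream.rangeClosed(1, 8).parallel().reduce(0, (a, b) -> a - b);
        System.out.println(sequential); // -36
        System.out.println(parallel);   // typically a different value
    }
}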
|
||||
728
jdkSrc/jdk8/java/util/stream/ReferencePipeline.java
Normal file
@@ -0,0 +1,728 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.Spliterator;
|
||||
import java.util.Spliterators;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.BinaryOperator;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.IntFunction;
|
||||
import java.util.function.LongConsumer;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.function.ToDoubleFunction;
|
||||
import java.util.function.ToIntFunction;
|
||||
import java.util.function.ToLongFunction;
|
||||
|
||||
/**
|
||||
* Abstract base class for an intermediate pipeline stage or pipeline source
|
||||
* stage whose elements are of type {@code P_OUT}.
|
||||
*
|
||||
* @param <P_IN> type of elements in the upstream source
|
||||
* @param <P_OUT> type of elements produced by this stage
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract class ReferencePipeline<P_IN, P_OUT>
|
||||
extends AbstractPipeline<P_IN, P_OUT, Stream<P_OUT>>
|
||||
implements Stream<P_OUT> {
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
ReferencePipeline(Supplier<? extends Spliterator<?>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the head of a stream pipeline.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags The source flags for the stream source, described in
|
||||
* {@link StreamOpFlag}
|
||||
* @param parallel {@code true} if the pipeline is parallel
|
||||
*/
|
||||
ReferencePipeline(Spliterator<?> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for appending an intermediate operation onto an existing
|
||||
* pipeline.
|
||||
*
|
||||
* @param upstream the upstream element source.
|
||||
*/
|
||||
ReferencePipeline(AbstractPipeline<?, P_IN, ?> upstream, int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
}
|
||||
|
||||
// Shape-specific methods
|
||||
|
||||
@Override
|
||||
final StreamShape getOutputShape() {
|
||||
return StreamShape.REFERENCE;
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Node<P_OUT> evaluateToNode(PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
boolean flattenTree,
|
||||
IntFunction<P_OUT[]> generator) {
|
||||
return Nodes.collect(helper, spliterator, flattenTree, generator);
|
||||
}
|
||||
|
||||
@Override
|
||||
final <P_IN> Spliterator<P_OUT> wrap(PipelineHelper<P_OUT> ph,
|
||||
Supplier<Spliterator<P_IN>> supplier,
|
||||
boolean isParallel) {
|
||||
return new StreamSpliterators.WrappingSpliterator<>(ph, supplier, isParallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
final Spliterator<P_OUT> lazySpliterator(Supplier<? extends Spliterator<P_OUT>> supplier) {
|
||||
return new StreamSpliterators.DelegatingSpliterator<>(supplier);
|
||||
}
|
||||
|
||||
@Override
|
||||
final void forEachWithCancel(Spliterator<P_OUT> spliterator, Sink<P_OUT> sink) {
|
||||
do { } while (!sink.cancellationRequested() && spliterator.tryAdvance(sink));
|
||||
}
|
||||
|
||||
@Override
|
||||
final Node.Builder<P_OUT> makeNodeBuilder(long exactSizeIfKnown, IntFunction<P_OUT[]> generator) {
|
||||
return Nodes.builder(exactSizeIfKnown, generator);
|
||||
}
|
||||
|
||||
|
||||
// BaseStream
|
||||
|
||||
@Override
|
||||
public final Iterator<P_OUT> iterator() {
|
||||
return Spliterators.iterator(spliterator());
|
||||
}
|
||||
|
||||
|
||||
// Stream
|
||||
|
||||
// Stateless intermediate operations from Stream
|
||||
|
||||
@Override
|
||||
public Stream<P_OUT> unordered() {
|
||||
if (!isOrdered())
|
||||
return this;
|
||||
return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE, StreamOpFlag.NOT_ORDERED) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
|
||||
return sink;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> filter(Predicate<? super P_OUT> predicate) {
|
||||
Objects.requireNonNull(predicate);
|
||||
return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, P_OUT>(sink) {
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
if (predicate.test(u))
|
||||
downstream.accept(u);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public final <R> Stream<R> map(Function<? super P_OUT, ? extends R> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<P_OUT, R>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<R> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, R>(sink) {
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
downstream.accept(mapper.apply(u));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
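A small observational sketch (illustrative only) of how these stateless stages compose: each opWrapSink wraps the downstream sink, so every element is pushed through the whole filter/map chain before the source advances to the next one.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class SinkChainingDemo {
    public static void main(String[] args) {
        // The interleaved output shows the single-pass, element-at-a-time traversal
        List<Integer> lengths = Stream.of("a", "bb", "ccc")
                .peek(s -> System.out.println("source: " + s))
                .filter(s -> s.length() > 1)
                .peek(s -> System.out.println("kept:   " + s))
                .map(String::length)
                .collect(Collectors.toList());
        System.out.println(lengths); // [2, 3]
    }
}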
|
||||
|
||||
@Override
|
||||
public final IntStream mapToInt(ToIntFunction<? super P_OUT> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new IntPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, Integer>(sink) {
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
downstream.accept(mapper.applyAsInt(u));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream mapToLong(ToLongFunction<? super P_OUT> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, Long>(sink) {
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
downstream.accept(mapper.applyAsLong(u));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream mapToDouble(ToDoubleFunction<? super P_OUT> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, Double>(sink) {
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
downstream.accept(mapper.applyAsDouble(u));
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <R> Stream<R> flatMap(Function<? super P_OUT, ? extends Stream<? extends R>> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new StatelessOp<P_OUT, R>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<R> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, R>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
try (Stream<? extends R> result = mapper.apply(u)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstream);
|
||||
}
|
||||
else {
|
||||
Spliterator<? extends R> s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstream));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
// If this method is called then an operation within the stream
|
||||
// pipeline is short-circuiting (see AbstractPipeline.copyInto).
|
||||
// Note that we cannot differentiate between an upstream or
|
||||
// downstream operation
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
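A sketch of why accept() above switches to tryAdvance polling once cancellationRequested() has been observed (illustrative only): with a short-circuiting terminal such as findFirst, each inner stream can be abandoned as soon as the downstream stops requesting elements.

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

class FlatMapShortCircuitDemo {
    public static void main(String[] args) {
        List<List<Integer>> nested = Arrays.asList(
                Arrays.asList(1, 2, 3),
                Arrays.asList(4, 5, 6),
                Arrays.asList(7, 8, 9));
        // findFirst short-circuits, so later inner lists need not be fully traversed
        Optional<Integer> firstBig = nested.stream()
                .flatMap(List::stream)
                .filter(i -> i > 4)
                .findFirst();
        System.out.println(firstBig.get()); // 5
    }
}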
|
||||
|
||||
@Override
|
||||
public final IntStream flatMapToInt(Function<? super P_OUT, ? extends IntStream> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new IntPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, Integer>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
// cache the consumer to avoid creation on every accepted element
|
||||
IntConsumer downstreamAsInt = downstream::accept;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
try (IntStream result = mapper.apply(u)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstreamAsInt);
|
||||
}
|
||||
else {
|
||||
Spliterator.OfInt s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstreamAsInt));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final DoubleStream flatMapToDouble(Function<? super P_OUT, ? extends DoubleStream> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
return new DoublePipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, Double>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
// cache the consumer to avoid creation on every accepted element
|
||||
DoubleConsumer downstreamAsDouble = downstream::accept;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
try (DoubleStream result = mapper.apply(u)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstreamAsDouble);
|
||||
}
|
||||
else {
|
||||
Spliterator.OfDouble s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstreamAsDouble));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final LongStream flatMapToLong(Function<? super P_OUT, ? extends LongStream> mapper) {
|
||||
Objects.requireNonNull(mapper);
|
||||
// We can do better than this, by polling cancellationRequested when stream is infinite
|
||||
return new LongPipeline.StatelessOp<P_OUT>(this, StreamShape.REFERENCE,
|
||||
StreamOpFlag.NOT_SORTED | StreamOpFlag.NOT_DISTINCT | StreamOpFlag.NOT_SIZED) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, Long>(sink) {
|
||||
// true if cancellationRequested() has been called
|
||||
boolean cancellationRequestedCalled;
|
||||
|
||||
// cache the consumer to avoid creation on every accepted element
|
||||
LongConsumer downstreamAsLong = downstream::accept;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(-1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
try (LongStream result = mapper.apply(u)) {
|
||||
if (result != null) {
|
||||
if (!cancellationRequestedCalled) {
|
||||
result.sequential().forEach(downstreamAsLong);
|
||||
}
|
||||
else {
|
||||
Spliterator.OfLong s = result.sequential().spliterator();
|
||||
do { } while (!downstream.cancellationRequested() && s.tryAdvance(downstreamAsLong));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
cancellationRequestedCalled = true;
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> peek(Consumer<? super P_OUT> action) {
|
||||
Objects.requireNonNull(action);
|
||||
return new StatelessOp<P_OUT, P_OUT>(this, StreamShape.REFERENCE,
|
||||
0) {
|
||||
@Override
|
||||
Sink<P_OUT> opWrapSink(int flags, Sink<P_OUT> sink) {
|
||||
return new Sink.ChainedReference<P_OUT, P_OUT>(sink) {
|
||||
@Override
|
||||
public void accept(P_OUT u) {
|
||||
action.accept(u);
|
||||
downstream.accept(u);
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Stateful intermediate operations from Stream
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> distinct() {
|
||||
return DistinctOps.makeRef(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> sorted() {
|
||||
return SortedOps.makeRef(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> sorted(Comparator<? super P_OUT> comparator) {
|
||||
return SortedOps.makeRef(this, comparator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> limit(long maxSize) {
|
||||
if (maxSize < 0)
|
||||
throw new IllegalArgumentException(Long.toString(maxSize));
|
||||
return SliceOps.makeRef(this, 0, maxSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Stream<P_OUT> skip(long n) {
|
||||
if (n < 0)
|
||||
throw new IllegalArgumentException(Long.toString(n));
|
||||
if (n == 0)
|
||||
return this;
|
||||
else
|
||||
return SliceOps.makeRef(this, n, -1);
|
||||
}
|
||||
|
||||
// Terminal operations from Stream
|
||||
|
||||
@Override
|
||||
public void forEach(Consumer<? super P_OUT> action) {
|
||||
evaluate(ForEachOps.makeRef(action, false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(Consumer<? super P_OUT> action) {
|
||||
evaluate(ForEachOps.makeRef(action, true));
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public final <A> A[] toArray(IntFunction<A[]> generator) {
|
||||
// Since A has no relation to U (not possible to declare that A is an upper bound of U)
|
||||
// there will be no static type checking.
|
||||
// Therefore use a raw type and assume A == U rather than propagating the separation of A and U
|
||||
// throughout the code-base.
|
||||
// The runtime type of U is never checked for equality with the component type of the runtime type of A[].
|
||||
// Runtime checking will be performed when an element is stored in A[], thus if A is not a
|
||||
// super type of U an ArrayStoreException will be thrown.
|
||||
@SuppressWarnings("rawtypes")
|
||||
IntFunction rawGenerator = (IntFunction) generator;
|
||||
return (A[]) Nodes.flatten(evaluateToArrayNode(rawGenerator), rawGenerator)
|
||||
.asArray(rawGenerator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Object[] toArray() {
|
||||
return toArray(Object[]::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean anyMatch(Predicate<? super P_OUT> predicate) {
|
||||
return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.ANY));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean allMatch(Predicate<? super P_OUT> predicate) {
|
||||
return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.ALL));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean noneMatch(Predicate<? super P_OUT> predicate) {
|
||||
return evaluate(MatchOps.makeRef(predicate, MatchOps.MatchKind.NONE));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Optional<P_OUT> findFirst() {
|
||||
return evaluate(FindOps.makeRef(true));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Optional<P_OUT> findAny() {
|
||||
return evaluate(FindOps.makeRef(false));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final P_OUT reduce(final P_OUT identity, final BinaryOperator<P_OUT> accumulator) {
|
||||
return evaluate(ReduceOps.makeRef(identity, accumulator, accumulator));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Optional<P_OUT> reduce(BinaryOperator<P_OUT> accumulator) {
|
||||
return evaluate(ReduceOps.makeRef(accumulator));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final <R> R reduce(R identity, BiFunction<R, ? super P_OUT, R> accumulator, BinaryOperator<R> combiner) {
|
||||
return evaluate(ReduceOps.makeRef(identity, accumulator, combiner));
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public final <R, A> R collect(Collector<? super P_OUT, A, R> collector) {
|
||||
A container;
|
||||
if (isParallel()
|
||||
&& (collector.characteristics().contains(Collector.Characteristics.CONCURRENT))
|
||||
&& (!isOrdered() || collector.characteristics().contains(Collector.Characteristics.UNORDERED))) {
|
||||
container = collector.supplier().get();
|
||||
BiConsumer<A, ? super P_OUT> accumulator = collector.accumulator();
|
||||
forEach(u -> accumulator.accept(container, u));
|
||||
}
|
||||
else {
|
||||
container = evaluate(ReduceOps.makeRef(collector));
|
||||
}
|
||||
return collector.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)
|
||||
? (R) container
|
||||
: collector.finisher().apply(container);
|
||||
}
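A hedged sketch of the two branches above (illustrative only): a CONCURRENT and effectively UNORDERED collector on a parallel stream takes the forEach fast path into one shared container, while anything else is evaluated through ReduceOps.makeRef(collector) and merged.

import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class CollectBranchDemo {
    public static void main(String[] args) {
        // groupingByConcurrent is CONCURRENT and UNORDERED: all threads accumulate
        // into a single ConcurrentMap with no per-subtask merge step
        ConcurrentMap<Integer, Long> concurrent = Stream.of("a", "bb", "cc", "ddd")
                .parallel()
                .collect(Collectors.groupingByConcurrent(String::length, Collectors.counting()));

        // Plain groupingBy lacks CONCURRENT, so it goes through the merge-based path
        Map<Integer, Long> merged = Stream.of("a", "bb", "cc", "ddd")
                .parallel()
                .collect(Collectors.groupingBy(String::length, Collectors.counting()));

        System.out.println(concurrent.equals(merged)); // true
    }
}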
|
||||
|
||||
@Override
|
||||
public final <R> R collect(Supplier<R> supplier,
|
||||
BiConsumer<R, ? super P_OUT> accumulator,
|
||||
BiConsumer<R, R> combiner) {
|
||||
return evaluate(ReduceOps.makeRef(supplier, accumulator, combiner));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Optional<P_OUT> max(Comparator<? super P_OUT> comparator) {
|
||||
return reduce(BinaryOperator.maxBy(comparator));
|
||||
}
|
||||
|
||||
@Override
|
||||
public final Optional<P_OUT> min(Comparator<? super P_OUT> comparator) {
|
||||
return reduce(BinaryOperator.minBy(comparator));
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public final long count() {
|
||||
return mapToLong(e -> 1L).sum();
|
||||
}
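As the one-liner above shows, count() is just a mapToLong/sum reduction; a trivial equivalence sketch (illustrative only):

import java.util.stream.Stream;

class CountDemo {
    public static void main(String[] args) {
        long viaCount = Stream.of("a", "bb", "ccc").count();
        long viaSum   = Stream.of("a", "bb", "ccc").mapToLong(e -> 1L).sum();
        System.out.println(viaCount + " " + viaSum); // 3 3
    }
}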
|
||||
|
||||
|
||||
//
|
||||
|
||||
/**
|
||||
* Source stage of a ReferencePipeline.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @param <E_OUT> type of elements produced by this stage
|
||||
* @since 1.8
|
||||
*/
|
||||
static class Head<E_IN, E_OUT> extends ReferencePipeline<E_IN, E_OUT> {
|
||||
/**
|
||||
* Constructor for the source stage of a Stream.
|
||||
*
|
||||
* @param source {@code Supplier<Spliterator>} describing the stream
|
||||
* source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
*/
|
||||
Head(Supplier<? extends Spliterator<?>> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructor for the source stage of a Stream.
|
||||
*
|
||||
* @param source {@code Spliterator} describing the stream source
|
||||
* @param sourceFlags the source flags for the stream source, described
|
||||
* in {@link StreamOpFlag}
|
||||
*/
|
||||
Head(Spliterator<?> source,
|
||||
int sourceFlags, boolean parallel) {
|
||||
super(source, sourceFlags, parallel);
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
final Sink<E_IN> opWrapSink(int flags, Sink<E_OUT> sink) {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
// Optimized sequential terminal operations for the head of the pipeline
|
||||
|
||||
@Override
|
||||
public void forEach(Consumer<? super E_OUT> action) {
|
||||
if (!isParallel()) {
|
||||
sourceStageSpliterator().forEachRemaining(action);
|
||||
}
|
||||
else {
|
||||
super.forEach(action);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachOrdered(Consumer<? super E_OUT> action) {
|
||||
if (!isParallel()) {
|
||||
sourceStageSpliterator().forEachRemaining(action);
|
||||
}
|
||||
else {
|
||||
super.forEachOrdered(action);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateless intermediate stage of a Stream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @param <E_OUT> type of elements produced by this stage
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatelessOp<E_IN, E_OUT>
|
||||
extends ReferencePipeline<E_IN, E_OUT> {
|
||||
/**
|
||||
* Construct a new Stream by appending a stateless intermediate
|
||||
* operation to an existing stream.
|
||||
*
|
||||
* @param upstream The upstream pipeline stage
|
||||
* @param inputShape The stream shape for the upstream pipeline stage
|
||||
* @param opFlags Operation flags for the new stage
|
||||
*/
|
||||
StatelessOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Base class for a stateful intermediate stage of a Stream.
|
||||
*
|
||||
* @param <E_IN> type of elements in the upstream source
|
||||
* @param <E_OUT> type of elements produced by this stage
|
||||
* @since 1.8
|
||||
*/
|
||||
abstract static class StatefulOp<E_IN, E_OUT>
|
||||
extends ReferencePipeline<E_IN, E_OUT> {
|
||||
/**
|
||||
* Construct a new Stream by appending a stateful intermediate operation
|
||||
* to an existing stream.
|
||||
* @param upstream The upstream pipeline stage
|
||||
* @param inputShape The stream shape for the upstream pipeline stage
|
||||
* @param opFlags Operation flags for the new stage
|
||||
*/
|
||||
StatefulOp(AbstractPipeline<?, E_IN, ?> upstream,
|
||||
StreamShape inputShape,
|
||||
int opFlags) {
|
||||
super(upstream, opFlags);
|
||||
assert upstream.getOutputShape() == inputShape;
|
||||
}
|
||||
|
||||
@Override
|
||||
final boolean opIsStateful() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
abstract <P_IN> Node<E_OUT> opEvaluateParallel(PipelineHelper<E_OUT> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<E_OUT[]> generator);
|
||||
}
|
||||
}
|
||||
362
jdkSrc/jdk8/java/util/stream/Sink.java
Normal file
@@ -0,0 +1,362 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.LongConsumer;
|
||||
|
||||
/**
|
||||
* An extension of {@link Consumer} used to conduct values through the stages of
|
||||
* a stream pipeline, with additional methods to manage size information,
|
||||
* control flow, etc. Before calling the {@code accept()} method on a
|
||||
* {@code Sink} for the first time, you must first call the {@code begin()}
|
||||
* method to inform it that data is coming (optionally informing the sink how
|
||||
* much data is coming), and after all data has been sent, you must call the
|
||||
* {@code end()} method. After calling {@code end()}, you should not call
|
||||
* {@code accept()} without again calling {@code begin()}. {@code Sink} also
|
||||
* offers a mechanism by which the sink can cooperatively signal that it does
|
||||
* not wish to receive any more data (the {@code cancellationRequested()}
|
||||
* method), which a source can poll before sending more data to the
|
||||
* {@code Sink}.
|
||||
*
|
||||
* <p>A sink may be in one of two states: an initial state and an active state.
|
||||
* It starts out in the initial state; the {@code begin()} method transitions
|
||||
* it to the active state, and the {@code end()} method transitions it back into
|
||||
* the initial state, where it can be re-used. Data-accepting methods (such as
|
||||
* {@code accept()}) are only valid in the active state.
|
||||
*
|
||||
* @apiNote
|
||||
* A stream pipeline consists of a source, zero or more intermediate stages
|
||||
* (such as filtering or mapping), and a terminal stage, such as reduction or
|
||||
* for-each. For concreteness, consider the pipeline:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int longestStringLengthStartingWithA
|
||||
* = strings.stream()
|
||||
* .filter(s -> s.startsWith("A"))
|
||||
* .mapToInt(String::length)
|
||||
* .max();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>Here, we have three stages, filtering, mapping, and reducing. The
|
||||
* filtering stage consumes strings and emits a subset of those strings; the
|
||||
* mapping stage consumes strings and emits ints; the reduction stage consumes
|
||||
* those ints and computes the maximal value.
|
||||
*
|
||||
* <p>A {@code Sink} instance is used to represent each stage of this pipeline,
|
||||
* whether the stage accepts objects, ints, longs, or doubles. Sink has entry
|
||||
* points for {@code accept(Object)}, {@code accept(int)}, etc, so that we do
|
||||
* not need a specialized interface for each primitive specialization. (It
|
||||
* might be called a "kitchen sink" for this omnivorous tendency.) The entry
|
||||
* point to the pipeline is the {@code Sink} for the filtering stage, which
|
||||
* sends some elements "downstream" -- into the {@code Sink} for the mapping
|
||||
* stage, which in turn sends integral values downstream into the {@code Sink}
|
||||
* for the reduction stage. The {@code Sink} implementation associated with a
|
||||
* given stage is expected to know the data type for the next stage, and call
|
||||
* the correct {@code accept} method on its downstream {@code Sink}. Similarly,
|
||||
* each stage must implement the correct {@code accept} method corresponding to
|
||||
* the data type it accepts.
|
||||
*
|
||||
* <p>The specialized subtypes such as {@link Sink.OfInt} override
|
||||
* {@code accept(Object)} to call the appropriate primitive specialization of
|
||||
* {@code accept}, implement the appropriate primitive specialization of
|
||||
* {@code Consumer}, and re-abstract the appropriate primitive specialization of
|
||||
* {@code accept}.
|
||||
*
|
||||
* <p>The chaining subtypes such as {@link ChainedInt} not only implement
|
||||
* {@code Sink.OfInt}, but also maintain a {@code downstream} field which
|
||||
* represents the downstream {@code Sink}, and implement the methods
|
||||
* {@code begin()}, {@code end()}, and {@code cancellationRequested()} to
|
||||
* delegate to the downstream {@code Sink}. Most implementations of
|
||||
* intermediate operations will use these chaining wrappers. For example, the
|
||||
* mapping stage in the above example would look like:
|
||||
*
|
||||
* <pre>{@code
|
||||
* IntSink is = new Sink.ChainedReference<U>(sink) {
|
||||
* public void accept(U u) {
|
||||
* downstream.accept(mapper.applyAsInt(u));
|
||||
* }
|
||||
* };
|
||||
* }</pre>
|
||||
*
|
||||
* <p>Here, we implement {@code Sink.ChainedReference<U>}, meaning that we expect
|
||||
* to receive elements of type {@code U} as input, and pass the downstream sink
|
||||
* to the constructor. Because the next stage expects to receive integers, we
|
||||
* must call the {@code accept(int)} method when emitting values to the downstream.
|
||||
* The {@code accept()} method applies the mapping function from {@code U} to
|
||||
* {@code int} and passes the resulting value to the downstream {@code Sink}.
|
||||
*
|
||||
* @param <T> type of elements for value streams
|
||||
* @since 1.8
|
||||
*/
|
||||
interface Sink<T> extends Consumer<T> {
|
||||
/**
|
||||
* Resets the sink state to receive a fresh data set. This must be called
|
||||
* before sending any data to the sink. After calling {@link #end()},
|
||||
* you may call this method to reset the sink for another calculation.
|
||||
* @param size The exact size of the data to be pushed downstream, if
|
||||
* known, or {@code -1} if unknown or infinite.
|
||||
*
|
||||
* <p>Prior to this call, the sink must be in the initial state, and after
|
||||
* this call it is in the active state.
|
||||
*/
|
||||
default void begin(long size) {}
|
||||
|
||||
/**
|
||||
* Indicates that all elements have been pushed. If the {@code Sink} is
|
||||
* stateful, it should send any stored state downstream at this time, and
|
||||
* should clear any accumulated state (and associated resources).
|
||||
*
|
||||
* <p>Prior to this call, the sink must be in the active state, and after
|
||||
* this call it is returned to the initial state.
|
||||
*/
|
||||
default void end() {}
|
||||
|
||||
/**
|
||||
* Indicates that this {@code Sink} does not wish to receive any more data.
|
||||
*
|
||||
* @implSpec The default implementation always returns false.
|
||||
*
|
||||
* @return true if cancellation is requested
|
||||
*/
|
||||
default boolean cancellationRequested() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Accepts an int value.
|
||||
*
|
||||
* @implSpec The default implementation throws IllegalStateException.
|
||||
*
|
||||
* @throws IllegalStateException if this sink does not accept int values
|
||||
*/
|
||||
default void accept(int value) {
|
||||
throw new IllegalStateException("called wrong accept method");
|
||||
}
|
||||
|
||||
/**
|
||||
* Accepts a long value.
|
||||
*
|
||||
* @implSpec The default implementation throws IllegalStateException.
|
||||
*
|
||||
* @throws IllegalStateException if this sink does not accept long values
|
||||
*/
|
||||
default void accept(long value) {
|
||||
throw new IllegalStateException("called wrong accept method");
|
||||
}
|
||||
|
||||
/**
|
||||
* Accepts a double value.
|
||||
*
|
||||
* @implSpec The default implementation throws IllegalStateException.
|
||||
*
|
||||
* @throws IllegalStateException if this sink does not accept double values
|
||||
*/
|
||||
default void accept(double value) {
|
||||
throw new IllegalStateException("called wrong accept method");
|
||||
}
|
||||
|
||||
/**
|
||||
* {@code Sink} that implements {@code Sink<Integer>}, re-abstracts
|
||||
* {@code accept(int)}, and wires {@code accept(Integer)} to bridge to
|
||||
* {@code accept(int)}.
|
||||
*/
|
||||
interface OfInt extends Sink<Integer>, IntConsumer {
|
||||
@Override
|
||||
void accept(int value);
|
||||
|
||||
@Override
|
||||
default void accept(Integer i) {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
|
||||
accept(i.intValue());
|
||||
}
|
||||
}
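Since Sink is package-private, the protocol described above cannot be exercised directly from user code; the following stand-alone sketch (hypothetical types, illustrative only) mirrors the begin/accept/end discipline that OfInt and the other specializations follow.

import java.util.function.IntConsumer;

class MiniSinkDemo {
    static final class SummingSink implements IntConsumer {
        private long sum;
        private boolean active;

        void begin(long size) { active = true; sum = 0; }  // size hint unused here
        @Override public void accept(int value) {
            if (!active) throw new IllegalStateException("begin() not called");
            sum += value;
        }
        long end() { active = false; return sum; }
    }

    public static void main(String[] args) {
        SummingSink sink = new SummingSink();
        sink.begin(-1);                      // -1: size unknown
        for (int i = 1; i <= 5; i++) sink.accept(i);
        System.out.println(sink.end());      // 15
    }
}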
|
||||
|
||||
/**
|
||||
* {@code Sink} that implements {@code Sink<Long>}, re-abstracts
|
||||
* {@code accept(long)}, and wires {@code accept(Long)} to bridge to
|
||||
* {@code accept(long)}.
|
||||
*/
|
||||
interface OfLong extends Sink<Long>, LongConsumer {
|
||||
@Override
|
||||
void accept(long value);
|
||||
|
||||
@Override
|
||||
default void accept(Long i) {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Sink.OfLong.accept(Long)");
|
||||
accept(i.longValue());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@code Sink} that implements {@code Sink<Double>}, re-abstracts
|
||||
* {@code accept(double)}, and wires {@code accept(Double)} to bridge to
|
||||
* {@code accept(double)}.
|
||||
*/
|
||||
interface OfDouble extends Sink<Double>, DoubleConsumer {
|
||||
@Override
|
||||
void accept(double value);
|
||||
|
||||
@Override
|
||||
default void accept(Double i) {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} calling Sink.OfDouble.accept(Double)");
|
||||
accept(i.doubleValue());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@code Sink} implementation for creating chains of
|
||||
* sinks. The {@code begin}, {@code end}, and
|
||||
* {@code cancellationRequested} methods are wired to chain to the
|
||||
* downstream {@code Sink}. This implementation takes a downstream
|
||||
* {@code Sink} of unknown input shape and produces a {@code Sink<T>}. The
|
||||
* implementation of the {@code accept()} method must call the correct
|
||||
* {@code accept()} method on the downstream {@code Sink}.
|
||||
*/
|
||||
static abstract class ChainedReference<T, E_OUT> implements Sink<T> {
|
||||
protected final Sink<? super E_OUT> downstream;
|
||||
|
||||
public ChainedReference(Sink<? super E_OUT> downstream) {
|
||||
this.downstream = Objects.requireNonNull(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@code Sink} implementation designed for creating chains of
|
||||
* sinks. The {@code begin}, {@code end}, and
|
||||
* {@code cancellationRequested} methods are wired to chain to the
|
||||
* downstream {@code Sink}. This implementation takes a downstream
|
||||
* {@code Sink} of unknown input shape and produces a {@code Sink.OfInt}.
|
||||
* The implementation of the {@code accept()} method must call the correct
|
||||
* {@code accept()} method on the downstream {@code Sink}.
|
||||
*/
|
||||
static abstract class ChainedInt<E_OUT> implements Sink.OfInt {
|
||||
protected final Sink<? super E_OUT> downstream;
|
||||
|
||||
public ChainedInt(Sink<? super E_OUT> downstream) {
|
||||
this.downstream = Objects.requireNonNull(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@code Sink} implementation designed for creating chains of
|
||||
* sinks. The {@code begin}, {@code end}, and
|
||||
* {@code cancellationRequested} methods are wired to chain to the
|
||||
* downstream {@code Sink}. This implementation takes a downstream
|
||||
* {@code Sink} of unknown input shape and produces a {@code Sink.OfLong}.
|
||||
* The implementation of the {@code accept()} method must call the correct
|
||||
* {@code accept()} method on the downstream {@code Sink}.
|
||||
*/
|
||||
static abstract class ChainedLong<E_OUT> implements Sink.OfLong {
|
||||
protected final Sink<? super E_OUT> downstream;
|
||||
|
||||
public ChainedLong(Sink<? super E_OUT> downstream) {
|
||||
this.downstream = Objects.requireNonNull(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@code Sink} implementation designed for creating chains of
|
||||
* sinks. The {@code begin}, {@code end}, and
|
||||
* {@code cancellationRequested} methods are wired to chain to the
|
||||
* downstream {@code Sink}. This implementation takes a downstream
|
||||
* {@code Sink} of unknown input shape and produces a {@code Sink.OfDouble}.
|
||||
* The implementation of the {@code accept()} method must call the correct
|
||||
* {@code accept()} method on the downstream {@code Sink}.
|
||||
*/
|
||||
static abstract class ChainedDouble<E_OUT> implements Sink.OfDouble {
|
||||
protected final Sink<? super E_OUT> downstream;
|
||||
|
||||
public ChainedDouble(Sink<? super E_OUT> downstream) {
|
||||
this.downstream = Objects.requireNonNull(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return downstream.cancellationRequested();
|
||||
}
|
||||
}
|
||||
}
|
||||
715
jdkSrc/jdk8/java/util/stream/SliceOps.java
Normal file
715
jdkSrc/jdk8/java/util/stream/SliceOps.java
Normal file
@@ -0,0 +1,715 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Spliterator;
|
||||
import java.util.concurrent.CountedCompleter;
|
||||
import java.util.function.IntFunction;
|
||||
|
||||
/**
|
||||
* Factory for instances of a short-circuiting stateful intermediate operations
|
||||
* that produce subsequences of their input stream.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class SliceOps {
|
||||
|
||||
// No instances
|
||||
private SliceOps() { }
|
||||
|
||||
/**
|
||||
* Calculates the sliced size given the current size, number of elements
|
||||
* skip, and the number of elements to limit.
|
||||
*
|
||||
* @param size the current size
|
||||
* @param skip the number of elements to skip, assumed to be >= 0
|
||||
* @param limit the number of elements to limit, assumed to be >= 0, with
|
||||
* a value of {@code Long.MAX_VALUE} if there is no limit
|
||||
* @return the sliced size
|
||||
*/
|
||||
private static long calcSize(long size, long skip, long limit) {
|
||||
return size >= 0 ? Math.max(-1, Math.min(size - skip, limit)) : -1;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculates the slice fence, which is one past the index of the slice
|
||||
* range
|
||||
* @param skip the number of elements to skip, assumed to be >= 0
|
||||
* @param limit the number of elements to limit, assumed to be >= 0, with
|
||||
* a value of {@code Long.MAX_VALUE} if there is no limit
|
||||
* @return the slice fence.
|
||||
*/
|
||||
private static long calcSliceFence(long skip, long limit) {
|
||||
long sliceFence = limit >= 0 ? skip + limit : Long.MAX_VALUE;
|
||||
// Check for overflow
|
||||
return (sliceFence >= 0) ? sliceFence : Long.MAX_VALUE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a slice spliterator given a stream shape governing the
|
||||
* spliterator type. Requires that the underlying Spliterator
|
||||
* be SUBSIZED.
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
private static <P_IN> Spliterator<P_IN> sliceSpliterator(StreamShape shape,
|
||||
Spliterator<P_IN> s,
|
||||
long skip, long limit) {
|
||||
assert s.hasCharacteristics(Spliterator.SUBSIZED);
|
||||
long sliceFence = calcSliceFence(skip, limit);
|
||||
switch (shape) {
|
||||
case REFERENCE:
|
||||
return new StreamSpliterators
|
||||
.SliceSpliterator.OfRef<>(s, skip, sliceFence);
|
||||
case INT_VALUE:
|
||||
return (Spliterator<P_IN>) new StreamSpliterators
|
||||
.SliceSpliterator.OfInt((Spliterator.OfInt) s, skip, sliceFence);
|
||||
case LONG_VALUE:
|
||||
return (Spliterator<P_IN>) new StreamSpliterators
|
||||
.SliceSpliterator.OfLong((Spliterator.OfLong) s, skip, sliceFence);
|
||||
case DOUBLE_VALUE:
|
||||
return (Spliterator<P_IN>) new StreamSpliterators
|
||||
.SliceSpliterator.OfDouble((Spliterator.OfDouble) s, skip, sliceFence);
|
||||
default:
|
||||
throw new IllegalStateException("Unknown shape " + shape);
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
private static <T> IntFunction<T[]> castingArray() {
|
||||
return size -> (T[]) new Object[size];
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "slice" operation to the provided stream. The slice operation
|
||||
* may be may be skip-only, limit-only, or skip-and-limit.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
* @param skip the number of elements to skip. Must be >= 0.
|
||||
* @param limit the maximum size of the resulting stream, or -1 if no limit
|
||||
* is to be imposed
|
||||
*/
|
||||
public static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
|
||||
long skip, long limit) {
|
||||
if (skip < 0)
|
||||
throw new IllegalArgumentException("Skip must be non-negative: " + skip);
|
||||
|
||||
return new ReferencePipeline.StatefulOp<T, T>(upstream, StreamShape.REFERENCE,
|
||||
flags(limit)) {
|
||||
Spliterator<T> unorderedSkipLimitSpliterator(Spliterator<T> s,
|
||||
long skip, long limit, long sizeIfKnown) {
|
||||
if (skip <= sizeIfKnown) {
|
||||
// Use just the limit if the number of elements
|
||||
// to skip is <= the known pipeline size
|
||||
limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
|
||||
skip = 0;
|
||||
}
|
||||
return new StreamSpliterators.UnorderedSliceSpliterator.OfRef<>(s, skip, limit);
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Spliterator<T> opEvaluateParallelLazy(PipelineHelper<T> helper, Spliterator<P_IN> spliterator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
return new StreamSpliterators.SliceSpliterator.OfRef<>(
|
||||
helper.wrapSpliterator(spliterator),
|
||||
skip,
|
||||
calcSliceFence(skip, limit));
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return unorderedSkipLimitSpliterator(
|
||||
helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
}
|
||||
else {
|
||||
// @@@ OOMEs will occur for LongStream.longs().filter(i -> true).limit(n)
|
||||
// regardless of the value of n
|
||||
// Need to adjust the target size of splitting for the
|
||||
// SliceTask from say (size / k) to say min(size / k, 1 << 14)
|
||||
// This will limit the size of the buffers created at the leaf nodes
|
||||
// cancellation will be more aggressive cancelling later tasks
|
||||
// if the target slice size has been reached from a given task,
|
||||
// cancellation should also clear local results if any
|
||||
return new SliceTask<>(this, helper, spliterator, castingArray(), skip, limit).
|
||||
invoke().spliterator();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<T[]> generator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
// Because the pipeline is SIZED the slice spliterator
|
||||
// can be created from the source, this requires matching
|
||||
// to shape of the source, and is potentially more efficient
|
||||
// than creating the slice spliterator from the pipeline
|
||||
// wrapping spliterator
|
||||
Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
|
||||
return Nodes.collect(helper, s, true, generator);
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
Spliterator<T> s = unorderedSkipLimitSpliterator(
|
||||
helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
// Collect using this pipeline, which is empty and therefore
|
||||
// can be used with the pipeline wrapping spliterator
|
||||
// Note that we cannot create a slice spliterator from
|
||||
// the source spliterator if the pipeline is not SIZED
|
||||
return Nodes.collect(this, s, true, generator);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
|
||||
invoke();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Sink<T> opWrapSink(int flags, Sink<T> sink) {
|
||||
return new Sink.ChainedReference<T, T>(sink) {
|
||||
long n = skip;
|
||||
long m = limit >= 0 ? limit : Long.MAX_VALUE;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(calcSize(size, skip, m));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
if (n == 0) {
|
||||
if (m > 0) {
|
||||
m--;
|
||||
downstream.accept(t);
|
||||
}
|
||||
}
|
||||
else {
|
||||
n--;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return m == 0 || downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "slice" operation to the provided IntStream. The slice
|
||||
* operation may be may be skip-only, limit-only, or skip-and-limit.
|
||||
*
|
||||
* @param upstream An IntStream
|
||||
* @param skip The number of elements to skip. Must be >= 0.
|
||||
* @param limit The maximum size of the resulting stream, or -1 if no limit
|
||||
* is to be imposed
|
||||
*/
|
||||
public static IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream,
|
||||
long skip, long limit) {
|
||||
if (skip < 0)
|
||||
throw new IllegalArgumentException("Skip must be non-negative: " + skip);
|
||||
|
||||
return new IntPipeline.StatefulOp<Integer>(upstream, StreamShape.INT_VALUE,
|
||||
flags(limit)) {
|
||||
Spliterator.OfInt unorderedSkipLimitSpliterator(
|
||||
Spliterator.OfInt s, long skip, long limit, long sizeIfKnown) {
|
||||
if (skip <= sizeIfKnown) {
|
||||
// Use just the limit if the number of elements
|
||||
// to skip is <= the known pipeline size
|
||||
limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
|
||||
skip = 0;
|
||||
}
|
||||
return new StreamSpliterators.UnorderedSliceSpliterator.OfInt(s, skip, limit);
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Spliterator<Integer> opEvaluateParallelLazy(PipelineHelper<Integer> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
return new StreamSpliterators.SliceSpliterator.OfInt(
|
||||
(Spliterator.OfInt) helper.wrapSpliterator(spliterator),
|
||||
skip,
|
||||
calcSliceFence(skip, limit));
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return unorderedSkipLimitSpliterator(
|
||||
(Spliterator.OfInt) helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, Integer[]::new, skip, limit).
|
||||
invoke().spliterator();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Integer[]> generator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
// Because the pipeline is SIZED the slice spliterator
|
||||
// can be created from the source, this requires matching
|
||||
// to shape of the source, and is potentially more efficient
|
||||
// than creating the slice spliterator from the pipeline
|
||||
// wrapping spliterator
|
||||
Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
|
||||
return Nodes.collectInt(helper, s, true);
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
Spliterator.OfInt s = unorderedSkipLimitSpliterator(
|
||||
(Spliterator.OfInt) helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
// Collect using this pipeline, which is empty and therefore
|
||||
// can be used with the pipeline wrapping spliterator
|
||||
// Note that we cannot create a slice spliterator from
|
||||
// the source spliterator if the pipeline is not SIZED
|
||||
return Nodes.collectInt(this, s, true);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
|
||||
invoke();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
return new Sink.ChainedInt<Integer>(sink) {
|
||||
long n = skip;
|
||||
long m = limit >= 0 ? limit : Long.MAX_VALUE;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(calcSize(size, skip, m));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
if (n == 0) {
|
||||
if (m > 0) {
|
||||
m--;
|
||||
downstream.accept(t);
|
||||
}
|
||||
}
|
||||
else {
|
||||
n--;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return m == 0 || downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "slice" operation to the provided LongStream. The slice
|
||||
* operation may be may be skip-only, limit-only, or skip-and-limit.
|
||||
*
|
||||
* @param upstream A LongStream
|
||||
* @param skip The number of elements to skip. Must be >= 0.
|
||||
* @param limit The maximum size of the resulting stream, or -1 if no limit
|
||||
* is to be imposed
|
||||
*/
|
||||
public static LongStream makeLong(AbstractPipeline<?, Long, ?> upstream,
|
||||
long skip, long limit) {
|
||||
if (skip < 0)
|
||||
throw new IllegalArgumentException("Skip must be non-negative: " + skip);
|
||||
|
||||
return new LongPipeline.StatefulOp<Long>(upstream, StreamShape.LONG_VALUE,
|
||||
flags(limit)) {
|
||||
Spliterator.OfLong unorderedSkipLimitSpliterator(
|
||||
Spliterator.OfLong s, long skip, long limit, long sizeIfKnown) {
|
||||
if (skip <= sizeIfKnown) {
|
||||
// Use just the limit if the number of elements
|
||||
// to skip is <= the known pipeline size
|
||||
limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
|
||||
skip = 0;
|
||||
}
|
||||
return new StreamSpliterators.UnorderedSliceSpliterator.OfLong(s, skip, limit);
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Spliterator<Long> opEvaluateParallelLazy(PipelineHelper<Long> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
return new StreamSpliterators.SliceSpliterator.OfLong(
|
||||
(Spliterator.OfLong) helper.wrapSpliterator(spliterator),
|
||||
skip,
|
||||
calcSliceFence(skip, limit));
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return unorderedSkipLimitSpliterator(
|
||||
(Spliterator.OfLong) helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, Long[]::new, skip, limit).
|
||||
invoke().spliterator();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Long[]> generator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
// Because the pipeline is SIZED the slice spliterator
|
||||
// can be created from the source, this requires matching
|
||||
// to shape of the source, and is potentially more efficient
|
||||
// than creating the slice spliterator from the pipeline
|
||||
// wrapping spliterator
|
||||
Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
|
||||
return Nodes.collectLong(helper, s, true);
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
Spliterator.OfLong s = unorderedSkipLimitSpliterator(
|
||||
(Spliterator.OfLong) helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
// Collect using this pipeline, which is empty and therefore
|
||||
// can be used with the pipeline wrapping spliterator
|
||||
// Note that we cannot create a slice spliterator from
|
||||
// the source spliterator if the pipeline is not SIZED
|
||||
return Nodes.collectLong(this, s, true);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
|
||||
invoke();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
return new Sink.ChainedLong<Long>(sink) {
|
||||
long n = skip;
|
||||
long m = limit >= 0 ? limit : Long.MAX_VALUE;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(calcSize(size, skip, m));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
if (n == 0) {
|
||||
if (m > 0) {
|
||||
m--;
|
||||
downstream.accept(t);
|
||||
}
|
||||
}
|
||||
else {
|
||||
n--;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return m == 0 || downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "slice" operation to the provided DoubleStream. The slice
|
||||
* operation may be may be skip-only, limit-only, or skip-and-limit.
|
||||
*
|
||||
* @param upstream A DoubleStream
|
||||
* @param skip The number of elements to skip. Must be >= 0.
|
||||
* @param limit The maximum size of the resulting stream, or -1 if no limit
|
||||
* is to be imposed
|
||||
*/
|
||||
public static DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream,
|
||||
long skip, long limit) {
|
||||
if (skip < 0)
|
||||
throw new IllegalArgumentException("Skip must be non-negative: " + skip);
|
||||
|
||||
return new DoublePipeline.StatefulOp<Double>(upstream, StreamShape.DOUBLE_VALUE,
|
||||
flags(limit)) {
|
||||
Spliterator.OfDouble unorderedSkipLimitSpliterator(
|
||||
Spliterator.OfDouble s, long skip, long limit, long sizeIfKnown) {
|
||||
if (skip <= sizeIfKnown) {
|
||||
// Use just the limit if the number of elements
|
||||
// to skip is <= the known pipeline size
|
||||
limit = limit >= 0 ? Math.min(limit, sizeIfKnown - skip) : sizeIfKnown - skip;
|
||||
skip = 0;
|
||||
}
|
||||
return new StreamSpliterators.UnorderedSliceSpliterator.OfDouble(s, skip, limit);
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Spliterator<Double> opEvaluateParallelLazy(PipelineHelper<Double> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
return new StreamSpliterators.SliceSpliterator.OfDouble(
|
||||
(Spliterator.OfDouble) helper.wrapSpliterator(spliterator),
|
||||
skip,
|
||||
calcSliceFence(skip, limit));
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return unorderedSkipLimitSpliterator(
|
||||
(Spliterator.OfDouble) helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, Double[]::new, skip, limit).
|
||||
invoke().spliterator();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
<P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Double[]> generator) {
|
||||
long size = helper.exactOutputSizeIfKnown(spliterator);
|
||||
if (size > 0 && spliterator.hasCharacteristics(Spliterator.SUBSIZED)) {
|
||||
// Because the pipeline is SIZED the slice spliterator
|
||||
// can be created from the source, this requires matching
|
||||
// to shape of the source, and is potentially more efficient
|
||||
// than creating the slice spliterator from the pipeline
|
||||
// wrapping spliterator
|
||||
Spliterator<P_IN> s = sliceSpliterator(helper.getSourceShape(), spliterator, skip, limit);
|
||||
return Nodes.collectDouble(helper, s, true);
|
||||
} else if (!StreamOpFlag.ORDERED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
Spliterator.OfDouble s = unorderedSkipLimitSpliterator(
|
||||
(Spliterator.OfDouble) helper.wrapSpliterator(spliterator),
|
||||
skip, limit, size);
|
||||
// Collect using this pipeline, which is empty and therefore
|
||||
// can be used with the pipeline wrapping spliterator
|
||||
// Note that we cannot create a slice spliterator from
|
||||
// the source spliterator if the pipeline is not SIZED
|
||||
return Nodes.collectDouble(this, s, true);
|
||||
}
|
||||
else {
|
||||
return new SliceTask<>(this, helper, spliterator, generator, skip, limit).
|
||||
invoke();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
return new Sink.ChainedDouble<Double>(sink) {
|
||||
long n = skip;
|
||||
long m = limit >= 0 ? limit : Long.MAX_VALUE;
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
downstream.begin(calcSize(size, skip, m));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
if (n == 0) {
|
||||
if (m > 0) {
|
||||
m--;
|
||||
downstream.accept(t);
|
||||
}
|
||||
}
|
||||
else {
|
||||
n--;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean cancellationRequested() {
|
||||
return m == 0 || downstream.cancellationRequested();
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private static int flags(long limit) {
|
||||
return StreamOpFlag.NOT_SIZED | ((limit != -1) ? StreamOpFlag.IS_SHORT_CIRCUIT : 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* {@code ForkJoinTask} implementing slice computation.
|
||||
*
|
||||
* @param <P_IN> Input element type to the stream pipeline
|
||||
* @param <P_OUT> Output element type from the stream pipeline
|
||||
*/
|
||||
@SuppressWarnings("serial")
|
||||
private static final class SliceTask<P_IN, P_OUT>
|
||||
extends AbstractShortCircuitTask<P_IN, P_OUT, Node<P_OUT>, SliceTask<P_IN, P_OUT>> {
|
||||
private final AbstractPipeline<P_OUT, P_OUT, ?> op;
|
||||
private final IntFunction<P_OUT[]> generator;
|
||||
private final long targetOffset, targetSize;
|
||||
private long thisNodeSize;
|
||||
|
||||
private volatile boolean completed;
|
||||
|
||||
SliceTask(AbstractPipeline<P_OUT, P_OUT, ?> op,
|
||||
PipelineHelper<P_OUT> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<P_OUT[]> generator,
|
||||
long offset, long size) {
|
||||
super(helper, spliterator);
|
||||
this.op = op;
|
||||
this.generator = generator;
|
||||
this.targetOffset = offset;
|
||||
this.targetSize = size;
|
||||
}
|
||||
|
||||
SliceTask(SliceTask<P_IN, P_OUT> parent, Spliterator<P_IN> spliterator) {
|
||||
super(parent, spliterator);
|
||||
this.op = parent.op;
|
||||
this.generator = parent.generator;
|
||||
this.targetOffset = parent.targetOffset;
|
||||
this.targetSize = parent.targetSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected SliceTask<P_IN, P_OUT> makeChild(Spliterator<P_IN> spliterator) {
|
||||
return new SliceTask<>(this, spliterator);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final Node<P_OUT> getEmptyResult() {
|
||||
return Nodes.emptyNode(op.getOutputShape());
|
||||
}
|
||||
|
||||
@Override
|
||||
protected final Node<P_OUT> doLeaf() {
|
||||
if (isRoot()) {
|
||||
long sizeIfKnown = StreamOpFlag.SIZED.isPreserved(op.sourceOrOpFlags)
|
||||
? op.exactOutputSizeIfKnown(spliterator)
|
||||
: -1;
|
||||
final Node.Builder<P_OUT> nb = op.makeNodeBuilder(sizeIfKnown, generator);
|
||||
Sink<P_OUT> opSink = op.opWrapSink(helper.getStreamAndOpFlags(), nb);
|
||||
helper.copyIntoWithCancel(helper.wrapSink(opSink), spliterator);
|
||||
// There is no need to truncate since the op performs the
|
||||
// skipping and limiting of elements
|
||||
return nb.build();
|
||||
}
|
||||
else {
|
||||
Node<P_OUT> node = helper.wrapAndCopyInto(helper.makeNodeBuilder(-1, generator),
|
||||
spliterator).build();
|
||||
thisNodeSize = node.count();
|
||||
completed = true;
|
||||
spliterator = null;
|
||||
return node;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public final void onCompletion(CountedCompleter<?> caller) {
|
||||
if (!isLeaf()) {
|
||||
Node<P_OUT> result;
|
||||
thisNodeSize = leftChild.thisNodeSize + rightChild.thisNodeSize;
|
||||
if (canceled) {
|
||||
thisNodeSize = 0;
|
||||
result = getEmptyResult();
|
||||
}
|
||||
else if (thisNodeSize == 0)
|
||||
result = getEmptyResult();
|
||||
else if (leftChild.thisNodeSize == 0)
|
||||
result = rightChild.getLocalResult();
|
||||
else {
|
||||
result = Nodes.conc(op.getOutputShape(),
|
||||
leftChild.getLocalResult(), rightChild.getLocalResult());
|
||||
}
|
||||
setLocalResult(isRoot() ? doTruncate(result) : result);
|
||||
completed = true;
|
||||
}
|
||||
if (targetSize >= 0
|
||||
&& !isRoot()
|
||||
&& isLeftCompleted(targetOffset + targetSize))
|
||||
cancelLaterNodes();
|
||||
|
||||
super.onCompletion(caller);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void cancel() {
|
||||
super.cancel();
|
||||
if (completed)
|
||||
setLocalResult(getEmptyResult());
|
||||
}
|
||||
|
||||
private Node<P_OUT> doTruncate(Node<P_OUT> input) {
|
||||
long to = targetSize >= 0 ? Math.min(input.count(), targetOffset + targetSize) : thisNodeSize;
|
||||
return input.truncate(targetOffset, to, generator);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if the number of completed elements in this node and nodes
|
||||
* to the left of this node is greater than or equal to the target size.
|
||||
*
|
||||
* @param target the target size
|
||||
* @return true if the number of elements is greater than or equal to
|
||||
* the target size, otherwise false.
|
||||
*/
|
||||
private boolean isLeftCompleted(long target) {
|
||||
long size = completed ? thisNodeSize : completedSize(target);
|
||||
if (size >= target)
|
||||
return true;
|
||||
for (SliceTask<P_IN, P_OUT> parent = getParent(), node = this;
|
||||
parent != null;
|
||||
node = parent, parent = parent.getParent()) {
|
||||
if (node == parent.rightChild) {
|
||||
SliceTask<P_IN, P_OUT> left = parent.leftChild;
|
||||
if (left != null) {
|
||||
size += left.completedSize(target);
|
||||
if (size >= target)
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return size >= target;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute the number of completed elements in this node.
|
||||
* <p>
|
||||
* Computation terminates if all nodes have been processed or the
|
||||
* number of completed elements is greater than or equal to the target
|
||||
* size.
|
||||
*
|
||||
* @param target the target size
|
||||
* @return the number of completed elements
|
||||
*/
|
||||
private long completedSize(long target) {
|
||||
if (completed)
|
||||
return thisNodeSize;
|
||||
else {
|
||||
SliceTask<P_IN, P_OUT> left = leftChild;
|
||||
SliceTask<P_IN, P_OUT> right = rightChild;
|
||||
if (left == null || right == null) {
|
||||
// must be completed
|
||||
return thisNodeSize;
|
||||
}
|
||||
else {
|
||||
long leftSize = left.completedSize(target);
|
||||
return (leftSize >= target) ? leftSize : leftSize + right.completedSize(target);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
709
jdkSrc/jdk8/java/util/stream/SortedOps.java
Normal file
709
jdkSrc/jdk8/java/util/stream/SortedOps.java
Normal file
@@ -0,0 +1,709 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.Objects;
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.IntFunction;
|
||||
|
||||
|
||||
/**
|
||||
* Factory methods for transforming streams into sorted streams.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class SortedOps {
|
||||
|
||||
private SortedOps() { }
|
||||
|
||||
/**
|
||||
* Appends a "sorted" operation to the provided stream.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
*/
|
||||
static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream) {
|
||||
return new OfRef<>(upstream);
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "sorted" operation to the provided stream.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
* @param comparator the comparator to order elements by
|
||||
*/
|
||||
static <T> Stream<T> makeRef(AbstractPipeline<?, T, ?> upstream,
|
||||
Comparator<? super T> comparator) {
|
||||
return new OfRef<>(upstream, comparator);
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "sorted" operation to the provided stream.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
*/
|
||||
static <T> IntStream makeInt(AbstractPipeline<?, Integer, ?> upstream) {
|
||||
return new OfInt(upstream);
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "sorted" operation to the provided stream.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
*/
|
||||
static <T> LongStream makeLong(AbstractPipeline<?, Long, ?> upstream) {
|
||||
return new OfLong(upstream);
|
||||
}
|
||||
|
||||
/**
|
||||
* Appends a "sorted" operation to the provided stream.
|
||||
*
|
||||
* @param <T> the type of both input and output elements
|
||||
* @param upstream a reference stream with element type T
|
||||
*/
|
||||
static <T> DoubleStream makeDouble(AbstractPipeline<?, Double, ?> upstream) {
|
||||
return new OfDouble(upstream);
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized subtype for sorting reference streams
|
||||
*/
|
||||
private static final class OfRef<T> extends ReferencePipeline.StatefulOp<T, T> {
|
||||
/**
|
||||
* Comparator used for sorting
|
||||
*/
|
||||
private final boolean isNaturalSort;
|
||||
private final Comparator<? super T> comparator;
|
||||
|
||||
/**
|
||||
* Sort using natural order of {@literal <T>} which must be
|
||||
* {@code Comparable}.
|
||||
*/
|
||||
OfRef(AbstractPipeline<?, T, ?> upstream) {
|
||||
super(upstream, StreamShape.REFERENCE,
|
||||
StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
|
||||
this.isNaturalSort = true;
|
||||
// Will throw CCE when we try to sort if T is not Comparable
|
||||
@SuppressWarnings("unchecked")
|
||||
Comparator<? super T> comp = (Comparator<? super T>) Comparator.naturalOrder();
|
||||
this.comparator = comp;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sort using the provided comparator.
|
||||
*
|
||||
* @param comparator The comparator to be used to evaluate ordering.
|
||||
*/
|
||||
OfRef(AbstractPipeline<?, T, ?> upstream, Comparator<? super T> comparator) {
|
||||
super(upstream, StreamShape.REFERENCE,
|
||||
StreamOpFlag.IS_ORDERED | StreamOpFlag.NOT_SORTED);
|
||||
this.isNaturalSort = false;
|
||||
this.comparator = Objects.requireNonNull(comparator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sink<T> opWrapSink(int flags, Sink<T> sink) {
|
||||
Objects.requireNonNull(sink);
|
||||
|
||||
// If the input is already naturally sorted and this operation
|
||||
// also naturally sorted then this is a no-op
|
||||
if (StreamOpFlag.SORTED.isKnown(flags) && isNaturalSort)
|
||||
return sink;
|
||||
else if (StreamOpFlag.SIZED.isKnown(flags))
|
||||
return new SizedRefSortingSink<>(sink, comparator);
|
||||
else
|
||||
return new RefSortingSink<>(sink, comparator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> Node<T> opEvaluateParallel(PipelineHelper<T> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<T[]> generator) {
|
||||
// If the input is already naturally sorted and this operation
|
||||
// naturally sorts then collect the output
|
||||
if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags()) && isNaturalSort) {
|
||||
return helper.evaluate(spliterator, false, generator);
|
||||
}
|
||||
else {
|
||||
// @@@ Weak two-pass parallel implementation; parallel collect, parallel sort
|
||||
T[] flattenedData = helper.evaluate(spliterator, true, generator).asArray(generator);
|
||||
Arrays.parallelSort(flattenedData, comparator);
|
||||
return Nodes.node(flattenedData);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized subtype for sorting int streams.
|
||||
*/
|
||||
private static final class OfInt extends IntPipeline.StatefulOp<Integer> {
|
||||
OfInt(AbstractPipeline<?, Integer, ?> upstream) {
|
||||
super(upstream, StreamShape.INT_VALUE,
|
||||
StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sink<Integer> opWrapSink(int flags, Sink<Integer> sink) {
|
||||
Objects.requireNonNull(sink);
|
||||
|
||||
if (StreamOpFlag.SORTED.isKnown(flags))
|
||||
return sink;
|
||||
else if (StreamOpFlag.SIZED.isKnown(flags))
|
||||
return new SizedIntSortingSink(sink);
|
||||
else
|
||||
return new IntSortingSink(sink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> Node<Integer> opEvaluateParallel(PipelineHelper<Integer> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Integer[]> generator) {
|
||||
if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return helper.evaluate(spliterator, false, generator);
|
||||
}
|
||||
else {
|
||||
Node.OfInt n = (Node.OfInt) helper.evaluate(spliterator, true, generator);
|
||||
|
||||
int[] content = n.asPrimitiveArray();
|
||||
Arrays.parallelSort(content);
|
||||
|
||||
return Nodes.node(content);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized subtype for sorting long streams.
|
||||
*/
|
||||
private static final class OfLong extends LongPipeline.StatefulOp<Long> {
|
||||
OfLong(AbstractPipeline<?, Long, ?> upstream) {
|
||||
super(upstream, StreamShape.LONG_VALUE,
|
||||
StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sink<Long> opWrapSink(int flags, Sink<Long> sink) {
|
||||
Objects.requireNonNull(sink);
|
||||
|
||||
if (StreamOpFlag.SORTED.isKnown(flags))
|
||||
return sink;
|
||||
else if (StreamOpFlag.SIZED.isKnown(flags))
|
||||
return new SizedLongSortingSink(sink);
|
||||
else
|
||||
return new LongSortingSink(sink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> Node<Long> opEvaluateParallel(PipelineHelper<Long> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Long[]> generator) {
|
||||
if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return helper.evaluate(spliterator, false, generator);
|
||||
}
|
||||
else {
|
||||
Node.OfLong n = (Node.OfLong) helper.evaluate(spliterator, true, generator);
|
||||
|
||||
long[] content = n.asPrimitiveArray();
|
||||
Arrays.parallelSort(content);
|
||||
|
||||
return Nodes.node(content);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Specialized subtype for sorting double streams.
|
||||
*/
|
||||
private static final class OfDouble extends DoublePipeline.StatefulOp<Double> {
|
||||
OfDouble(AbstractPipeline<?, Double, ?> upstream) {
|
||||
super(upstream, StreamShape.DOUBLE_VALUE,
|
||||
StreamOpFlag.IS_ORDERED | StreamOpFlag.IS_SORTED);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Sink<Double> opWrapSink(int flags, Sink<Double> sink) {
|
||||
Objects.requireNonNull(sink);
|
||||
|
||||
if (StreamOpFlag.SORTED.isKnown(flags))
|
||||
return sink;
|
||||
else if (StreamOpFlag.SIZED.isKnown(flags))
|
||||
return new SizedDoubleSortingSink(sink);
|
||||
else
|
||||
return new DoubleSortingSink(sink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public <P_IN> Node<Double> opEvaluateParallel(PipelineHelper<Double> helper,
|
||||
Spliterator<P_IN> spliterator,
|
||||
IntFunction<Double[]> generator) {
|
||||
if (StreamOpFlag.SORTED.isKnown(helper.getStreamAndOpFlags())) {
|
||||
return helper.evaluate(spliterator, false, generator);
|
||||
}
|
||||
else {
|
||||
Node.OfDouble n = (Node.OfDouble) helper.evaluate(spliterator, true, generator);
|
||||
|
||||
double[] content = n.asPrimitiveArray();
|
||||
Arrays.parallelSort(content);
|
||||
|
||||
return Nodes.node(content);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@link Sink} for implementing sort on reference streams.
|
||||
*
|
||||
* <p>
|
||||
* Note: documentation below applies to reference and all primitive sinks.
|
||||
* <p>
|
||||
* Sorting sinks first accept all elements, buffering then into an array
|
||||
* or a re-sizable data structure, if the size of the pipeline is known or
|
||||
* unknown respectively. At the end of the sink protocol those elements are
|
||||
* sorted and then pushed downstream.
|
||||
* This class records if {@link #cancellationRequested} is called. If so it
|
||||
* can be inferred that the source pushing source elements into the pipeline
|
||||
* knows that the pipeline is short-circuiting. In such cases sub-classes
|
||||
* pushing elements downstream will preserve the short-circuiting protocol
|
||||
* by calling {@code downstream.cancellationRequested()} and checking the
|
||||
* result is {@code false} before an element is pushed.
|
||||
* <p>
|
||||
* Note that the above behaviour is an optimization for sorting with
|
||||
* sequential streams. It is not an error that more elements, than strictly
|
||||
* required to produce a result, may flow through the pipeline. This can
|
||||
* occur, in general (not restricted to just sorting), for short-circuiting
|
||||
* parallel pipelines.
|
||||
*/
|
||||
private static abstract class AbstractRefSortingSink<T> extends Sink.ChainedReference<T, T> {
|
||||
protected final Comparator<? super T> comparator;
|
||||
// @@@ could be a lazy final value, if/when support is added
|
||||
// true if cancellationRequested() has been called
|
||||
protected boolean cancellationRequestedCalled;
|
||||
|
||||
AbstractRefSortingSink(Sink<? super T> downstream, Comparator<? super T> comparator) {
|
||||
super(downstream);
|
||||
this.comparator = comparator;
|
||||
}
|
||||
|
||||
/**
|
||||
* Records is cancellation is requested so short-circuiting behaviour
|
||||
* can be preserved when the sorted elements are pushed downstream.
|
||||
*
|
||||
* @return false, as this sink never short-circuits.
|
||||
*/
|
||||
@Override
|
||||
public final boolean cancellationRequested() {
|
||||
// If this method is called then an operation within the stream
|
||||
// pipeline is short-circuiting (see AbstractPipeline.copyInto).
|
||||
// Note that we cannot differentiate between an upstream or
|
||||
// downstream operation
|
||||
cancellationRequestedCalled = true;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on SIZED reference streams.
|
||||
*/
|
||||
private static final class SizedRefSortingSink<T> extends AbstractRefSortingSink<T> {
|
||||
private T[] array;
|
||||
private int offset;
|
||||
|
||||
SizedRefSortingSink(Sink<? super T> sink, Comparator<? super T> comparator) {
|
||||
super(sink, comparator);
|
||||
}
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
array = (T[]) new Object[(int) size];
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
Arrays.sort(array, 0, offset, comparator);
|
||||
downstream.begin(offset);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (int i = 0; i < offset; i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
else {
|
||||
for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
downstream.end();
|
||||
array = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
array[offset++] = t;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on reference streams.
|
||||
*/
|
||||
private static final class RefSortingSink<T> extends AbstractRefSortingSink<T> {
|
||||
private ArrayList<T> list;
|
||||
|
||||
RefSortingSink(Sink<? super T> sink, Comparator<? super T> comparator) {
|
||||
super(sink, comparator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
list = (size >= 0) ? new ArrayList<T>((int) size) : new ArrayList<T>();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
list.sort(comparator);
|
||||
downstream.begin(list.size());
|
||||
if (!cancellationRequestedCalled) {
|
||||
list.forEach(downstream::accept);
|
||||
}
|
||||
else {
|
||||
for (T t : list) {
|
||||
if (downstream.cancellationRequested()) break;
|
||||
downstream.accept(t);
|
||||
}
|
||||
}
|
||||
downstream.end();
|
||||
list = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
list.add(t);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@link Sink} for implementing sort on int streams.
|
||||
*/
|
||||
private static abstract class AbstractIntSortingSink extends Sink.ChainedInt<Integer> {
|
||||
// true if cancellationRequested() has been called
|
||||
protected boolean cancellationRequestedCalled;
|
||||
|
||||
AbstractIntSortingSink(Sink<? super Integer> downstream) {
|
||||
super(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean cancellationRequested() {
|
||||
cancellationRequestedCalled = true;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on SIZED int streams.
|
||||
*/
|
||||
private static final class SizedIntSortingSink extends AbstractIntSortingSink {
|
||||
private int[] array;
|
||||
private int offset;
|
||||
|
||||
SizedIntSortingSink(Sink<? super Integer> downstream) {
|
||||
super(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
array = new int[(int) size];
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
Arrays.sort(array, 0, offset);
|
||||
downstream.begin(offset);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (int i = 0; i < offset; i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
else {
|
||||
for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
downstream.end();
|
||||
array = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
array[offset++] = t;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on int streams.
|
||||
*/
|
||||
private static final class IntSortingSink extends AbstractIntSortingSink {
|
||||
private SpinedBuffer.OfInt b;
|
||||
|
||||
IntSortingSink(Sink<? super Integer> sink) {
|
||||
super(sink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
b = (size > 0) ? new SpinedBuffer.OfInt((int) size) : new SpinedBuffer.OfInt();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
int[] ints = b.asPrimitiveArray();
|
||||
Arrays.sort(ints);
|
||||
downstream.begin(ints.length);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (int anInt : ints)
|
||||
downstream.accept(anInt);
|
||||
}
|
||||
else {
|
||||
for (int anInt : ints) {
|
||||
if (downstream.cancellationRequested()) break;
|
||||
downstream.accept(anInt);
|
||||
}
|
||||
}
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
b.accept(t);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@link Sink} for implementing sort on long streams.
|
||||
*/
|
||||
private static abstract class AbstractLongSortingSink extends Sink.ChainedLong<Long> {
|
||||
// true if cancellationRequested() has been called
|
||||
protected boolean cancellationRequestedCalled;
|
||||
|
||||
AbstractLongSortingSink(Sink<? super Long> downstream) {
|
||||
super(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean cancellationRequested() {
|
||||
cancellationRequestedCalled = true;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on SIZED long streams.
|
||||
*/
|
||||
private static final class SizedLongSortingSink extends AbstractLongSortingSink {
|
||||
private long[] array;
|
||||
private int offset;
|
||||
|
||||
SizedLongSortingSink(Sink<? super Long> downstream) {
|
||||
super(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
array = new long[(int) size];
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
Arrays.sort(array, 0, offset);
|
||||
downstream.begin(offset);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (int i = 0; i < offset; i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
else {
|
||||
for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
downstream.end();
|
||||
array = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
array[offset++] = t;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on long streams.
|
||||
*/
|
||||
private static final class LongSortingSink extends AbstractLongSortingSink {
|
||||
private SpinedBuffer.OfLong b;
|
||||
|
||||
LongSortingSink(Sink<? super Long> sink) {
|
||||
super(sink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
b = (size > 0) ? new SpinedBuffer.OfLong((int) size) : new SpinedBuffer.OfLong();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
long[] longs = b.asPrimitiveArray();
|
||||
Arrays.sort(longs);
|
||||
downstream.begin(longs.length);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (long aLong : longs)
|
||||
downstream.accept(aLong);
|
||||
}
|
||||
else {
|
||||
for (long aLong : longs) {
|
||||
if (downstream.cancellationRequested()) break;
|
||||
downstream.accept(aLong);
|
||||
}
|
||||
}
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
b.accept(t);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Abstract {@link Sink} for implementing sort on long streams.
|
||||
*/
|
||||
private static abstract class AbstractDoubleSortingSink extends Sink.ChainedDouble<Double> {
|
||||
// true if cancellationRequested() has been called
|
||||
protected boolean cancellationRequestedCalled;
|
||||
|
||||
AbstractDoubleSortingSink(Sink<? super Double> downstream) {
|
||||
super(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public final boolean cancellationRequested() {
|
||||
cancellationRequestedCalled = true;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on SIZED double streams.
|
||||
*/
|
||||
private static final class SizedDoubleSortingSink extends AbstractDoubleSortingSink {
|
||||
private double[] array;
|
||||
private int offset;
|
||||
|
||||
SizedDoubleSortingSink(Sink<? super Double> downstream) {
|
||||
super(downstream);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
array = new double[(int) size];
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
Arrays.sort(array, 0, offset);
|
||||
downstream.begin(offset);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (int i = 0; i < offset; i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
else {
|
||||
for (int i = 0; i < offset && !downstream.cancellationRequested(); i++)
|
||||
downstream.accept(array[i]);
|
||||
}
|
||||
downstream.end();
|
||||
array = null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
array[offset++] = t;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link Sink} for implementing sort on double streams.
|
||||
*/
|
||||
private static final class DoubleSortingSink extends AbstractDoubleSortingSink {
|
||||
private SpinedBuffer.OfDouble b;
|
||||
|
||||
DoubleSortingSink(Sink<? super Double> sink) {
|
||||
super(sink);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void begin(long size) {
|
||||
if (size >= Nodes.MAX_ARRAY_SIZE)
|
||||
throw new IllegalArgumentException(Nodes.BAD_SIZE);
|
||||
b = (size > 0) ? new SpinedBuffer.OfDouble((int) size) : new SpinedBuffer.OfDouble();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void end() {
|
||||
double[] doubles = b.asPrimitiveArray();
|
||||
Arrays.sort(doubles);
|
||||
downstream.begin(doubles.length);
|
||||
if (!cancellationRequestedCalled) {
|
||||
for (double aDouble : doubles)
|
||||
downstream.accept(aDouble);
|
||||
}
|
||||
else {
|
||||
for (double aDouble : doubles) {
|
||||
if (downstream.cancellationRequested()) break;
|
||||
downstream.accept(aDouble);
|
||||
}
|
||||
}
|
||||
downstream.end();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
b.accept(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
1061
jdkSrc/jdk8/java/util/stream/SpinedBuffer.java
Normal file
1061
jdkSrc/jdk8/java/util/stream/SpinedBuffer.java
Normal file
File diff suppressed because it is too large
Load Diff
1145
jdkSrc/jdk8/java/util/stream/Stream.java
Normal file
1145
jdkSrc/jdk8/java/util/stream/Stream.java
Normal file
File diff suppressed because it is too large
Load Diff
753
jdkSrc/jdk8/java/util/stream/StreamOpFlag.java
Normal file
753
jdkSrc/jdk8/java/util/stream/StreamOpFlag.java
Normal file
@@ -0,0 +1,753 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.EnumMap;
|
||||
import java.util.Map;
|
||||
import java.util.Spliterator;
|
||||
|
||||
/**
|
||||
* Flags corresponding to characteristics of streams and operations. Flags are
|
||||
* utilized by the stream framework to control, specialize or optimize
|
||||
* computation.
|
||||
*
|
||||
* <p>
|
||||
* Stream flags may be used to describe characteristics of several different
|
||||
* entities associated with streams: stream sources, intermediate operations,
|
||||
* and terminal operations. Not all stream flags are meaningful for all
|
||||
* entities; the following table summarizes which flags are meaningful in what
|
||||
* contexts:
|
||||
*
|
||||
* <div>
|
||||
* <table>
|
||||
* <caption>Type Characteristics</caption>
|
||||
* <thead class="tableSubHeadingColor">
|
||||
* <tr>
|
||||
* <th colspan="2"> </th>
|
||||
* <th>{@code DISTINCT}</th>
|
||||
* <th>{@code SORTED}</th>
|
||||
* <th>{@code ORDERED}</th>
|
||||
* <th>{@code SIZED}</th>
|
||||
* <th>{@code SHORT_CIRCUIT}</th>
|
||||
* </tr>
|
||||
* </thead>
|
||||
* <tbody>
|
||||
* <tr>
|
||||
* <th colspan="2" class="tableSubHeadingColor">Stream source</th>
|
||||
* <td>Y</td>
|
||||
* <td>Y</td>
|
||||
* <td>Y</td>
|
||||
* <td>Y</td>
|
||||
* <td>N</td>
|
||||
* </tr>
|
||||
* <tr>
|
||||
* <th colspan="2" class="tableSubHeadingColor">Intermediate operation</th>
|
||||
* <td>PCI</td>
|
||||
* <td>PCI</td>
|
||||
* <td>PCI</td>
|
||||
* <td>PC</td>
|
||||
* <td>PI</td>
|
||||
* </tr>
|
||||
* <tr>
|
||||
* <th colspan="2" class="tableSubHeadingColor">Terminal operation</th>
|
||||
* <td>N</td>
|
||||
* <td>N</td>
|
||||
* <td>PC</td>
|
||||
* <td>N</td>
|
||||
* <td>PI</td>
|
||||
* </tr>
|
||||
* </tbody>
|
||||
* <tfoot>
|
||||
* <tr>
|
||||
* <th class="tableSubHeadingColor" colspan="2">Legend</th>
|
||||
* <th colspan="6" rowspan="7"> </th>
|
||||
* </tr>
|
||||
* <tr>
|
||||
* <th class="tableSubHeadingColor">Flag</th>
|
||||
* <th class="tableSubHeadingColor">Meaning</th>
|
||||
* <th colspan="6"></th>
|
||||
* </tr>
|
||||
* <tr><td>Y</td><td>Allowed</td></tr>
|
||||
* <tr><td>N</td><td>Invalid</td></tr>
|
||||
* <tr><td>P</td><td>Preserves</td></tr>
|
||||
* <tr><td>C</td><td>Clears</td></tr>
|
||||
* <tr><td>I</td><td>Injects</td></tr>
|
||||
* </tfoot>
|
||||
* </table>
|
||||
* </div>
|
||||
*
|
||||
* <p>In the above table, "PCI" means "may preserve, clear, or inject"; "PC"
|
||||
* means "may preserve or clear", "PI" means "may preserve or inject", and "N"
|
||||
* means "not valid".
|
||||
*
|
||||
* <p>Stream flags are represented by unioned bit sets, so that a single word
|
||||
* may describe all the characteristics of a given stream entity, and that, for
|
||||
* example, the flags for a stream source can be efficiently combined with the
|
||||
* flags for later operations on that stream.
|
||||
*
|
||||
* <p>The bit masks {@link #STREAM_MASK}, {@link #OP_MASK}, and
|
||||
* {@link #TERMINAL_OP_MASK} can be ANDed with a bit set of stream flags to
|
||||
* produce a mask containing only the valid flags for that entity type.
|
||||
*
|
||||
* <p>When describing a stream source, one need only describe what
* characteristics that stream has; when describing a stream operation, one
* must describe whether the operation preserves, injects, or clears that
|
||||
* characteristic. Accordingly, two bits are used for each flag, so as to allow
|
||||
* representing not only the presence of a characteristic, but how an
|
||||
* operation modifies that characteristic. There are two common forms in which
|
||||
* flag bits are combined into an {@code int} bit set. <em>Stream flags</em>
|
||||
* are a unioned bit set constructed by ORing the enum characteristic values of
|
||||
* {@link #set()} (or, more commonly, ORing the corresponding static named
|
||||
* constants prefixed with {@code IS_}). <em>Operation flags</em> are a unioned
|
||||
* bit set constructed by ORing the enum characteristic values of {@link #set()}
|
||||
* or {@link #clear()} (to inject, or clear, respectively, the corresponding
|
||||
* flag), or more commonly ORing the corresponding named constants prefixed with
|
||||
* {@code IS_} or {@code NOT_}. Flags that are not marked with {@code IS_} or
|
||||
* {@code NOT_} are implicitly treated as preserved. Care must be taken when
|
||||
* combining bitsets that the correct combining operations are applied in the
|
||||
* correct order.
|
||||
*
|
||||
* <p>
|
||||
* With the exception of {@link #SHORT_CIRCUIT}, stream characteristics can be
|
||||
* derived from the equivalent {@link java.util.Spliterator} characteristics:
|
||||
* {@link java.util.Spliterator#DISTINCT}, {@link java.util.Spliterator#SORTED},
|
||||
* {@link java.util.Spliterator#ORDERED}, and
|
||||
* {@link java.util.Spliterator#SIZED}. A spliterator characteristics bit set
|
||||
* can be converted to stream flags using the method
|
||||
* {@link #fromCharacteristics(java.util.Spliterator)} and converted back using
|
||||
* {@link #toCharacteristics(int)}. (The bit set
|
||||
* {@link #SPLITERATOR_CHARACTERISTICS_MASK} is used to AND with a bit set to
|
||||
* produce a valid spliterator characteristics bit set that can be converted to
|
||||
* stream flags.)
|
||||
*
|
||||
* <p>
|
||||
* The source of a stream encapsulates a spliterator. The characteristics of
|
||||
* that source spliterator when transformed to stream flags will be a proper
|
||||
* subset of stream flags of that stream.
|
||||
* For example:
|
||||
* <pre> {@code
|
||||
* Spliterator s = ...;
|
||||
* Stream stream = Streams.stream(s);
|
||||
* flagsFromSplitr = fromCharacteristics(s.characteristics());
|
||||
* assert(flagsFromSplitr & stream.getStreamFlags() == flagsFromSplitr);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>
|
||||
* An intermediate operation, performed on an input stream to create a new
|
||||
* output stream, may preserve, clear or inject stream or operation
|
||||
* characteristics. Similarly, a terminal operation, performed on an input
|
||||
* stream to produce an output result may preserve, clear or inject stream or
|
||||
* operation characteristics. Preservation means that if that characteristic
|
||||
* is present on the input, then it is also present on the output. Clearing
|
||||
* means that the characteristic is not present on the output regardless of the
|
||||
* input. Injection means that the characteristic is present on the output
|
||||
* regardless of the input. If a characteristic is not cleared or injected then
|
||||
* it is implicitly preserved.
|
||||
*
|
||||
* <p>
|
||||
* A pipeline consists of a stream source encapsulating a spliterator, one or
|
||||
* more intermediate operations, and finally a terminal operation that produces
|
||||
* a result. At each stage of the pipeline, a combined stream and operation
|
||||
* flags can be calculated, using {@link #combineOpFlags(int, int)}. Such flags
|
||||
* ensure that preservation, clearing and injecting information is retained at
|
||||
* each stage.
|
||||
*
|
||||
* The combined stream and operation flags for the source stage of the pipeline
|
||||
* is calculated as follows:
|
||||
* <pre> {@code
|
||||
* int flagsForSourceStage = combineOpFlags(sourceFlags, INITIAL_OPS_VALUE);
|
||||
* }</pre>
|
||||
*
|
||||
* The combined stream and operation flags of each subsequent intermediate
|
||||
* operation stage in the pipeline is calculated as follows:
|
||||
* <pre> {@code
|
||||
* int flagsForThisStage = combineOpFlags(flagsForPreviousStage, thisOpFlags);
|
||||
* }</pre>
|
||||
*
|
||||
* Finally the flags output from the last intermediate operation of the pipeline
|
||||
* are combined with the operation flags of the terminal operation to produce
|
||||
* the flags output from the pipeline.
|
||||
*
|
||||
* <p>Those flags can then be used to apply optimizations. For example, if
|
||||
* {@code SIZED.isKnown(flags)} returns true then the stream size remains
|
||||
* constant throughout the pipeline, this information can be utilized to
|
||||
* pre-allocate data structures and combined with
|
||||
* {@link java.util.Spliterator#SUBSIZED} that information can be utilized to
|
||||
* perform concurrent in-place updates into a shared array.
|
||||
*
|
||||
* For specific details see the {@link AbstractPipeline} constructors.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
enum StreamOpFlag {
|
||||
|
||||
/*
|
||||
* Each characteristic takes up 2 bits in a bit set to accommodate
|
||||
* preserving, clearing and setting/injecting information.
|
||||
*
|
||||
* This applies to stream flags, intermediate/terminal operation flags, and
|
||||
* combined stream and operation flags. Even though the former only requires
|
||||
* 1 bit of information per characteristic, it is more efficient when
|
||||
* combining flags to align set and inject bits.
|
||||
*
|
||||
* Characteristics belong to certain types, see the Type enum. Bit masks for
|
||||
* the types are constructed as per the following table:
|
||||
*
|
||||
* DISTINCT SORTED ORDERED SIZED SHORT_CIRCUIT
|
||||
* SPLITERATOR 01 01 01 01 00
|
||||
* STREAM 01 01 01 01 00
|
||||
* OP 11 11 11 10 01
|
||||
* TERMINAL_OP 00 00 10 00 01
|
||||
* UPSTREAM_TERMINAL_OP 00 00 10 00 00
|
||||
*
|
||||
* 01 = set/inject
|
||||
* 10 = clear
|
||||
* 11 = preserve
|
||||
*
|
||||
* Construction of the columns is performed using a simple builder for
|
||||
* non-zero values.
|
||||
*/
|
||||
|
||||
|
||||
// The following flags correspond to characteristics on Spliterator
|
||||
// and the values MUST be equal.
|
||||
//
|
||||
|
||||
/**
|
||||
* Characteristic value signifying that, for each pair of
|
||||
* encountered elements in a stream {@code x, y}, {@code !x.equals(y)}.
|
||||
* <p>
|
||||
* A stream may have this value or an intermediate operation can preserve,
|
||||
* clear or inject this value.
|
||||
*/
|
||||
// 0, 0x00000001
|
||||
// Matches Spliterator.DISTINCT
|
||||
DISTINCT(0,
|
||||
set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)),
|
||||
|
||||
/**
|
||||
* Characteristic value signifying that encounter order follows a natural
|
||||
* sort order of comparable elements.
|
||||
* <p>
|
||||
* A stream can have this value or an intermediate operation can preserve,
|
||||
* clear or inject this value.
|
||||
* <p>
|
||||
* Note: The {@link java.util.Spliterator#SORTED} characteristic can define
|
||||
* a sort order with an associated non-null comparator. Augmenting flag
|
||||
* state with additional properties such that those properties can be passed
|
||||
* to operations requires some disruptive changes for a singular use-case.
|
||||
* Furthermore, comparing comparators for equality beyond that of identity
|
||||
* is likely to be unreliable. Therefore the {@code SORTED} characteristic
|
||||
* for a defined non-natural sort order is not mapped internally to the
|
||||
* {@code SORTED} flag.
|
||||
*/
|
||||
// 1, 0x00000004
|
||||
// Matches Spliterator.SORTED
|
||||
SORTED(1,
|
||||
set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP)),
|
||||
|
||||
/**
|
||||
* Characteristic value signifying that an encounter order is
|
||||
* defined for stream elements.
|
||||
* <p>
|
||||
* A stream can have this value, an intermediate operation can preserve,
|
||||
* clear or inject this value, or a terminal operation can preserve or clear
|
||||
* this value.
|
||||
*/
|
||||
// 2, 0x00000010
|
||||
// Matches Spliterator.ORDERED
|
||||
ORDERED(2,
|
||||
set(Type.SPLITERATOR).set(Type.STREAM).setAndClear(Type.OP).clear(Type.TERMINAL_OP)
|
||||
.clear(Type.UPSTREAM_TERMINAL_OP)),
|
||||
|
||||
/**
|
||||
* Characteristic value signifying that the size of the stream
|
||||
* is of a known finite size that is equal to the known finite
|
||||
* size of the source spliterator input to the first stream
|
||||
* in the pipeline.
|
||||
* <p>
|
||||
* A stream can have this value or an intermediate operation can preserve or
|
||||
* clear this value.
|
||||
*/
|
||||
// 3, 0x00000040
|
||||
// Matches Spliterator.SIZED
|
||||
SIZED(3,
|
||||
set(Type.SPLITERATOR).set(Type.STREAM).clear(Type.OP)),
|
||||
|
||||
// The following Spliterator characteristics are not currently used but a
|
||||
// gap in the bit set is deliberately retained to enable corresponding
|
||||
// stream flags if/when required without modification to other flag values.
|
||||
//
|
||||
// 4, 0x00000100 NONNULL(4, ...
|
||||
// 5, 0x00000400 IMMUTABLE(5, ...
|
||||
// 6, 0x00001000 CONCURRENT(6, ...
|
||||
// 7, 0x00004000 SUBSIZED(7, ...
|
||||
|
||||
// The following 4 flags are currently undefined and free for any further
|
||||
// spliterator characteristics.
|
||||
//
|
||||
// 8, 0x00010000
|
||||
// 9, 0x00040000
|
||||
// 10, 0x00100000
|
||||
// 11, 0x00400000
|
||||
|
||||
// The following flags are specific to streams and operations
|
||||
//
|
||||
|
||||
/**
|
||||
* Characteristic value signifying that an operation may short-circuit the
|
||||
* stream.
|
||||
* <p>
|
||||
* An intermediate operation can preserve or inject this value,
|
||||
* or a terminal operation can preserve or inject this value.
|
||||
*/
|
||||
// 12, 0x01000000
|
||||
SHORT_CIRCUIT(12,
|
||||
set(Type.OP).set(Type.TERMINAL_OP));
|
||||
|
||||
// The following 3 flags are currently undefined and free for any further
|
||||
// stream flags if/when required
|
||||
//
|
||||
// 13, 0x04000000
|
||||
// 14, 0x10000000
|
||||
// 15, 0x40000000
|
||||
|
||||
/**
|
||||
* Type of a flag
|
||||
*/
|
||||
enum Type {
|
||||
/**
|
||||
* The flag is associated with spliterator characteristics.
|
||||
*/
|
||||
SPLITERATOR,
|
||||
|
||||
/**
|
||||
* The flag is associated with stream flags.
|
||||
*/
|
||||
STREAM,
|
||||
|
||||
/**
|
||||
* The flag is associated with intermediate operation flags.
|
||||
*/
|
||||
OP,
|
||||
|
||||
/**
|
||||
* The flag is associated with terminal operation flags.
|
||||
*/
|
||||
TERMINAL_OP,
|
||||
|
||||
/**
|
||||
* The flag is associated with terminal operation flags that are
|
||||
* propagated upstream across the last stateful operation boundary
|
||||
*/
|
||||
UPSTREAM_TERMINAL_OP
|
||||
}
|
||||
|
||||
/**
|
||||
* The bit pattern for setting/injecting a flag.
|
||||
*/
|
||||
private static final int SET_BITS = 0b01;
|
||||
|
||||
/**
|
||||
* The bit pattern for clearing a flag.
|
||||
*/
|
||||
private static final int CLEAR_BITS = 0b10;
|
||||
|
||||
/**
|
||||
* The bit pattern for preserving a flag.
|
||||
*/
|
||||
private static final int PRESERVE_BITS = 0b11;
|
||||
|
||||
private static MaskBuilder set(Type t) {
|
||||
return new MaskBuilder(new EnumMap<>(Type.class)).set(t);
|
||||
}
|
||||
|
||||
private static class MaskBuilder {
|
||||
final Map<Type, Integer> map;
|
||||
|
||||
MaskBuilder(Map<Type, Integer> map) {
|
||||
this.map = map;
|
||||
}
|
||||
|
||||
MaskBuilder mask(Type t, Integer i) {
|
||||
map.put(t, i);
|
||||
return this;
|
||||
}
|
||||
|
||||
MaskBuilder set(Type t) {
|
||||
return mask(t, SET_BITS);
|
||||
}
|
||||
|
||||
MaskBuilder clear(Type t) {
|
||||
return mask(t, CLEAR_BITS);
|
||||
}
|
||||
|
||||
MaskBuilder setAndClear(Type t) {
|
||||
return mask(t, PRESERVE_BITS);
|
||||
}
|
||||
|
||||
Map<Type, Integer> build() {
|
||||
for (Type t : Type.values()) {
|
||||
map.putIfAbsent(t, 0b00);
|
||||
}
|
||||
return map;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* The mask table for a flag, this is used to determine if a flag
|
||||
* corresponds to a certain flag type and for creating mask constants.
|
||||
*/
|
||||
private final Map<Type, Integer> maskTable;
|
||||
|
||||
/**
|
||||
* The bit position in the bit mask.
|
||||
*/
|
||||
private final int bitPosition;
|
||||
|
||||
/**
|
||||
* The set bit pattern (SET_BITS) shifted to this flag's bit position.
|
||||
*/
|
||||
private final int set;
|
||||
|
||||
/**
|
||||
* The clear bit pattern (CLEAR_BITS) shifted to this flag's bit position.
|
||||
*/
|
||||
private final int clear;
|
||||
|
||||
/**
|
||||
* The preserve bit pattern (PRESERVE_BITS) shifted to this flag's bit position.
|
||||
*/
|
||||
private final int preserve;
|
||||
|
||||
private StreamOpFlag(int position, MaskBuilder maskBuilder) {
|
||||
this.maskTable = maskBuilder.build();
|
||||
// Two bits per flag
|
||||
position *= 2;
|
||||
this.bitPosition = position;
|
||||
this.set = SET_BITS << position;
|
||||
this.clear = CLEAR_BITS << position;
|
||||
this.preserve = PRESERVE_BITS << position;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the bitmap associated with setting this characteristic.
|
||||
*
|
||||
* @return the bitmap for setting this characteristic
|
||||
*/
|
||||
int set() {
|
||||
return set;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the bitmap associated with clearing this characteristic.
|
||||
*
|
||||
* @return the bitmap for clearing this characteristic
|
||||
*/
|
||||
int clear() {
|
||||
return clear;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if this flag is a stream-based flag.
|
||||
*
|
||||
* @return true if a stream-based flag, otherwise false.
|
||||
*/
|
||||
boolean isStreamFlag() {
|
||||
return maskTable.get(Type.STREAM) > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if this flag is set on stream flags, injected on operation flags,
|
||||
* and injected on combined stream and operation flags.
|
||||
*
|
||||
* @param flags the stream flags, operation flags, or combined stream and
|
||||
* operation flags
|
||||
* @return true if this flag is known, otherwise false.
|
||||
*/
|
||||
boolean isKnown(int flags) {
|
||||
return (flags & preserve) == set;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if this flag is cleared on operation flags or combined stream and
|
||||
* operation flags.
|
||||
*
|
||||
* @param flags the operation flags or combined stream and operation flags.
* @return true if this flag is cleared, otherwise false.
|
||||
*/
|
||||
boolean isCleared(int flags) {
|
||||
return (flags & preserve) == clear;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks if this flag is preserved on combined stream and operation flags.
|
||||
*
|
||||
* @param flags the combined stream and operation flags.
|
||||
* @return true if this flag is preserved, otherwise false.
|
||||
*/
|
||||
boolean isPreserved(int flags) {
|
||||
return (flags & preserve) == preserve;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determines if this flag can be set for a flag type.
|
||||
*
|
||||
* @param t the flag type.
|
||||
* @return true if this flag can be set for the flag type, otherwise false.
|
||||
*/
|
||||
boolean canSet(Type t) {
|
||||
return (maskTable.get(t) & SET_BITS) > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* The bit mask for spliterator characteristics
|
||||
*/
|
||||
static final int SPLITERATOR_CHARACTERISTICS_MASK = createMask(Type.SPLITERATOR);
|
||||
|
||||
/**
|
||||
* The bit mask for source stream flags.
|
||||
*/
|
||||
static final int STREAM_MASK = createMask(Type.STREAM);
|
||||
|
||||
/**
|
||||
* The bit mask for intermediate operation flags.
|
||||
*/
|
||||
static final int OP_MASK = createMask(Type.OP);
|
||||
|
||||
/**
|
||||
* The bit mask for terminal operation flags.
|
||||
*/
|
||||
static final int TERMINAL_OP_MASK = createMask(Type.TERMINAL_OP);
|
||||
|
||||
/**
|
||||
* The bit mask for upstream terminal operation flags.
|
||||
*/
|
||||
static final int UPSTREAM_TERMINAL_OP_MASK = createMask(Type.UPSTREAM_TERMINAL_OP);
|
||||
|
||||
private static int createMask(Type t) {
|
||||
int mask = 0;
|
||||
for (StreamOpFlag flag : StreamOpFlag.values()) {
|
||||
mask |= flag.maskTable.get(t) << flag.bitPosition;
|
||||
}
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Complete flag mask.
|
||||
*/
|
||||
private static final int FLAG_MASK = createFlagMask();
|
||||
|
||||
private static int createFlagMask() {
|
||||
int mask = 0;
|
||||
for (StreamOpFlag flag : StreamOpFlag.values()) {
|
||||
mask |= flag.preserve;
|
||||
}
|
||||
return mask;
|
||||
}
|
||||
|
||||
/**
|
||||
* Flag mask for stream flags that are set.
|
||||
*/
|
||||
private static final int FLAG_MASK_IS = STREAM_MASK;
|
||||
|
||||
/**
|
||||
* Flag mask for stream flags that are cleared.
|
||||
*/
|
||||
private static final int FLAG_MASK_NOT = STREAM_MASK << 1;
|
||||
|
||||
/**
|
||||
* The initial value to be combined with the stream flags of the first
|
||||
* stream in the pipeline.
|
||||
*/
|
||||
static final int INITIAL_OPS_VALUE = FLAG_MASK_IS | FLAG_MASK_NOT;
|
||||
|
||||
/**
|
||||
* The bit value to set or inject {@link #DISTINCT}.
|
||||
*/
|
||||
static final int IS_DISTINCT = DISTINCT.set;
|
||||
|
||||
/**
|
||||
* The bit value to clear {@link #DISTINCT}.
|
||||
*/
|
||||
static final int NOT_DISTINCT = DISTINCT.clear;
|
||||
|
||||
/**
|
||||
* The bit value to set or inject {@link #SORTED}.
|
||||
*/
|
||||
static final int IS_SORTED = SORTED.set;
|
||||
|
||||
/**
|
||||
* The bit value to clear {@link #SORTED}.
|
||||
*/
|
||||
static final int NOT_SORTED = SORTED.clear;
|
||||
|
||||
/**
|
||||
* The bit value to set or inject {@link #ORDERED}.
|
||||
*/
|
||||
static final int IS_ORDERED = ORDERED.set;
|
||||
|
||||
/**
|
||||
* The bit value to clear {@link #ORDERED}.
|
||||
*/
|
||||
static final int NOT_ORDERED = ORDERED.clear;
|
||||
|
||||
/**
|
||||
* The bit value to set {@link #SIZED}.
|
||||
*/
|
||||
static final int IS_SIZED = SIZED.set;
|
||||
|
||||
/**
|
||||
* The bit value to clear {@link #SIZED}.
|
||||
*/
|
||||
static final int NOT_SIZED = SIZED.clear;
|
||||
|
||||
/**
|
||||
* The bit value to inject {@link #SHORT_CIRCUIT}.
|
||||
*/
|
||||
static final int IS_SHORT_CIRCUIT = SHORT_CIRCUIT.set;
|
||||
|
||||
private static int getMask(int flags) {
|
||||
return (flags == 0)
|
||||
? FLAG_MASK
|
||||
: ~(flags | ((FLAG_MASK_IS & flags) << 1) | ((FLAG_MASK_NOT & flags) >> 1));
|
||||
}
|
||||
|
||||
/**
|
||||
* Combines stream or operation flags with previously combined stream and
|
||||
* operation flags to produce updated combined stream and operation flags.
|
||||
* <p>
|
||||
* A flag set on stream flags or injected on operation flags,
|
||||
* and injected on the combined stream and operation flags,
|
||||
* will be injected on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag set on stream flags or injected on operation flags,
|
||||
* and cleared on the combined stream and operation flags,
|
||||
* will be cleared on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag set on the stream flags or injected on operation flags,
|
||||
* and preserved on the combined stream and operation flags,
|
||||
* will be injected on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag not set on the stream flags or cleared/preserved on operation
|
||||
* flags, and injected on the combined stream and operation flags,
|
||||
* will be injected on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag not set on the stream flags or cleared/preserved on operation
|
||||
* flags, and cleared on the combined stream and operation flags,
|
||||
* will be cleared on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag not set on the stream flags,
|
||||
* and preserved on the combined stream and operation flags
|
||||
* will be preserved on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag cleared on operation flags,
|
||||
* and preserved on the combined stream and operation flags
|
||||
* will be cleared on the updated combined stream and operation flags.
|
||||
*
|
||||
* <p>
|
||||
* A flag preserved on operation flags,
|
||||
* and preserved on the combined stream and operation flags
|
||||
* will be preserved on the updated combined stream and operation flags.
|
||||
*
|
||||
* @param newStreamOrOpFlags the stream or operation flags.
|
||||
* @param prevCombOpFlags previously combined stream and operation flags.
|
||||
* The value {@link #INITIAL_OPS_VALUE} must be used as the seed value.
|
||||
* @return the updated combined stream and operation flags.
|
||||
*/
|
||||
static int combineOpFlags(int newStreamOrOpFlags, int prevCombOpFlags) {
|
||||
// 0x01 or 0x10 nibbles are transformed to 0x11
|
||||
// 0x00 nibbles remain unchanged
|
||||
// Then all the bits are flipped
|
||||
// Then the result is logically or'ed with the operation flags.
|
||||
return (prevCombOpFlags & StreamOpFlag.getMask(newStreamOrOpFlags)) | newStreamOrOpFlags;
|
||||
}
|
||||
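As a worked illustration of the combining rules above (a sketch only: StreamOpFlag and these constants are package-private, so the snippet is meaningful only within java.util.stream):

// A source reporting SIZED and ORDERED, seeded with INITIAL_OPS_VALUE.
int combined = combineOpFlags(IS_SIZED | IS_ORDERED, INITIAL_OPS_VALUE);
assert SIZED.isKnown(combined) && ORDERED.isKnown(combined);

// An intermediate op that clears SIZED (e.g. a filtering op) contributes NOT_SIZED.
combined = combineOpFlags(NOT_SIZED, combined);
assert SIZED.isCleared(combined);   // SIZED is no longer known downstream
assert ORDERED.isKnown(combined);   // ORDERED is implicitly preserved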
|
||||
/**
|
||||
* Converts combined stream and operation flags to stream flags.
|
||||
*
|
||||
* <p>Each flag injected on the combined stream and operation flags will be
|
||||
* set on the stream flags.
|
||||
*
|
||||
* @param combOpFlags the combined stream and operation flags.
|
||||
* @return the stream flags.
|
||||
*/
|
||||
static int toStreamFlags(int combOpFlags) {
|
||||
// By flipping the nibbles 0x11 become 0x00 and 0x01 become 0x10
|
||||
// Shift left 1 to restore set flags and mask off anything other than the set flags
|
||||
return ((~combOpFlags) >> 1) & FLAG_MASK_IS & combOpFlags;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts stream flags to a spliterator characteristic bit set.
|
||||
*
|
||||
* @param streamFlags the stream flags.
|
||||
* @return the spliterator characteristic bit set.
|
||||
*/
|
||||
static int toCharacteristics(int streamFlags) {
|
||||
return streamFlags & SPLITERATOR_CHARACTERISTICS_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a spliterator characteristic bit set to stream flags.
|
||||
*
|
||||
* @implSpec
|
||||
* If the spliterator is naturally {@code SORTED} (the associated
|
||||
* {@code Comparator} is {@code null}) then the characteristic is converted
|
||||
* to the {@link #SORTED} flag, otherwise the characteristic is not
|
||||
* converted.
|
||||
*
|
||||
* @param spliterator the spliterator from which to obtain characteristic
|
||||
* bit set.
|
||||
* @return the stream flags.
|
||||
*/
|
||||
static int fromCharacteristics(Spliterator<?> spliterator) {
|
||||
int characteristics = spliterator.characteristics();
|
||||
if ((characteristics & Spliterator.SORTED) != 0 && spliterator.getComparator() != null) {
|
||||
// Do not propagate the SORTED characteristic if it does not correspond
|
||||
// to a natural sort order
|
||||
return characteristics & SPLITERATOR_CHARACTERISTICS_MASK & ~Spliterator.SORTED;
|
||||
}
|
||||
else {
|
||||
return characteristics & SPLITERATOR_CHARACTERISTICS_MASK;
|
||||
}
|
||||
}
|
||||
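The comparator check above means only naturally sorted sources map to the SORTED flag. For instance (a sketch using the public collections API; the flag translation itself is internal):

import java.util.Comparator;
import java.util.Spliterator;
import java.util.TreeSet;

class SortedCharacteristicDemo {
    public static void main(String[] args) {
        TreeSet<String> natural = new TreeSet<>();               // natural ordering
        TreeSet<String> reversed = new TreeSet<>(Comparator.reverseOrder());
        natural.add("a");
        reversed.add("a");
        Spliterator<String> s1 = natural.spliterator();
        Spliterator<String> s2 = reversed.spliterator();
        // Both report Spliterator.SORTED, but only s1 has a null comparator,
        // so only s1 would be translated to the internal SORTED stream flag.
        System.out.println((s1.characteristics() & Spliterator.SORTED) != 0); // true
        System.out.println(s1.getComparator()); // null
        System.out.println(s2.getComparator()); // the reverse-order comparator
    }
}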
|
||||
/**
|
||||
* Converts a spliterator characteristic bit set to stream flags.
|
||||
*
|
||||
* @param characteristics the spliterator characteristic bit set.
|
||||
* @return the stream flags.
|
||||
*/
|
||||
static int fromCharacteristics(int characteristics) {
|
||||
return characteristics & SPLITERATOR_CHARACTERISTICS_MASK;
|
||||
}
|
||||
}
|
||||
70
jdkSrc/jdk8/java/util/stream/StreamShape.java
Normal file
@@ -0,0 +1,70 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
/**
|
||||
* An enum describing the known shape specializations for stream abstractions.
|
||||
* Each will correspond to a specific subinterface of {@link BaseStream}
|
||||
* (e.g., {@code REFERENCE} corresponds to {@code Stream}, {@code INT_VALUE}
|
||||
* corresponds to {@code IntStream}). Each may also correspond to
|
||||
* specializations of value-handling abstractions such as {@code Spliterator},
|
||||
* {@code Consumer}, etc.
|
||||
*
|
||||
* @apiNote
|
||||
* This enum is used by implementations to determine compatibility between
|
||||
* streams and operations (i.e., if the output shape of a stream is compatible
|
||||
* with the input shape of the next operation).
|
||||
*
|
||||
* <p>Some APIs require you to specify both a generic type and a stream shape
|
||||
* for input or output elements, such as {@link TerminalOp} which has both
|
||||
* generic type parameters for its input types, and a getter for the
|
||||
* input shape. When representing primitive streams in this way, the
|
||||
* generic type parameter should correspond to the wrapper type for that
|
||||
* primitive type.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
enum StreamShape {
|
||||
/**
|
||||
* The shape specialization corresponding to {@code Stream} and elements
|
||||
* that are object references.
|
||||
*/
|
||||
REFERENCE,
|
||||
/**
|
||||
* The shape specialization corresponding to {@code IntStream} and elements
|
||||
* that are {@code int} values.
|
||||
*/
|
||||
INT_VALUE,
|
||||
/**
|
||||
* The shape specialization corresponding to {@code LongStream} and elements
|
||||
* that are {@code long} values.
|
||||
*/
|
||||
LONG_VALUE,
|
||||
/**
|
||||
* The shape specialization corresponding to {@code DoubleStream} and
|
||||
* elements that are {@code double} values.
|
||||
*/
|
||||
DOUBLE_VALUE
|
||||
}
|
||||
1553
jdkSrc/jdk8/java/util/stream/StreamSpliterators.java
Normal file
File diff suppressed because it is too large
318
jdkSrc/jdk8/java/util/stream/StreamSupport.java
Normal file
@@ -0,0 +1,318 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Objects;
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* Low-level utility methods for creating and manipulating streams.
|
||||
*
|
||||
* <p>This class is mostly for library writers presenting stream views
|
||||
* of data structures; most static stream methods intended for end users are in
|
||||
* the various {@code Stream} classes.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
public final class StreamSupport {
|
||||
|
||||
// Suppresses default constructor, ensuring non-instantiability.
|
||||
private StreamSupport() {}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code Stream} from a
|
||||
* {@code Spliterator}.
|
||||
*
|
||||
* <p>The spliterator is only traversed, split, or queried for estimated
|
||||
* size after the terminal operation of the stream pipeline commences.
|
||||
*
|
||||
* <p>It is strongly recommended the spliterator report a characteristic of
|
||||
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>. Otherwise,
|
||||
* {@link #stream(java.util.function.Supplier, int, boolean)} should be used
|
||||
* to reduce the scope of potential interference with the source. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param <T> the type of stream elements
|
||||
* @param spliterator a {@code Spliterator} describing the stream elements
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code Stream}
|
||||
*/
|
||||
public static <T> Stream<T> stream(Spliterator<T> spliterator, boolean parallel) {
|
||||
Objects.requireNonNull(spliterator);
|
||||
return new ReferencePipeline.Head<>(spliterator,
|
||||
StreamOpFlag.fromCharacteristics(spliterator),
|
||||
parallel);
|
||||
}
|
||||
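For example, a library presenting a stream view of its own data structure can wrap a spliterator directly (a minimal sketch using only the public API):

import java.util.Arrays;
import java.util.Spliterator;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

class SpliteratorStreamDemo {
    public static void main(String[] args) {
        Spliterator<String> sp = Arrays.asList("a", "b", "c").spliterator();
        // false => sequential stream; pass true for a parallel stream.
        Stream<String> s = StreamSupport.stream(sp, false);
        s.forEach(System.out::println);
    }
}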
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code Stream} from a
|
||||
* {@code Supplier} of {@code Spliterator}.
|
||||
*
|
||||
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
|
||||
* more than once, and only after the terminal operation of the stream pipeline
|
||||
* commences.
|
||||
*
|
||||
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
|
||||
* or {@code CONCURRENT}, or that are
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>, it is likely
|
||||
* more efficient to use {@link #stream(java.util.Spliterator, boolean)}
|
||||
* instead.
|
||||
* <p>The use of a {@code Supplier} in this form provides a level of
|
||||
* indirection that reduces the scope of potential interference with the
|
||||
* source. Since the supplier is only invoked after the terminal operation
|
||||
* commences, any modifications to the source up to the start of the
|
||||
* terminal operation are reflected in the stream result. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param <T> the type of stream elements
|
||||
* @param supplier a {@code Supplier} of a {@code Spliterator}
|
||||
* @param characteristics Spliterator characteristics of the supplied
|
||||
* {@code Spliterator}. The characteristics must be equal to
|
||||
* {@code supplier.get().characteristics()}, otherwise undefined
|
||||
* behavior may occur when the terminal operation commences.
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code Stream}
|
||||
* @see #stream(java.util.Spliterator, boolean)
|
||||
*/
|
||||
public static <T> Stream<T> stream(Supplier<? extends Spliterator<T>> supplier,
|
||||
int characteristics,
|
||||
boolean parallel) {
|
||||
Objects.requireNonNull(supplier);
|
||||
return new ReferencePipeline.Head<>(supplier,
|
||||
StreamOpFlag.fromCharacteristics(characteristics),
|
||||
parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code IntStream} from a
|
||||
* {@code Spliterator.OfInt}.
|
||||
*
|
||||
* <p>The spliterator is only traversed, split, or queried for estimated size
|
||||
* after the terminal operation of the stream pipeline commences.
|
||||
*
|
||||
* <p>It is strongly recommended the spliterator report a characteristic of
|
||||
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>. Otherwise,
|
||||
* {@link #intStream(java.util.function.Supplier, int, boolean)} should be
|
||||
* used to reduce the scope of potential interference with the source. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param spliterator a {@code Spliterator.OfInt} describing the stream elements
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code IntStream}
|
||||
*/
|
||||
public static IntStream intStream(Spliterator.OfInt spliterator, boolean parallel) {
|
||||
return new IntPipeline.Head<>(spliterator,
|
||||
StreamOpFlag.fromCharacteristics(spliterator),
|
||||
parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code IntStream} from a
|
||||
* {@code Supplier} of {@code Spliterator.OfInt}.
|
||||
*
|
||||
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
|
||||
* more than once, and only after the terminal operation of the stream pipeline
|
||||
* commences.
|
||||
*
|
||||
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
|
||||
* or {@code CONCURRENT}, or that are
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>, it is likely
|
||||
* more efficient to use {@link #intStream(java.util.Spliterator.OfInt, boolean)}
|
||||
* instead.
|
||||
* <p>The use of a {@code Supplier} in this form provides a level of
|
||||
* indirection that reduces the scope of potential interference with the
|
||||
* source. Since the supplier is only invoked after the terminal operation
|
||||
* commences, any modifications to the source up to the start of the
|
||||
* terminal operation are reflected in the stream result. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param supplier a {@code Supplier} of a {@code Spliterator.OfInt}
|
||||
* @param characteristics Spliterator characteristics of the supplied
|
||||
* {@code Spliterator.OfInt}. The characteristics must be equal to
|
||||
* {@code supplier.get().characteristics()}, otherwise undefined
|
||||
* behavior may occur when the terminal operation commences.
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code IntStream}
|
||||
* @see #intStream(java.util.Spliterator.OfInt, boolean)
|
||||
*/
|
||||
public static IntStream intStream(Supplier<? extends Spliterator.OfInt> supplier,
|
||||
int characteristics,
|
||||
boolean parallel) {
|
||||
return new IntPipeline.Head<>(supplier,
|
||||
StreamOpFlag.fromCharacteristics(characteristics),
|
||||
parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code LongStream} from a
|
||||
* {@code Spliterator.OfLong}.
|
||||
*
|
||||
* <p>The spliterator is only traversed, split, or queried for estimated
|
||||
* size after the terminal operation of the stream pipeline commences.
|
||||
*
|
||||
* <p>It is strongly recommended the spliterator report a characteristic of
|
||||
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>. Otherwise,
|
||||
* {@link #longStream(java.util.function.Supplier, int, boolean)} should be
|
||||
* used to reduce the scope of potential interference with the source. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param spliterator a {@code Spliterator.OfLong} describing the stream elements
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code LongStream}
|
||||
*/
|
||||
public static LongStream longStream(Spliterator.OfLong spliterator,
|
||||
boolean parallel) {
|
||||
return new LongPipeline.Head<>(spliterator,
|
||||
StreamOpFlag.fromCharacteristics(spliterator),
|
||||
parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code LongStream} from a
|
||||
* {@code Supplier} of {@code Spliterator.OfLong}.
|
||||
*
|
||||
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
|
||||
* more than once, and only after the terminal operation of the stream pipeline
|
||||
* commences.
|
||||
*
|
||||
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
|
||||
* or {@code CONCURRENT}, or that are
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>, it is likely
|
||||
* more efficient to use {@link #longStream(java.util.Spliterator.OfLong, boolean)}
|
||||
* instead.
|
||||
* <p>The use of a {@code Supplier} in this form provides a level of
|
||||
* indirection that reduces the scope of potential interference with the
|
||||
* source. Since the supplier is only invoked after the terminal operation
|
||||
* commences, any modifications to the source up to the start of the
|
||||
* terminal operation are reflected in the stream result. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param supplier a {@code Supplier} of a {@code Spliterator.OfLong}
|
||||
* @param characteristics Spliterator characteristics of the supplied
|
||||
* {@code Spliterator.OfLong}. The characteristics must be equal to
|
||||
* {@code supplier.get().characteristics()}, otherwise undefined
|
||||
* behavior may occur when the terminal operation commences.
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code LongStream}
|
||||
* @see #longStream(java.util.Spliterator.OfLong, boolean)
|
||||
*/
|
||||
public static LongStream longStream(Supplier<? extends Spliterator.OfLong> supplier,
|
||||
int characteristics,
|
||||
boolean parallel) {
|
||||
return new LongPipeline.Head<>(supplier,
|
||||
StreamOpFlag.fromCharacteristics(characteristics),
|
||||
parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code DoubleStream} from a
|
||||
* {@code Spliterator.OfDouble}.
|
||||
*
|
||||
* <p>The spliterator is only traversed, split, or queried for estimated size
|
||||
* after the terminal operation of the stream pipeline commences.
|
||||
*
|
||||
* <p>It is strongly recommended the spliterator report a characteristic of
|
||||
* {@code IMMUTABLE} or {@code CONCURRENT}, or be
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>. Otherwise,
|
||||
* {@link #doubleStream(java.util.function.Supplier, int, boolean)} should
|
||||
* be used to reduce the scope of potential interference with the source. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param spliterator A {@code Spliterator.OfDouble} describing the stream elements
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code DoubleStream}
|
||||
*/
|
||||
public static DoubleStream doubleStream(Spliterator.OfDouble spliterator,
|
||||
boolean parallel) {
|
||||
return new DoublePipeline.Head<>(spliterator,
|
||||
StreamOpFlag.fromCharacteristics(spliterator),
|
||||
parallel);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new sequential or parallel {@code DoubleStream} from a
|
||||
* {@code Supplier} of {@code Spliterator.OfDouble}.
|
||||
*
|
||||
* <p>The {@link Supplier#get()} method will be invoked on the supplier no
|
||||
* more than once, and only after the terminal operation of the stream pipeline
|
||||
* commences.
|
||||
*
|
||||
* <p>For spliterators that report a characteristic of {@code IMMUTABLE}
|
||||
* or {@code CONCURRENT}, or that are
|
||||
* <a href="../Spliterator.html#binding">late-binding</a>, it is likely
|
||||
* more efficient to use {@link #doubleStream(java.util.Spliterator.OfDouble, boolean)}
|
||||
* instead.
|
||||
* <p>The use of a {@code Supplier} in this form provides a level of
|
||||
* indirection that reduces the scope of potential interference with the
|
||||
* source. Since the supplier is only invoked after the terminal operation
|
||||
* commences, any modifications to the source up to the start of the
|
||||
* terminal operation are reflected in the stream result. See
|
||||
* <a href="package-summary.html#NonInterference">Non-Interference</a> for
|
||||
* more details.
|
||||
*
|
||||
* @param supplier A {@code Supplier} of a {@code Spliterator.OfDouble}
|
||||
* @param characteristics Spliterator characteristics of the supplied
|
||||
* {@code Spliterator.OfDouble}. The characteristics must be equal to
|
||||
* {@code supplier.get().characteristics()}, otherwise undefined
|
||||
* behavior may occur when the terminal operation commences.
|
||||
* @param parallel if {@code true} then the returned stream is a parallel
|
||||
* stream; if {@code false} the returned stream is a sequential
|
||||
* stream.
|
||||
* @return a new sequential or parallel {@code DoubleStream}
|
||||
* @see #doubleStream(java.util.Spliterator.OfDouble, boolean)
|
||||
*/
|
||||
public static DoubleStream doubleStream(Supplier<? extends Spliterator.OfDouble> supplier,
|
||||
int characteristics,
|
||||
boolean parallel) {
|
||||
return new DoublePipeline.Head<>(supplier,
|
||||
StreamOpFlag.fromCharacteristics(characteristics),
|
||||
parallel);
|
||||
}
|
||||
}
|
||||
896
jdkSrc/jdk8/java/util/stream/Streams.java
Normal file
@@ -0,0 +1,896 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Comparator;
|
||||
import java.util.Objects;
|
||||
import java.util.Spliterator;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.DoubleConsumer;
|
||||
import java.util.function.IntConsumer;
|
||||
import java.util.function.LongConsumer;
|
||||
|
||||
/**
|
||||
* Utility methods for operating on and creating streams.
|
||||
*
|
||||
* <p>Unless otherwise stated, streams are created as sequential streams. A
|
||||
* sequential stream can be transformed into a parallel stream by calling the
|
||||
* {@code parallel()} method on the created stream.
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class Streams {
|
||||
|
||||
private Streams() {
|
||||
throw new Error("no instances");
|
||||
}
|
||||
|
||||
/**
|
||||
* An object instance representing no value, that cannot be an actual
|
||||
* data element of a stream. Used when processing streams that can contain
|
||||
* {@code null} elements to distinguish between a {@code null} value and no
|
||||
* value.
|
||||
*/
|
||||
static final Object NONE = new Object();
|
||||
|
||||
/**
|
||||
* An {@code int} range spliterator.
|
||||
*/
|
||||
static final class RangeIntSpliterator implements Spliterator.OfInt {
|
||||
// Can never be greater than upTo; this avoids overflow if the upper bound
|
||||
// is Integer.MAX_VALUE
|
||||
// All elements are traversed if from == upTo & last == 0
|
||||
private int from;
|
||||
private final int upTo;
|
||||
// 1 if the range is closed and the last element has not been traversed
|
||||
// Otherwise, 0 if the range is open, or is a closed range and all
|
||||
// elements have been traversed
|
||||
private int last;
|
||||
|
||||
RangeIntSpliterator(int from, int upTo, boolean closed) {
|
||||
this(from, upTo, closed ? 1 : 0);
|
||||
}
|
||||
|
||||
private RangeIntSpliterator(int from, int upTo, int last) {
|
||||
this.from = from;
|
||||
this.upTo = upTo;
|
||||
this.last = last;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(IntConsumer consumer) {
|
||||
Objects.requireNonNull(consumer);
|
||||
|
||||
final int i = from;
|
||||
if (i < upTo) {
|
||||
from++;
|
||||
consumer.accept(i);
|
||||
return true;
|
||||
}
|
||||
else if (last > 0) {
|
||||
last = 0;
|
||||
consumer.accept(i);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(IntConsumer consumer) {
|
||||
Objects.requireNonNull(consumer);
|
||||
|
||||
int i = from;
|
||||
final int hUpTo = upTo;
|
||||
int hLast = last;
|
||||
from = upTo;
|
||||
last = 0;
|
||||
while (i < hUpTo) {
|
||||
consumer.accept(i++);
|
||||
}
|
||||
if (hLast > 0) {
|
||||
// Last element of closed range
|
||||
consumer.accept(i);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long estimateSize() {
|
||||
// Ensure ranges of size > Integer.MAX_VALUE report the correct size
|
||||
return ((long) upTo) - from + last;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int characteristics() {
|
||||
return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED |
|
||||
Spliterator.IMMUTABLE | Spliterator.NONNULL |
|
||||
Spliterator.DISTINCT | Spliterator.SORTED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Comparator<? super Integer> getComparator() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Spliterator.OfInt trySplit() {
|
||||
long size = estimateSize();
|
||||
return size <= 1
|
||||
? null
|
||||
// Left split always has a half-open range
|
||||
: new RangeIntSpliterator(from, from = from + splitPoint(size), 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* The spliterator size below which the spliterator will be split
|
||||
* at the mid-point to produce balanced splits. Above this size the
|
||||
* spliterator will be split at a ratio of
|
||||
* 1:(RIGHT_BALANCED_SPLIT_RATIO - 1)
|
||||
* to produce right-balanced splits.
|
||||
*
|
||||
* <p>Such splitting ensures that for very large ranges the left
|
||||
* side of the range will more likely be processed at a lower-depth
|
||||
* than a balanced tree at the expense of a higher-depth for the right
|
||||
* side of the range.
|
||||
*
|
||||
* <p>This is optimized for cases such as IntStream.ints() that is
|
||||
* implemented as range of 0 to Integer.MAX_VALUE but is likely to be
|
||||
* augmented with a limit operation that limits the number of elements
|
||||
* to a count lower than this threshold.
|
||||
*/
|
||||
private static final int BALANCED_SPLIT_THRESHOLD = 1 << 24;
|
||||
|
||||
/**
|
||||
* The split ratio of the left and right split when the spliterator
|
||||
* size is above BALANCED_SPLIT_THRESHOLD.
|
||||
*/
|
||||
private static final int RIGHT_BALANCED_SPLIT_RATIO = 1 << 3;
|
||||
|
||||
private int splitPoint(long size) {
|
||||
int d = (size < BALANCED_SPLIT_THRESHOLD) ? 2 : RIGHT_BALANCED_SPLIT_RATIO;
|
||||
// Cast to int is safe since:
|
||||
// 2 <= size < 2^32
|
||||
// 2 <= d <= 8
|
||||
return (int) (size / d);
|
||||
}
|
||||
}
|
||||
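The splitting policy is observable through the public API: for a small range (well under BALANCED_SPLIT_THRESHOLD) trySplit hands back the half-open lower half and keeps the upper half (a sketch, assuming the range stream's spliterator is this implementation):

import java.util.Spliterator;
import java.util.stream.IntStream;

class RangeSplitDemo {
    public static void main(String[] args) {
        Spliterator.OfInt right = IntStream.range(0, 10).spliterator();
        Spliterator.OfInt left = right.trySplit(); // left covers [0, 5), right now covers [5, 10)
        System.out.println(left.estimateSize() + " / " + right.estimateSize()); // 5 / 5
    }
}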
|
||||
/**
|
||||
* A {@code long} range spliterator.
|
||||
*
|
||||
* This implementation cannot be used for ranges whose size is greater
|
||||
* than Long.MAX_VALUE
|
||||
*/
|
||||
static final class RangeLongSpliterator implements Spliterator.OfLong {
|
||||
// Can never be greater than upTo; this avoids overflow if the upper bound
|
||||
// is Long.MAX_VALUE
|
||||
// All elements are traversed if from == upTo & last == 0
|
||||
private long from;
|
||||
private final long upTo;
|
||||
// 1 if the range is closed and the last element has not been traversed
|
||||
// Otherwise, 0 if the range is open, or is a closed range and all
|
||||
// elements have been traversed
|
||||
private int last;
|
||||
|
||||
RangeLongSpliterator(long from, long upTo, boolean closed) {
|
||||
this(from, upTo, closed ? 1 : 0);
|
||||
}
|
||||
|
||||
private RangeLongSpliterator(long from, long upTo, int last) {
|
||||
assert upTo - from + last > 0;
|
||||
this.from = from;
|
||||
this.upTo = upTo;
|
||||
this.last = last;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(LongConsumer consumer) {
|
||||
Objects.requireNonNull(consumer);
|
||||
|
||||
final long i = from;
|
||||
if (i < upTo) {
|
||||
from++;
|
||||
consumer.accept(i);
|
||||
return true;
|
||||
}
|
||||
else if (last > 0) {
|
||||
last = 0;
|
||||
consumer.accept(i);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(LongConsumer consumer) {
|
||||
Objects.requireNonNull(consumer);
|
||||
|
||||
long i = from;
|
||||
final long hUpTo = upTo;
|
||||
int hLast = last;
|
||||
from = upTo;
|
||||
last = 0;
|
||||
while (i < hUpTo) {
|
||||
consumer.accept(i++);
|
||||
}
|
||||
if (hLast > 0) {
|
||||
// Last element of closed range
|
||||
consumer.accept(i);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long estimateSize() {
|
||||
return upTo - from + last;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int characteristics() {
|
||||
return Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED |
|
||||
Spliterator.IMMUTABLE | Spliterator.NONNULL |
|
||||
Spliterator.DISTINCT | Spliterator.SORTED;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Comparator<? super Long> getComparator() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Spliterator.OfLong trySplit() {
|
||||
long size = estimateSize();
|
||||
return size <= 1
|
||||
? null
|
||||
// Left split always has a half-open range
|
||||
: new RangeLongSpliterator(from, from = from + splitPoint(size), 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* The spliterator size below which the spliterator will be split
|
||||
* at the mid-point to produce balanced splits. Above this size the
|
||||
* spliterator will be split at a ratio of
|
||||
* 1:(RIGHT_BALANCED_SPLIT_RATIO - 1)
|
||||
* to produce right-balanced splits.
|
||||
*
|
||||
* <p>Such splitting ensures that for very large ranges the left
|
||||
* side of the range will more likely be processed at a lower-depth
|
||||
* than a balanced tree at the expense of a higher-depth for the right
|
||||
* side of the range.
|
||||
*
|
||||
* <p>This is optimized for cases such as LongStream.longs() that is
|
||||
* implemented as range of 0 to Long.MAX_VALUE but is likely to be
|
||||
* augmented with a limit operation that limits the number of elements
|
||||
* to a count lower than this threshold.
|
||||
*/
|
||||
private static final long BALANCED_SPLIT_THRESHOLD = 1 << 24;
|
||||
|
||||
/**
|
||||
* The split ratio of the left and right split when the spliterator
|
||||
* size is above BALANCED_SPLIT_THRESHOLD.
|
||||
*/
|
||||
private static final long RIGHT_BALANCED_SPLIT_RATIO = 1 << 3;
|
||||
|
||||
private long splitPoint(long size) {
|
||||
long d = (size < BALANCED_SPLIT_THRESHOLD) ? 2 : RIGHT_BALANCED_SPLIT_RATIO;
|
||||
// 2 <= size <= Long.MAX_VALUE
|
||||
return size / d;
|
||||
}
|
||||
}
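    // Illustrative sketch, not part of the original file: the split-point policy
    // above, extracted so it can be run standalone. The name SplitPointDemo and
    // the sample sizes are hypothetical.
    static final class SplitPointDemo {
        static long splitPointOf(long size) {
            // Mirrors RangeLongSpliterator.splitPoint: mid-point split below 2^24,
            // 1:7 right-balanced split at or above it.
            long d = (size < (1L << 24)) ? 2 : (1L << 3);
            return size / d;
        }

        public static void main(String[] args) {
            System.out.println(splitPointOf(1_000));          // 500 -> balanced split
            System.out.println(splitPointOf(Long.MAX_VALUE)); // size / 8 -> right-balanced split
        }
    }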
|
||||
|
||||
private static abstract class AbstractStreamBuilderImpl<T, S extends Spliterator<T>> implements Spliterator<T> {
|
||||
// >= 0 when building, < 0 when built
|
||||
// -1 == no elements
|
||||
// -2 == one element, held by first
|
||||
// -3 == two or more elements, held by buffer
|
||||
int count;
|
||||
|
||||
// Spliterator implementation for 0 or 1 element
|
||||
// count == -1 for no elements
|
||||
// count == -2 for one element held by first
|
||||
|
||||
@Override
|
||||
public S trySplit() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long estimateSize() {
|
||||
return -count - 1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int characteristics() {
|
||||
return Spliterator.SIZED | Spliterator.SUBSIZED |
|
||||
Spliterator.ORDERED | Spliterator.IMMUTABLE;
|
||||
}
|
||||
}
|
||||
|
||||
static final class StreamBuilderImpl<T>
|
||||
extends AbstractStreamBuilderImpl<T, Spliterator<T>>
|
||||
implements Stream.Builder<T> {
|
||||
// The first element in the stream
|
||||
// valid if count == 1
|
||||
T first;
|
||||
|
||||
// The first and subsequent elements in the stream
|
||||
// non-null if count == 2
|
||||
SpinedBuffer<T> buffer;
|
||||
|
||||
/**
|
||||
* Constructor for building a stream of 0 or more elements.
|
||||
*/
|
||||
StreamBuilderImpl() { }
|
||||
|
||||
/**
|
||||
* Constructor for a singleton stream.
|
||||
*
|
||||
* @param t the single element
|
||||
*/
|
||||
StreamBuilderImpl(T t) {
|
||||
first = t;
|
||||
count = -2;
|
||||
}
|
||||
|
||||
// StreamBuilder implementation
|
||||
|
||||
@Override
|
||||
public void accept(T t) {
|
||||
if (count == 0) {
|
||||
first = t;
|
||||
count++;
|
||||
}
|
||||
else if (count > 0) {
|
||||
if (buffer == null) {
|
||||
buffer = new SpinedBuffer<>();
|
||||
buffer.accept(first);
|
||||
count++;
|
||||
}
|
||||
|
||||
buffer.accept(t);
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
}
|
||||
|
||||
public Stream.Builder<T> add(T t) {
|
||||
accept(t);
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Stream<T> build() {
|
||||
int c = count;
|
||||
if (c >= 0) {
|
||||
// Switch count to negative value signalling the builder is built
|
||||
count = -count - 1;
|
||||
// Use this spliterator if 0 or 1 elements, otherwise use
|
||||
// the spliterator of the spined buffer
|
||||
return (c < 2) ? StreamSupport.stream(this, false) : StreamSupport.stream(buffer.spliterator(), false);
|
||||
}
|
||||
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
|
||||
// Spliterator implementation for 0 or 1 element
|
||||
// count == -1 for no elements
|
||||
// count == -2 for one element held by first
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(Consumer<? super T> action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(Consumer<? super T> action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
}
|
||||
}
|
||||
}
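    // Illustrative sketch, not part of the original file: typical use of the
    // builder above through the public Stream.builder() factory. The class name
    // BuilderDemo is hypothetical.
    static final class BuilderDemo {
        public static void main(String[] args) {
            Stream.Builder<String> b = Stream.builder();
            b.add("one").add("two");          // add(T) delegates to accept(T)
            b.accept("three");                // third element switches to the SpinedBuffer path
            Stream<String> s = b.build();     // count flips negative; builder is now consumed
            s.forEach(System.out::println);
            try {
                b.accept("four");             // accept after build()
            } catch (IllegalStateException expected) {
                // the builder is single-use once built
            }
        }
    }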
|
||||
|
||||
static final class IntStreamBuilderImpl
|
||||
extends AbstractStreamBuilderImpl<Integer, Spliterator.OfInt>
|
||||
implements IntStream.Builder, Spliterator.OfInt {
|
||||
// The first element in the stream
|
||||
// valid if count == 1
|
||||
int first;
|
||||
|
||||
// The first and subsequent elements in the stream
|
||||
// non-null if count == 2
|
||||
SpinedBuffer.OfInt buffer;
|
||||
|
||||
/**
|
||||
* Constructor for building a stream of 0 or more elements.
|
||||
*/
|
||||
IntStreamBuilderImpl() { }
|
||||
|
||||
/**
|
||||
* Constructor for a singleton stream.
|
||||
*
|
||||
* @param t the single element
|
||||
*/
|
||||
IntStreamBuilderImpl(int t) {
|
||||
first = t;
|
||||
count = -2;
|
||||
}
|
||||
|
||||
// StreamBuilder implementation
|
||||
|
||||
@Override
|
||||
public void accept(int t) {
|
||||
if (count == 0) {
|
||||
first = t;
|
||||
count++;
|
||||
}
|
||||
else if (count > 0) {
|
||||
if (buffer == null) {
|
||||
buffer = new SpinedBuffer.OfInt();
|
||||
buffer.accept(first);
|
||||
count++;
|
||||
}
|
||||
|
||||
buffer.accept(t);
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public IntStream build() {
|
||||
int c = count;
|
||||
if (c >= 0) {
|
||||
// Switch count to negative value signalling the builder is built
|
||||
count = -count - 1;
|
||||
// Use this spliterator if 0 or 1 elements, otherwise use
|
||||
// the spliterator of the spined buffer
|
||||
return (c < 2) ? StreamSupport.intStream(this, false) : StreamSupport.intStream(buffer.spliterator(), false);
|
||||
}
|
||||
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
|
||||
// Spliterator implementation for 0 or 1 element
|
||||
// count == -1 for no elements
|
||||
// count == -2 for one element held by first
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(IntConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(IntConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static final class LongStreamBuilderImpl
|
||||
extends AbstractStreamBuilderImpl<Long, Spliterator.OfLong>
|
||||
implements LongStream.Builder, Spliterator.OfLong {
|
||||
// The first element in the stream
|
||||
// valid if count == 1
|
||||
long first;
|
||||
|
||||
// The first and subsequent elements in the stream
|
||||
// non-null if count == 2
|
||||
SpinedBuffer.OfLong buffer;
|
||||
|
||||
/**
|
||||
* Constructor for building a stream of 0 or more elements.
|
||||
*/
|
||||
LongStreamBuilderImpl() { }
|
||||
|
||||
/**
|
||||
* Constructor for a singleton stream.
|
||||
*
|
||||
* @param t the single element
|
||||
*/
|
||||
LongStreamBuilderImpl(long t) {
|
||||
first = t;
|
||||
count = -2;
|
||||
}
|
||||
|
||||
// StreamBuilder implementation
|
||||
|
||||
@Override
|
||||
public void accept(long t) {
|
||||
if (count == 0) {
|
||||
first = t;
|
||||
count++;
|
||||
}
|
||||
else if (count > 0) {
|
||||
if (buffer == null) {
|
||||
buffer = new SpinedBuffer.OfLong();
|
||||
buffer.accept(first);
|
||||
count++;
|
||||
}
|
||||
|
||||
buffer.accept(t);
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public LongStream build() {
|
||||
int c = count;
|
||||
if (c >= 0) {
|
||||
// Switch count to negative value signalling the builder is built
|
||||
count = -count - 1;
|
||||
// Use this spliterator if 0 or 1 elements, otherwise use
|
||||
// the spliterator of the spined buffer
|
||||
return (c < 2) ? StreamSupport.longStream(this, false) : StreamSupport.longStream(buffer.spliterator(), false);
|
||||
}
|
||||
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
|
||||
// Spliterator implementation for 0 or 1 element
|
||||
// count == -1 for no elements
|
||||
// count == -2 for one element held by first
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(LongConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(LongConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static final class DoubleStreamBuilderImpl
|
||||
extends AbstractStreamBuilderImpl<Double, Spliterator.OfDouble>
|
||||
implements DoubleStream.Builder, Spliterator.OfDouble {
|
||||
// The first element in the stream
|
||||
// valid if count == 1
|
||||
double first;
|
||||
|
||||
// The first and subsequent elements in the stream
|
||||
// non-null if count == 2
|
||||
SpinedBuffer.OfDouble buffer;
|
||||
|
||||
/**
|
||||
* Constructor for building a stream of 0 or more elements.
|
||||
*/
|
||||
DoubleStreamBuilderImpl() { }
|
||||
|
||||
/**
|
||||
* Constructor for a singleton stream.
|
||||
*
|
||||
* @param t the single element
|
||||
*/
|
||||
DoubleStreamBuilderImpl(double t) {
|
||||
first = t;
|
||||
count = -2;
|
||||
}
|
||||
|
||||
// StreamBuilder implementation
|
||||
|
||||
@Override
|
||||
public void accept(double t) {
|
||||
if (count == 0) {
|
||||
first = t;
|
||||
count++;
|
||||
}
|
||||
else if (count > 0) {
|
||||
if (buffer == null) {
|
||||
buffer = new SpinedBuffer.OfDouble();
|
||||
buffer.accept(first);
|
||||
count++;
|
||||
}
|
||||
|
||||
buffer.accept(t);
|
||||
}
|
||||
else {
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public DoubleStream build() {
|
||||
int c = count;
|
||||
if (c >= 0) {
|
||||
// Switch count to negative value signalling the builder is built
|
||||
count = -count - 1;
|
||||
// Use this spliterator if 0 or 1 elements, otherwise use
|
||||
// the spliterator of the spined buffer
|
||||
return (c < 2) ? StreamSupport.doubleStream(this, false) : StreamSupport.doubleStream(buffer.spliterator(), false);
|
||||
}
|
||||
|
||||
throw new IllegalStateException();
|
||||
}
|
||||
|
||||
// Spliterator implementation for 0 or 1 element
|
||||
// count == -1 for no elements
|
||||
// count == -2 for one element held by first
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(DoubleConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
return true;
|
||||
}
|
||||
else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(DoubleConsumer action) {
|
||||
Objects.requireNonNull(action);
|
||||
|
||||
if (count == -2) {
|
||||
action.accept(first);
|
||||
count = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
abstract static class ConcatSpliterator<T, T_SPLITR extends Spliterator<T>>
|
||||
implements Spliterator<T> {
|
||||
protected final T_SPLITR aSpliterator;
|
||||
protected final T_SPLITR bSpliterator;
|
||||
// True when no split has occurred, otherwise false
|
||||
boolean beforeSplit;
|
||||
// Never read after splitting
|
||||
final boolean unsized;
|
||||
|
||||
public ConcatSpliterator(T_SPLITR aSpliterator, T_SPLITR bSpliterator) {
|
||||
this.aSpliterator = aSpliterator;
|
||||
this.bSpliterator = bSpliterator;
|
||||
beforeSplit = true;
|
||||
// The spliterator is known to be unsized before splitting if the
|
||||
// sum of the estimates overflows.
|
||||
unsized = aSpliterator.estimateSize() + bSpliterator.estimateSize() < 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public T_SPLITR trySplit() {
|
||||
@SuppressWarnings("unchecked")
|
||||
T_SPLITR ret = beforeSplit ? aSpliterator : (T_SPLITR) bSpliterator.trySplit();
|
||||
beforeSplit = false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(Consumer<? super T> consumer) {
|
||||
boolean hasNext;
|
||||
if (beforeSplit) {
|
||||
hasNext = aSpliterator.tryAdvance(consumer);
|
||||
if (!hasNext) {
|
||||
beforeSplit = false;
|
||||
hasNext = bSpliterator.tryAdvance(consumer);
|
||||
}
|
||||
}
|
||||
else
|
||||
hasNext = bSpliterator.tryAdvance(consumer);
|
||||
return hasNext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(Consumer<? super T> consumer) {
|
||||
if (beforeSplit)
|
||||
aSpliterator.forEachRemaining(consumer);
|
||||
bSpliterator.forEachRemaining(consumer);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long estimateSize() {
|
||||
if (beforeSplit) {
|
||||
// If one or both estimates are Long.MAX_VALUE then the sum
|
||||
// will either be Long.MAX_VALUE or overflow to a negative value
|
||||
long size = aSpliterator.estimateSize() + bSpliterator.estimateSize();
|
||||
return (size >= 0) ? size : Long.MAX_VALUE;
|
||||
}
|
||||
else {
|
||||
return bSpliterator.estimateSize();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int characteristics() {
|
||||
if (beforeSplit) {
|
||||
// Concatenation loses DISTINCT and SORTED characteristics
|
||||
return aSpliterator.characteristics() & bSpliterator.characteristics()
|
||||
& ~(Spliterator.DISTINCT | Spliterator.SORTED
|
||||
| (unsized ? Spliterator.SIZED | Spliterator.SUBSIZED : 0));
|
||||
}
|
||||
else {
|
||||
return bSpliterator.characteristics();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Comparator<? super T> getComparator() {
|
||||
if (beforeSplit)
|
||||
throw new IllegalStateException();
|
||||
return bSpliterator.getComparator();
|
||||
}
|
||||
|
||||
static class OfRef<T> extends ConcatSpliterator<T, Spliterator<T>> {
|
||||
OfRef(Spliterator<T> aSpliterator, Spliterator<T> bSpliterator) {
|
||||
super(aSpliterator, bSpliterator);
|
||||
}
|
||||
}
|
||||
|
||||
private static abstract class OfPrimitive<T, T_CONS, T_SPLITR extends Spliterator.OfPrimitive<T, T_CONS, T_SPLITR>>
|
||||
extends ConcatSpliterator<T, T_SPLITR>
|
||||
implements Spliterator.OfPrimitive<T, T_CONS, T_SPLITR> {
|
||||
private OfPrimitive(T_SPLITR aSpliterator, T_SPLITR bSpliterator) {
|
||||
super(aSpliterator, bSpliterator);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean tryAdvance(T_CONS action) {
|
||||
boolean hasNext;
|
||||
if (beforeSplit) {
|
||||
hasNext = aSpliterator.tryAdvance(action);
|
||||
if (!hasNext) {
|
||||
beforeSplit = false;
|
||||
hasNext = bSpliterator.tryAdvance(action);
|
||||
}
|
||||
}
|
||||
else
|
||||
hasNext = bSpliterator.tryAdvance(action);
|
||||
return hasNext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void forEachRemaining(T_CONS action) {
|
||||
if (beforeSplit)
|
||||
aSpliterator.forEachRemaining(action);
|
||||
bSpliterator.forEachRemaining(action);
|
||||
}
|
||||
}
|
||||
|
||||
static class OfInt
|
||||
extends ConcatSpliterator.OfPrimitive<Integer, IntConsumer, Spliterator.OfInt>
|
||||
implements Spliterator.OfInt {
|
||||
OfInt(Spliterator.OfInt aSpliterator, Spliterator.OfInt bSpliterator) {
|
||||
super(aSpliterator, bSpliterator);
|
||||
}
|
||||
}
|
||||
|
||||
static class OfLong
|
||||
extends ConcatSpliterator.OfPrimitive<Long, LongConsumer, Spliterator.OfLong>
|
||||
implements Spliterator.OfLong {
|
||||
OfLong(Spliterator.OfLong aSpliterator, Spliterator.OfLong bSpliterator) {
|
||||
super(aSpliterator, bSpliterator);
|
||||
}
|
||||
}
|
||||
|
||||
static class OfDouble
|
||||
extends ConcatSpliterator.OfPrimitive<Double, DoubleConsumer, Spliterator.OfDouble>
|
||||
implements Spliterator.OfDouble {
|
||||
OfDouble(Spliterator.OfDouble aSpliterator, Spliterator.OfDouble bSpliterator) {
|
||||
super(aSpliterator, bSpliterator);
|
||||
}
|
||||
}
|
||||
}
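    // Illustrative sketch, not part of the original file: Stream.concat builds on
    // the ConcatSpliterator above. The combined spliterator drops DISTINCT and
    // SORTED, and reports itself unsized if the summed size estimates overflow.
    // The class name ConcatDemo is hypothetical.
    static final class ConcatDemo {
        public static void main(String[] args) {
            Spliterator<Integer> s =
                Stream.concat(Stream.of(1, 2, 3), Stream.of(4, 5)).spliterator();
            System.out.println(s.estimateSize());                          // 5
            System.out.println(s.hasCharacteristics(Spliterator.SORTED));  // false, even for sorted inputs
        }
    }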
|
||||
|
||||
/**
|
||||
* Given two Runnables, return a Runnable that executes both in sequence,
|
||||
* even if the first throws an exception, and if both throw exceptions, add
|
||||
* any exceptions thrown by the second as suppressed exceptions of the first.
|
||||
*/
|
||||
static Runnable composeWithExceptions(Runnable a, Runnable b) {
|
||||
return new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
a.run();
|
||||
}
|
||||
catch (Throwable e1) {
|
||||
try {
|
||||
b.run();
|
||||
}
|
||||
catch (Throwable e2) {
|
||||
try {
|
||||
e1.addSuppressed(e2);
|
||||
} catch (Throwable ignore) {}
|
||||
}
|
||||
throw e1;
|
||||
}
|
||||
b.run();
|
||||
}
|
||||
};
|
||||
}
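    // Illustrative sketch, not part of the original file: behaviour of the
    // composed Runnable above when both actions fail. The class name ComposeDemo
    // and the exception messages are hypothetical.
    static final class ComposeDemo {
        public static void main(String[] args) {
            Runnable first  = () -> { throw new RuntimeException("first"); };
            Runnable second = () -> { throw new RuntimeException("second"); };
            try {
                composeWithExceptions(first, second).run();
            } catch (RuntimeException e) {
                System.out.println(e.getMessage());                        // first
                System.out.println(e.getSuppressed()[0].getMessage());     // second, attached as suppressed
            }
        }
    }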
|
||||
|
||||
/**
|
||||
* Given two streams, return a Runnable that
|
||||
* executes both of their {@link BaseStream#close} methods in sequence,
|
||||
* even if the first throws an exception, and if both throw exceptions, add
|
||||
* any exceptions thrown by the second as suppressed exceptions of the first.
|
||||
*/
|
||||
static Runnable composedClose(BaseStream<?, ?> a, BaseStream<?, ?> b) {
|
||||
return new Runnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
a.close();
|
||||
}
|
||||
catch (Throwable e1) {
|
||||
try {
|
||||
b.close();
|
||||
}
|
||||
catch (Throwable e2) {
|
||||
try {
|
||||
e1.addSuppressed(e2);
|
||||
} catch (Throwable ignore) {}
|
||||
}
|
||||
throw e1;
|
||||
}
|
||||
b.close();
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
98
jdkSrc/jdk8/java/util/stream/TerminalOp.java
Normal file
98
jdkSrc/jdk8/java/util/stream/TerminalOp.java
Normal file
@@ -0,0 +1,98 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.Spliterator;
|
||||
|
||||
/**
|
||||
* An operation in a stream pipeline that takes a stream as input and produces
|
||||
* a result or side-effect. A {@code TerminalOp} has an input type and stream
|
||||
* shape, and a result type. A {@code TerminalOp} also has a set of
|
||||
* <em>operation flags</em> that describes how the operation processes elements
|
||||
* of the stream (such as short-circuiting or respecting encounter order; see
|
||||
* {@link StreamOpFlag}).
|
||||
*
|
||||
* <p>A {@code TerminalOp} must provide a sequential and parallel implementation
|
||||
* of the operation relative to a given stream source and set of intermediate
|
||||
* operations.
|
||||
*
|
||||
* @param <E_IN> the type of input elements
|
||||
* @param <R> the type of the result
|
||||
* @since 1.8
|
||||
*/
|
||||
interface TerminalOp<E_IN, R> {
|
||||
/**
|
||||
* Gets the shape of the input type of this operation.
|
||||
*
|
||||
 * @implSpec The default implementation returns {@code StreamShape.REFERENCE}.
|
||||
*
|
||||
* @return StreamShape of the input type of this operation
|
||||
*/
|
||||
default StreamShape inputShape() { return StreamShape.REFERENCE; }
|
||||
|
||||
/**
|
||||
* Gets the stream flags of the operation. Terminal operations may set a
|
||||
* limited subset of the stream flags defined in {@link StreamOpFlag}, and
|
||||
* these flags are combined with the previously combined stream and
|
||||
* intermediate operation flags for the pipeline.
|
||||
*
|
||||
* @implSpec The default implementation returns zero.
|
||||
*
|
||||
* @return the stream flags for this operation
|
||||
* @see StreamOpFlag
|
||||
*/
|
||||
default int getOpFlags() { return 0; }
|
||||
|
||||
/**
|
||||
* Performs a parallel evaluation of the operation using the specified
|
||||
* {@code PipelineHelper}, which describes the upstream intermediate
|
||||
* operations.
|
||||
*
|
||||
* @implSpec The default performs a sequential evaluation of the operation
|
||||
* using the specified {@code PipelineHelper}.
|
||||
*
|
||||
* @param helper the pipeline helper
|
||||
* @param spliterator the source spliterator
|
||||
* @return the result of the evaluation
|
||||
*/
|
||||
default <P_IN> R evaluateParallel(PipelineHelper<E_IN> helper,
|
||||
Spliterator<P_IN> spliterator) {
|
||||
if (Tripwire.ENABLED)
|
||||
Tripwire.trip(getClass(), "{0} triggering TerminalOp.evaluateParallel serial default");
|
||||
return evaluateSequential(helper, spliterator);
|
||||
}
|
||||
|
||||
/**
|
||||
* Performs a sequential evaluation of the operation using the specified
|
||||
* {@code PipelineHelper}, which describes the upstream intermediate
|
||||
* operations.
|
||||
*
|
||||
* @param helper the pipeline helper
|
||||
* @param spliterator the source spliterator
|
||||
* @return the result of the evaluation
|
||||
*/
|
||||
<P_IN> R evaluateSequential(PipelineHelper<E_IN> helper,
|
||||
Spliterator<P_IN> spliterator);
|
||||
}
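// Illustrative sketch, not part of the original file: a hypothetical terminal op
// that counts elements, showing the contract above. Only evaluateSequential is
// mandatory; evaluateParallel falls back to it by default. The class name
// CountingOp is invented for this example, and the use of
// PipelineHelper.wrapAndCopyInto is an assumption about the helper API.
final class CountingOp<T> implements TerminalOp<T, Long> {
    @Override
    public <P_IN> Long evaluateSequential(PipelineHelper<T> helper,
                                          Spliterator<P_IN> spliterator) {
        long[] count = new long[1];
        // Wrap a counting sink with the upstream intermediate operations and
        // push every element from the source spliterator through it.
        helper.wrapAndCopyInto((Sink<T>) t -> count[0]++, spliterator);
        return count[0];
    }
}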
|
||||
38
jdkSrc/jdk8/java/util/stream/TerminalSink.java
Normal file
38
jdkSrc/jdk8/java/util/stream/TerminalSink.java
Normal file
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* A {@link Sink} which accumulates state as elements are accepted, and allows
|
||||
* a result to be retrieved after the computation is finished.
|
||||
*
|
||||
* @param <T> the type of elements to be accepted
|
||||
* @param <R> the type of the result
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
interface TerminalSink<T, R> extends Sink<T>, Supplier<R> { }
|
||||
69
jdkSrc/jdk8/java/util/stream/Tripwire.java
Normal file
69
jdkSrc/jdk8/java/util/stream/Tripwire.java
Normal file
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package java.util.stream;
|
||||
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
|
||||
import sun.util.logging.PlatformLogger;
|
||||
|
||||
/**
|
||||
* Utility class for detecting inadvertent uses of boxing in
|
||||
* {@code java.util.stream} classes. The detection is turned on or off based on
|
||||
* whether the system property {@code org.openjdk.java.util.stream.tripwire} is
|
||||
* considered {@code true} according to {@link Boolean#getBoolean(String)}.
|
||||
* This should normally be turned off for production use.
|
||||
*
|
||||
* @apiNote
|
||||
* Typical usage would be for boxing code to do:
|
||||
* <pre>{@code
|
||||
* if (Tripwire.ENABLED)
|
||||
* Tripwire.trip(getClass(), "{0} calling Sink.OfInt.accept(Integer)");
|
||||
* }</pre>
|
||||
*
|
||||
* @since 1.8
|
||||
*/
|
||||
final class Tripwire {
|
||||
private static final String TRIPWIRE_PROPERTY = "org.openjdk.java.util.stream.tripwire";
|
||||
|
||||
/** Should debugging checks be enabled? */
|
||||
static final boolean ENABLED = AccessController.doPrivileged(
|
||||
(PrivilegedAction<Boolean>) () -> Boolean.getBoolean(TRIPWIRE_PROPERTY));
|
||||
|
||||
private Tripwire() { }
|
||||
|
||||
/**
|
||||
* Produces a log warning, using {@code PlatformLogger.getLogger(className)},
|
||||
 * with the supplied message. The class name of {@code trippingClass} will
|
||||
* be used as the first parameter to the message.
|
||||
*
|
||||
* @param trippingClass Name of the class generating the message
|
||||
* @param msg A message format string of the type expected by
|
||||
* {@link PlatformLogger}
|
||||
*/
|
||||
static void trip(Class<?> trippingClass, String msg) {
|
||||
PlatformLogger.getLogger(trippingClass.getName()).warning(msg, trippingClass.getName());
|
||||
}
|
||||
}
|
||||
740
jdkSrc/jdk8/java/util/stream/package-info.java
Normal file
740
jdkSrc/jdk8/java/util/stream/package-info.java
Normal file
@@ -0,0 +1,740 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Classes to support functional-style operations on streams of elements, such
|
||||
* as map-reduce transformations on collections. For example:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int sum = widgets.stream()
|
||||
* .filter(b -> b.getColor() == RED)
|
||||
* .mapToInt(b -> b.getWeight())
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>Here we use {@code widgets}, a {@code Collection<Widget>},
|
||||
* as a source for a stream, and then perform a filter-map-reduce on the stream
|
||||
* to obtain the sum of the weights of the red widgets. (Summation is an
|
||||
* example of a <a href="package-summary.html#Reduction">reduction</a>
|
||||
* operation.)
|
||||
*
|
||||
* <p>The key abstraction introduced in this package is <em>stream</em>. The
|
||||
* classes {@link java.util.stream.Stream}, {@link java.util.stream.IntStream},
|
||||
* {@link java.util.stream.LongStream}, and {@link java.util.stream.DoubleStream}
|
||||
* are streams over objects and the primitive {@code int}, {@code long} and
|
||||
* {@code double} types. Streams differ from collections in several ways:
|
||||
*
|
||||
* <ul>
|
||||
* <li>No storage. A stream is not a data structure that stores elements;
|
||||
* instead, it conveys elements from a source such as a data structure,
|
||||
* an array, a generator function, or an I/O channel, through a pipeline of
|
||||
* computational operations.</li>
|
||||
* <li>Functional in nature. An operation on a stream produces a result,
|
||||
* but does not modify its source. For example, filtering a {@code Stream}
|
||||
* obtained from a collection produces a new {@code Stream} without the
|
||||
* filtered elements, rather than removing elements from the source
|
||||
* collection.</li>
|
||||
* <li>Laziness-seeking. Many stream operations, such as filtering, mapping,
|
||||
* or duplicate removal, can be implemented lazily, exposing opportunities
|
||||
* for optimization. For example, "find the first {@code String} with
|
||||
* three consecutive vowels" need not examine all the input strings.
|
||||
* Stream operations are divided into intermediate ({@code Stream}-producing)
|
||||
* operations and terminal (value- or side-effect-producing) operations.
|
||||
* Intermediate operations are always lazy.</li>
|
||||
* <li>Possibly unbounded. While collections have a finite size, streams
|
||||
* need not. Short-circuiting operations such as {@code limit(n)} or
|
||||
* {@code findFirst()} can allow computations on infinite streams to
|
||||
* complete in finite time.</li>
|
||||
* <li>Consumable. The elements of a stream are only visited once during
|
||||
* the life of a stream. Like an {@link java.util.Iterator}, a new stream
|
||||
* must be generated to revisit the same elements of the source.
|
||||
* </li>
|
||||
* </ul>
|
||||
*
|
||||
* Streams can be obtained in a number of ways. Some examples include:
|
||||
* <ul>
|
||||
* <li>From a {@link java.util.Collection} via the {@code stream()} and
|
||||
* {@code parallelStream()} methods;</li>
|
||||
* <li>From an array via {@link java.util.Arrays#stream(Object[])};</li>
|
||||
* <li>From static factory methods on the stream classes, such as
|
||||
* {@link java.util.stream.Stream#of(Object[])},
|
||||
* {@link java.util.stream.IntStream#range(int, int)}
|
||||
* or {@link java.util.stream.Stream#iterate(Object, UnaryOperator)};</li>
|
||||
* <li>The lines of a file can be obtained from {@link java.io.BufferedReader#lines()};</li>
|
||||
* <li>Streams of file paths can be obtained from methods in {@link java.nio.file.Files};</li>
|
||||
* <li>Streams of random numbers can be obtained from {@link java.util.Random#ints()};</li>
|
||||
* <li>Numerous other stream-bearing methods in the JDK, including
|
||||
* {@link java.util.BitSet#stream()},
|
||||
* {@link java.util.regex.Pattern#splitAsStream(java.lang.CharSequence)},
|
||||
* and {@link java.util.jar.JarFile#stream()}.</li>
|
||||
* </ul>
|
||||
*
|
||||
* <p>Additional stream sources can be provided by third-party libraries using
|
||||
* <a href="package-summary.html#StreamSources">these techniques</a>.
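 *
 * <p>For instance, assuming {@code list} is a {@code List<String>} and
 * {@code reader} is a {@code Reader}, a few of these sources in use:
 *
 * <pre>{@code
 *     Stream<String> fromCollection = list.stream();
 *     IntStream      fromRange      = IntStream.range(0, 100);
 *     Stream<String> fromArray      = Arrays.stream(new String[] {"a", "b", "c"});
 *     Stream<String> fromLines      = new BufferedReader(reader).lines();
 * }</pre>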
|
||||
*
|
||||
* <h2><a name="StreamOps">Stream operations and pipelines</a></h2>
|
||||
*
|
||||
* <p>Stream operations are divided into <em>intermediate</em> and
|
||||
* <em>terminal</em> operations, and are combined to form <em>stream
|
||||
* pipelines</em>. A stream pipeline consists of a source (such as a
|
||||
* {@code Collection}, an array, a generator function, or an I/O channel);
|
||||
* followed by zero or more intermediate operations such as
|
||||
* {@code Stream.filter} or {@code Stream.map}; and a terminal operation such
|
||||
* as {@code Stream.forEach} or {@code Stream.reduce}.
|
||||
*
|
||||
* <p>Intermediate operations return a new stream. They are always
|
||||
* <em>lazy</em>; executing an intermediate operation such as
|
||||
* {@code filter()} does not actually perform any filtering, but instead
|
||||
* creates a new stream that, when traversed, contains the elements of
|
||||
* the initial stream that match the given predicate. Traversal
|
||||
* of the pipeline source does not begin until the terminal operation of the
|
||||
* pipeline is executed.
|
||||
*
|
||||
* <p>Terminal operations, such as {@code Stream.forEach} or
|
||||
* {@code IntStream.sum}, may traverse the stream to produce a result or a
|
||||
* side-effect. After the terminal operation is performed, the stream pipeline
|
||||
* is considered consumed, and can no longer be used; if you need to traverse
|
||||
* the same data source again, you must return to the data source to get a new
|
||||
* stream. In almost all cases, terminal operations are <em>eager</em>,
|
||||
* completing their traversal of the data source and processing of the pipeline
|
||||
* before returning. Only the terminal operations {@code iterator()} and
|
||||
* {@code spliterator()} are not; these are provided as an "escape hatch" to enable
|
||||
* arbitrary client-controlled pipeline traversals in the event that the
|
||||
* existing operations are not sufficient to the task.
|
||||
*
|
||||
* <p> Processing streams lazily allows for significant efficiencies; in a
|
||||
* pipeline such as the filter-map-sum example above, filtering, mapping, and
|
||||
* summing can be fused into a single pass on the data, with minimal
|
||||
* intermediate state. Laziness also allows avoiding examining all the data
|
||||
* when it is not necessary; for operations such as "find the first string
|
||||
* longer than 1000 characters", it is only necessary to examine just enough
|
||||
* strings to find one that has the desired characteristics without examining
|
||||
* all of the strings available from the source. (This behavior becomes even
|
||||
* more important when the input stream is infinite and not merely large.)
|
||||
*
|
||||
* <p>Intermediate operations are further divided into <em>stateless</em>
|
||||
* and <em>stateful</em> operations. Stateless operations, such as {@code filter}
|
||||
 * and {@code map}, retain no state from previously seen elements when processing
|
||||
* a new element -- each element can be processed
|
||||
* independently of operations on other elements. Stateful operations, such as
|
||||
* {@code distinct} and {@code sorted}, may incorporate state from previously
|
||||
* seen elements when processing new elements.
|
||||
*
|
||||
* <p>Stateful operations may need to process the entire input
|
||||
* before producing a result. For example, one cannot produce any results from
|
||||
* sorting a stream until one has seen all elements of the stream. As a result,
|
||||
* under parallel computation, some pipelines containing stateful intermediate
|
||||
* operations may require multiple passes on the data or may need to buffer
|
||||
* significant data. Pipelines containing exclusively stateless intermediate
|
||||
* operations can be processed in a single pass, whether sequential or parallel,
|
||||
* with minimal data buffering.
|
||||
*
|
||||
* <p>Further, some operations are deemed <em>short-circuiting</em> operations.
|
||||
* An intermediate operation is short-circuiting if, when presented with
|
||||
* infinite input, it may produce a finite stream as a result. A terminal
|
||||
* operation is short-circuiting if, when presented with infinite input, it may
|
||||
* terminate in finite time. Having a short-circuiting operation in the pipeline
|
||||
* is a necessary, but not sufficient, condition for the processing of an infinite
|
||||
* stream to terminate normally in finite time.
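 *
 * <p>For instance, {@code limit()} (a short-circuiting intermediate operation)
 * allows a pipeline over an infinite stream to terminate:
 *
 * <pre>{@code
 *     List<Integer> firstFiveEvens = Stream.iterate(0, n -> n + 2)
 *                                          .limit(5)
 *                                          .collect(Collectors.toList());  // [0, 2, 4, 6, 8]
 * }</pre>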
|
||||
*
|
||||
* <h3>Parallelism</h3>
|
||||
*
|
||||
* <p>Processing elements with an explicit {@code for-}loop is inherently serial.
|
||||
* Streams facilitate parallel execution by reframing the computation as a pipeline of
|
||||
* aggregate operations, rather than as imperative operations on each individual
|
||||
* element. All streams operations can execute either in serial or in parallel.
|
||||
* The stream implementations in the JDK create serial streams unless parallelism is
|
||||
* explicitly requested. For example, {@code Collection} has methods
|
||||
* {@link java.util.Collection#stream} and {@link java.util.Collection#parallelStream},
|
||||
* which produce sequential and parallel streams respectively; other
|
||||
* stream-bearing methods such as {@link java.util.stream.IntStream#range(int, int)}
|
||||
* produce sequential streams but these streams can be efficiently parallelized by
|
||||
* invoking their {@link java.util.stream.BaseStream#parallel()} method.
|
||||
* To execute the prior "sum of weights of widgets" query in parallel, we would
|
||||
* do:
|
||||
*
|
||||
* <pre>{@code
|
||||
* int sumOfWeights = widgets.}<code><b>parallelStream()</b></code>{@code
|
||||
* .filter(b -> b.getColor() == RED)
|
||||
* .mapToInt(b -> b.getWeight())
|
||||
* .sum();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>The only difference between the serial and parallel versions of this
|
||||
 * example is the creation of the initial stream, using "{@code parallelStream()}"
 * instead of "{@code stream()}". Whether a stream will execute in serial or
 * parallel can be determined with the {@code isParallel()} method, and the
 * orientation of a stream can be modified with the
 * {@link java.util.stream.BaseStream#sequential()} and
 * {@link java.util.stream.BaseStream#parallel()} operations. When the terminal
 * operation is initiated, the stream pipeline is executed sequentially or in
 * parallel depending on the orientation of the stream on which it is invoked.
|
||||
*
|
||||
* <p>Except for operations identified as explicitly nondeterministic, such
|
||||
* as {@code findAny()}, whether a stream executes sequentially or in parallel
|
||||
* should not change the result of the computation.
|
||||
*
|
||||
* <p>Most stream operations accept parameters that describe user-specified
|
||||
* behavior, which are often lambda expressions. To preserve correct behavior,
|
||||
* these <em>behavioral parameters</em> must be <em>non-interfering</em>, and in
|
||||
* most cases must be <em>stateless</em>. Such parameters are always instances
|
||||
* of a <a href="../function/package-summary.html">functional interface</a> such
|
||||
* as {@link java.util.function.Function}, and are often lambda expressions or
|
||||
* method references.
|
||||
*
|
||||
* <h3><a name="NonInterference">Non-interference</a></h3>
|
||||
*
|
||||
* Streams enable you to execute possibly-parallel aggregate operations over a
|
||||
* variety of data sources, including even non-thread-safe collections such as
|
||||
* {@code ArrayList}. This is possible only if we can prevent
|
||||
* <em>interference</em> with the data source during the execution of a stream
|
||||
* pipeline. Except for the escape-hatch operations {@code iterator()} and
|
||||
* {@code spliterator()}, execution begins when the terminal operation is
|
||||
* invoked, and ends when the terminal operation completes. For most data
|
||||
* sources, preventing interference means ensuring that the data source is
|
||||
* <em>not modified at all</em> during the execution of the stream pipeline.
|
||||
* The notable exception to this are streams whose sources are concurrent
|
||||
* collections, which are specifically designed to handle concurrent modification.
|
||||
* Concurrent stream sources are those whose {@code Spliterator} reports the
|
||||
* {@code CONCURRENT} characteristic.
|
||||
*
|
||||
* <p>Accordingly, behavioral parameters in stream pipelines whose source might
|
||||
* not be concurrent should never modify the stream's data source.
|
||||
* A behavioral parameter is said to <em>interfere</em> with a non-concurrent
|
||||
* data source if it modifies, or causes to be
|
||||
* modified, the stream's data source. The need for non-interference applies
|
||||
* to all pipelines, not just parallel ones. Unless the stream source is
|
||||
* concurrent, modifying a stream's data source during execution of a stream
|
||||
* pipeline can cause exceptions, incorrect answers, or nonconformant behavior.
|
||||
*
|
||||
* For well-behaved stream sources, the source can be modified before the
|
||||
* terminal operation commences and those modifications will be reflected in
|
||||
* the covered elements. For example, consider the following code:
|
||||
*
|
||||
* <pre>{@code
|
||||
 * List<String> l = new ArrayList<>(Arrays.asList("one", "two"));
|
||||
* Stream<String> sl = l.stream();
|
||||
* l.add("three");
|
||||
* String s = sl.collect(joining(" "));
|
||||
* }</pre>
|
||||
*
|
||||
* First a list is created consisting of two strings: "one"; and "two". Then a
|
||||
* stream is created from that list. Next the list is modified by adding a third
|
||||
* string: "three". Finally the elements of the stream are collected and joined
|
||||
* together. Since the list was modified before the terminal {@code collect}
|
||||
* operation commenced the result will be a string of "one two three". All the
|
||||
* streams returned from JDK collections, and most other JDK classes,
|
||||
* are well-behaved in this manner; for streams generated by other libraries, see
|
||||
* <a href="package-summary.html#StreamSources">Low-level stream
|
||||
* construction</a> for requirements for building well-behaved streams.
|
||||
*
|
||||
* <h3><a name="Statelessness">Stateless behaviors</a></h3>
|
||||
*
|
||||
* Stream pipeline results may be nondeterministic or incorrect if the behavioral
|
||||
* parameters to the stream operations are <em>stateful</em>. A stateful lambda
|
||||
* (or other object implementing the appropriate functional interface) is one
|
||||
* whose result depends on any state which might change during the execution
|
||||
* of the stream pipeline. An example of a stateful lambda is the parameter
|
||||
* to {@code map()} in:
|
||||
*
|
||||
* <pre>{@code
|
||||
* Set<Integer> seen = Collections.synchronizedSet(new HashSet<>());
|
||||
* stream.parallel().map(e -> { if (seen.add(e)) return 0; else return e; })...
|
||||
* }</pre>
|
||||
*
|
||||
* Here, if the mapping operation is performed in parallel, the results for the
|
||||
* same input could vary from run to run, due to thread scheduling differences,
|
||||
* whereas, with a stateless lambda expression the results would always be the
|
||||
* same.
|
||||
*
|
||||
* <p>Note also that attempting to access mutable state from behavioral parameters
|
||||
* presents you with a bad choice with respect to safety and performance; if
|
||||
* you do not synchronize access to that state, you have a data race and
|
||||
* therefore your code is broken, but if you do synchronize access to that
|
||||
* state, you risk having contention undermine the parallelism you are seeking
|
||||
* to benefit from. The best approach is to avoid stateful behavioral
|
||||
* parameters to stream operations entirely; there is usually a way to
|
||||
* restructure the stream pipeline to avoid statefulness.
|
||||
*
|
||||
* <h3>Side-effects</h3>
|
||||
*
|
||||
* Side-effects in behavioral parameters to stream operations are, in general,
|
||||
* discouraged, as they can often lead to unwitting violations of the
|
||||
* statelessness requirement, as well as other thread-safety hazards.
|
||||
*
|
||||
* <p>If the behavioral parameters do have side-effects, unless explicitly
|
||||
* stated, there are no guarantees as to the
|
||||
* <a href="../concurrent/package-summary.html#MemoryVisibility"><i>visibility</i></a>
|
||||
* of those side-effects to other threads, nor are there any guarantees that
|
||||
* different operations on the "same" element within the same stream pipeline
|
||||
* are executed in the same thread. Further, the ordering of those effects
|
||||
* may be surprising. Even when a pipeline is constrained to produce a
|
||||
* <em>result</em> that is consistent with the encounter order of the stream
|
||||
* source (for example, {@code IntStream.range(0,5).parallel().map(x -> x*2).toArray()}
|
||||
* must produce {@code [0, 2, 4, 6, 8]}), no guarantees are made as to the order
|
||||
* in which the mapper function is applied to individual elements, or in what
|
||||
* thread any behavioral parameter is executed for a given element.
|
||||
*
|
||||
* <p>Many computations where one might be tempted to use side effects can be more
|
||||
* safely and efficiently expressed without side-effects, such as using
|
||||
* <a href="package-summary.html#Reduction">reduction</a> instead of mutable
|
||||
* accumulators. However, side-effects such as using {@code println()} for debugging
|
||||
* purposes are usually harmless. A small number of stream operations, such as
|
||||
* {@code forEach()} and {@code peek()}, can operate only via side-effects;
|
||||
* these should be used with care.
|
||||
*
|
||||
* <p>As an example of how to transform a stream pipeline that inappropriately
|
||||
* uses side-effects to one that does not, the following code searches a stream
|
||||
* of strings for those matching a given regular expression, and puts the
|
||||
* matches in a list.
|
||||
*
|
||||
* <pre>{@code
|
||||
* ArrayList<String> results = new ArrayList<>();
|
||||
* stream.filter(s -> pattern.matcher(s).matches())
|
||||
* .forEach(s -> results.add(s)); // Unnecessary use of side-effects!
|
||||
* }</pre>
|
||||
*
|
||||
* This code unnecessarily uses side-effects. If executed in parallel, the
|
||||
* non-thread-safety of {@code ArrayList} would cause incorrect results, and
|
||||
* adding needed synchronization would cause contention, undermining the
|
||||
* benefit of parallelism. Furthermore, using side-effects here is completely
|
||||
* unnecessary; the {@code forEach()} can simply be replaced with a reduction
|
||||
* operation that is safer, more efficient, and more amenable to
|
||||
* parallelization:
|
||||
*
|
||||
* <pre>{@code
|
||||
 * List<String> results =
|
||||
* stream.filter(s -> pattern.matcher(s).matches())
|
||||
* .collect(Collectors.toList()); // No side-effects!
|
||||
* }</pre>
|
||||
*
|
||||
* <h3><a name="Ordering">Ordering</a></h3>
|
||||
*
|
||||
* <p>Streams may or may not have a defined <em>encounter order</em>. Whether
|
||||
* or not a stream has an encounter order depends on the source and the
|
||||
* intermediate operations. Certain stream sources (such as {@code List} or
|
||||
* arrays) are intrinsically ordered, whereas others (such as {@code HashSet})
|
||||
* are not. Some intermediate operations, such as {@code sorted()}, may impose
|
||||
* an encounter order on an otherwise unordered stream, and others may render an
|
||||
* ordered stream unordered, such as {@link java.util.stream.BaseStream#unordered()}.
|
||||
* Further, some terminal operations may ignore encounter order, such as
|
||||
* {@code forEach()}.
|
||||
*
|
||||
* <p>If a stream is ordered, most operations are constrained to operate on the
|
||||
* elements in their encounter order; if the source of a stream is a {@code List}
|
||||
* containing {@code [1, 2, 3]}, then the result of executing {@code map(x -> x*2)}
|
||||
* must be {@code [2, 4, 6]}. However, if the source has no defined encounter
|
||||
* order, then any permutation of the values {@code [2, 4, 6]} would be a valid
|
||||
* result.
|
||||
*
|
||||
* <p>For sequential streams, the presence or absence of an encounter order does
|
||||
* not affect performance, only determinism. If a stream is ordered, repeated
|
||||
* execution of identical stream pipelines on an identical source will produce
|
||||
* an identical result; if it is not ordered, repeated execution might produce
|
||||
* different results.
|
||||
*
|
||||
* <p>For parallel streams, relaxing the ordering constraint can sometimes enable
|
||||
* more efficient execution. Certain aggregate operations,
|
||||
* such as filtering duplicates ({@code distinct()}) or grouped reductions
|
||||
* ({@code Collectors.groupingBy()}) can be implemented more efficiently if ordering of elements
|
||||
* is not relevant. Similarly, operations that are intrinsically tied to encounter order,
|
||||
* such as {@code limit()}, may require
|
||||
* buffering to ensure proper ordering, undermining the benefit of parallelism.
|
||||
* In cases where the stream has an encounter order, but the user does not
|
||||
* particularly <em>care</em> about that encounter order, explicitly de-ordering
|
||||
* the stream with {@link java.util.stream.BaseStream#unordered() unordered()} may
|
||||
* improve parallel performance for some stateful or terminal operations.
|
||||
 * However, most stream pipelines, such as the "sum of weights of widgets" example
|
||||
* above, still parallelize efficiently even under ordering constraints.
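 *
 * <p>For instance, when the encounter order of the result does not matter
 * (assuming {@code names} is a {@code Collection<String>}), dropping it
 * explicitly may help a parallel {@code distinct()}:
 *
 * <pre>{@code
 *     List<String> unique = names.parallelStream()
 *                                .unordered()
 *                                .distinct()
 *                                .collect(Collectors.toList());
 * }</pre>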
|
||||
*
|
||||
* <h2><a name="Reduction">Reduction operations</a></h2>
|
||||
*
|
||||
* A <em>reduction</em> operation (also called a <em>fold</em>) takes a sequence
|
||||
* of input elements and combines them into a single summary result by repeated
|
||||
* application of a combining operation, such as finding the sum or maximum of
|
||||
* a set of numbers, or accumulating elements into a list. The streams classes have
|
||||
* multiple forms of general reduction operations, called
|
||||
* {@link java.util.stream.Stream#reduce(java.util.function.BinaryOperator) reduce()}
|
||||
* and {@link java.util.stream.Stream#collect(java.util.stream.Collector) collect()},
|
||||
* as well as multiple specialized reduction forms such as
|
||||
* {@link java.util.stream.IntStream#sum() sum()}, {@link java.util.stream.IntStream#max() max()},
|
||||
* or {@link java.util.stream.IntStream#count() count()}.
|
||||
*
|
||||
* <p>Of course, such operations can be readily implemented as simple sequential
|
||||
* loops, as in:
|
||||
* <pre>{@code
|
||||
* int sum = 0;
|
||||
* for (int x : numbers) {
|
||||
* sum += x;
|
||||
* }
|
||||
* }</pre>
|
||||
* However, there are good reasons to prefer a reduce operation
|
||||
* over a mutative accumulation such as the above. Not only is a reduction
|
||||
* "more abstract" -- it operates on the stream as a whole rather than individual
|
||||
* elements -- but a properly constructed reduce operation is inherently
|
||||
* parallelizable, so long as the function(s) used to process the elements
|
||||
* are <a href="package-summary.html#Associativity">associative</a> and
|
||||
* <a href="package-summary.html#NonInterfering">stateless</a>.
|
||||
* For example, given a stream of numbers for which we want to find the sum, we
|
||||
* can write:
|
||||
* <pre>{@code
|
||||
* int sum = numbers.stream().reduce(0, (x,y) -> x+y);
|
||||
* }</pre>
|
||||
* or:
|
||||
* <pre>{@code
|
||||
* int sum = numbers.stream().reduce(0, Integer::sum);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>These reduction operations can run safely in parallel with almost no
|
||||
* modification:
|
||||
* <pre>{@code
|
||||
* int sum = numbers.parallelStream().reduce(0, Integer::sum);
|
||||
* }</pre>
|
||||
*
|
||||
 * <p>Reduction parallelizes well because the implementation
|
||||
* can operate on subsets of the data in parallel, and then combine the
|
||||
* intermediate results to get the final correct answer. (Even if the language
|
||||
* had a "parallel for-each" construct, the mutative accumulation approach would
|
||||
* still required the developer to provide
|
||||
* thread-safe updates to the shared accumulating variable {@code sum}, and
|
||||
* the required synchronization would then likely eliminate any performance gain from
|
||||
* parallelism.) Using {@code reduce()} instead removes all of the
|
||||
* burden of parallelizing the reduction operation, and the library can provide
|
||||
* an efficient parallel implementation with no additional synchronization
|
||||
* required.
|
||||
*
|
||||
* <p>The "widgets" examples shown earlier shows how reduction combines with
|
||||
* other operations to replace for loops with bulk operations. If {@code widgets}
|
||||
* is a collection of {@code Widget} objects, which have a {@code getWeight} method,
|
||||
* we can find the heaviest widget with:
|
||||
* <pre>{@code
|
||||
* OptionalInt heaviest = widgets.parallelStream()
|
||||
* .mapToInt(Widget::getWeight)
|
||||
* .max();
|
||||
* }</pre>
|
||||
*
|
||||
* <p>In its more general form, a {@code reduce} operation on elements of type
|
||||
* {@code <T>} yielding a result of type {@code <U>} requires three parameters:
|
||||
* <pre>{@code
|
||||
* <U> U reduce(U identity,
|
||||
* BiFunction<U, ? super T, U> accumulator,
|
||||
* BinaryOperator<U> combiner);
|
||||
* }</pre>
|
||||
* Here, the <em>identity</em> element is both an initial seed value for the reduction
|
||||
* and a default result if there are no input elements. The <em>accumulator</em>
|
||||
* function takes a partial result and the next element, and produces a new
|
||||
* partial result. The <em>combiner</em> function combines two partial results
|
||||
* to produce a new partial result. (The combiner is necessary in parallel
|
||||
* reductions, where the input is partitioned, a partial accumulation computed
|
||||
* for each partition, and then the partial results are combined to produce a
|
||||
* final result.)
|
||||
*
|
||||
* <p>More formally, the {@code identity} value must be an <em>identity</em> for
|
||||
* the combiner function. This means that for all {@code u},
|
||||
* {@code combiner.apply(identity, u)} is equal to {@code u}. Additionally, the
|
||||
* {@code combiner} function must be <a href="package-summary.html#Associativity">associative</a> and
|
||||
* must be compatible with the {@code accumulator} function: for all {@code u}
|
||||
* and {@code t}, {@code combiner.apply(u, accumulator.apply(identity, t))} must
|
||||
* be {@code equals()} to {@code accumulator.apply(u, t)}.
|
||||
*
|
||||
* <p>The three-argument form is a generalization of the two-argument form,
|
||||
* incorporating a mapping step into the accumulation step. We could
|
||||
* re-cast the simple sum-of-weights example using the more general form as
|
||||
* follows:
|
||||
* <pre>{@code
|
||||
* int sumOfWeights = widgets.stream()
|
||||
* .reduce(0,
|
||||
 *                                   (sum, b) -> sum + b.getWeight(),
|
||||
* Integer::sum);
|
||||
* }</pre>
|
||||
* though the explicit map-reduce form is more readable and therefore should
|
||||
* usually be preferred. The generalized form is provided for cases where
|
||||
* significant work can be optimized away by combining mapping and reducing
|
||||
* into a single function.
|
||||
*
|
||||
* <h3><a name="MutableReduction">Mutable reduction</a></h3>
|
||||
*
|
||||
* A <em>mutable reduction operation</em> accumulates input elements into a
|
||||
* mutable result container, such as a {@code Collection} or {@code StringBuilder},
|
||||
* as it processes the elements in the stream.
|
||||
*
|
||||
* <p>If we wanted to take a stream of strings and concatenate them into a
|
||||
* single long string, we <em>could</em> achieve this with ordinary reduction:
|
||||
* <pre>{@code
|
||||
 *     String concatenated = strings.reduce("", String::concat);
|
||||
* }</pre>
|
||||
*
|
||||
* <p>We would get the desired result, and it would even work in parallel. However,
|
||||
* we might not be happy about the performance! Such an implementation would do
|
||||
* a great deal of string copying, and the run time would be <em>O(n^2)</em> in
|
||||
* the number of characters. A more performant approach would be to accumulate
|
||||
* the results into a {@link java.lang.StringBuilder}, which is a mutable
|
||||
* container for accumulating strings. We can use the same technique to
|
||||
* parallelize mutable reduction as we do with ordinary reduction.
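*
* <p>As a sketch of that approach (illustrative only), the {@code StringBuilder}
* accumulation can be expressed with {@code collect}, which is described next:
* <pre>{@code
*     // accumulate into a mutable StringBuilder; combine partial builders with append
*     String concatenated = strings.collect(StringBuilder::new,
*                                           StringBuilder::append,
*                                           StringBuilder::append)
*                                  .toString();
* }</pre>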
*
* <p>The mutable reduction operation is called
* {@link java.util.stream.Stream#collect(Collector) collect()},
* as it collects together the desired results into a result container such
* as a {@code Collection}.
* A {@code collect} operation requires three functions:
* a supplier function to construct new instances of the result container, an
* accumulator function to incorporate an input element into a result
* container, and a combining function to merge the contents of one result
* container into another. The form of this is very similar to the general
* form of ordinary reduction:
* <pre>{@code
*     <R> R collect(Supplier<R> supplier,
*                   BiConsumer<R, ? super T> accumulator,
*                   BiConsumer<R, R> combiner);
* }</pre>
* <p>As with {@code reduce()}, a benefit of expressing {@code collect} in this
* abstract way is that it is directly amenable to parallelization: we can
* accumulate partial results in parallel and then combine them, so long as the
* accumulation and combining functions satisfy the appropriate requirements.
* For example, to collect the String representations of the elements in a
* stream into an {@code ArrayList}, we could write the obvious sequential
* for-each form:
* <pre>{@code
*     ArrayList<String> strings = new ArrayList<>();
*     for (T element : stream) {
*         strings.add(element.toString());
*     }
* }</pre>
* Or we could use a parallelizable collect form:
* <pre>{@code
*     ArrayList<String> strings = stream.collect(() -> new ArrayList<>(),
*                                                (c, e) -> c.add(e.toString()),
*                                                (c1, c2) -> c1.addAll(c2));
* }</pre>
* or, pulling the mapping operation out of the accumulator function, we could
* express it more succinctly as:
* <pre>{@code
*     List<String> strings = stream.map(Object::toString)
*                                  .collect(ArrayList::new, ArrayList::add, ArrayList::addAll);
* }</pre>
* Here, our supplier is just the {@link java.util.ArrayList#ArrayList()
* ArrayList constructor}, the accumulator adds the stringified element to an
* {@code ArrayList}, and the combiner simply uses {@link java.util.ArrayList#addAll addAll}
* to copy the strings from one container into the other.
*
* <p>The three aspects of {@code collect} -- supplier, accumulator, and
* combiner -- are tightly coupled. We can use the abstraction of a
* {@link java.util.stream.Collector} to capture all three aspects. The
* above example for collecting strings into a {@code List} can be rewritten
* using a standard {@code Collector} as:
* <pre>{@code
*     List<String> strings = stream.map(Object::toString)
*                                  .collect(Collectors.toList());
* }</pre>
*
* <p>Packaging mutable reductions into a Collector has another advantage:
* composability. The class {@link java.util.stream.Collectors} contains a
* number of predefined factories for collectors, including combinators
* that transform one collector into another. For example, suppose we have a
* collector that computes the sum of the salaries of a stream of
* employees, as follows:
*
* <pre>{@code
*     Collector<Employee, ?, Integer> summingSalaries
*         = Collectors.summingInt(Employee::getSalary);
* }</pre>
*
* (The {@code ?} for the second type parameter merely indicates that we don't
* care about the intermediate representation used by this collector.)
* If we wanted to create a collector to tabulate the sum of salaries by
* department, we could reuse {@code summingSalaries} using
* {@link java.util.stream.Collectors#groupingBy(java.util.function.Function, java.util.stream.Collector) groupingBy}:
*
* <pre>{@code
*     Map<Department, Integer> salariesByDept
*         = employees.stream().collect(Collectors.groupingBy(Employee::getDepartment,
*                                                            summingSalaries));
* }</pre>
*
* <p>As with the regular reduction operation, {@code collect()} operations can
* only be parallelized if appropriate conditions are met. For any partially
* accumulated result, combining it with an empty result container must
* produce an equivalent result. That is, for a partially accumulated result
* {@code p} that is the result of any series of accumulator and combiner
* invocations, {@code p} must be equivalent to
* {@code combiner.apply(p, supplier.get())}.
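*
* <p>For the {@code ArrayList}-collecting example above, this condition holds
* trivially; as an informal sketch (not part of this specification), it can be
* spot-checked as follows:
* <pre>{@code
*     Supplier<ArrayList<String>> supplier = ArrayList::new;
*     BiConsumer<ArrayList<String>, ArrayList<String>> combiner = ArrayList::addAll;
*
*     ArrayList<String> p = new ArrayList<>(Arrays.asList("a", "b")); // some partial result
*     ArrayList<String> copy = new ArrayList<>(p);
*     combiner.accept(copy, supplier.get());  // combine with an empty container
*     assert copy.equals(p);                  // must be equivalent to p
* }</pre>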
*
* <p>Further, however the computation is split, it must produce an equivalent
* result. For any input elements {@code t1} and {@code t2}, the results
* {@code r1} and {@code r2} in the computation below must be equivalent:
* <pre>{@code
*     A a1 = supplier.get();
*     accumulator.accept(a1, t1);
*     accumulator.accept(a1, t2);
*     R r1 = finisher.apply(a1);  // result without splitting
*
*     A a2 = supplier.get();
*     accumulator.accept(a2, t1);
*     A a3 = supplier.get();
*     accumulator.accept(a3, t2);
*     R r2 = finisher.apply(combiner.apply(a2, a3));  // result with splitting
* }</pre>
*
* <p>Here, equivalence generally means according to {@link java.lang.Object#equals(Object)},
* but in some cases equivalence may be relaxed to account for differences in
* order.
*
* <h3><a name="ConcurrentReduction">Reduction, concurrency, and ordering</a></h3>
|
||||
*
|
||||
* With some complex reduction operations, for example a {@code collect()} that
|
||||
* produces a {@code Map}, such as:
|
||||
* <pre>{@code
|
||||
* Map<Buyer, List<Transaction>> salesByBuyer
|
||||
* = txns.parallelStream()
|
||||
* .collect(Collectors.groupingBy(Transaction::getBuyer));
|
||||
* }</pre>
|
||||
* it may actually be counterproductive to perform the operation in parallel.
|
||||
* This is because the combining step (merging one {@code Map} into another by
|
||||
* key) can be expensive for some {@code Map} implementations.
|
||||
*
|
||||
* <p>Suppose, however, that the result container used in this reduction
|
||||
* was a concurrently modifiable collection -- such as a
|
||||
* {@link java.util.concurrent.ConcurrentHashMap}. In that case, the parallel
|
||||
* invocations of the accumulator could actually deposit their results
|
||||
* concurrently into the same shared result container, eliminating the need for
|
||||
* the combiner to merge distinct result containers. This potentially provides
|
||||
* a boost to the parallel execution performance. We call this a
|
||||
* <em>concurrent</em> reduction.
|
||||
*
|
||||
* <p>A {@link java.util.stream.Collector} that supports concurrent reduction is
|
||||
* marked with the {@link java.util.stream.Collector.Characteristics#CONCURRENT}
|
||||
* characteristic. However, a concurrent collection also has a downside. If
|
||||
* multiple threads are depositing results concurrently into a shared container,
|
||||
* the order in which results are deposited is non-deterministic. Consequently,
|
||||
* a concurrent reduction is only possible if ordering is not important for the
|
||||
* stream being processed. The {@link java.util.stream.Stream#collect(Collector)}
|
||||
* implementation will only perform a concurrent reduction if
|
||||
* <ul>
|
||||
* <li>The stream is parallel;</li>
|
||||
* <li>The collector has the
|
||||
* {@link java.util.stream.Collector.Characteristics#CONCURRENT} characteristic,
|
||||
* and;</li>
|
||||
* <li>Either the stream is unordered, or the collector has the
|
||||
* {@link java.util.stream.Collector.Characteristics#UNORDERED} characteristic.
|
||||
* </ul>
|
||||
* You can ensure the stream is unordered by using the
* {@link java.util.stream.BaseStream#unordered()} method. For example:
* <pre>{@code
*     Map<Buyer, List<Transaction>> salesByBuyer
*         = txns.parallelStream()
*               .unordered()
*               .collect(groupingByConcurrent(Transaction::getBuyer));
* }</pre>
* (where {@link java.util.stream.Collectors#groupingByConcurrent} is the
* concurrent equivalent of {@code groupingBy}).
*
* <p>Note that if it is important that the elements for a given key appear in
* the order they appear in the source, then we cannot use a concurrent
* reduction, as ordering is one of the casualties of concurrent insertion.
* We would then be constrained to implement either a sequential reduction or
* a merge-based parallel reduction.
*
* <h3><a name="Associativity">Associativity</a></h3>
*
* An operator or function {@code op} is <em>associative</em> if the following
* holds:
* <pre>{@code
*     (a op b) op c == a op (b op c)
* }</pre>
* The importance of this to parallel evaluation can be seen if we expand this
* to four terms:
* <pre>{@code
*     a op b op c op d == (a op b) op (c op d)
* }</pre>
* So we can evaluate {@code (a op b)} in parallel with {@code (c op d)}, and
* then invoke {@code op} on the results.
*
* <p>Examples of associative operations include numeric addition, min, and
* max, and string concatenation.
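*
* <p>As an illustrative sketch only, the four-term expansion above can be
* spot-checked for integer addition:
* <pre>{@code
*     int a = 1, b = 2, c = 3, d = 4;
*     int sequential = ((a + b) + c) + d;   // left-to-right evaluation
*     int split      = (a + b) + (c + d);   // the two halves evaluated independently
*     assert sequential == split;           // associativity makes the grouping irrelevant
* }</pre>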
*
* <h2><a name="StreamSources">Low-level stream construction</a></h2>
*
* So far, all the stream examples have used methods like
* {@link java.util.Collection#stream()} or {@link java.util.Arrays#stream(Object[])}
* to obtain a stream. How are those stream-bearing methods implemented?
*
* <p>The class {@link java.util.stream.StreamSupport} has a number of
* low-level methods for creating a stream, all using some form of a
* {@link java.util.Spliterator}. A spliterator is the parallel analogue of an
* {@link java.util.Iterator}; it describes a (possibly infinite) collection of
* elements, with support for sequentially advancing, bulk traversal, and
* splitting off some portion of the input into another spliterator which can
* be processed in parallel. At the lowest level, all streams are driven by a
* spliterator.
*
* <p>There are a number of implementation choices in implementing a
* spliterator, nearly all of which are tradeoffs between simplicity of
* implementation and runtime performance of streams using that spliterator.
* The simplest, but least performant, way to create a spliterator is to
* create one from an iterator using
* {@link java.util.Spliterators#spliteratorUnknownSize(java.util.Iterator, int)}.
* While such a spliterator will work, it will likely offer poor parallel
* performance, since we have lost sizing information (how big is the
* underlying data set), as well as being constrained to a simplistic
* splitting algorithm.
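*
* <p>As a sketch (illustrative only), an iterator-backed stream can be created
* like this:
* <pre>{@code
*     Iterator<String> iterator = Arrays.asList("a", "b", "c").iterator();
*     Stream<String> s = StreamSupport.stream(
*         Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED),
*         false);  // sequential; the resulting spliterator reports no size estimate
* }</pre>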
*
* <p>A higher-quality spliterator will provide balanced and known-size
* splits, accurate sizing information, and a number of other
* {@link java.util.Spliterator#characteristics() characteristics} of the
* spliterator or data that can be used by implementations to optimize
* execution.
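*
* <p>For example (an informal sketch), the spliterator for an array reports an
* exact size and can split off a portion of its remaining elements:
* <pre>{@code
*     Spliterator<String> sp = Arrays.spliterator(new String[] { "a", "b", "c", "d" });
*     boolean sized = sp.hasCharacteristics(Spliterator.SIZED);  // true: exact size is known
*     long size = sp.estimateSize();                             // 4
*     Spliterator<String> prefix = sp.trySplit();                // covers roughly half the elements
* }</pre>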
*
* <p>Spliterators for mutable data sources have an additional challenge:
* timing of binding to the data, since the data could change between the time
* the spliterator is created and the time the stream pipeline is executed.
* Ideally, a spliterator for a stream would report a characteristic of
* {@code IMMUTABLE} or {@code CONCURRENT}; if not it should be
* <a href="../Spliterator.html#binding"><em>late-binding</em></a>. If a source
* cannot directly supply a recommended spliterator, it may indirectly supply
* a spliterator using a {@code Supplier}, and construct a stream via the
* {@code Supplier}-accepting versions of
* {@link java.util.stream.StreamSupport#stream(Supplier, int, boolean) stream()}.
* The spliterator is obtained from the supplier only after the terminal
* operation of the stream pipeline commences.
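*
* <p>A sketch of the {@code Supplier}-accepting form (illustrative only, using an
* {@code ArrayList} source):
* <pre>{@code
*     ArrayList<String> source = new ArrayList<>(Arrays.asList("a", "b", "c"));
*     Stream<String> s = StreamSupport.stream(
*         source::spliterator,                 // the spliterator is obtained lazily
*         Spliterator.ORDERED | Spliterator.SIZED | Spliterator.SUBSIZED,  // matches ArrayList's spliterator
*         false);
*     source.add("d");           // modification before the terminal operation commences
*     long count = s.count();    // 4: the late-obtained spliterator sees the added element
* }</pre>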
*
* <p>These requirements significantly reduce the scope of potential
* interference between mutations of the stream source and execution of stream
* pipelines. Streams based on spliterators with the desired characteristics,
* or those using the Supplier-based factory forms, are immune to
* modifications of the data source prior to commencement of the terminal
* operation (provided the behavioral parameters to the stream operations meet
* the required criteria for non-interference and statelessness). See
* <a href="package-summary.html#NonInterference">Non-Interference</a>
* for more details.
*
* @since 1.8
*/
package java.util.stream;

import java.util.function.BinaryOperator;
import java.util.function.UnaryOperator;