Commit 2b51fd83 authored by mduigou

8025910: rename substream(long) -> skip and remove substream(long,long)

Reviewed-by: psandoz, henryjen
Parent 3545dc8f
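For context, a minimal migration sketch (the class name and stream contents below are illustrative, not taken from this changeset): the one-argument `substream(n)` is renamed to `skip(n)`, and uses of the removed two-argument `substream(begin, end)` become `skip(begin).limit(end - begin)`.

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class SubstreamMigration {
    public static void main(String[] args) {
        // Before this change: Stream.of(...).substream(1)
        // After:              Stream.of(...).skip(1)
        List<String> tail = Stream.of("a", "b", "c", "d", "e")
                .skip(1)
                .collect(Collectors.toList());
        System.out.println(tail);   // [b, c, d, e]

        // Before this change: substream(1, 4)  (begin inclusive, end exclusive)
        // After:              skip(1).limit(4 - 1)
        List<String> slice = Stream.of("a", "b", "c", "d", "e")
                .skip(1)
                .limit(3)
                .collect(Collectors.toList());
        System.out.println(slice);  // [b, c, d]
    }
}
```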
......@@ -340,24 +340,17 @@ abstract class DoublePipeline<E_IN>
    }

    @Override
-   public final DoubleStream substream(long startingOffset) {
-       if (startingOffset < 0)
-           throw new IllegalArgumentException(Long.toString(startingOffset));
-       if (startingOffset == 0)
+   public final DoubleStream skip(long n) {
+       if (n < 0)
+           throw new IllegalArgumentException(Long.toString(n));
+       if (n == 0)
            return this;
        else {
            long limit = -1;
-           return SliceOps.makeDouble(this, startingOffset, limit);
+           return SliceOps.makeDouble(this, n, limit);
        }
    }

-   @Override
-   public final DoubleStream substream(long startingOffset, long endingOffset) {
-       if (startingOffset < 0 || endingOffset < startingOffset)
-           throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
-       return SliceOps.makeDouble(this, startingOffset, endingOffset - startingOffset);
-   }

    @Override
    public final DoubleStream sorted() {
        return SortedOps.makeDouble(this);
......
......@@ -289,6 +289,20 @@ public interface DoubleStream extends BaseStream<Double, DoubleStream> {
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
* stateful intermediate operation</a>.
*
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(DoubleSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
* @param maxSize the number of elements the stream should be limited to
* @return the new stream
* @throws IllegalArgumentException if {@code maxSize} is negative
......@@ -297,37 +311,32 @@ public interface DoubleStream extends BaseStream<Double, DoubleStream> {
/**
* Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements of the stream.
- * If this stream contains fewer than {@code startInclusive} elements then an
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
* empty stream will be returned.
*
* <p>This is a <a href="package-summary.html#StreamOps">stateful
* intermediate operation</a>.
*
- * @param startInclusive the number of leading elements to skip
- * @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} is negative
- */
- DoubleStream substream(long startInclusive);
- /**
- * Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements and truncating
- * the result to be no longer than {@code endExclusive - startInclusive}
- * elements in length. If this stream contains fewer than
- * {@code startInclusive} elements then an empty stream will be returned.
- *
- * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
- * stateful intermediate operation</a>.
- *
- * @param startInclusive the starting position of the substream, inclusive
- * @param endExclusive the ending position of the substream, exclusive
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(DoubleSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
* @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} or
- * {@code endExclusive} is negative or {@code startInclusive} is greater
- * than {@code endExclusive}
+ * @throws IllegalArgumentException if {@code n} is negative
*/
- DoubleStream substream(long startInclusive, long endExclusive);
+ DoubleStream skip(long n);
/**
* Performs an action for each element of this stream.
......
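A small illustrative sketch of the `limit()` guidance added above (class name and values are hypothetical, not part of this changeset): an inherently unordered source such as `DoubleStream.generate` lets a parallel `limit()` take any n elements cheaply, while an ordered source can be relaxed with `unordered()` when encounter order does not matter.

```java
import java.util.stream.DoubleStream;
import java.util.stream.LongStream;

public class LimitOrderingDemo {
    public static void main(String[] args) {
        // Unordered source: DoubleStream.generate has no encounter order,
        // so a parallel limit() may take any 1000 elements.
        double sum = DoubleStream.generate(Math::random)
                .parallel()
                .limit(1_000)
                .sum();
        System.out.println(sum);

        // Ordered source: LongStream.range has an encounter order, so a
        // parallel limit() would have to return the *first* n elements.
        // Dropping the ordering constraint with unordered() relaxes that.
        long count = LongStream.range(0, 10_000_000L)
                .parallel()
                .unordered()
                .limit(1_000)
                .count();
        System.out.println(count);  // 1000
    }
}
```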
......@@ -368,32 +368,21 @@ abstract class IntPipeline<E_IN>
// Stateful intermediate ops from IntStream
-   private IntStream slice(long skip, long limit) {
-       return SliceOps.makeInt(this, skip, limit);
-   }

    @Override
    public final IntStream limit(long maxSize) {
        if (maxSize < 0)
            throw new IllegalArgumentException(Long.toString(maxSize));
-       return slice(0, maxSize);
+       return SliceOps.makeInt(this, 0, maxSize);
    }

    @Override
-   public final IntStream substream(long startingOffset) {
-       if (startingOffset < 0)
-           throw new IllegalArgumentException(Long.toString(startingOffset));
-       if (startingOffset == 0)
+   public final IntStream skip(long n) {
+       if (n < 0)
+           throw new IllegalArgumentException(Long.toString(n));
+       if (n == 0)
            return this;
        else
-           return slice(startingOffset, -1);
-   }
-
-   @Override
-   public final IntStream substream(long startingOffset, long endingOffset) {
-       if (startingOffset < 0 || endingOffset < startingOffset)
-           throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
-       return slice(startingOffset, endingOffset - startingOffset);
+           return SliceOps.makeInt(this, n, -1);
    }
@Override
......
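As a side note, the pipeline implementations above share the same argument handling for the renamed method; a small standalone sketch (not part of the changeset, names chosen for illustration) of the behavior a caller observes:

```java
import java.util.stream.IntStream;

public class SkipArgumentDemo {
    public static void main(String[] args) {
        // skip(0) is a no-op; the pipeline simply returns itself.
        System.out.println(IntStream.range(1, 5).skip(0).sum());    // 10

        // Skipping more elements than the stream holds yields an empty stream.
        System.out.println(IntStream.range(1, 5).skip(10).count()); // 0

        // A negative count is rejected immediately.
        try {
            IntStream.range(1, 5).skip(-1);
        } catch (IllegalArgumentException expected) {
            System.out.println("skip(-1) -> IllegalArgumentException: " + expected.getMessage());
        }
    }
}
```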
......@@ -287,6 +287,20 @@ public interface IntStream extends BaseStream<Integer, IntStream> {
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
* stateful intermediate operation</a>.
*
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(IntSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
* @param maxSize the number of elements the stream should be limited to
* @return the new stream
* @throws IllegalArgumentException if {@code maxSize} is negative
......@@ -295,37 +309,32 @@ public interface IntStream extends BaseStream<Integer, IntStream> {
/**
* Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements of the stream.
- * If this stream contains fewer than {@code startInclusive} elements then an
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
* empty stream will be returned.
*
* <p>This is a <a href="package-summary.html#StreamOps">stateful
* intermediate operation</a>.
*
- * @param startInclusive the number of leading elements to skip
- * @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} is negative
- */
- IntStream substream(long startInclusive);
- /**
- * Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements and truncating
- * the result to be no longer than {@code endExclusive - startInclusive}
- * elements in length. If this stream contains fewer than
- * {@code startInclusive} elements then an empty stream will be returned.
- *
- * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
- * stateful intermediate operation</a>.
- *
- * @param startInclusive the starting position of the substream, inclusive
- * @param endExclusive the ending position of the substream, exclusive
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(IntSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
* @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} or
- * {@code endExclusive} is negative or {@code startInclusive} is greater
- * than {@code endExclusive}
+ * @throws IllegalArgumentException if {@code n} is negative
*/
- IntStream substream(long startInclusive, long endExclusive);
+ IntStream skip(long n);
/**
* Performs an action for each element of this stream.
......
......@@ -349,32 +349,21 @@ abstract class LongPipeline<E_IN>
// Stateful intermediate ops from LongStream
-   private LongStream slice(long skip, long limit) {
-       return SliceOps.makeLong(this, skip, limit);
-   }

    @Override
    public final LongStream limit(long maxSize) {
        if (maxSize < 0)
            throw new IllegalArgumentException(Long.toString(maxSize));
-       return slice(0, maxSize);
+       return SliceOps.makeLong(this, 0, maxSize);
    }

    @Override
-   public final LongStream substream(long startingOffset) {
-       if (startingOffset < 0)
-           throw new IllegalArgumentException(Long.toString(startingOffset));
-       if (startingOffset == 0)
+   public final LongStream skip(long n) {
+       if (n < 0)
+           throw new IllegalArgumentException(Long.toString(n));
+       if (n == 0)
            return this;
        else
-           return slice(startingOffset, -1);
-   }
-
-   @Override
-   public final LongStream substream(long startingOffset, long endingOffset) {
-       if (startingOffset < 0 || endingOffset < startingOffset)
-           throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
-       return slice(startingOffset, endingOffset - startingOffset);
+           return SliceOps.makeLong(this, n, -1);
    }
@Override
......
......@@ -287,6 +287,20 @@ public interface LongStream extends BaseStream<Long, LongStream> {
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
* stateful intermediate operation</a>.
*
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(LongSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
* @param maxSize the number of elements the stream should be limited to
* @return the new stream
* @throws IllegalArgumentException if {@code maxSize} is negative
......@@ -295,37 +309,32 @@ public interface LongStream extends BaseStream<Long, LongStream> {
/**
* Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements of the stream.
- * If this stream contains fewer than {@code startInclusive} elements then an
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
* empty stream will be returned.
*
* <p>This is a <a href="package-summary.html#StreamOps">stateful
* intermediate operation</a>.
*
- * @param startInclusive the number of leading elements to skip
- * @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} is negative
- */
- LongStream substream(long startInclusive);
- /**
- * Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements and truncating
- * the result to be no longer than {@code endExclusive - startInclusive}
- * elements in length. If this stream contains fewer than
- * {@code startInclusive} elements then an empty stream will be returned.
- *
- * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
- * stateful intermediate operation</a>.
- *
- * @param startInclusive the starting position of the substream, inclusive
- * @param endExclusive the ending position of the substream, exclusive
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(LongSupplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
* @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} or
- * {@code endExclusive} is negative or {@code startInclusive} is greater
- * than {@code endExclusive}
+ * @throws IllegalArgumentException if {@code n} is negative
*/
- LongStream substream(long startInclusive, long endExclusive);
+ LongStream skip(long n);
/**
* Performs an action for each element of this stream.
......
......@@ -394,32 +394,21 @@ abstract class ReferencePipeline<P_IN, P_OUT>
return SortedOps.makeRef(this, comparator);
}
-   private Stream<P_OUT> slice(long skip, long limit) {
-       return SliceOps.makeRef(this, skip, limit);
-   }

    @Override
    public final Stream<P_OUT> limit(long maxSize) {
        if (maxSize < 0)
            throw new IllegalArgumentException(Long.toString(maxSize));
-       return slice(0, maxSize);
+       return SliceOps.makeRef(this, 0, maxSize);
    }

    @Override
-   public final Stream<P_OUT> substream(long startingOffset) {
-       if (startingOffset < 0)
-           throw new IllegalArgumentException(Long.toString(startingOffset));
-       if (startingOffset == 0)
+   public final Stream<P_OUT> skip(long n) {
+       if (n < 0)
+           throw new IllegalArgumentException(Long.toString(n));
+       if (n == 0)
            return this;
        else
-           return slice(startingOffset, -1);
-   }
-
-   @Override
-   public final Stream<P_OUT> substream(long startingOffset, long endingOffset) {
-       if (startingOffset < 0 || endingOffset < startingOffset)
-           throw new IllegalArgumentException(String.format("substream(%d, %d)", startingOffset, endingOffset));
-       return slice(startingOffset, endingOffset - startingOffset);
+           return SliceOps.makeRef(this, n, -1);
    }
// Terminal operations from Stream
......
......@@ -365,6 +365,20 @@ public interface Stream<T> extends BaseStream<T, Stream<T>> {
* <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
* stateful intermediate operation</a>.
*
+ * @apiNote
+ * While {@code limit()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code maxSize}, since {@code limit(n)}
+ * is constrained to return not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(Supplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code limit()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code limit()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
* @param maxSize the number of elements the stream should be limited to
* @return the new stream
* @throws IllegalArgumentException if {@code maxSize} is negative
......@@ -373,37 +387,32 @@ public interface Stream<T> extends BaseStream<T, Stream<T>> {
/**
* Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements of the stream.
- * If this stream contains fewer than {@code startInclusive} elements then an
+ * after discarding the first {@code n} elements of the stream.
+ * If this stream contains fewer than {@code n} elements then an
* empty stream will be returned.
*
* <p>This is a <a href="package-summary.html#StreamOps">stateful
* intermediate operation</a>.
*
- * @param startInclusive the number of leading elements to skip
- * @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} is negative
- */
- Stream<T> substream(long startInclusive);
- /**
- * Returns a stream consisting of the remaining elements of this stream
- * after discarding the first {@code startInclusive} elements and truncating
- * the result to be no longer than {@code endExclusive - startInclusive}
- * elements in length. If this stream contains fewer than
- * {@code startInclusive} elements then an empty stream will be returned.
- *
- * <p>This is a <a href="package-summary.html#StreamOps">short-circuiting
- * stateful intermediate operation</a>.
- *
- * @param startInclusive the starting position of the substream, inclusive
- * @param endExclusive the ending position of the substream, exclusive
+ * @apiNote
+ * While {@code skip()} is generally a cheap operation on sequential
+ * stream pipelines, it can be quite expensive on ordered parallel pipelines,
+ * especially for large values of {@code n}, since {@code skip(n)}
+ * is constrained to skip not just any <em>n</em> elements, but the
+ * <em>first n</em> elements in the encounter order. Using an unordered
+ * stream source (such as {@link #generate(Supplier)}) or removing the
+ * ordering constraint with {@link #unordered()} may result in significant
+ * speedups of {@code skip()} in parallel pipelines, if the semantics of
+ * your situation permit. If consistency with encounter order is required,
+ * and you are experiencing poor performance or memory utilization with
+ * {@code skip()} in parallel pipelines, switching to sequential execution
+ * with {@link #sequential()} may improve performance.
+ *
+ * @param n the number of leading elements to skip
* @return the new stream
- * @throws IllegalArgumentException if {@code startInclusive} or
- * {@code endExclusive} is negative or {@code startInclusive} is greater
- * than {@code endExclusive}
+ * @throws IllegalArgumentException if {@code n} is negative
*/
- Stream<T> substream(long startInclusive, long endExclusive);
+ Stream<T> skip(long n);
/**
* Performs an action for each element of this stream.
......
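The `skip()` note added above can also be seen in a small sketch (class name and sizes are hypothetical, not part of this changeset): an ordered parallel pipeline must drop exactly the first n elements, and the javadoc suggests falling back to sequential execution when that proves expensive.

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class SkipOrderingDemo {
    public static void main(String[] args) {
        List<Integer> data = IntStream.range(0, 1_000_000)
                .boxed()
                .collect(Collectors.toList());

        // Ordered parallel pipeline: skip(n) must discard exactly the first n
        // elements in encounter order, which can be costly for large n.
        long parallelCount = data.parallelStream().skip(900_000).count();

        // If encounter order must be preserved and the parallel pipeline
        // performs poorly, switching to sequential execution is the
        // alternative the javadoc above points to.
        long sequentialCount = data.parallelStream().sequential().skip(900_000).count();

        System.out.println(parallelCount + " " + sequentialCount);  // 100000 100000
    }
}
```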
......@@ -109,7 +109,7 @@ public class SpinedBufferTest {
List<Integer> end = Arrays.stream(array)
.boxed()
- .substream(array.length - lastSplitSize)
+ .skip(array.length - lastSplitSize)
.collect(Collectors.toList());
assertEquals(contentOfLastSplit, end);
}
......@@ -184,7 +184,7 @@ public class SpinedBufferTest {
List<Integer> end = Arrays.stream(array)
.boxed()
- .substream(array.length - lastSplitSize)
+ .skip(array.length - lastSplitSize)
.collect(Collectors.toList());
assertEquals(contentOfLastSplit, end);
}
......@@ -259,7 +259,7 @@ public class SpinedBufferTest {
List<Long> end = Arrays.stream(array)
.boxed()
- .substream(array.length - lastSplitSize)
+ .skip(array.length - lastSplitSize)
.collect(Collectors.toList());
assertEquals(contentOfLastSplit, end);
}
......@@ -335,7 +335,7 @@ public class SpinedBufferTest {
List<Double> end = Arrays.stream(array)
.boxed()
- .substream(array.length - lastSplitSize)
+ .skip(array.length - lastSplitSize)
.collect(Collectors.toList());
assertEquals(contentOfLastSplit, end);
}
......
......@@ -63,10 +63,8 @@ public class InfiniteStreamWithLimitOpTest extends OpTestCase {
data.add(new Object[]{f.apply("Stream.limit(%d)"),
(UnaryOperator<Stream>) s -> s.limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("Stream.substream(%d)"),
(UnaryOperator<Stream>) s -> s.substream(SKIP_LIMIT_SIZE, SKIP_LIMIT_SIZE * 2)});
data.add(new Object[]{f.apply("Stream.substream(%1$d).limit(%1$d)"),
(UnaryOperator<Stream>) s -> s.substream(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("Stream.skip(%1$d).limit(%1$d)"),
(UnaryOperator<Stream>) s -> s.skip(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
return data.toArray(new Object[0][]);
}
......@@ -79,10 +77,8 @@ public class InfiniteStreamWithLimitOpTest extends OpTestCase {
data.add(new Object[]{f.apply("IntStream.limit(%d)"),
(UnaryOperator<IntStream>) s -> s.limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("IntStream.substream(%d)"),
(UnaryOperator<IntStream>) s -> s.substream(SKIP_LIMIT_SIZE, SKIP_LIMIT_SIZE * 2)});
data.add(new Object[]{f.apply("IntStream.substream(%1$d).limit(%1$d)"),
(UnaryOperator<IntStream>) s -> s.substream(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("IntStream.skip(%1$d).limit(%1$d)"),
(UnaryOperator<IntStream>) s -> s.skip(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
return data.toArray(new Object[0][]);
}
......@@ -95,10 +91,8 @@ public class InfiniteStreamWithLimitOpTest extends OpTestCase {
data.add(new Object[]{f.apply("LongStream.limit(%d)"),
(UnaryOperator<LongStream>) s -> s.limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("LongStream.substream(%d)"),
(UnaryOperator<LongStream>) s -> s.substream(SKIP_LIMIT_SIZE, SKIP_LIMIT_SIZE * 2)});
data.add(new Object[]{f.apply("LongStream.substream(%1$d).limit(%1$d)"),
(UnaryOperator<LongStream>) s -> s.substream(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("LongStream.skip(%1$d).limit(%1$d)"),
(UnaryOperator<LongStream>) s -> s.skip(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
return data.toArray(new Object[0][]);
}
......@@ -111,10 +105,8 @@ public class InfiniteStreamWithLimitOpTest extends OpTestCase {
data.add(new Object[]{f.apply("DoubleStream.limit(%d)"),
(UnaryOperator<DoubleStream>) s -> s.limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("DoubleStream.substream(%d)"),
(UnaryOperator<DoubleStream>) s -> s.substream(SKIP_LIMIT_SIZE, SKIP_LIMIT_SIZE * 2)});
data.add(new Object[]{f.apply("DoubleStream.substream(%1$d).limit(%1$d)"),
(UnaryOperator<DoubleStream>) s -> s.substream(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
data.add(new Object[]{f.apply("DoubleStream.skip(%1$d).limit(%1$d)"),
(UnaryOperator<DoubleStream>) s -> s.skip(SKIP_LIMIT_SIZE).limit(SKIP_LIMIT_SIZE)});
return data.toArray(new Object[0][]);
}
......
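The test data above pairs skip() with limit() against infinite sources; a standalone sketch of the same idea (hypothetical class name, values chosen for illustration): the trailing limit() is what lets the pipeline short-circuit and terminate.

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class InfiniteSkipLimitDemo {
    public static void main(String[] args) {
        // skip() alone would never finish on an infinite source; the
        // short-circuiting limit() bounds the pipeline so it terminates.
        List<Integer> window = Stream.iterate(0, i -> i + 1)
                .skip(5)
                .limit(3)
                .collect(Collectors.toList());
        System.out.println(window);  // [5, 6, 7]
    }
}
```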
......@@ -44,27 +44,27 @@ public class IntSliceOpTest extends OpTestCase {
private static final int[] EMPTY_INT_ARRAY = new int[0];
public void testSkip() {
- assertCountSum(IntStream.range(0, 0).substream(0).boxed(), 0, 0);
- assertCountSum(IntStream.range(0, 0).substream(4).boxed(), 0, 0);
- assertCountSum(IntStream.range(1, 5).substream(4).boxed(), 0, 0);
- assertCountSum(IntStream.range(1, 5).substream(2).boxed(), 2, 7);
- assertCountSum(IntStream.range(1, 5).substream(0).boxed(), 4, 10);
+ assertCountSum(IntStream.range(0, 0).skip(0).boxed(), 0, 0);
+ assertCountSum(IntStream.range(0, 0).skip(4).boxed(), 0, 0);
+ assertCountSum(IntStream.range(1, 5).skip(4).boxed(), 0, 0);
+ assertCountSum(IntStream.range(1, 5).skip(2).boxed(), 2, 7);
+ assertCountSum(IntStream.range(1, 5).skip(0).boxed(), 4, 10);
- assertCountSum(IntStream.range(0, 0).parallel().substream(0).boxed(), 0, 0);
- assertCountSum(IntStream.range(0, 0).parallel().substream(4).boxed(), 0, 0);
- assertCountSum(IntStream.range(1, 5).parallel().substream(4).boxed(), 0, 0);
- assertCountSum(IntStream.range(1, 5).parallel().substream(2).boxed(), 2, 7);
- assertCountSum(IntStream.range(1, 5).parallel().substream(0).boxed(), 4, 10);
+ assertCountSum(IntStream.range(0, 0).parallel().skip(0).boxed(), 0, 0);
+ assertCountSum(IntStream.range(0, 0).parallel().skip(4).boxed(), 0, 0);
+ assertCountSum(IntStream.range(1, 5).parallel().skip(4).boxed(), 0, 0);
+ assertCountSum(IntStream.range(1, 5).parallel().skip(2).boxed(), 2, 7);
+ assertCountSum(IntStream.range(1, 5).parallel().skip(0).boxed(), 4, 10);
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(0), EMPTY_INT_ARRAY);
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(10), EMPTY_INT_ARRAY);
+ exerciseOps(EMPTY_INT_ARRAY, s -> s.skip(0), EMPTY_INT_ARRAY);
+ exerciseOps(EMPTY_INT_ARRAY, s -> s.skip(10), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 2).toArray(), s -> s.substream(0), IntStream.range(1, 2).toArray());
- exerciseOps(IntStream.range(1, 2).toArray(), s -> s.substream(1), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0), IntStream.range(1, 101).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10), IntStream.range(11, 101).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 2).toArray(), s -> s.skip(0), IntStream.range(1, 2).toArray());
+ exerciseOps(IntStream.range(1, 2).toArray(), s -> s.skip(1), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(0), IntStream.range(1, 101).toArray());
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(10), IntStream.range(11, 101).toArray());
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(100), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(200), EMPTY_INT_ARRAY);
}
public void testLimit() {
......@@ -92,41 +92,23 @@ public class IntSliceOpTest extends OpTestCase {
}
public void testSkipLimit() {
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(0).limit(0), EMPTY_INT_ARRAY);
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(0).limit(10), EMPTY_INT_ARRAY);
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(10).limit(0), EMPTY_INT_ARRAY);
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(10).limit(10), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0).limit(100), IntStream.range(1, 101).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0).limit(10), IntStream.range(1, 11).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0).limit(0), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10).limit(100), IntStream.range(11, 101).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10).limit(10), IntStream.range(11, 21).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10).limit(0), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100).limit(100), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100).limit(10), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100).limit(0), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200).limit(100), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200).limit(10), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200).limit(0), EMPTY_INT_ARRAY);
- }
- public void testSlice() {
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(0, 0), EMPTY_INT_ARRAY);
- exerciseOps(EMPTY_INT_ARRAY, s -> s.substream(10, 10), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0, 100), IntStream.range(1, 101).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0, 10), IntStream.range(1, 11).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(0, 0), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10, 110), IntStream.range(11, 101).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10, 20), IntStream.range(11, 21).toArray());
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(10, 10), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100, 200), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100, 110), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(100, 100), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200, 300), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200, 210), EMPTY_INT_ARRAY);
- exerciseOps(IntStream.range(1, 101).toArray(), s -> s.substream(200, 200), EMPTY_INT_ARRAY);
+ exerciseOps(EMPTY_INT_ARRAY, s -> s.skip(0).limit(0), EMPTY_INT_ARRAY);
+ exerciseOps(EMPTY_INT_ARRAY, s -> s.skip(0).limit(10), EMPTY_INT_ARRAY);
+ exerciseOps(EMPTY_INT_ARRAY, s -> s.skip(10).limit(0), EMPTY_INT_ARRAY);
+ exerciseOps(EMPTY_INT_ARRAY, s -> s.skip(10).limit(10), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(0).limit(100), IntStream.range(1, 101).toArray());
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(0).limit(10), IntStream.range(1, 11).toArray());
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(0).limit(0), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(10).limit(100), IntStream.range(11, 101).toArray());
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(10).limit(10), IntStream.range(11, 21).toArray());
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(10).limit(0), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(100).limit(100), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(100).limit(10), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(100).limit(0), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(200).limit(100), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(200).limit(10), EMPTY_INT_ARRAY);
+ exerciseOps(IntStream.range(1, 101).toArray(), s -> s.skip(200).limit(0), EMPTY_INT_ARRAY);
}
private int sliceSize(int dataSize, int skip, int limit) {
......@@ -146,10 +128,10 @@ public class IntSliceOpTest extends OpTestCase {
for (int s : skips) {
setContext("skip", s);
- Collection<Integer> sr = exerciseOps(data, st -> st.substream(s));
+ Collection<Integer> sr = exerciseOps(data, st -> st.skip(s));
assertEquals(sr.size(), sliceSize(data.size(), s));
- sr = exerciseOps(data, st -> st.substream(s).substream(s / 2));
+ sr = exerciseOps(data, st -> st.skip(s).skip(s / 2));
assertEquals(sr.size(), sliceSize(sliceSize(data.size(), s), s / 2));
}
}
......@@ -163,10 +145,10 @@ public class IntSliceOpTest extends OpTestCase {
setContext("skip", s);
for (int limit : limits) {
setContext("limit", limit);
- Collection<Integer> sr = exerciseOps(data, st -> st.substream(s).limit(limit));
+ Collection<Integer> sr = exerciseOps(data, st -> st.skip(s).limit(limit));
assertEquals(sr.size(), sliceSize(sliceSize(data.size(), s), 0, limit));
- sr = exerciseOps(data, st -> st.substream(s, limit+s));
+ sr = exerciseOps(data, st -> st.skip(s).limit(limit));
assertEquals(sr.size(), sliceSize(data.size(), s, limit));
}
}
......@@ -204,7 +186,7 @@ public class IntSliceOpTest extends OpTestCase {
}
public void testSkipParallel() {
- int[] l = IntStream.range(1, 1001).parallel().substream(200).limit(200).sequential().toArray();
+ int[] l = IntStream.range(1, 1001).parallel().skip(200).limit(200).sequential().toArray();
assertEquals(l.length, 200);
assertEquals(l[l.length - 1], 400);
}
......
......@@ -50,27 +50,27 @@ import static java.util.stream.LambdaTestHelpers.*;
public class SliceOpTest extends OpTestCase {
public void testSkip() {
- assertCountSum(countTo(0).stream().substream(0), 0, 0);
- assertCountSum(countTo(0).stream().substream(4), 0, 0);
- assertCountSum(countTo(4).stream().substream(4), 0, 0);
- assertCountSum(countTo(4).stream().substream(2), 2, 7);
- assertCountSum(countTo(4).stream().substream(0), 4, 10);
+ assertCountSum(countTo(0).stream().skip(0), 0, 0);
+ assertCountSum(countTo(0).stream().skip(4), 0, 0);
+ assertCountSum(countTo(4).stream().skip(4), 0, 0);
+ assertCountSum(countTo(4).stream().skip(2), 2, 7);
+ assertCountSum(countTo(4).stream().skip(0), 4, 10);
- assertCountSum(countTo(0).parallelStream().substream(0), 0, 0);
- assertCountSum(countTo(0).parallelStream().substream(4), 0, 0);
- assertCountSum(countTo(4).parallelStream().substream(4), 0, 0);
- assertCountSum(countTo(4).parallelStream().substream(2), 2, 7);
- assertCountSum(countTo(4).parallelStream().substream(0), 4, 10);
+ assertCountSum(countTo(0).parallelStream().skip(0), 0, 0);
+ assertCountSum(countTo(0).parallelStream().skip(4), 0, 0);
+ assertCountSum(countTo(4).parallelStream().skip(4), 0, 0);
+ assertCountSum(countTo(4).parallelStream().skip(2), 2, 7);
+ assertCountSum(countTo(4).parallelStream().skip(0), 4, 10);
- exerciseOps(Collections.emptyList(), s -> s.substream(0), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(10), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(0), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(10), Collections.emptyList());
- exerciseOps(countTo(1), s -> s.substream(0), countTo(1));
- exerciseOps(countTo(1), s -> s.substream(1), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(0), countTo(100));
- exerciseOps(countTo(100), s -> s.substream(10), range(11, 100));
- exerciseOps(countTo(100), s -> s.substream(100), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200), Collections.emptyList());
+ exerciseOps(countTo(1), s -> s.skip(0), countTo(1));
+ exerciseOps(countTo(1), s -> s.skip(1), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(0), countTo(100));
+ exerciseOps(countTo(100), s -> s.skip(10), range(11, 100));
+ exerciseOps(countTo(100), s -> s.skip(100), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200), Collections.emptyList());
}
public void testLimit() {
......@@ -97,43 +97,43 @@ public class SliceOpTest extends OpTestCase {
}
public void testSkipLimit() {
- exerciseOps(Collections.emptyList(), s -> s.substream(0).limit(0), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(0).limit(10), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(10).limit(0), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(10).limit(10), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(0).limit(100), countTo(100));
- exerciseOps(countTo(100), s -> s.substream(0).limit(10), countTo(10));
- exerciseOps(countTo(100), s -> s.substream(0).limit(0), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(10).limit(100), range(11, 100));
- exerciseOps(countTo(100), s -> s.substream(10).limit(10), range(11, 20));
- exerciseOps(countTo(100), s -> s.substream(10).limit(0), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(100).limit(100), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(100).limit(10), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(100).limit(0), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200).limit(100), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200).limit(10), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200).limit(0), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(0).limit(0), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(0).limit(10), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(10).limit(0), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(10).limit(10), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(0).limit(100), countTo(100));
+ exerciseOps(countTo(100), s -> s.skip(0).limit(10), countTo(10));
+ exerciseOps(countTo(100), s -> s.skip(0).limit(0), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(10).limit(100), range(11, 100));
+ exerciseOps(countTo(100), s -> s.skip(10).limit(10), range(11, 20));
+ exerciseOps(countTo(100), s -> s.skip(10).limit(0), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(100).limit(100), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(100).limit(10), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(100).limit(0), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200).limit(100), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200).limit(10), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200).limit(0), Collections.emptyList());
}
public void testSlice() {
- exerciseOps(Collections.emptyList(), s -> s.substream(0, 0), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(0, 10), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(10, 10), Collections.emptyList());
- exerciseOps(Collections.emptyList(), s -> s.substream(10, 20), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(0, 100), countTo(100));
- exerciseOps(countTo(100), s -> s.substream(0, 10), countTo(10));
- exerciseOps(countTo(100), s -> s.substream(0, 0), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(10, 110), range(11, 100));
- exerciseOps(countTo(100), s -> s.substream(10, 20), range(11, 20));
- exerciseOps(countTo(100), s -> s.substream(10, 10), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(100, 200), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(100, 110), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(100, 100), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200, 300), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200, 210), Collections.emptyList());
- exerciseOps(countTo(100), s -> s.substream(200, 200), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(0).limit(0), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(0).limit(10), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(10).limit(10), Collections.emptyList());
+ exerciseOps(Collections.emptyList(), s -> s.skip(10).limit(20), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(0).limit(100), countTo(100));
+ exerciseOps(countTo(100), s -> s.skip(0).limit(10), countTo(10));
+ exerciseOps(countTo(100), s -> s.skip(0).limit(0), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(10).limit(100), range(11, 100));
+ exerciseOps(countTo(100), s -> s.skip(10).limit(10), range(11, 20));
+ exerciseOps(countTo(100), s -> s.skip(10).limit(0), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(100).limit(100), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(100).limit(10), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(100).limit(0), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200).limit(100), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200).limit(10), Collections.emptyList());
+ exerciseOps(countTo(100), s -> s.skip(200).limit(0), Collections.emptyList());
}
private int sliceSize(int dataSize, int skip, int limit) {
......@@ -156,17 +156,17 @@ public class SliceOpTest extends OpTestCase {
setContext("skip", s);
testSliceMulti(data,
sliceSize(data.size(), s),
- st -> st.substream(s),
- st -> st.substream(s),
- st -> st.substream(s),
- st -> st.substream(s));
+ st -> st.skip(s),
+ st -> st.skip(s),
+ st -> st.skip(s),
+ st -> st.skip(s));
testSliceMulti(data,
sliceSize(sliceSize(data.size(), s), s/2),
- st -> st.substream(s).substream(s / 2),
- st -> st.substream(s).substream(s / 2),
- st -> st.substream(s).substream(s / 2),
- st -> st.substream(s).substream(s / 2));
+ st -> st.skip(s).skip(s / 2),
+ st -> st.skip(s).skip(s / 2),
+ st -> st.skip(s).skip(s / 2),
+ st -> st.skip(s).skip(s / 2));
}
}
......@@ -182,17 +182,10 @@ public class SliceOpTest extends OpTestCase {
setContext("limit", l);
testSliceMulti(data,
sliceSize(sliceSize(data.size(), s), 0, l),
- st -> st.substream(s).limit(l),
- st -> st.substream(s).limit(l),
- st -> st.substream(s).limit(l),
- st -> st.substream(s).limit(l));
- testSliceMulti(data,
- sliceSize(data.size(), s, l),
- st -> st.substream(s, l+s),
- st -> st.substream(s, l+s),
- st -> st.substream(s, l+s),
- st -> st.substream(s, l+s));
+ st -> st.skip(s).limit(l),
+ st -> st.skip(s).limit(l),
+ st -> st.skip(s).limit(l),
+ st -> st.skip(s).limit(l));
}
}
}
......