Usage of the java.util.stream.IntStream.parallel() method, with code examples

Reposted by x33g5p2x on 2022-01-20

This article collects a number of code examples for the java.util.stream.IntStream.parallel() method in Java and shows how IntStream.parallel() is used in practice. The examples come from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should be useful as reference material. Details of the IntStream.parallel() method are as follows:
Package: java.util.stream
Class: IntStream
Method: parallel

About IntStream.parallel

Returns an equivalent stream that is parallel. The call may return the stream itself, either because it was already parallel or because the underlying stream state was modified to be parallel. This is an intermediate operation.
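
As a quick, self-contained illustration (not taken from any of the projects below; the class and variable names are invented for this sketch), the following compares a sequential and a parallel version of the same pipeline:

import java.util.stream.IntStream;

public class ParallelSumExample {
  public static void main(String[] args) {
    // Sequential pipeline: runs on the calling thread
    long sequentialSum = IntStream.range(0, 1_000)
        .mapToLong(i -> (long) i * i)
        .sum();

    // Same pipeline marked parallel: may be split across the common ForkJoinPool
    long parallelSum = IntStream.range(0, 1_000)
        .parallel()
        .mapToLong(i -> (long) i * i)
        .sum();

    // Both variants produce the same total; only the execution strategy differs
    System.out.println(sequentialSum + " == " + parallelSum);
  }
}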

Code examples

Code example source: google/guava

private static void doParallelCacheOp(int count, IntConsumer consumer) {
 IntStream.range(0, count).parallel().forEach(consumer);
}

Code example source: RichardWarburton/java-8-lambdas-exercises

@Override
    // BEGIN parallel_functional
public long countPrimes(int upTo) {
  return IntStream.range(1, upTo)
          .parallel()
          .filter(this::isPrime)
          .count();
}
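
The isPrime predicate referenced through this::isPrime is not part of the excerpt above. A minimal trial-division version, shown here only as a hypothetical stand-in rather than the book's actual implementation, could look like this:

// Hypothetical helper, not from the original project: trial division up to sqrt(n)
private boolean isPrime(int n) {
  if (n < 2) {
    return false;
  }
  return IntStream.rangeClosed(2, (int) Math.sqrt(n))
      .noneMatch(divisor -> n % divisor == 0);
}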

Code example source: sixt/ja-micro

@Test
public void testShouldVerifyNoDuplicateMetricRegistered() {
  final MetricRegistry metricRegistry = new MetricRegistry();

  final MetricBuilderFactory metricBuilderFactory = new MetricBuilderFactory(
      () -> new MetricBuilder(metricRegistry)
  );

  IntStream.rangeClosed(0, 500)
      .parallel()
      .forEach(i -> metricBuilderFactory.newMetric("metric_name").buildCounter().incSuccess());
}

Code example source: stackoverflow.com

new Random().ints(0, 50)
     .parallel()
     .distinct()
     .limit(5)
     .forEach(d -> System.out.println("s: " + d));

Code example source: ben-manes/caffeine

private static void doParallelCacheOp(int count, IntConsumer consumer) {
 IntStream.range(0, count).parallel().forEach(consumer);
}

Code example source: RichardWarburton/java-8-lambdas-exercises

@GenerateMicroBenchmark
public int range() {
  return IntStream.range(0, size).parallel().sum();
}

Code example source: ivan-vasilev/neuralnetworks

@Override
public void getNextTarget(Tensor target)
{
  IntStream stream = IntStream.range(0, currentArrays.size());
  if (properties.getParallelPreprocessing())
  {
    stream = stream.parallel();
  }
  stream.forEach(i -> {
    float[] t = getNextTarget(augmentedToRaw.get(arrayToAugmented.get(currentArrays.get(i))));
    if (t != null)
    {
      System.arraycopy(t, 0, target.getElements(), target.getStartIndex() + i * getTargetDimensions(), t.length);
    }
  });
}

Code example source: RichardWarburton/java-8-lambdas-exercises

@GenerateMicroBenchmark
  // BEGIN parallel
public Map<Integer, Double> parallelDiceRolls() {
  double fraction = 1.0 / N;
  return IntStream.range(0, N)                        // <1>
          .parallel()                         // <2>
          .mapToObj(twoDiceThrows())          // <3>
          .collect(groupingBy(side -> side,   // <4>
            summingDouble(n -> fraction))); // <5>
}
  // END parallel

Code example source: RoaringBitmap/RoaringBitmap

/**
 * Computes the bitwise union of the input bitmaps
 * @param bitmaps the input bitmaps
 * @return the union of the bitmaps
 */
public static RoaringBitmap or(RoaringBitmap... bitmaps) {
 SortedMap<Short, List<Container>> grouped = groupByKey(bitmaps);
 short[] keys = new short[grouped.size()];
 Container[] values = new Container[grouped.size()];
 List<List<Container>> slices = new ArrayList<>(grouped.size());
 int i = 0;
 for (Map.Entry<Short, List<Container>> slice : grouped.entrySet()) {
  keys[i++] = slice.getKey();
  slices.add(slice.getValue());
 }
 IntStream.range(0, i)
      .parallel()
      .forEach(position -> values[position] = or(slices.get(position)));
 return new RoaringBitmap(new RoaringArray(keys, values, i));
}

Code example source: OryxProject/oryx

Objects.requireNonNull(task);
Objects.requireNonNull(collector);
IntStream range = IntStream.range(0, numTasks);
IntStream taskIndices = parallelism > 1 ? range.parallel() : range;
if (parallelism > 1 && privatePool) {
 ForkJoinPool pool = new ForkJoinPool(parallelism);

Code example source: RoaringBitmap/RoaringBitmap

/**
 * Computes the bitwise union of the input bitmaps
 * @param bitmaps the input bitmaps
 * @return the union of the bitmaps
 */
public static MutableRoaringBitmap or(ImmutableRoaringBitmap... bitmaps) {
 SortedMap<Short, List<MappeableContainer>> grouped = groupByKey(bitmaps);
 short[] keys = new short[grouped.size()];
 MappeableContainer[] values = new MappeableContainer[grouped.size()];
 List<List<MappeableContainer>> slices = new ArrayList<>(grouped.size());
 int i = 0;
 for (Map.Entry<Short, List<MappeableContainer>> slice : grouped.entrySet()) {
  keys[i++] = slice.getKey();
  slices.add(slice.getValue());
 }
 IntStream.range(0, i)
     .parallel()
     .forEach(position -> values[position] = or(slices.get(position)));
 return new MutableRoaringBitmap(new MutableRoaringArray(keys, values, i));
}

Code example source: RoaringBitmap/RoaringBitmap

private static Container or(List<Container> containers) {
 int parallelism;
 // if there are few enough containers it's possible no bitmaps will be materialised
 if (containers.size() < 16) {
  Container result = containers.get(0).clone();
  for (int i = 1; i < containers.size(); ++i) {
   result = result.lazyIOR(containers.get(i));
  }
  return result.repairAfterLazy();
 }
 // heuristic to save memory if the union is large and likely to end up as a bitmap
 if (containers.size() < 512 || (parallelism = availableParallelism()) == 1) {
  Container result = new BitmapContainer(new long[1 << 10], -1);
  for (Container container : containers) {
   result = result.lazyIOR(container);
  }
  return result.repairAfterLazy();
 }
 // we have an enormous slice (probably skewed), parallelise it
 int partitionSize = (containers.size() + parallelism - 1) / parallelism;
 return IntStream.range(0, parallelism)
     .parallel()
     .mapToObj(i -> containers.subList(i * partitionSize,
         Math.min((i + 1) * partitionSize, containers.size())))
     .collect(OR);
}

Code example source: OryxProject/oryx

Preconditions.checkArgument(parallelism >= 1);
Objects.requireNonNull(task);
IntStream range = IntStream.range(0, numTasks);
IntStream taskIndices = parallelism > 1 ? range.parallel() : range;
if (parallelism > 1 && privatePool) {
  ForkJoinPool pool = new ForkJoinPool(parallelism);
  try {
    pool.submit(() -> taskIndices.forEach(task::accept)).get();
  } catch (InterruptedException | ExecutionException e) {
    throw new IllegalStateException(e);
  }
} else {
  taskIndices.forEach(task::accept);
}
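
The snippet above leans on behavior that is widely relied upon but not formally guaranteed by the Stream specification: when the terminal operation of a parallel stream is started from inside a ForkJoinPool task, the stream's work is, in practice, executed on that pool rather than on the common pool, which is how the private pool bounds the parallelism. A stripped-down sketch of the same trick, with invented names and not taken from Oryx:

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.IntStream;

public class PrivatePoolExample {
  public static void main(String[] args) throws InterruptedException, ExecutionException {
    ForkJoinPool pool = new ForkJoinPool(4); // dedicated pool, isolated from the common pool
    try {
      long sum = pool.submit(() ->
          IntStream.range(0, 1_000_000)
              .parallel()   // in practice runs on 'pool', because the terminal op starts inside it
              .asLongStream()
              .sum()
      ).get();
      System.out.println(sum);
    } finally {
      pool.shutdown();
    }
  }
}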

Code example source: RoaringBitmap/RoaringBitmap

private static MappeableContainer or(List<MappeableContainer> containers) {
 int parallelism;
 // if there are few enough containers it's possible no bitmaps will be materialised
 if (containers.size() < 16) {
  MappeableContainer result = containers.get(0).clone();
  for (int i = 1; i < containers.size(); ++i) {
   result = result.lazyIOR(containers.get(i));
  }
  return result.repairAfterLazy();
 }
 // heuristic to save memory if the union is large and likely to end up as a bitmap
 if (containers.size() < 512 || (parallelism = availableParallelism()) == 1) {
  MappeableContainer result = new MappeableBitmapContainer(LongBuffer.allocate(1 << 10), -1);
  for (MappeableContainer container : containers) {
   result = result.lazyIOR(container);
  }
  return result.repairAfterLazy();
 }
 // we have an enormous slice (probably skewed), parallelise it
 int partitionSize = (containers.size() + parallelism - 1) / parallelism;
 return IntStream.range(0, parallelism)
     .parallel()
     .mapToObj(i -> containers.subList(i * partitionSize,
         Math.min((i + 1) * partitionSize, containers.size())))
     .collect(OR);
}

Code example source: ivan-vasilev/neuralnetworks

@Override
public void value(Tensor inputOutput)
{
  int sampleLength = inputOutput.getSize() / inputOutput.getDimensions()[0];
  IntStream.range(0, inputOutput.getSize())
      .parallel()
      .forEach(i -> inputOutput.getElements()[inputOutput.getStartIndex() + i] -= meanValues[i % sampleLength]);
}

Code example source: confluentinc/ksql

@Test
public void shouldBeThreadSafe() {
 // When:
 final List<CompletableFuture<Void>> futures = IntStream.range(1, 11).parallel()
   .mapToObj(idx -> {
    final CompletableFuture<Void> f = futureStore.getFutureForSequenceNumber(idx);
    if (idx % 10 == 0) {
     futureStore.completeFuturesUpToAndIncludingSequenceNumber(idx);
    }
    return f;
   })
   .collect(Collectors.toList());
 // Then:
 assertThat(futures.stream().allMatch(CompletableFuture::isDone), is(true));
}

Code example source: ivan-vasilev/neuralnetworks

IntStream stream = IntStream.range(0, properties.getImagesBulkSize());
if (properties.getParallelPreprocessing())
  stream = stream.parallel();
stream.forEach(i -> {
  int index = (currentEl + i) % filesAndCategory.size();
  Pair<String, Integer> imageCategoryPair = filesAndCategory.get(index);

Code example source: neo4j-contrib/neo4j-apoc-procedures

public static Stream<List<Object>> partitionSubList(List<Object> data, int partitions, List<Object> tombstone) {
  if (partitions==0) partitions=1;
  List<Object> list = new ArrayList<>(data);
  int total = list.size();
  int batchSize = Math.max((int)Math.ceil((double)total / partitions),1);
  Stream<List<Object>> stream = IntStream.range(0, partitions).parallel()
      .mapToObj((part) -> list.subList(Math.min(part * batchSize, total), Math.min((part + 1) * batchSize, total)))
      .filter(partition -> !partition.isEmpty());
  return tombstone == null ? stream : Stream.concat(stream,Stream.of(tombstone));
}

Code example source: ivan-vasilev/neuralnetworks

@Override
public void getNextInput(Tensor input)
{
  int mb = input.getDimensions()[0];
  currentArrays.clear();
  for (int i = 0; i < mb; i++)
  {
    try
    {
      if (getValues().size() == 0)
      {
        createArrays();
      }
      currentArrays.add(getValues().take());
    } catch (InterruptedException e)
    {
      e.printStackTrace();
    }
  }
  IntStream stream = IntStream.range(0, mb);
  if (properties.getParallelPreprocessing())
  {
    stream = stream.parallel();
  }
  stream.forEach(i -> {
    float[] a = currentArrays.get(i);
    rawMBPosition.put(i, augmentedToRaw.get(arrayToAugmented.get(a)));
    System.arraycopy(a, 0, input.getElements(), input.getStartIndex() + i * getInputDimensions(), a.length);
  });
}

Code example source: neo4j-contrib/neo4j-apoc-procedures

private Stream<List<Object>> partitionList(@Name("values") List list, @Name("batchSize") int batchSize) {
  int total = list.size();
  int pages = total % batchSize == 0 ? total/batchSize : total/batchSize + 1;
  return IntStream.range(0, pages).parallel().boxed()
      .map(page -> {
        int from = page * batchSize;
        return list.subList(from, Math.min(from + batchSize, total));
      });
}
