This article collects code examples of the scala.collection.Map.iterator() method as called from Java, showing how Map.iterator() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven and similar sources, extracted from selected open-source projects, so they should be useful as references. Details of the Map.iterator() method:
Package: scala.collection.Map
Class: Map
Method: iterator
Description: none available
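Before the project examples, here is a minimal self-contained sketch of calling iterator() from Java. The class name and sample data are illustrative; it assumes the Scala 2.11/2.12 library on the classpath and builds the map the same way the kafka-monitor example below does.

import scala.Tuple2;

public class MapIteratorDemo {
    public static void main(String[] args) {
        // Build a small immutable Scala Map from Java (illustrative data),
        // using the same HashMap + $plus pattern as the kafka-monitor example below.
        scala.collection.immutable.Map<String, Integer> map = new scala.collection.immutable.HashMap<>();
        map = map.$plus(new Tuple2<>("a", 1));
        map = map.$plus(new Tuple2<>("b", 2));

        // iterator() yields the map's entries as scala.Tuple2 key-value pairs.
        scala.collection.Iterator<Tuple2<String, Integer>> it = map.iterator();
        while (it.hasNext()) {
            Tuple2<String, Integer> entry = it.next();
            System.out.println(entry._1() + " -> " + entry._2());
        }
    }
}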
Code example source: linkedin/kafka-monitor
private static void reassignPartitions(KafkaZkClient zkClient, Collection<Broker> brokers, String topic, int partitionCount, int replicationFactor) {
  // Collect broker metadata so AdminUtils can compute a replica assignment.
  scala.collection.mutable.ArrayBuffer<BrokerMetadata> brokersMetadata = new scala.collection.mutable.ArrayBuffer<>(brokers.size());
  for (Broker broker : brokers) {
    brokersMetadata.$plus$eq(new BrokerMetadata(broker.id(), broker.rack()));
  }
  scala.collection.Map<Object, Seq<Object>> assignedReplicas =
      AdminUtils.assignReplicasToBrokers(brokersMetadata, partitionCount, replicationFactor, 0, 0);
  // Walk the partition -> replicas map via iterator() and rebuild it keyed by TopicPartition.
  scala.collection.immutable.Map<TopicPartition, Seq<Object>> newAssignment = new scala.collection.immutable.HashMap<>();
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = assignedReplicas.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    TopicPartition tp = new TopicPartition(topic, (Integer) scalaTuple._1);
    newAssignment = newAssignment.$plus(new scala.Tuple2<>(tp, scalaTuple._2));
  }
  scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> currentAssignment = zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
  String currentAssignmentJson = formatAsReassignmentJson(topic, currentAssignment);
  String newAssignmentJson = formatAsReassignmentJson(topic, assignedReplicas);
  LOG.info("Reassign partitions for topic " + topic);
  LOG.info("Current partition replica assignment " + currentAssignmentJson);
  LOG.info("New partition replica assignment " + newAssignmentJson);
  zkClient.createPartitionReassignment(newAssignment);
}
Code example source: linkedin/kafka-monitor
private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
  scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
  scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
      zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
  List<PartitionInfo> partitionInfoList = new ArrayList<>();
  // Iterate the partition -> replica-list map returned from ZooKeeper.
  scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
  while (it.hasNext()) {
    scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
    Integer partition = (Integer) scalaTuple._1();
    scala.Option<Object> leaderOption = zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
    Node leader = leaderOption.isEmpty() ? null : new Node((Integer) leaderOption.get(), "", -1);
    Node[] replicas = new Node[scalaTuple._2().size()];
    for (int i = 0; i < replicas.length; i++) {
      Integer brokerId = (Integer) scalaTuple._2().apply(i);
      replicas[i] = new Node(brokerId, "", -1);
    }
    partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
  }
  return partitionInfoList;
}
Code example source: org.scala-lang.modules/scala-java8-compat (the same method is published in the scala-java8-compat_2.12 and scala-java8-compat_2.11 artifacts)
/**
 * Generates a Stream that traverses the key-value pairs of a scala.collection.Map.
 * <p>
 * Only sequential operations will be efficient.
 * For efficient parallel operation, use the streamAccumulated method instead, but
 * note that this creates a new collection containing the Map's key-value pairs.
 *
 * @param coll The Map to traverse
 * @return A Stream view of the collection which, by default, executes sequentially.
 */
public static <K,V> Stream< scala.Tuple2<K, V> > stream(scala.collection.Map<K, V> coll) {
  return StreamSupport.stream(new StepsAnyIterator< scala.Tuple2<K, V> >(coll.iterator()), false);
}
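A minimal usage sketch of the helper above. It assumes that, in scala-java8-compat, this static method lives on scala.compat.java8.ScalaStreamSupport (treat the class name as an assumption); the map construction and data are illustrative.

import java.util.stream.Stream;
import scala.Tuple2;
import scala.compat.java8.ScalaStreamSupport;  // assumed enclosing class of the stream(...) helper

public class MapStreamDemo {
    public static void main(String[] args) {
        scala.collection.immutable.Map<String, Integer> map = new scala.collection.immutable.HashMap<>();
        map = map.$plus(new Tuple2<>("a", 1));
        map = map.$plus(new Tuple2<>("b", 2));

        // Wraps the Map's iterator() in a sequential java.util.stream.Stream of Tuple2 entries.
        Stream<Tuple2<String, Integer>> entries = ScalaStreamSupport.stream(map);
        entries.forEach(t -> System.out.println(t._1() + " = " + t._2()));
    }
}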
Code example source: pinterest/doctorkafka (also published as com.github.pinterest/kafkastats)
private void fillMetricsBuffer(StatsSummary summary, int epochSecs) {
  buffer.reset();
  OpenTsdbClient.MetricsBuffer buf = buffer;

  // The stats summary exposes counters, gauges and metrics as Scala Maps;
  // cast each one and walk it via iterator().
  Map<String, Long> counters = (Map<String, Long>) (Map<String, ?>) summary.counters();
  Iterator<Tuple2<String, Long>> countersIter = counters.iterator();
  while (countersIter.hasNext()) {
    Tuple2<String, Long> tuple = countersIter.next();
    converter.convertCounter(tuple._1(), epochSecs, tuple._2(), buf);
  }

  Map<String, Double> gauges = (Map<String, Double>) (Map<String, ?>) summary.gauges();
  Iterator<Tuple2<String, Double>> gaugesIter = gauges.iterator();
  while (gaugesIter.hasNext()) {
    Tuple2<String, Double> tuple = gaugesIter.next();
    converter.convertGauge(tuple._1(), epochSecs, (float) tuple._2().doubleValue(), buf);
  }

  Map<String, Distribution> metrics = summary.metrics();
  Iterator<Tuple2<String, Distribution>> metricsIter = metrics.iterator();
  while (metricsIter.hasNext()) {
    Tuple2<String, Distribution> tuple = metricsIter.next();
    converter.convertMetric(tuple._1(), epochSecs, tuple._2(), buf);
  }
}