This article collects code examples of the scala.collection.mutable.Map.get() method as called from Java, showing how Map.get() is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow, and Maven, and were extracted from selected open-source projects, so they should serve as useful references. Details of the Map.get() method are as follows:
Package path: scala.collection.mutable.Map
Class name: Map
Method name: get
Method description: none available
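Before the project examples, here is a minimal sketch of what calling Map.get() from Java looks like. It is not taken from any of the projects below; it assumes a Scala 2.x standard library on the classpath, and the class name ScalaMapGetDemo is made up for illustration. The point it shows is that Map.get() returns a scala.Option rather than the value (or null), which is why the examples below test the result with isDefined()/isEmpty() before unwrapping it with get().

import scala.Option;
import scala.collection.mutable.HashMap;
import scala.collection.mutable.Map;

public class ScalaMapGetDemo { // hypothetical demo class, not from the projects below
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("answer", 42);

        Option<Integer> present = map.get("answer"); // Some(42)
        Option<Integer> absent = map.get("missing"); // None

        // isDefined()/isEmpty() test for presence; get() unwraps the value
        // and would throw NoSuchElementException on an empty Option.
        System.out.println(present.isDefined()); // true
        System.out.println(present.get());       // 42
        System.out.println(absent.isEmpty());    // true
    }
}

The same pattern appears throughout the examples below, where .get(key).get() unwraps the Option directly and a try/catch (or an isEmpty() check) handles missing keys.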
Code example source: goldmansachs/gs-collections
@Benchmark
public void get()
{
    int localSize = this.size;
    String[] localElements = this.elements;
    Map<String, String> localScalaMap = this.scalaMap;
    for (int i = 0; i < localSize; i++)
    {
        // get() returns a scala.Option; isDefined() is false when the key is absent
        if (!localScalaMap.get(localElements[i]).isDefined())
        {
            throw new AssertionError(i);
        }
    }
}
Code example source: goldmansachs/gs-collections
@Benchmark
public void get()
{
    int localSize = this.size;
    String[] localElements = this.elements;
    Map<String, String> localScalaAnyRefMap = this.scalaAnyRefMap;
    for (int i = 0; i < localSize; i++)
    {
        if (!localScalaAnyRefMap.get(localElements[i]).isDefined())
        {
            throw new AssertionError(i);
        }
    }
}
Code example source: uber/chaperone
@Override
public void handleChildChange(String parentPath, List<String> currentChilds)
    throws Exception {
  if (!tryToRefreshCache()) {
    synchronized (_lock) {
      Set<String> newAddedTopics = new HashSet<String>(currentChilds);
      Set<String> currentServingTopics = getAllTopics();
      newAddedTopics.removeAll(currentServingTopics);
      // Drop topics that are no longer children of the watched path
      for (String existedTopic : currentServingTopics) {
        if (!currentChilds.contains(existedTopic)) {
          _topicPartitionInfoMap.remove(existedTopic);
        }
      }
      scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
          _zkUtils.getPartitionAssignmentForTopics(
              JavaConversions.asScalaBuffer(ImmutableList.copyOf(newAddedTopics)));
      for (String topic : newAddedTopics) {
        try {
          // get(topic) returns a scala.Option; get() unwraps the partition assignment
          scala.collection.Map<Object, Seq<Object>> partitionsMap =
              partitionAssignmentForTopics.get(topic).get();
          TopicPartition tp = new TopicPartition(topic, partitionsMap.size());
          _topicPartitionInfoMap.put(topic, tp);
        } catch (Exception e) {
          LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
        }
      }
      _kafkaTopicsCounter.inc(_topicPartitionInfoMap.size() - _kafkaTopicsCounter.getCount());
    }
  }
}
Code example source: guru.nidi/text-transform
public static Object getAttribute(Segment segment, Attribute name) {
    Option<Object> val = segment.attributes().get(name);
    return val instanceof Some ? val.get() : null;
}
Code example source: pinterest/doctorkafka
private scala.collection.Map<Object, Seq<Object>> getReplicaAssignmentForTopic(
    ZkUtils zkUtils, String topic) {
  if (topicPartitionAssignments.containsKey(topic)) {
    return topicPartitionAssignments.get(topic);
  }
  List<String> topics = new ArrayList<>();
  topics.add(topic);
  Seq<String> topicsSeq = scala.collection.JavaConverters.asScalaBuffer(topics).toSeq();
  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments;
  assignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);
  scala.collection.Map<Object, Seq<Object>> partitionAssignment = assignments.get(topic).get();
  topicPartitionAssignments.put(topic, partitionAssignment);
  return partitionAssignment;
}
Code example source: uber/uReplicator
public void tryUpdateTopic(String topic) {
  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
      _zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.of(topic)));
  if (partitionAssignmentForTopics.get(topic).isEmpty()
      || partitionAssignmentForTopics.get(topic).get().size() == 0) {
    LOGGER.info("try to update for topic {} but found no topic partition for it", topic);
    return;
  }
  synchronized (_lock) {
    LOGGER.info("starting to refresh for update topic {}", topic);
    try {
      _topicPartitionInfoMap.put(topic, new TopicPartition(topic,
          partitionAssignmentForTopics.get(topic).get().size()));
    } catch (Exception e) {
      LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
    }
    LOGGER.info("finished refreshing for updating topic {}", topic);
  }
}
Code example source: uber/uReplicator
private void tryAddTopic(String topic) {
  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
      _zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.of(topic)));
  if (partitionAssignmentForTopics.get(topic).isEmpty()
      || partitionAssignmentForTopics.get(topic).get().size() == 0) {
    LOGGER.info("try to refresh for topic {} but found no topic partition for it", topic);
    return;
  }
  synchronized (_lock) {
    LOGGER.info("starting to refresh for adding topic {}", topic);
    if (!getAllTopics().contains(topic)) {
      try {
        _topicPartitionInfoMap.put(topic, new TopicPartition(topic,
            partitionAssignmentForTopics.get(topic).get().size()));
      } catch (Exception e) {
        LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
      }
    }
    LOGGER.info("finished refreshing for adding topic {}", topic);
  }
}
Code example source: uber/uReplicator
try {
  scala.collection.Map<Object, Seq<Object>> partitionsMap =
      partitionAssignmentForTopics.get(topic).get();
  TopicPartition tp = new TopicPartition(topic, partitionsMap.size());
  _topicPartitionInfoMap.put(topic, tp);
Code example source: com.hotels.road/road-kafka-store
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void verifyTopic(ZkUtils zkUtils, String topic) {
  Set topics = new HashSet();
  topics.add(topic);
  // check # partition and the replication factor
  scala.collection.mutable.Map partitionAssignmentForTopics = zkUtils
      .getPartitionAssignmentForTopics(JavaConversions.asScalaSet(topics).toSeq());
  scala.collection.Map partitionAssignment = (scala.collection.Map) partitionAssignmentForTopics.get(topic).get();
  if (partitionAssignment.size() != 1) {
    throw new RuntimeException(String.format("The schema topic %s should have only 1 partition.", topic));
  }
  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
    throw new RuntimeException(String.format("The retention policy of the schema topic %s must be compact.", topic));
  }
}
Code example source: pinterest/doctorkafka
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zookeeper = commandLine.getOptionValue(ZOOKEEPER);
  ZkUtils zkUtils = KafkaUtils.getZkUtils(zookeeper);
  Seq<String> topicsSeq = zkUtils.getAllTopics();
  List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);
  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
      partitionAssignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);
  Map<String, Integer> replicationFactors = new HashMap<>();
  Map<String, Integer> partitionCounts = new HashMap<>();
  topics.stream().forEach(topic -> {
    int partitionCount = partitionAssignments.get(topic).get().size();
    int factor = partitionAssignments.get(topic).get().head()._2().size();
    partitionCounts.put(topic, partitionCount);
    replicationFactors.put(topic, factor);
  });
  List<PartitionInfo> urps = KafkaClusterManager.getUnderReplicatedPartitions(
      zookeeper, SecurityProtocol.PLAINTEXT, null, topics, partitionAssignments, replicationFactors, partitionCounts);
  for (PartitionInfo partitionInfo : urps) {
    LOG.info("under-replicated : {}", partitionInfo);
  }
}
Code example source: com.hurence.logisland/logisland-agent
private void verifySchemaTopic() {
  Set<String> topics = new HashSet<String>();
  topics.add(topic);
  // check # partition and the replication factor
  scala.collection.Map partitionAssignment = zkUtils.getPartitionAssignmentForTopics(
      JavaConversions.asScalaSet(topics).toSeq())
      .get(topic).get();
  if (partitionAssignment.size() != 1) {
    log.warn("The schema topic " + topic + " should have only 1 partition.");
  }
  if (((Seq) partitionAssignment.get(0).get()).size() < desiredReplicationFactor) {
    log.warn("The replication factor of the schema topic " + topic + " is less than the " +
        "desired one of " + desiredReplicationFactor + ". If this is a production " +
        "environment, it's crucial to add more brokers and increase the replication " +
        "factor of the topic.");
  }
  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
    log.warn("The retention policy of the schema topic " + topic + " may be incorrect. " +
        "Please configure it with compact.");
  }
}
Code example source: pinterest/doctorkafka
Seq<Object> seq = partitionAssignments.get(topic).get().get(partitionId).get();
Node[] nodes = JavaConverters.seqAsJavaList(seq).stream()
    .map(val -> new Node((Integer) val, "", -1)).toArray(Node[]::new);