This article collects a number of code examples for the java.util.Map.getOrDefault() method in Java and shows how Map.getOrDefault() is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they are fairly representative and should serve as useful references. The details of Map.getOrDefault() are as follows:
Package: java.util.Map
Class: Map
Method: getOrDefault
Description: returns the value to which the specified key is mapped, or the given default value if the map contains no mapping for that key.
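Before the project excerpts below, here is a minimal usage sketch (not taken from any of the projects listed here; the map contents and class name are invented purely for illustration). It shows the two behaviours of getOrDefault and the counting idiom that several of the excerpts rely on:

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultDemo {
    public static void main(String[] args) {
        Map<String, Integer> ages = new HashMap<>();
        ages.put("alice", 30);

        // Key present: the mapped value is returned and the default is ignored.
        System.out.println(ages.getOrDefault("alice", -1)); // 30
        // Key absent: the supplied default is returned instead of null.
        System.out.println(ages.getOrDefault("bob", -1));   // -1

        // Counting idiom used in several of the examples below:
        // read the current count (0 if missing), then write back count + 1.
        Map<String, Integer> counts = new HashMap<>();
        for (String word : new String[]{"a", "b", "a"}) {
            counts.put(word, counts.getOrDefault(word, 0) + 1);
        }
        System.out.println(counts); // {a=2, b=1}
    }
}

Note that getOrDefault only reads: unlike computeIfAbsent, it never inserts the default into the map, which is why the counting examples follow it with an explicit put.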
Code example source: origin: hs-web/hsweb-framework
protected ResponseConvertHandler getConvertHandler(String id, String provider) {
    ResponseConvertHandler convertHandler = convertHandlerMap.get("serverId:" + id);
    if (convertHandler == null) {
        convertHandler = convertHandlerMap.getOrDefault("provider:" + provider, defaultConvertHandler);
    }
    return convertHandler;
}
Code example source: origin: square/javapoet
void add(T t) {
    int count = map.getOrDefault(t, 0);
    map.put(t, count + 1);
}
Code example source: origin: prestodb/presto
private static int mergeMaps(Map<String, Integer> map, Map<String, Integer> other)
{
    int deltaSize = 0;
    for (Map.Entry<String, Integer> entry : other.entrySet()) {
        if (!map.containsKey(entry.getKey())) {
            deltaSize += entry.getKey().getBytes().length + SIZE_OF_INT;
        }
        map.put(entry.getKey(), map.getOrDefault(entry.getKey(), 0) + other.getOrDefault(entry.getKey(), 0));
    }
    return deltaSize;
}
Code example source: origin: twosigma/beakerx
public void taskStart(int stageId, long taskId) {
    if (!stages.containsKey(stageId)) {
        logger.warning(String.format("Spark stage %d could not be found for task progress reporting.", stageId));
        return;
    }
    removeTask(stageId, taskId);
    List<Long> at = activeTasks.getOrDefault(stageId, new ArrayList<Long>());
    at.add(taskId);
    activeTasks.put(stageId, at);
}
Code example source: origin: ctripcorp/apollo
Map<String, Long> latestNotifications = Maps.newHashMap();
for (ReleaseMessage releaseMessage : latestReleaseMessages) {
    latestNotifications.put(releaseMessage.getMessage(), releaseMessage.getId());
}
long clientSideId = clientSideNotifications.get(namespace);
long latestId = ConfigConsts.NOTIFICATION_ID_PLACEHOLDER;
Collection<String> namespaceWatchedKeys = watchedKeysMap.get(namespace);
for (String namespaceWatchedKey : namespaceWatchedKeys) {
    long namespaceNotificationId =
        latestNotifications.getOrDefault(namespaceWatchedKey, ConfigConsts.NOTIFICATION_ID_PLACEHOLDER);
    if (namespaceNotificationId > latestId) {
        latestId = namespaceNotificationId;
    }
}
// notify only when a watched key has a newer release than the client has already seen
if (latestId > clientSideId) {
    ApolloConfigNotification notification = new ApolloConfigNotification(namespace, latestId);
    namespaceWatchedKeys.stream().filter(latestNotifications::containsKey).forEach(namespaceWatchedKey ->
        notification.addMessage(namespaceWatchedKey, latestNotifications.get(namespaceWatchedKey)));
    newNotifications.add(notification);
}
Code example source: origin: apache/storm
private Map<String, Double> mkSupervisorCapacities(Map<String, Object> conf) {
    Map<String, Double> ret = new HashMap<String, Double>();
    // Put in legacy values
    Double mem = ObjectReader.getDouble(conf.get(Config.SUPERVISOR_MEMORY_CAPACITY_MB), 4096.0);
    ret.put(Config.SUPERVISOR_MEMORY_CAPACITY_MB, mem);
    Double cpu = ObjectReader.getDouble(conf.get(Config.SUPERVISOR_CPU_CAPACITY), 400.0);
    ret.put(Config.SUPERVISOR_CPU_CAPACITY, cpu);
    // If configs are present in Generic map and legacy - the legacy values will be overwritten
    Map<String, Number> rawResourcesMap = (Map<String, Number>) conf.getOrDefault(
        Config.SUPERVISOR_RESOURCES_MAP, Collections.emptyMap()
    );
    for (Map.Entry<String, Number> stringNumberEntry : rawResourcesMap.entrySet()) {
        ret.put(stringNumberEntry.getKey(), stringNumberEntry.getValue().doubleValue());
    }
    return NormalizedResources.RESOURCE_NAME_NORMALIZER.normalizedResourceMap(ret);
}
Code example source: origin: graphql-java/graphql-java
int increaseExpectedFetchCount(int level, int count) {
    expectedFetchCountPerLevel.put(level, expectedFetchCountPerLevel.getOrDefault(level, 0) + count);
    return expectedFetchCountPerLevel.get(level);
}
Code example source: origin: apache/storm
@VisibleForTesting
void prepare(Cluster cluster) {
    this.cluster = cluster;
    nodes = new RAS_Nodes(cluster);
    networkTopography = cluster.getNetworkTopography();
    Map<String, String> hostToRack = new HashMap<>();
    for (Map.Entry<String, List<String>> entry : networkTopography.entrySet()) {
        String rackId = entry.getKey();
        for (String hostName: entry.getValue()) {
            hostToRack.put(hostName, rackId);
        }
    }
    for (RAS_Node node: nodes.getNodes()) {
        String superId = node.getId();
        String hostName = node.getHostname();
        String rackId = hostToRack.getOrDefault(hostName, DNSToSwitchMapping.DEFAULT_RACK);
        superIdToHostname.put(superId, hostName);
        superIdToRack.put(superId, rackId);
        hostnameToNodes.computeIfAbsent(hostName, (hn) -> new ArrayList<>()).add(node);
        rackIdToNodes.computeIfAbsent(rackId, (hn) -> new ArrayList<>()).add(node);
    }
    logClusterInfo();
}
Code example source: origin: graphhopper/graphhopper
List<Transfer> getTransfersFromStop(String fromStopId, String fromRouteId) {
    final List<Transfer> allOutboundTransfers = transfersFromStop.getOrDefault(fromStopId, Collections.emptyList());
    final Map<String, List<Transfer>> byToStop = allOutboundTransfers.stream()
            .filter(t -> t.transfer_type == 0 || t.transfer_type == 2)
            .filter(t -> t.from_route_id == null || fromRouteId.equals(t.from_route_id))
            .collect(Collectors.groupingBy(t -> t.to_stop_id));
    final List<Transfer> result = new ArrayList<>();
    byToStop.forEach((toStop, transfers) -> {
        routesByStop.getOrDefault(toStop, Collections.emptySet()).forEach(toRouteId -> {
            final Transfer mostSpecificRule = findMostSpecificRule(transfers, fromRouteId, toRouteId);
            final Transfer myRule = new Transfer();
            myRule.to_route_id = toRouteId;
            myRule.from_route_id = fromRouteId;
            myRule.to_stop_id = mostSpecificRule.to_stop_id;
            myRule.from_stop_id = mostSpecificRule.from_stop_id;
            myRule.transfer_type = mostSpecificRule.transfer_type;
            myRule.min_transfer_time = mostSpecificRule.min_transfer_time;
            myRule.from_trip_id = mostSpecificRule.from_trip_id;
            myRule.to_trip_id = mostSpecificRule.to_trip_id;
            result.add(myRule);
        });
    });
    return result;
}
Code example source: origin: apache/storm
public ColumnsFileReporter(String path, Map<String, String> query, Map<String, MetricExtractor> extractorsMap,
                           String defaultPreceision) throws FileNotFoundException {
    super(path, query, extractorsMap);
    targetUnit = UNIT_MAP.get(query.getOrDefault("time", "MILLISECONDS").toUpperCase());
    if (targetUnit == null) {
        throw new IllegalArgumentException(query.get("time") + " is not a supported time unit");
    }
    List<String> extractors = handleExtractorCleanup(Arrays.asList(query.get("columns").split("\\s*,\\s*")));
    extractors.add(extractor);
    String strPrecision = query.getOrDefault("precision", defaultPreceision);
    if (strPrecision == null) {
        precision = -1;
Code example source: origin: apache/incubator-druid
private Map<String, Long> getDeltaValues(Map<String, Long> total, Map<String, Long> prev)
{
    return total.entrySet()
                .stream()
                .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue() - prev.getOrDefault(e.getKey(), 0L)));
}
Code example source: origin: apache/storm
/**
 * Constructs a {@link DefaultStateSerializer} instance with the given list of classes registered in kryo.
 *
 * @param classesToRegister the classes to register.
 */
public DefaultStateSerializer(Map<String, Object> topoConf, TopologyContext context, List<Class<?>> classesToRegister) {
    this.context = context;
    this.topoConf = topoConf;
    registrations.addAll(classesToRegister.stream().map(Class::getName).collect(Collectors.toSet()));
    // other classes from config
    registrations.addAll((List<String>) topoConf.getOrDefault(Config.TOPOLOGY_STATE_KRYO_REGISTER, Collections.emptyList()));
    // defaults
    registrations.add(Optional.class.getName());
}
Code example source: origin: testcontainers/testcontainers-java
/**
 * Attach an output consumer at container startup, enabling stdout and stderr to be followed, waited on, etc.
 * <p>
 * More than one consumer may be registered.
 *
 * @param serviceName the name of the service as set in the docker-compose.yml file
 * @param consumer consumer that output frames should be sent to
 * @return this instance, for chaining
 */
public SELF withLogConsumer(String serviceName, Consumer<OutputFrame> consumer) {
    String serviceInstanceName = getServiceInstanceName(serviceName);
    final List<Consumer<OutputFrame>> consumers = this.logConsumers.getOrDefault(serviceInstanceName, new ArrayList<>());
    consumers.add(consumer);
    this.logConsumers.putIfAbsent(serviceInstanceName, consumers);
    return self();
}
Code example source: origin: prestodb/presto
@Override
public final RecordCursor cursor(ConnectorTransactionHandle transactionHandle, ConnectorSession session, TupleDomain<Integer> constraint)
{
    TransactionId transactionId = ((GlobalSystemTransactionHandle) transactionHandle).getTransactionId();
    InMemoryRecordSet.Builder table = InMemoryRecordSet.builder(tableMetadata);
    Map<ConnectorId, Map<String, PropertyMetadata<?>>> connectorProperties = propertySupplier.get();
    for (Entry<String, ConnectorId> entry : new TreeMap<>(transactionManager.getCatalogNames(transactionId)).entrySet()) {
        String catalog = entry.getKey();
        Map<String, PropertyMetadata<?>> properties = new TreeMap<>(connectorProperties.getOrDefault(entry.getValue(), ImmutableMap.of()));
        for (PropertyMetadata<?> propertyMetadata : properties.values()) {
            table.addRow(
                    catalog,
                    propertyMetadata.getName(),
                    firstNonNull(propertyMetadata.getDefaultValue(), "").toString(),
                    propertyMetadata.getSqlType().toString(),
                    propertyMetadata.getDescription());
        }
    }
    return table.build().cursor();
}
}
Code example source: origin: apache/storm
@Override
public void prepare(Map<String, Object> topoConf, TopologyContext context, OutputCollector collector) {
    List<String> registrations = (List<String>) topoConf.getOrDefault(Config.TOPOLOGY_STATE_KRYO_REGISTER, new ArrayList<>());
    registrations.add(ConcurrentLinkedQueue.class.getName());
    registrations.add(LinkedList.class.getName());
    registrations.add(AtomicInteger.class.getName());
    registrations.add(EventImpl.class.getName());
    registrations.add(WindowPartition.class.getName());
    registrations.add(DefaultEvictionContext.class.getName());
    topoConf.put(Config.TOPOLOGY_STATE_KRYO_REGISTER, registrations);
    prepare(topoConf, context, collector, getWindowState(topoConf, context), getPartitionState(topoConf, context),
            getWindowSystemState(topoConf, context));
}
Code example source: origin: apache/storm
@SuppressWarnings("unchecked")
private static Map<String, Object> normalizeConf(Map<String, Object> conf, Map<String, Object> topoConf, StormTopology topology) {
    //ensure that serializations are same for all tasks no matter what's on
    // the supervisors. this also allows you to declare the serializations as a sequence
    List<Map<String, Object>> allConfs = new ArrayList<>();
    for (Object comp : StormCommon.allComponents(topology).values()) {
        allConfs.add(StormCommon.componentConf(comp));
    }
    Set<String> decorators = new HashSet<>();
    //Yes we are putting in a config that is not the same type we pulled out.
    Map<String, String> serializers = new HashMap<>();
    for (Map<String, Object> c : allConfs) {
        addToDecorators(decorators, (List<String>) c.get(Config.TOPOLOGY_KRYO_DECORATORS));
        addToSerializers(serializers, (List<Object>) c.get(Config.TOPOLOGY_KRYO_REGISTER));
    }
    addToDecorators(decorators, (List<String>) topoConf.getOrDefault(Config.TOPOLOGY_KRYO_DECORATORS,
            conf.get(Config.TOPOLOGY_KRYO_DECORATORS)));
    addToSerializers(serializers, (List<Object>) topoConf.getOrDefault(Config.TOPOLOGY_KRYO_REGISTER,
            conf.get(Config.TOPOLOGY_KRYO_REGISTER)));
    Map<String, Object> mergedConf = Utils.merge(conf, topoConf);
    Map<String, Object> ret = new HashMap<>(topoConf);
    ret.put(Config.TOPOLOGY_KRYO_REGISTER, serializers);
    ret.put(Config.TOPOLOGY_KRYO_DECORATORS, new ArrayList<>(decorators));
    ret.put(Config.TOPOLOGY_ACKER_EXECUTORS, mergedConf.get(Config.TOPOLOGY_ACKER_EXECUTORS));
    ret.put(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS, mergedConf.get(Config.TOPOLOGY_EVENTLOGGER_EXECUTORS));
    ret.put(Config.TOPOLOGY_MAX_TASK_PARALLELISM, mergedConf.get(Config.TOPOLOGY_MAX_TASK_PARALLELISM));
    return ret;
}
Code example source: origin: linkedin/cruise-control
private Set<Map<String, Object>> recentGoalViolations(boolean useDateFormat) {
    Map<Long, Anomaly> goalViolationsByTime = _recentAnomaliesByType.get(AnomalyType.GOAL_VIOLATION);
    Set<Map<String, Object>> recentAnomalies = new HashSet<>(_numCachedRecentAnomalyStates);
    for (Map.Entry<Long, Anomaly> entry: goalViolationsByTime.entrySet()) {
        GoalViolations goalViolations = (GoalViolations) entry.getValue();
        Map<Boolean, List<String>> violatedGoalsByFixability = goalViolations.violatedGoalsByFixability();
        Map<String, Object> anomalyDetails = new HashMap<>(3);
        anomalyDetails.put(useDateFormat ? DETECTION_DATE : DETECTION_MS,
                useDateFormat ? getDateFormat(entry.getKey()) : entry.getKey());
        anomalyDetails.put(FIXABLE_VIOLATED_GOALS, violatedGoalsByFixability.getOrDefault(true, Collections.emptyList()));
        anomalyDetails.put(UNFIXABLE_VIOLATED_GOALS, violatedGoalsByFixability.getOrDefault(false, Collections.emptyList()));
        recentAnomalies.add(anomalyDetails);
    }
    return recentAnomalies;
}
Code example source: origin: hs-web/hsweb-framework
protected ResponseJudge getResponseJudge(String id, String provider) {
    ResponseJudge judge = judgeMap.get("serverId:" + id);
    if (judge == null) {
        judge = judgeMap.getOrDefault("provider:" + provider, defaultResponseJudge);
    }
    return judge;
}
Code example source: origin: apache/storm
/**
 * Chooses one of the incoming tasks and selects the one that has been selected the fewest times so far.
 */
public Integer chooseTask(int[] assignedTasks) {
    Integer taskIdWithMinLoad = null;
    Long minTaskLoad = Long.MAX_VALUE;
    for (Integer currentTaskId : assignedTasks) {
        final Long currentTaskLoad = targetTaskStats.getOrDefault(currentTaskId, 0L);
        if (currentTaskLoad < minTaskLoad) {
            minTaskLoad = currentTaskLoad;
            taskIdWithMinLoad = currentTaskId;
        }
    }
    targetTaskStats.put(taskIdWithMinLoad, targetTaskStats.getOrDefault(taskIdWithMinLoad, 0L) + 1);
    return taskIdWithMinLoad;
}
}
Code example source: origin: apache/ignite
/** {@inheritDoc} */
@Override public Double apply(double[] estimations) {
    A.notEmpty(estimations, "estimations vector");
    Map<Double, Integer> cntrsByCls = new HashMap<>();
    for (Double predictedValue : estimations) {
        Integer cntrVal = cntrsByCls.getOrDefault(predictedValue, 0) + 1;
        cntrsByCls.put(predictedValue, cntrVal);
    }
    return cntrsByCls.entrySet().stream()
        .max(Comparator.comparing(Map.Entry::getValue))
        .get().getKey();
}
}
The content above is collected from the internet; if it infringes on any rights, please contact the author to have it removed.