This article collects a number of code examples for the java.util.LinkedHashMap.put() method in Java and shows how LinkedHashMap.put() is used in practice. The examples are extracted from selected projects on platforms such as GitHub, Stack Overflow and Maven, so they carry real-world reference value and should be helpful to some extent. Details of the LinkedHashMap.put() method:
Package path: java.util.LinkedHashMap
Class name: LinkedHashMap
Method name: put
Description: none available
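Since no description of the method is available, here is a minimal, self-contained sketch (the class name LinkedHashMapPutDemo is illustrative, not taken from any of the projects below) of put()'s basic contract: it returns the value previously mapped to the key, or null if there was none, and LinkedHashMap keeps keys in insertion order even when an existing key's value is replaced.

import java.util.LinkedHashMap;
import java.util.Map;

public class LinkedHashMapPutDemo {
  public static void main(String[] args) {
    Map<String, Integer> map = new LinkedHashMap<>();
    // put() returns the previous value for the key, or null if the key was absent.
    Integer previous = map.put("a", 1);   // previous == null
    map.put("b", 2);
    map.put("c", 3);
    // Re-inserting an existing key replaces its value but keeps its original position.
    map.put("a", 10);
    // Iteration follows insertion order: a=10, b=2, c=3
    for (Map.Entry<String, Integer> entry : map.entrySet()) {
      System.out.println(entry.getKey() + "=" + entry.getValue());
    }
    System.out.println("previous value for \"a\": " + previous);
  }
}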
Code example source: ch.qos.logback/logback-classic
int getMessageCountAndThenIncrement(String msg) {
  // don't insert null elements
  if (msg == null) {
    return 0;
  }
  Integer i;
  // LinkedHashMap is not thread-safe. See also LBCLASSIC-255
  synchronized (this) {
    i = super.get(msg);
    if (i == null) {
      i = 0;
    } else {
      i = i + 1;
    }
    super.put(msg, i);
  }
  return i;
}
Code example source: square/leakcanary
private Map<String, Map<String, Exclusion>> unmodifiableRefStringMap(
    Map<String, Map<String, ParamsBuilder>> mapmap) {
  LinkedHashMap<String, Map<String, Exclusion>> fieldNameByClassName = new LinkedHashMap<>();
  for (Map.Entry<String, Map<String, ParamsBuilder>> entry : mapmap.entrySet()) {
    fieldNameByClassName.put(entry.getKey(), unmodifiableRefMap(entry.getValue()));
  }
  return unmodifiableMap(fieldNameByClassName);
}
Code example source: apache/hive
public void addAggregationExprsForClause(String clause,
    LinkedHashMap<String, ASTNode> aggregationTrees) {
  if (destToAggregationExprs.containsKey(clause)) {
    destToAggregationExprs.get(clause).putAll(aggregationTrees);
  } else {
    destToAggregationExprs.put(clause, aggregationTrees);
  }
}
Code example source: google/guava
@Override
public Iterable<Entry<K, Collection<V>>> order(List<Entry<K, Collection<V>>> insertionOrder) {
  Map<K, Collection<V>> map = new HashMap<>();
  List<Entry<K, V>> builder = new ArrayList<>();
  for (Entry<K, Collection<V>> entry : insertionOrder) {
    for (V v : entry.getValue()) {
      builder.add(mapEntry(entry.getKey(), v));
    }
    map.put(entry.getKey(), entry.getValue());
  }
  Iterable<Entry<K, V>> ordered = multimapGenerator.order(builder);
  LinkedHashMap<K, Collection<V>> orderedMap = new LinkedHashMap<>();
  for (Entry<K, V> entry : ordered) {
    orderedMap.put(entry.getKey(), map.get(entry.getKey()));
  }
  return orderedMap.entrySet();
}
Code example source: apache/hive
// Note: this excerpt is truncated in the source; "..." marks elided lines.
targetAliasToPartnInfo.remove(targetAlias);
List<Path> pathsToRemove = new ArrayList<>();
for (Entry<Path, ArrayList<String>> entry : targetPathToAliases.entrySet()) {
  ArrayList<String> aliases = entry.getValue();
  aliases.remove(targetAlias);
  if (aliases.isEmpty()) {
    pathsToRemove.add(entry.getKey());
  }
}
// ... (lines elided in the source excerpt)
targetAliasToWork.put(sourceAlias, sourceAliasToWork.get(sourceAlias));
targetAliasToPartnInfo.putAll(sourceAliasToPartnInfo);
targetPathToPartitionInfo.putAll(sourcePathToPartitionInfo);
List<Path> pathsToAdd = new ArrayList<>();
for (Entry<Path, ArrayList<String>> entry : sourcePathToAliases.entrySet()) {
  ArrayList<String> aliases = entry.getValue();
  if (aliases.contains(sourceAlias)) {
    pathsToAdd.add(entry.getKey());
  }
}
// ... (lines elided in the source excerpt)
targetPathToAliases.put(pathToAdd, new ArrayList<String>());
targetPathToAliases.get(pathToAdd).add(sourceAlias);
// ... (lines elided in the source excerpt)
Code example source: apache/hive
/**
 * Puts the dependency for an (operator, columnInfo) tuple.
 * @param op The operator whose dependency is being inserted.
 * @param col The column info whose dependency is being inserted.
 * @param dep The dependency.
 */
public void putDependency(Operator<? extends OperatorDesc> op,
    ColumnInfo col, Dependency dep) {
  LinkedHashMap<ColumnInfo, Dependency> colMap = depMap.get(op);
  if (colMap == null) {
    colMap = new LinkedHashMap<ColumnInfo, Dependency>();
    depMap.put(op, colMap);
  }
  colMap.put(col, dep);
}
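The null check followed by creating and registering the inner map is a common way to populate a nested map like depMap above. On Java 8+, Map.computeIfAbsent expresses the same idea in one call; a minimal, self-contained sketch of the pattern (the String/Integer types and the class and field names here are placeholders, not Hive's actual types):

import java.util.LinkedHashMap;
import java.util.Map;

public class NestedMapPutDemo {
  // Outer map from a group key to an inner LinkedHashMap, mirroring depMap in the snippet above.
  private final Map<String, LinkedHashMap<String, Integer>> depMap = new LinkedHashMap<>();

  public void putDependency(String op, String col, Integer dep) {
    // computeIfAbsent creates and registers the inner map only when 'op' has no entry yet,
    // replacing the explicit null check used in the Hive snippet.
    depMap.computeIfAbsent(op, k -> new LinkedHashMap<>()).put(col, dep);
  }
}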
Code example source: jeasonlzy/okhttp-OkGo
private void put(String key, String value, boolean isReplace) {
  if (key != null && value != null) {
    List<String> urlValues = urlParamsMap.get(key);
    if (urlValues == null) {
      urlValues = new ArrayList<>();
      urlParamsMap.put(key, urlValues);
    }
    if (isReplace) urlValues.clear();
    urlValues.add(value);
  }
}
Code example source: spring-projects/spring-data-mongodb
/**
 * Creates a criterion using the given object as a pattern.
 *
 * @param sample
 * @return
 * @since 1.8
 */
public Criteria alike(Example<?> sample) {
  criteria.put("$example", sample);
  this.criteriaChain.add(this);
  return this;
}
Code example source: apache/kafka
// Note: this excerpt is truncated in the source; "..." marks elided lines.
List<TopicPartition> altered = new ArrayList<>();
for (Iterator<Entry<TopicPartition, PartitionData>> iter =
    sessionPartitions.entrySet().iterator(); iter.hasNext(); ) {
  Entry<TopicPartition, PartitionData> entry = iter.next();
  TopicPartition topicPartition = entry.getKey();
  PartitionData prevData = entry.getValue();
  PartitionData nextData = next.get(topicPartition);
  if (nextData != null) {
    if (prevData.equals(nextData)) {
      // ... (lines elided in the source excerpt)
      next.put(topicPartition, nextData);
      entry.setValue(nextData);
      altered.add(topicPartition);
      // ... (lines elided in the source excerpt)
      removed.add(topicPartition);
    }
  }
}
// ... (lines elided in the source excerpt)
for (Entry<TopicPartition, PartitionData> entry : next.entrySet()) {
  TopicPartition topicPartition = entry.getKey();
  PartitionData nextData = entry.getValue();
  if (sessionPartitions.containsKey(topicPartition)) {
    // ... (lines elided in the source excerpt)
    sessionPartitions.put(topicPartition, nextData);
    added.add(topicPartition);
  }
}
Code example source: redisson/redisson
/**
 * Creates a proxy instance for the supplied annotation type and values.
 *
 * @param classLoader The class loader that should be used for loading the annotation's values.
 * @param annotationType The annotation's type.
 * @param values The values that the annotation contains.
 * @param <S> The type of the handled annotation.
 * @return A proxy for the annotation type and values.
 * @throws ClassNotFoundException If the class of an instance that is contained by this annotation could not be found.
 */
@SuppressWarnings("unchecked")
public static <S extends Annotation> S of(ClassLoader classLoader,
    Class<S> annotationType,
    Map<String, ? extends AnnotationValue<?, ?>> values) throws ClassNotFoundException {
  LinkedHashMap<Method, AnnotationValue.Loaded<?>> loadedValues = new LinkedHashMap<Method, AnnotationValue.Loaded<?>>();
  for (Method method : annotationType.getDeclaredMethods()) {
    AnnotationValue<?, ?> annotationValue = values.get(method.getName());
    loadedValues.put(method, (annotationValue == null
        ? defaultValueOf(method)
        : annotationValue).load(classLoader));
  }
  return (S) Proxy.newProxyInstance(classLoader, new Class<?>[]{annotationType}, new AnnotationInvocationHandler<S>(annotationType, loadedValues));
}
Code example source: apache/hive
@Override
public void replaceRoots(Map<Operator<?>, Operator<?>> replacementMap) {
  LinkedHashMap<String, Operator<?>> newAliasToWork = new LinkedHashMap<String, Operator<?>>();
  for (Map.Entry<String, Operator<?>> entry : aliasToWork.entrySet()) {
    newAliasToWork.put(entry.getKey(), replacementMap.get(entry.getValue()));
  }
  setAliasToWork(newAliasToWork);
}
Code example source: pentaho/pentaho-kettle
// Note: this excerpt is truncated in the source; "..." marks elided lines.
coll.add( i );
metaNameToIndex.put( metaFieldNames[i], coll );
// ... (lines elided in the source excerpt)
List<Integer> columnIndexes = metaNameToIndex.get( actualFieldNames[ i ] );
if ( columnIndexes == null || columnIndexes.isEmpty() ) {
  unmatchedMetaFields.add( i );
  actualToMetaFieldMapping[ i ] = FIELD_DOES_NOT_EXIST;
  continue;
}
// ... (lines elided in the source excerpt)
private static void addToHistory(LinkedHashMap<String, Properties> propHistory, Map<String, Object> flatMap) {
String id = String.valueOf(flatMap.get("pipelineid"));
String key = (String) flatMap.get("key");
String value = (String) flatMap.get("value");
if (!propHistory.containsKey(id)) {
propHistory.put(id, new Properties());
}
propHistory.get(id).add(new Property(key, value));
}
Code example source: apache/hbase
/**
 * Prints a summary of important details about the chore. Used for debugging purposes.
 */
private void printChoreDetails(final String header, ScheduledChore chore) {
  LinkedHashMap<String, String> output = new LinkedHashMap<>();
  output.put(header, "");
  output.put("Chore name: ", chore.getName());
  output.put("Chore period: ", Integer.toString(chore.getPeriod()));
  output.put("Chore timeBetweenRuns: ", Long.toString(chore.getTimeBetweenRuns()));
  for (Entry<String, String> entry : output.entrySet()) {
    if (LOG.isTraceEnabled()) LOG.trace(entry.getKey() + entry.getValue());
  }
}
Code example source: xuxueli/xxl-job
public String route(int jobId, List<String> addressList) {
  // cache clear
  if (System.currentTimeMillis() > CACHE_VALID_TIME) {
    jobLRUMap.clear();
    CACHE_VALID_TIME = System.currentTimeMillis() + 1000*60*60*24;
  }
  // init lru
  LinkedHashMap<String, String> lruItem = jobLRUMap.get(jobId);
  if (lruItem == null) {
    /**
     * LinkedHashMap
     * a. accessOrder: true = order entries by access (re-ordered on get/put); false = order by insertion;
     * b. removeEldestEntry: called whenever an entry is added; if it returns true, the eldest entry is removed.
     *    Wrap a LinkedHashMap and override this method (for example, define a maximum capacity and return true
     *    once it is exceeded) to implement a fixed-size LRU cache.
     */
    lruItem = new LinkedHashMap<String, String>(16, 0.75f, true);
    jobLRUMap.putIfAbsent(jobId, lruItem);
  }
  // put
  for (String address : addressList) {
    if (!lruItem.containsKey(address)) {
      lruItem.put(address, address);
    }
  }
  // load
  String eldestKey = lruItem.entrySet().iterator().next().getKey();
  String eldestValue = lruItem.get(eldestKey);
  return eldestValue;
}
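The comment block in the snippet above explains how accessOrder and removeEldestEntry turn a LinkedHashMap into an LRU cache. A minimal, self-contained sketch of that idea (the class name and the capacity of 3 are illustrative only):

import java.util.LinkedHashMap;
import java.util.Map;

public class FixedSizeLruCache<K, V> extends LinkedHashMap<K, V> {
  private final int maxEntries;

  public FixedSizeLruCache(int maxEntries) {
    // accessOrder = true: iteration order becomes least-recently-accessed first.
    super(16, 0.75f, true);
    this.maxEntries = maxEntries;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    // Called after every put()/putAll(); returning true evicts the eldest entry.
    return size() > maxEntries;
  }

  public static void main(String[] args) {
    FixedSizeLruCache<String, String> cache = new FixedSizeLruCache<>(3);
    cache.put("a", "1");
    cache.put("b", "2");
    cache.put("c", "3");
    cache.get("a");        // touch "a" so "b" becomes the eldest entry
    cache.put("d", "4");   // evicts "b"
    System.out.println(cache.keySet()); // [c, a, d]
  }
}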
Code example source: apache/kafka
/**
 * Creates a map from a sequence of entries.
 *
 * @param entries The entries to map
 * @param <K> The key type
 * @param <V> The value type
 * @return A map
 */
@SafeVarargs
public static <K, V> Map<K, V> mkMap(final Map.Entry<K, V>... entries) {
  final LinkedHashMap<K, V> result = new LinkedHashMap<>();
  for (final Map.Entry<K, V> entry : entries) {
    result.put(entry.getKey(), entry.getValue());
  }
  return result;
}
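A short usage sketch for the helper above; it copies mkMap so it compiles on its own and uses AbstractMap.SimpleEntry just to build the entries. Because the backing map is a LinkedHashMap, the result keeps the argument order:

import java.util.AbstractMap;
import java.util.LinkedHashMap;
import java.util.Map;

public class MkMapUsage {
  // Copy of the mkMap helper above so this sketch is self-contained.
  @SafeVarargs
  public static <K, V> Map<K, V> mkMap(final Map.Entry<K, V>... entries) {
    final LinkedHashMap<K, V> result = new LinkedHashMap<>();
    for (final Map.Entry<K, V> entry : entries) {
      result.put(entry.getKey(), entry.getValue());
    }
    return result;
  }

  public static void main(String[] args) {
    // Entries go into the LinkedHashMap in argument order, so "a" stays before "b".
    Map<String, Integer> m = mkMap(
        new AbstractMap.SimpleEntry<>("a", 1),
        new AbstractMap.SimpleEntry<>("b", 2));
    System.out.println(m); // {a=1, b=2}
  }
}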
Code example source: apache/kafka
private void update(Map<TopicPartition, S> partitionToState) {
  LinkedHashMap<String, List<TopicPartition>> topicToPartitions = new LinkedHashMap<>();
  for (TopicPartition tp : partitionToState.keySet()) {
    List<TopicPartition> partitions = topicToPartitions.computeIfAbsent(tp.topic(), k -> new ArrayList<>());
    partitions.add(tp);
  }
  for (Map.Entry<String, List<TopicPartition>> entry : topicToPartitions.entrySet()) {
    for (TopicPartition tp : entry.getValue()) {
      S state = partitionToState.get(tp);
      map.put(tp, state);
    }
  }
}
Code example source: apache/ignite
/**
 * @param key Key.
 * @param node Mapped node.
 * @param mappings Full node mapping.
 */
private void addNodeMapping(
    KeyCacheObject key,
    ClusterNode node,
    Map<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>> mappings
) {
  LinkedHashMap<KeyCacheObject, Boolean> old = mappings.get(node);
  if (old == null)
    mappings.put(node, old = new LinkedHashMap<>(3, 1f));
  old.put(key, false);
}
Code example source: jeasonlzy/okhttp-OkGo
public void put(String key, File file, String fileName, MediaType contentType) {
  if (key != null) {
    List<FileWrapper> fileWrappers = fileParamsMap.get(key);
    if (fileWrappers == null) {
      fileWrappers = new ArrayList<>();
      fileParamsMap.put(key, fileWrappers);
    }
    fileWrappers.add(new FileWrapper(file, fileName, contentType));
  }
}