本文整理了Java中java.util.HashMap.entrySet()
方法的一些代码示例,展示了HashMap.entrySet()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度帮忙到你。HashMap.entrySet()
方法的具体详情如下:
包路径:java.util.HashMap
类名称:HashMap
方法名:entrySet
[英]Returns a set containing all of the mappings in this map. Each mapping is an instance of Map.Entry. As the set is backed by this map, changes in one will be reflected in the other.
[中]返回包含此映射中所有映射关系的集合。每个映射关系都是 Map.Entry 的一个实例。由于该集合由此映射支持,其中一个的更改将反映在另一个中。
代码示例来源:origin: stackoverflow.com
// NOTE(review): the original declared the value type as the raw generic
// `HashMap`, which disables type checking and triggers unchecked warnings.
// Parameterize the nested map; the inner value type is not visible in the
// snippet, so Object is used — adjust to the real type at the call site.
HashMap<String, HashMap<String, Object>> selects = new HashMap<String, HashMap<String, Object>>();
for (Map.Entry<String, HashMap<String, Object>> entry : selects.entrySet()) {
    String key = entry.getKey();
    HashMap<String, Object> value = entry.getValue();
    // do what you have to do here
    // In your case, an other loop.
}
代码示例来源:origin: prestodb/presto
@Override
public Annotations asAnnotations() {
    // Exactly two entries: use the specialized two-slot holder.
    if (_annotations.size() == 2) {
        Iterator<Map.Entry<Class<?>,Annotation>> entries = _annotations.entrySet().iterator();
        Map.Entry<Class<?>,Annotation> first = entries.next();
        Map.Entry<Class<?>,Annotation> second = entries.next();
        return new TwoAnnotations(first.getKey(), first.getValue(),
                second.getKey(), second.getValue());
    }
    // Any other size falls back to the general map-backed implementation.
    return new AnnotationMap(_annotations);
}
代码示例来源:origin: apache/kylin
private void deleteByTbl(String table) {
    // Drop the given table from every black list, discarding any list that
    // becomes empty afterwards. values().removeIf removes the whole mapping,
    // matching the original iterator-based it.remove().
    super.values().removeIf(blackList -> {
        blackList.removeTbl(table);
        return blackList.isEmpty();
    });
}
代码示例来源:origin: FudanNLP/fnlp
private void filter(int i) {
    // Rebuild the frequency table, keeping only words whose count strictly
    // exceeds the threshold i.
    HashMap<String, Integer> kept = new HashMap<String, Integer>();
    for (Entry<String, Integer> entry : wordsFreq.entrySet()) {
        Integer frequency = entry.getValue();
        if (frequency > i) {
            kept.put(entry.getKey(), frequency);
        }
    }
    // Empty the old table before swapping in the filtered one, so any alias
    // of the old map observes it cleared (matches the original behavior).
    wordsFreq.clear();
    wordsFreq = kept;
}
代码示例来源:origin: apache/storm
/**
 * Computes the executors whose slot assignment changed between the old
 * assignment and the new one.
 *
 * @param map old executor -> NodeInfo assignment (may be null).
 * @param newExecToNodePort new executor -> [node, port] assignment (may be null).
 * @return every executor belonging to a slot whose executor list differs
 *         between the two assignments.
 */
private static List<List<Long>> changedExecutors(Map<List<Long>, NodeInfo> map, Map<List<Long>,
    List<Object>> newExecToNodePort) {
    HashMap<NodeInfo, List<List<Long>>> tmpSlotAssigned = map == null ? new HashMap<>() : Utils.reverseMap(map);
    HashMap<List<Object>, List<List<Long>>> slotAssigned = new HashMap<>();
    for (Entry<NodeInfo, List<List<Long>>> entry : tmpSlotAssigned.entrySet()) {
        NodeInfo ni = entry.getKey();
        // Normalize the NodeInfo key to a [node, port] list so both maps use
        // the same key shape for the diff below.
        List<Object> key = new ArrayList<>(2);
        key.add(ni.get_node());
        key.add(ni.get_port_iterator().next());
        slotAssigned.put(key, sortedByFirstTask(entry.getValue()));
    }
    HashMap<List<Object>, List<List<Long>>> tmpNewSlotAssigned = newExecToNodePort == null ? new HashMap<>() :
        Utils.reverseMap(newExecToNodePort);
    HashMap<List<Object>, List<List<Long>>> newSlotAssigned = new HashMap<>();
    for (Entry<List<Object>, List<List<Long>>> entry : tmpNewSlotAssigned.entrySet()) {
        newSlotAssigned.put(entry.getKey(), sortedByFirstTask(entry.getValue()));
    }
    // Slots present in only one map, or whose (sorted) executor lists differ,
    // contribute all of their executors to the result.
    Map<List<Object>, List<List<Long>>> diff = mapDiff(slotAssigned, newSlotAssigned);
    List<List<Long>> ret = new ArrayList<>();
    for (List<List<Long>> val : diff.values()) {
        ret.addAll(val);
    }
    return ret;
}

/** Returns a copy of the executor list sorted by each executor's first task id. */
private static List<List<Long>> sortedByFirstTask(List<List<Long>> executors) {
    List<List<Long>> sorted = new ArrayList<>(executors);
    sorted.sort(Comparator.comparing(a -> a.get(0)));
    return sorted;
}
代码示例来源:origin: apache/storm
public void send(Map<Integer, NodeInfo> taskToNode, Map<NodeInfo, IConnection> connections) {
    // Group outgoing messages by destination node, then push each non-empty
    // bundle over that node's connection.
    HashMap<NodeInfo, Stream<TaskMessage>> bundlesByDestination = groupBundleByDestination(taskToNode);
    for (Map.Entry<NodeInfo, Stream<TaskMessage>> bundle : bundlesByDestination.entrySet()) {
        NodeInfo destination = bundle.getKey();
        IConnection connection = connections.get(destination);
        if (connection == null) {
            LOG.warn("Connection not available for hostPort {}", destination);
            continue;
        }
        Iterator<TaskMessage> messages = bundle.getValue().iterator();
        if (messages.hasNext()) {
            connection.send(messages);
        }
    }
}
代码示例来源:origin: apache/hive
@Override
public Iterator<Entry<Long, ByteBuffer>> getFileMetadata(List<Long> fileIds)
throws HiveException {
    // Count the lookup itself, then count each cache hit separately while
    // collecting whatever metadata the mock cache holds for the ids.
    getCount.incrementAndGet();
    HashMap<Long, ByteBuffer> found = new HashMap<>();
    for (Long fileId : fileIds) {
        MockItem item = cache.get(fileId);
        if (item != null) {
            getHitCount.incrementAndGet();
            found.put(fileId, item.data);
        }
    }
    return found.entrySet().iterator();
}
代码示例来源:origin: apache/hive
/**
 * Build the environment used for all exec calls.
 *
 * @return The environment variables.
 */
public Map<String, String> execEnv(Map<String, String> env) {
    HashMap<String, String> result = new HashMap<String, String>();
    // Start from the configured pass-through variables of the current process.
    for (String name : appConf.getStrings(AppConfig.EXEC_ENVS_NAME)) {
        String value = System.getenv(name);
        if (value != null) {
            result.put(name, value);
        }
    }
    // Caller-supplied entries override the inherited ones.
    if (env != null) {
        result.putAll(env);
    }
    // NOTE(review): every value is logged at INFO — verify no secrets can
    // appear in these variables.
    for (Map.Entry<String, String> e : result.entrySet()) {
        LOG.info("Env " + e.getKey() + "=" + e.getValue());
    }
    return result;
}
代码示例来源:origin: apache/geode
/**
 * Renders this portfolio, including every entry of {@code positions}, in a
 * human-readable multi-line form.
 */
public String toString() {
    // Use a StringBuilder instead of repeated String concatenation inside the
    // loop (the original allocated a new String per position).
    StringBuilder out = new StringBuilder("Portfolio [ID=").append(ID)
        .append(" status=").append(status)
        .append(" type=").append(type)
        .append(" pkid=").append(pkid)
        .append("\n ");
    Iterator iter = positions.entrySet().iterator();
    while (iter.hasNext()) {
        Map.Entry entry = (Map.Entry) iter.next();
        out.append(entry.getKey()).append(":").append(entry.getValue()).append(", ");
    }
    out.append("\n P1:").append(position1).append(", P2:").append(position2);
    return out.append("\n]").toString();
}
代码示例来源:origin: apache/flink
/**
 * Adds all entries from the given configuration into this configuration. The keys
 * are prepended with the given prefix.
 *
 * <p>NOTE(review): this takes the lock on {@code this.confData} and then, nested,
 * on {@code other.confData}. Two threads concurrently running
 * {@code a.addAll(b, ..)} and {@code b.addAll(a, ..)} could deadlock — confirm
 * callers never merge configurations in both directions at the same time.
 *
 * @param other
 * The configuration whose entries are added to this configuration.
 * @param prefix
 * The prefix to prepend.
 */
public void addAll(Configuration other, String prefix) {
// One StringBuilder is reused for every key: reset it to the prefix length,
// then append the entry's key to form the prefixed key.
final StringBuilder bld = new StringBuilder();
bld.append(prefix);
final int pl = bld.length();
synchronized (this.confData) {
synchronized (other.confData) {
for (Map.Entry<String, Object> entry : other.confData.entrySet()) {
bld.setLength(pl);
bld.append(entry.getKey());
this.confData.put(bld.toString(), entry.getValue());
}
}
}
}
代码示例来源:origin: redisson/redisson
@Override
public Annotations asAnnotations() {
    if (_annotations.size() != 2) {
        // General case: wrap the backing map directly.
        return new AnnotationMap(_annotations);
    }
    // Exactly two annotations: use the dedicated two-element container.
    Iterator<Map.Entry<Class<?>,Annotation>> it = _annotations.entrySet().iterator();
    Map.Entry<Class<?>,Annotation> a = it.next();
    Map.Entry<Class<?>,Annotation> b = it.next();
    return new TwoAnnotations(a.getKey(), a.getValue(), b.getKey(), b.getValue());
}
代码示例来源:origin: apache/flink
/**
 * Adds all entries in this {@code Configuration} to the given {@link Properties}.
 */
public void addAllToProperties(Properties props) {
    synchronized (this.confData) {
        // Copy every key/value pair; values keep their original Object type.
        this.confData.forEach(props::put);
    }
}
代码示例来源:origin: apache/geode
/**
 * Renders this portfolio, including every entry of {@code positions}, in a
 * human-readable multi-line form.
 */
public String toString() {
    // Use a StringBuilder instead of repeated String concatenation inside the
    // loop (the original allocated a new String per position).
    StringBuilder out = new StringBuilder("PortfolioPdx [ID=").append(ID)
        .append(" status=").append(status)
        .append(" type=").append(type)
        .append(" pkid=").append(pkid)
        .append("\n ");
    Iterator iter = positions.entrySet().iterator();
    while (iter.hasNext()) {
        Map.Entry entry = (Map.Entry) iter.next();
        out.append(entry.getKey()).append(":").append(entry.getValue()).append(", ");
    }
    out.append("\n P1:").append(position1).append(", P2:").append(position2);
    return out.append("\n]").toString();
}
代码示例来源:origin: apache/hive
/**
 * Merge the rhs tables from another join tree.
 *
 * @param src
 * the source join tree
 */
public void mergeRHSSemijoin(QBJoinTree src) {
for (Entry<String, ArrayList<ASTNode>> e : src.rhsSemijoin.entrySet()) {
String key = e.getKey();
ArrayList<ASTNode> value = rhsSemijoin.get(key);
if (value == null) {
// NOTE(review): the source tree's list is stored by reference here, so later
// mutations of src.rhsSemijoin's list would be visible in this tree too —
// confirm the source tree is discarded after merging.
rhsSemijoin.put(key, e.getValue());
} else {
// Key already present: append the source entries to the existing list.
value.addAll(e.getValue());
}
}
}
代码示例来源:origin: thinkaurelius/titan
/**
 * Returns the key with the highest count, or {@code null} if the map is
 * empty. Ties are broken in favor of the entry iterated last.
 */
public K getMaxObject() {
    K result = null;
    // Bug fix: Double.MIN_VALUE is the smallest POSITIVE double, so an entry
    // whose count is zero or negative could never satisfy `>=` and would be
    // skipped even if it were the only entry. NEGATIVE_INFINITY lets any real
    // count win.
    double count = Double.NEGATIVE_INFINITY;
    for (Map.Entry<K,Counter> entry : countMap.entrySet()) {
        if (entry.getValue().count >= count) {
            count = entry.getValue().count;
            result = entry.getKey();
        }
    }
    return result;
}
代码示例来源:origin: apache/geode
/**
 * Renders this portfolio, including every entry of {@code positions}, in a
 * human-readable multi-line form.
 */
public String toString() {
    // Use a StringBuilder instead of repeated String concatenation inside the
    // loop (the original allocated a new String per position).
    StringBuilder out = new StringBuilder("Portfolio [ID=").append(ID)
        .append(" status=").append(status)
        .append(" type=").append(type)
        .append(" pkid=").append(pkid)
        .append("\n ");
    Iterator iter = positions.entrySet().iterator();
    while (iter.hasNext()) {
        Map.Entry entry = (Map.Entry) iter.next();
        out.append(entry.getKey()).append(":").append(entry.getValue()).append(", ");
    }
    out.append("\n P1:").append(position1).append(", P2:").append(position2);
    return out.append("\n]").toString();
}
代码示例来源:origin: oblac/jodd
@Override
public PropsData clone() {
    // Copy the profile map one level deep: every profile gets its own entry
    // map (via the HashMap copy constructor), while the PropsEntry values
    // themselves remain shared with the original.
    final HashMap<String, Map<String, PropsEntry>> clonedProfiles = new HashMap<>();
    for (final Map.Entry<String, Map<String, PropsEntry>> profile : profileProperties.entrySet()) {
        clonedProfiles.put(profile.getKey(), new HashMap<>(profile.getValue()));
    }
    final HashMap<String, PropsEntry> clonedBase = new HashMap<>(baseProperties);
    final PropsData copy = new PropsData(clonedBase, clonedProfiles);
    // Carry the behavioral flags over to the clone.
    copy.appendDuplicateProps = appendDuplicateProps;
    copy.ignoreMissingMacros = ignoreMissingMacros;
    copy.skipEmptyProps = skipEmptyProps;
    return copy;
}
代码示例来源:origin: apache/flink
@Override
public Map<String, String> toMap() {
    synchronized (this.confData){
        // Presize to the current entry count, then stringify every value.
        Map<String, String> stringified = new HashMap<>(this.confData.size());
        confData.forEach((key, value) -> stringified.put(key, value.toString()));
        return stringified;
    }
}
代码示例来源:origin: wildfly/wildfly
public String toString() {
    StringBuilder buf = new StringBuilder("Fragmentation list contains ");
    synchronized (frag_tables) {
        buf.append(frag_tables.size()).append(" tables\n");
        // Enhanced-for over the entry set instead of an explicit iterator.
        for (Entry<Address, FragmentationTable> entry : frag_tables.entrySet()) {
            buf.append(entry.getKey()).append(": ").append(entry.getValue()).append("\n");
        }
    }
    return buf.toString();
}
代码示例来源:origin: apache/zookeeper
/**
 * This method pre-computes the weights of groups to speed up processing
 * when validating a given set. We compute the weights of groups in
 * different places, so we have a separate method.
 */
private void computeGroupWeight(){
    // Sum each server's weight into its group's running total.
    for (Entry<Long, Long> entry : serverGroup.entrySet()) {
        Long sid = entry.getKey();
        Long gid = entry.getValue();
        if (!groupWeight.containsKey(gid)) {
            groupWeight.put(gid, serverWeight.get(sid));
        } else {
            long totalWeight = serverWeight.get(sid) + groupWeight.get(gid);
            groupWeight.put(gid, totalWeight);
        }
    }
    /*
     * Do not consider groups with weight zero
     */
    for (long weight : groupWeight.values()) {
        // Parameterized logging avoids building the message string when
        // debug is disabled (the original concatenated unconditionally).
        LOG.debug("Group weight: {}", weight);
        if (weight == 0L) {
            numGroups--;
            LOG.debug("One zero-weight group: {}, {}", 1, numGroups);
        }
    }
}
内容来源于网络,如有侵权,请联系作者删除!