This article collects code examples for the java.util.LinkedHashSet.iterator() method in Java and shows how LinkedHashSet.iterator() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar platforms, and were extracted from a selection of well-known projects, so they make useful references. The details of the LinkedHashSet.iterator() method are as follows:
Package: java.util.LinkedHashSet
Class: LinkedHashSet
Method: iterator
Description: none available
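Before turning to the project examples, here is a minimal, self-contained sketch (the class name and element values are illustrative only) of the two properties the examples below rely on: the iterator returned by LinkedHashSet.iterator() walks the elements in insertion order, and Iterator.remove() can safely delete the current element during iteration.

import java.util.Iterator;
import java.util.LinkedHashSet;

public class LinkedHashSetIteratorDemo {
    public static void main(String[] args) {
        // LinkedHashSet preserves insertion order, so its iterator
        // returns elements in the order they were added.
        LinkedHashSet<String> set = new LinkedHashSet<>();
        set.add("first");
        set.add("second");
        set.add("third");

        Iterator<String> it = set.iterator();
        while (it.hasNext()) {
            System.out.println(it.next()); // prints first, second, third
        }

        // The iterator also supports safe removal while iterating, which is
        // the basis of the LRU/eviction patterns in the examples below:
        // advance to the oldest element, then remove it.
        Iterator<String> evictor = set.iterator();
        evictor.next();
        evictor.remove(); // removes "first", the oldest entry
        System.out.println(set); // [second, third]
    }
}

Most of the snippets that follow use exactly this advance-then-remove step to evict the oldest entry from a LinkedHashSet used as an insertion-ordered cache.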
Code example source: origin: pockethub/PocketHub
private void trim() {
Iterator<Long> iterator = ids.iterator();
while (iterator.hasNext() && ids.size() > MAX_SIZE) {
iterator.next();
iterator.remove();
}
}
Code example source: origin: apache/incubator-gobblin
private boolean addRecordAndEvictIfNecessary(GlobalMetadata recordToAdd) {
// First remove the element from the HashSet if it's already in there to reset
// the 'LRU' piece; then add it back in
boolean isNew = !metadataRecords.remove(recordToAdd);
metadataRecords.add(recordToAdd);
// Now remove the first element (which should be the oldest) from the list
// if we've exceeded the cache size
if (cacheSize != -1 && metadataRecords.size() > cacheSize) {
Iterator<GlobalMetadata> recordIt = metadataRecords.iterator();
recordIt.next(); // Remove the oldest element - don't care what it is
recordIt.remove();
}
return isNew;
}
Code example source: origin: loklak/loklak_server
private void checkSize() {
if (this.set.size() >= this.maxSize) {
Iterator<K> i = this.set.iterator();
// remove through the iterator; removing from the set directly while iterating risks a ConcurrentModificationException
while (i.hasNext() && this.set.size() > this.maxSize) { i.next(); i.remove(); }
}
}
Code example source: origin: apache/geode
/**
* It is different from its super implementation only in not invoking
* incrementTakeSidePutPermits(). Fix for #41521.
*/
@Override
protected Long getAndRemoveNextAvailableID() throws InterruptedException {
Long next = null;
acquireWriteLock();
try {
if (this.idsAvailable.isEmpty()) {
if (waitForData()) {
Iterator itr = this.idsAvailable.iterator();
next = (Long) itr.next();
itr.remove();
}
} else {
Iterator itr = this.idsAvailable.iterator();
next = (Long) itr.next();
itr.remove();
}
} finally {
releaseWriteLock();
}
return next;
}
Code example source: origin: hibernate/hibernate-orm
@Override
public Enumeration<URL> getResources(String name) throws IOException {
final LinkedHashSet<URL> resourceUrls = new LinkedHashSet<URL>();
final Iterator<ClassLoader> clIterator = newClassLoaderIterator();
while ( clIterator.hasNext() ) {
final ClassLoader classLoader = clIterator.next();
final Enumeration<URL> urls = classLoader.getResources( name );
while ( urls.hasMoreElements() ) {
resourceUrls.add( urls.nextElement() );
}
}
return new Enumeration<URL>() {
final Iterator<URL> resourceUrlIterator = resourceUrls.iterator();
@Override
public boolean hasMoreElements() {
return resourceUrlIterator.hasNext();
}
@Override
public URL nextElement() {
return resourceUrlIterator.next();
}
};
}
Code example source: origin: apache/geode
/**
* Returns the next position counter present in idsAvailable set. This method is invoked by the
* peek function. In case of BlockingQueue, this method waits till a valid ID is available.
*
* @return valid Long position or null depending upon the nature of the queue
* @throws TimeoutException if operation is interrupted (unfortunately)
*/
private Long getNextAvailableID() throws InterruptedException {
Long next = null;
acquireReadLock();
try {
if (this.idsAvailable.isEmpty()) {
// Asif:Wait in case it is a blocking thread
if (waitForData()) {
next = (Long) this.idsAvailable.iterator().next();
}
} else {
next = (Long) this.idsAvailable.iterator().next();
}
} finally {
releaseReadLock();
}
return next;
}
Code example source: origin: redisson/redisson
/**
* {@inheritDoc}
*/
public MethodGraph.Node asNode(Merger merger) {
Iterator<MethodDescription> iterator = methodDescriptions.iterator();
MethodDescription methodDescription = iterator.next();
while (iterator.hasNext()) {
methodDescription = merger.merge(methodDescription, iterator.next());
}
return new Node(key.detach(methodDescription.asTypeToken()), methodDescription, visibility);
}
Code example source: origin: apache/geode
Iterator itr = this.idsAvailable.iterator();
int currSize = this.idsAvailable.size();
int limit = currSize >= batchSize ? batchSize : currSize;
Long counter = (Long) itr.next();
Object eventOrWrapper = this.region.get(counter);
Object event;
Code example source: origin: apache/activemq
private void doEviction() {
int currentlyDeleted = 0;
float target = maxCacheSize * evictionFactor;
while (currentlyDeleted < target) {
LinkedHashSet<CacheNode<Key, Value>> nodes = frequencyList[lowestFrequency];
if (nodes.isEmpty()) {
throw new IllegalStateException("Lowest frequency constraint violated!");
} else {
Iterator<CacheNode<Key, Value>> it = nodes.iterator();
while (it.hasNext() && currentlyDeleted++ < target) {
CacheNode<Key, Value> node = it.next();
it.remove();
cache.remove(node.k);
}
if (!it.hasNext()) {
findNextLowestFrequency();
}
}
}
}
Code example source: origin: elastic/elasticsearch-hadoop
private static FieldType resolveTypeConflict(String fullName, FieldType existing, FieldType incoming) {
// Prefer to upcast the incoming field to the existing first
LinkedHashSet<FieldType> incomingSuperTypes = incoming.getCastingTypes();
if (incomingSuperTypes.contains(existing)) {
// Incoming can be cast to existing.
return existing;
}
// See if existing can be upcast to the incoming field's type next
LinkedHashSet<FieldType> existingSuperTypes = existing.getCastingTypes();
if (existingSuperTypes.contains(incoming)) {
// Existing can be cast to incoming
return incoming;
}
// Finally, Try to pick the lowest common super type for both fields if it exists
if (incomingSuperTypes.size() > 0 && existingSuperTypes.size() > 0) {
LinkedHashSet<FieldType> combined = new LinkedHashSet<FieldType>(incomingSuperTypes);
combined.retainAll(existingSuperTypes);
if (combined.size() > 0) {
return combined.iterator().next();
}
}
// If none of the above options succeed, the fields are conflicting
throw new EsHadoopIllegalArgumentException("Incompatible types found in multi-mapping: " +
"Field ["+fullName+"] has conflicting types of ["+existing+"] and ["+
incoming+"].");
}
Code example source: origin: wildfly/wildfly
/**
* Remove all values of the given attribute name.
*
* @param name the attribute name (must not be {@code null})
* @return the removed attribute values, or {@code null} if the attribute was not present in the builder
*/
public List<AttributeValue> removeAttribute(final String name) {
Assert.checkNotNullParam("name", name);
final LinkedHashSet<AttributeValue> removed = attributes.remove(name);
if (removed == null) {
return Collections.emptyList();
}
final Iterator<AttributeValue> iterator = removed.iterator();
if (! iterator.hasNext()) {
return Collections.emptyList();
}
final AttributeValue first = iterator.next();
if (! iterator.hasNext()) {
return Collections.singletonList(first);
}
final ArrayList<AttributeValue> list = new ArrayList<>(removed.size());
list.add(first);
do {
list.add(iterator.next());
} while (iterator.hasNext());
return list;
}
Code example source: origin: apache/geode
/**
* This method is invoked by the take function . For non blocking queue it returns null or a valid
* long position while for blocking queue it waits for data in the queue or throws Exception if
* the thread encounters exception while waiting.
*/
protected Long getAndRemoveNextAvailableID() throws InterruptedException {
Long next = null;
acquireWriteLock();
try {
if (this.idsAvailable.isEmpty()) {
if (waitForData()) {
Iterator itr = this.idsAvailable.iterator();
next = (Long) itr.next();
itr.remove();
this.incrementTakeSidePutPermits();
}
} else {
Iterator itr = this.idsAvailable.iterator();
next = (Long) itr.next();
itr.remove();
this.incrementTakeSidePutPermits();
}
} finally {
releaseWriteLock();
}
return next;
}
Code example source: origin: wildfly/wildfly
Supplier<String> doCreateSupplier(final LinkedHashSet<String> set, final SSLSession sslSession) {
final Supplier<String> prevSupplier = prev.doCreateSupplier(set, sslSession);
final Iterator<String> iterator = set.iterator();
return () -> {
String name = prevSupplier.get();
if (name != null) {
return name;
}
while (iterator.hasNext()) {
name = iterator.next();
if (predicate.test(name, sslSession)) try {
return name;
} finally {
iterator.remove();
}
}
return null;
};
}
Code example source: origin: redisson/redisson
/**
* {@inheritDoc}
*/
public Entry<U> extendBy(MethodDescription methodDescription, Harmonizer<U> harmonizer) {
Harmonized<U> key = this.key.extend(methodDescription.asDefined(), harmonizer);
LinkedHashSet<MethodDescription> methodDescriptions = new LinkedHashSet<MethodDescription>();
TypeDescription declaringType = methodDescription.getDeclaringType().asErasure();
boolean bridge = methodDescription.isBridge();
Visibility visibility = this.visibility;
for (MethodDescription extendedMethod : this.methodDescriptions) {
if (extendedMethod.getDeclaringType().asErasure().equals(declaringType)) {
if (extendedMethod.isBridge() ^ bridge) {
methodDescriptions.add(bridge ? extendedMethod : methodDescription);
} else {
methodDescriptions.add(methodDescription);
methodDescriptions.add(extendedMethod);
}
}
visibility = visibility.expandTo(extendedMethod.getVisibility());
}
if (methodDescriptions.isEmpty()) {
return new Resolved<U>(key, methodDescription, visibility, bridge);
} else if (methodDescriptions.size() == 1) {
return new Resolved<U>(key, methodDescriptions.iterator().next(), visibility, Resolved.NOT_MADE_VISIBLE);
} else {
return new Ambiguous<U>(key, methodDescriptions, visibility);
}
}
Code example source: origin: btraceio/btrace
@Override
protected String getCommonSuperClass(String type1, String type2) {
// Using type closures resolved via the associate classloader
LinkedHashSet<String> type1Closure = new LinkedHashSet<>();
LinkedHashSet<String> type2Closure = new LinkedHashSet<>();
InstrumentUtils.collectHierarchyClosure(targetCL, type1, type1Closure, true);
InstrumentUtils.collectHierarchyClosure(targetCL, type2, type2Closure, true);
// basically, do intersection
type1Closure.retainAll(type2Closure);
// if the intersection is not empty the first element is the closest common ancestor
Iterator<String> iter = type1Closure.iterator();
if (iter.hasNext()) {
String common = iter.next();
return common;
}
return Constants.OBJECT_INTERNAL;
}
Code example source: origin: redisson/redisson
Visibility visibility = left.getVisibility().expandTo(right.getVisibility());
return combined.size() == 1
? new Entry.Resolved<W>(key, combined.iterator().next(), visibility, Entry.Resolved.NOT_MADE_VISIBLE)
: new Entry.Ambiguous<W>(key, combined, visibility);
Code example source: origin: geoserver/geoserver
for (Iterator itr = kvp.entrySet().iterator(); itr.hasNext(); ) {
Map.Entry entry = (Map.Entry) itr.next();
String key = (String) entry.getKey();
Object value = null;
value = null;
} else if (normalized.size() == 1) {
value = normalized.iterator().next();
} else {
value = (String[]) normalized.toArray(new String[normalized.size()]);
Code example source: origin: requery/requery
this.keyAttributes = Collections.unmodifiableSet(keyAttributes);
if (keyAttributes.size() == 1) {
keyAttribute = keyAttributes.iterator().next();
Code example source: origin: apache/drill
Iterator<MaterializedField> thisIter = children.iterator();
Iterator<MaterializedField> otherIter = other.children.iterator();
while (thisIter.hasNext()) {
MaterializedField thisChild = thisIter.next();
MaterializedField otherChild = otherIter.next();
if (! thisChild.isEquivalent(otherChild)) {
return false;
Code example source: origin: igniterealtime/Smack
/**
* When inserting new network addresses to the proxy the order should remain in the order they
* were inserted.
*/
@Test
public void shouldPreserveAddressOrderOnInsertions() {
Socks5Proxy proxy = Socks5Proxy.getSocks5Proxy();
LinkedHashSet<String> addresses = new LinkedHashSet<>(proxy.getLocalAddresses());
for (int i = 1 ; i <= 3; i++) {
addresses.add(Integer.toString(i));
}
for (String address : addresses) {
proxy.addLocalAddress(address);
}
List<String> localAddresses = proxy.getLocalAddresses();
Iterator<String> iterator = addresses.iterator();
for (int i = 0; i < addresses.size(); i++) {
assertEquals(iterator.next(), localAddresses.get(i));
}
}