Usage of org.apache.hadoop.hbase.regionserver.HStore.getRegionInfo() with code examples


This article collects Java code examples of the org.apache.hadoop.hbase.regionserver.HStore.getRegionInfo() method and shows how it is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they make useful reference material. Details of HStore.getRegionInfo() are as follows:
Package path: org.apache.hadoop.hbase.regionserver.HStore
Class name: HStore
Method name: getRegionInfo

About HStore.getRegionInfo

The upstream Javadoc for this method is empty. As the examples below show, it returns the RegionInfo of the region this store belongs to; callers use it to read the table name, encoded region name, replica id, and the region's start and end row keys.
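
Before the collected snippets, here is a minimal self-contained sketch of the most common pattern in them: stubbing an HStore so that getRegionInfo() returns a known RegionInfo. It assumes the HBase 2.x client API and Mockito on the classpath; the class name GetRegionInfoSketch and the table name "demo" are hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.HStore;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class GetRegionInfoSketch {
 public static void main(String[] args) {
  // Build a RegionInfo for a hypothetical table "demo".
  RegionInfo info = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();

  // Stub an HStore to return it, the same pattern the unit tests below use.
  HStore store = mock(HStore.class);
  when(store.getRegionInfo()).thenReturn(info);

  // The typical reads performed by the examples in this article:
  System.out.println(store.getRegionInfo().getTable());              // table name
  System.out.println(store.getRegionInfo().getEncodedName());        // encoded region name
  System.out.println(store.getRegionInfo().getRegionNameAsString()); // full region name
 }
}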

Code examples

Code example origin: apache/hbase

@Override
public TableName getTableName() {
 return this.getRegionInfo().getTable();
}

Code example origin: apache/hbase

@Override
public boolean isPrimaryReplicaStore() {
 return getRegionInfo().getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID;
}

Code example origin: apache/hbase

/**
 * Adds a value to the memstore
 */
public void add(final Cell cell, MemStoreSizing memstoreSizing) {
 lock.readLock().lock();
 try {
  if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
   LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
    this.getTableName(), this.getRegionInfo().getEncodedName(), this.getColumnFamilyName());
  }
  this.memstore.add(cell, memstoreSizing);
 } finally {
  lock.readLock().unlock();
  currentParallelPutCount.decrementAndGet();
 }
}

Code example origin: apache/hbase

/**
 * Adds the specified cells to the memstore
 */
public void add(final Iterable<Cell> cells, MemStoreSizing memstoreSizing) {
 lock.readLock().lock();
 try {
  if (this.currentParallelPutCount.getAndIncrement() > this.parallelPutCountPrintThreshold) {
   LOG.trace("tableName={}, encodedName={}, columnFamilyName={} is too busy!",
    this.getTableName(), this.getRegionInfo().getEncodedName(), this.getColumnFamilyName());
  }
  memstore.add(cells, memstoreSizing);
 } finally {
  lock.readLock().unlock();
  currentParallelPutCount.decrementAndGet();
 }
}

Code example origin: apache/hbase

/**
 * Determines if Store should be split.
 */
public Optional<byte[]> getSplitPoint() {
 this.lock.readLock().lock();
 try {
  // Should already be enforced by the split policy!
  assert !this.getRegionInfo().isMetaRegion();
  // Not split-able if we find a reference store file present in the store.
  if (hasReferences()) {
   LOG.trace("Not splittable; has references: {}", this);
   return Optional.empty();
  }
  return this.storeEngine.getStoreFileManager().getSplitPoint();
 } catch(IOException e) {
  LOG.warn("Failed getting store size for {}", this, e);
 } finally {
  this.lock.readLock().unlock();
 }
 return Optional.empty();
}
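
A hedged sketch of how a caller might consume the Optional returned above; the variable names here are illustrative, not from the HBase source. An empty result means "do not split": the store belongs to a meta region, still holds reference files, or hit an IOException.

Optional<byte[]> splitPoint = store.getSplitPoint();
if (splitPoint.isPresent()) {
 // The row key at which the region could be split in two.
 byte[] midKey = splitPoint.get();
 LOG.debug("Store {} splittable at {}", store, Bytes.toStringBinary(midKey));
} else {
 LOG.debug("Store {} is not splittable", store);
}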

Code example origin: apache/hbase

/**
 * Generates a name for throttling, to prevent name conflicts when multiple IO operations
 * run in parallel on the same store.
 * @param store the Store instance on which the IO operation is happening
 * @param opName name of the IO operation, e.g. "flush", "compaction", etc.
 * @return the name to use for throttling
 */
public static String getNameForThrottling(HStore store, String opName) {
 int counter;
 // Atomically bump a shared counter, wrapping back to 0 at Integer.MAX_VALUE.
 for (;;) {
  counter = NAME_COUNTER.get();
  int next = counter == Integer.MAX_VALUE ? 0 : counter + 1;
  if (NAME_COUNTER.compareAndSet(counter, next)) {
   break;
  }
 }
 return store.getRegionInfo().getEncodedName() + NAME_DELIMITER +
   store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName +
   NAME_DELIMITER + counter;
}
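
For reference, the flush example near the end of this article invokes this helper; a sketch of the call and the shape of the result. The compare-and-set loop above means concurrent callers always get distinct counters without locking.

// "store" is an HStore instance; "flush" is the operation name.
String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
// flushName = encodedRegionName + NAME_DELIMITER + columnFamily + NAME_DELIMITER + "flush" + NAME_DELIMITER + counter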

Code example origin: apache/hbase

"Completed" + (cr.isMajor() ? " major" : "") + " compaction of "
 + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in "
 + this + " of " + this.getRegionInfo().getShortNameToLog() + " into ");
if (sfs.isEmpty()) {
 message.append("none, ");

Code example origin: apache/hbase

@VisibleForTesting
void replaceStoreFiles(Collection<HStoreFile> compactedFiles, Collection<HStoreFile> result)
  throws IOException {
 this.lock.writeLock().lock();
 try {
  this.storeEngine.getStoreFileManager().addCompactionResults(compactedFiles, result);
  synchronized (filesCompacting) {
   filesCompacting.removeAll(compactedFiles);
  }
  // These may be null when the RS is shutting down. The space quota Chores will fix the Region
  // sizes later so it's not super-critical if we miss these.
  RegionServerServices rsServices = region.getRegionServerServices();
  if (rsServices != null && rsServices.getRegionServerSpaceQuotaManager() != null) {
   updateSpaceQuotaAfterFileReplacement(
     rsServices.getRegionServerSpaceQuotaManager().getRegionSizeStore(), getRegionInfo(),
     compactedFiles, result);
  }
 } finally {
  this.lock.writeLock().unlock();
 }
}

Code example origin: apache/hbase

LOG.info("Refreshing store files for region " + this.getRegionInfo().getRegionNameAsString()
 + " files to add: " + toBeAddedFiles + " files to remove: " + toBeRemovedFiles);

Code example origin: apache/hbase

completeCompaction(delSfs);
LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
  + this + " of " + this.getRegionInfo().getRegionNameAsString()
  + "; total size for store is "
  + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));

Code example origin: apache/hbase

String regionInfo;
if (this.storeConfigInfo != null && this.storeConfigInfo instanceof HStore) {
 regionInfo = ((HStore) this.storeConfigInfo).getRegionInfo().getRegionNameAsString();
} else {
 regionInfo = this.toString();
}

Code example origin: apache/hbase

private StripeCompactor createCompactor() throws Exception {
 HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
 StoreFileWritersCapture writers = new StoreFileWritersCapture();
 HStore store = mock(HStore.class);
 HRegionInfo info = mock(HRegionInfo.class);
 when(info.getRegionNameAsString()).thenReturn("testRegion");
 when(store.getColumnFamilyDescriptor()).thenReturn(col);
 when(store.getRegionInfo()).thenReturn(info);
 when(
  store.createWriterInTmp(anyLong(), any(), anyBoolean(),
   anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
 Configuration conf = HBaseConfiguration.create();
 conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
 final Scanner scanner = new Scanner();
 return new StripeCompactor(conf, store) {
  @Override
  protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
    List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
    byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
   return scanner;
  }
  @Override
  protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
    List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs) throws IOException {
   return scanner;
  }
 };
}

Code example origin: apache/hbase

.addAndGet(storeFile.getReader().getTotalUncompressedBytes());
if (LOG.isInfoEnabled()) {
 LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
  " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
   ", sequenceid=" + +storeFile.getReader().getSequenceID() + ", filesize="

Code example origin: apache/hbase

boolean control =
  throughputController != null && !store.getRegionInfo().getTable().isSystemTable();
if (control) {
 throughputController.start(flushName);

Code example origin: apache/hbase

try {
 LOG.info("Validating hfile at " + srcPath + " for inclusion in "
   + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString());
 FileSystem srcFs = srcPath.getFileSystem(conf);
 srcFs.access(srcPath, FsAction.READ_WRITE);
 // ... (the full method opens the HFile here and reads its first and last row keys) ...
 LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) +
   " last=" + Bytes.toStringBinary(lastKey));
 LOG.debug("Region bounds: first=" +
   Bytes.toStringBinary(getRegionInfo().getStartKey()) +
   " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));
 if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
  throw new WrongRegionException(
    "Bulk load file " + srcPath.toString() + " does not fit inside region "
    + this.getRegionInfo().getRegionNameAsString());
 }
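
The containsRange check above is what decides whether the bulk-loaded HFile fits this region. A small illustrative call with hypothetical row keys; it returns true only when the whole range lies within the region's boundaries:

// Hypothetical keys; "store" is an HStore instance.
boolean fits = store.getRegionInfo().containsRange(Bytes.toBytes("row-a"), Bytes.toBytes("row-m"));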

Code example origin: apache/hbase

when(store.areWritesEnabled()).thenReturn(true);
when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(),
 anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);

Code example origin: apache/hbase

when(store.areWritesEnabled()).thenReturn(true);
when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(),
 anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);

Code example origin: apache/hbase

request.setDescription(getRegionInfo().getRegionNameAsString(), getColumnFamilyName());
request.setTracker(tracker);
LOG.debug(getRegionInfo().getEncodedName() + " - " + getColumnFamilyName()
  + ": Initiating " + (request.isMajor() ? "major" : "minor") + " compaction"
  + (request.isAllFiles() ? " (all files)" : ""));

Code example origin: apache/hbase

long time = snapshot.getTimeRangeTracker().getMax();
mobFileWriter = mobStore.createWriterInTmp(new Date(time), snapshot.getCellsCount(),
  store.getColumnFamilyDescriptor().getCompressionType(),
  store.getRegionInfo().getStartKey(), false);
boolean hasMore;
String flushName = ThroughputControlUtil.getNameForThrottling(store, "flush");
boolean control = throughputController != null && !store.getRegionInfo().getTable().isSystemTable();
if (control) {
 throughputController.start(flushName);

Code example origin: apache/hbase

if (!finished) {
 throw new InterruptedIOException("Aborting compaction of store " + store + " in region "
   + store.getRegionInfo().getRegionNameAsString() + " because it was interrupted.");
}
