Usage of the org.apache.hadoop.hbase.regionserver.HStore.getHRegion() method, with code examples

This article collects Java code examples of the org.apache.hadoop.hbase.regionserver.HStore.getHRegion() method, showing how it is used in practice. The examples are extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of HStore.getHRegion() follow:
Package: org.apache.hadoop.hbase.regionserver
Class: HStore
Method: getHRegion

About HStore.getHRegion

The source page provides no description. Judging from the examples below, getHRegion() returns the HRegion instance that the store belongs to, giving callers access to region-level state such as MVCC read points, the RegionInfo, region services, and the WAL.
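
As a minimal sketch of the pattern the examples share (the logRegionOfStore helper is hypothetical, and store is assumed to be an already opened HStore; every call it makes also appears in the examples on this page):

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HStore;

// Hypothetical helper; 'store' is assumed to be an already-opened HStore.
static void logRegionOfStore(HStore store) {
 // Navigate from the store to its enclosing region.
 HRegion region = store.getHRegion();
 // Region identity and region-level MVCC state, as used in the examples below.
 String encodedName = region.getRegionInfo().getEncodedName();
 long smallestReadPoint = region.getSmallestReadPoint();
 System.out.println("store " + store.getColumnFamilyName() + " belongs to region "
   + encodedName + " (smallest read point: " + smallestReadPoint + ")");
}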

Code examples

Code example source: apache/hbase. A helper that reads the smallest MVCC read point of the store's enclosing region:

long getSmallestReadPoint(HStore store) {
  return store.getHRegion().getSmallestReadPoint();
 }
}
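
The smallest read point is the oldest MVCC read point still held by an open scanner on the region; compactions use it to decide which cells can be safely discarded without breaking in-flight readers.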

Code example source: apache/hbase. Memstore creation inside HStore; the source page shows only a fragment, so the enclosing switch over the in-memory compaction policy is restored below for readability. getHRegion() supplies the region-scoped services that are handed to the memstore:

MemStore ms;
switch (inMemoryCompaction) {
 case NONE:
  // No in-memory compaction: a plain DefaultMemStore, wired to the region's services.
  ms = ReflectionUtils.newInstance(DefaultMemStore.class,
    new Object[] { conf, this.comparator,
      this.getHRegion().getRegionServicesForStores()});
  break;
 default:
  // In-memory compaction enabled: instantiate the configured CompactingMemStore subclass.
  Class<? extends CompactingMemStore> clz = conf.getClass(MEMSTORE_CLASS_NAME,
    CompactingMemStore.class, CompactingMemStore.class);
  ms = ReflectionUtils.newInstance(clz, new Object[]{conf, this.comparator, this,
    this.getHRegion().getRegionServicesForStores(), inMemoryCompaction});
}

Code example source: apache/hbase. A test for HBASE-3492, splitting on a column family with no store files:

/**
 * Test for HBASE-3492 - Test split on empty colfam (no store files).
 *
 * @throws IOException When the IO operations fail.
 */
@Test
public void testSplitWithEmptyColFam() throws IOException {
 init(this.name.getMethodName());
 assertFalse(store.getSplitPoint().isPresent());
 store.getHRegion().forceSplit(null);
 assertFalse(store.getSplitPoint().isPresent());
 store.getHRegion().clearSplit();
}
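
forceSplit(null) requests a split without an explicit split point; since the column family holds no store files, getSplitPoint() remains empty both before and after the request, and clearSplit() then resets the pending split state.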

Code example source: apache/hbase. The StoreScanner constructor used for compaction scans; the MVCC read point is fetched from the enclosing region via getHRegion():

private StoreScanner(HStore store, ScanInfo scanInfo, List<? extends KeyValueScanner> scanners,
  ScanType scanType, long smallestReadPoint, long earliestPutTs, byte[] dropDeletesFromRow,
  byte[] dropDeletesToRow) throws IOException {
 this(store, SCAN_FOR_COMPACTION, scanInfo, 0,
   store.getHRegion().getReadPoint(IsolationLevel.READ_COMMITTED), false, scanType);
 assert scanType != ScanType.USER_SCAN;
 matcher =
   CompactionScanQueryMatcher.create(scanInfo, scanType, smallestReadPoint, earliestPutTs,
    oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
 // Filter the list of scanners using Bloom filters, time range, TTL, etc.
 scanners = selectScannersFrom(store, scanners);
 // Seek all scanners to the initial key
 seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
 addCurrentScanners(scanners);
 // Combine all seeked scanners with a heap
 resetKVHeap(scanners, comparator);
}

Code example source: apache/hbase. Dispatching an asynchronous in-memory (memstore) compaction; the region's encoded name, reached through getHRegion(), is used for trace logging:

/**----------------------------------------------------------------------
 * The request to dispatch the compaction asynchronous task.
 * The method returns true if compaction was successfully dispatched, or false if there
 * is already an ongoing compaction or no segments to compact.
 */
public boolean start() throws IOException {
 if (!compactingMemStore.hasImmutableSegments()) { // no compaction on empty pipeline
  return false;
 }
 // get a snapshot of the list of the segments from the pipeline,
 // this local copy of the list is marked with specific version
 versionedList = compactingMemStore.getImmutableSegments();
 LOG.trace("Speculative compaction starting on {}/{}",
   compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(),
   compactingMemStore.getStore().getColumnFamilyName());
 HStore store = compactingMemStore.getStore();
 RegionCoprocessorHost cpHost = store.getCoprocessorHost();
 if (cpHost != null) {
  cpHost.preMemStoreCompaction(store);
 }
 try {
  doCompaction();
 } finally {
  if (cpHost != null) {
   cpHost.postMemStoreCompaction(store);
  }
 }
 return true;
}
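
Note the try/finally around doCompaction(): when a RegionCoprocessorHost is present, preMemStoreCompaction() and postMemStoreCompaction() always run as a pair, even if the compaction itself throws.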

Code example source: apache/hbase. A test that injects filesystem faults during flush and verifies that a failed flush leaves no store files; the WAL is reached through getHRegion() for cleanup:

@Override
 public Object run() throws Exception {
  // Make sure it worked (above is sensitive to caching details in hadoop core)
  FileSystem fs = FileSystem.get(conf);
  assertEquals(FaultyFileSystem.class, fs.getClass());
  // Initialize region
  init(name.getMethodName(), conf);
  LOG.info("Adding some data");
  store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null);
  LOG.info("Before flush, we should have no files");
  Collection<StoreFileInfo> files =
   store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  //flush
  try {
   LOG.info("Flushing");
   flush(1);
   fail("Didn't bubble up IOE!");
  } catch (IOException ioe) {
   assertTrue(ioe.getMessage().contains("Fault injected"));
  }
  LOG.info("After failed flush, we should still have no files!");
  files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  store.getHRegion().getWAL().close();
  return null;
 }
});

Code example source: apache/hbase. Reopening a store: the region of the old store, obtained via getHRegion(), is passed to the new HStore's constructor:

this.store.close();
this.store = new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c);
assertEquals(2, this.store.getStorefilesCount());
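
Because the new HStore is constructed against the same region and column family descriptor, only the configuration c changes, and the assertion confirms that the existing store files are picked up again after the reopen.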

Code example source: apache/hbase. Enabling parallel seek when the region is hosted by a region server and the feature is enabled in the scan configuration:

RegionServerServices rsService = store.getHRegion().getRegionServerServices();
if (rsService != null && scanInfo.isParallelSeekEnabled()) {
 this.parallelSeekEnabled = true;
 // ... (fragment truncated on the source page)
}

Code example source: harbby/presto-connectors. An older StoreScanner variant: the Store interface is cast to HStore to reach the region, and the read-point accessor is spelled getReadpoint:

private StoreScanner(Store store, ScanInfo scanInfo, Scan scan,
  List<? extends KeyValueScanner> scanners, ScanType scanType, long smallestReadPoint,
  long earliestPutTs, byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
 this(store, scan, scanInfo, null,
  ((HStore)store).getHRegion().getReadpoint(IsolationLevel.READ_COMMITTED), false);
 if (dropDeletesFromRow == null) {
  matcher = new ScanQueryMatcher(scan, scanInfo, null, scanType, smallestReadPoint,
    earliestPutTs, oldestUnexpiredTS, now, store.getCoprocessorHost());
 } else {
  matcher = new ScanQueryMatcher(scan, scanInfo, null, smallestReadPoint, earliestPutTs,
    oldestUnexpiredTS, now, dropDeletesFromRow, dropDeletesToRow, store.getCoprocessorHost());
 }
 // Filter the list of scanners using Bloom filters, time range, TTL, etc.
 scanners = selectScannersFrom(scanners);
 // Seek all scanners to the initial key
 seekScanners(scanners, matcher.getStartKey(), false, parallelSeekEnabled);
 // Combine all seeked scanners with a heap
 resetKVHeap(scanners, store.getComparator());
}
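
Compared with the current apache/hbase constructor shown earlier, this older fork works against the Store interface, so it casts to HStore before calling getHRegion(), and the read-point accessor uses the older getReadpoint spelling rather than getReadPoint as in the apache/hbase example above.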

Code example source: harbby/presto-connectors. The same parallel-seek check, again via a cast to HStore:

RegionServerServices rsService = ((HStore)store).getHRegion().getRegionServerServices();
if (rsService != null && scanInfo.isParallelSeekEnabled()) {
 this.parallelSeekEnabled = true;
 // ... (fragment truncated on the source page)
}
