Usage and code examples of the org.apache.hadoop.hbase.regionserver.HStore.getColumnFamilyDescriptor() method


This article collects code examples of the Java method org.apache.hadoop.hbase.regionserver.HStore.getColumnFamilyDescriptor() and shows how it is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and are intended as a practical reference. Details of the HStore.getColumnFamilyDescriptor() method are as follows:

Package: org.apache.hadoop.hbase.regionserver
Class: HStore
Method: getColumnFamilyDescriptor

About HStore.getColumnFamilyDescriptor

HStore.getColumnFamilyDescriptor() returns the ColumnFamilyDescriptor of the column family backing the store. The examples below use it to read per-family settings such as the family name, compression type, maximum number of versions, and the MOB threshold.
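As a minimal, self-contained sketch of what the method exposes (this is not from the article; it assumes an HBase 2.x dependency plus Mockito on the classpath, and the family name "cf" and its settings are hypothetical), the descriptor can be queried for per-family configuration like this:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.util.Bytes;

public class ColumnFamilyDescriptorExample {
 public static void main(String[] args) {
  // Hypothetical column family "cf" with a few explicit per-family settings.
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
    .newBuilder(Bytes.toBytes("cf"))
    .setMaxVersions(3)
    .setMobThreshold(102400L)
    .build();

  // In server code the HStore comes from the region; here it is mocked,
  // the same way the test examples below do it.
  HStore store = mock(HStore.class);
  when(store.getColumnFamilyDescriptor()).thenReturn(cfd);

  // Typical reads of per-family configuration, mirroring the examples below.
  byte[] familyName = store.getColumnFamilyDescriptor().getName();
  int maxVersions = store.getColumnFamilyDescriptor().getMaxVersions();
  long mobThreshold = store.getColumnFamilyDescriptor().getMobThreshold();

  System.out.println(Bytes.toString(familyName) + " maxVersions=" + maxVersions
    + " mobThreshold=" + mobThreshold);
 }
}

The production examples below obtain the HStore from a live region rather than a mock, but they read the descriptor in the same way.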

Code examples

Code example source: apache/hbase

Compactor(Configuration conf, HStore store) {
 this.conf = conf;
 this.store = store;
 this.compactionKVMax =
  this.conf.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
 this.compactionCompression = (this.store.getColumnFamilyDescriptor() == null) ?
   Compression.Algorithm.NONE : this.store.getColumnFamilyDescriptor().getCompactionCompressionType();
 this.keepSeqIdPeriod = Math.max(this.conf.getInt(HConstants.KEEP_SEQID_PERIOD,
  HConstants.MIN_KEEP_SEQID_PERIOD), HConstants.MIN_KEEP_SEQID_PERIOD);
 this.dropCacheMajor = conf.getBoolean(MAJOR_COMPACTION_DROP_CACHE, true);
 this.dropCacheMinor = conf.getBoolean(MINOR_COMPACTION_DROP_CACHE, true);
}

Code example source: apache/hbase

public DefaultMobStoreCompactor(Configuration conf, HStore store) {
 super(conf, store);
 // The mob cells reside in the mob-enabled column family which is held by HMobStore.
 // During the compaction, the compactor reads the cells from the mob files and
 // probably creates new mob files. All of these operations are included in HMobStore,
 // so we need to cast the Store to HMobStore.
 if (!(store instanceof HMobStore)) {
  throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
 }
 mobStore = (HMobStore) store;
 mobSizeThreshold = store.getColumnFamilyDescriptor().getMobThreshold();
}

Code example source: apache/hbase

private byte[] getFamilyNameInBytes() {
 return store.getColumnFamilyDescriptor().getName();
}

Code example source: apache/hbase

/**
 * @return Map of StoreFiles by column family
 */
private NavigableMap<byte[], List<Path>> getStoreFiles() {
 NavigableMap<byte[], List<Path>> allStoreFiles = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 for (HStore store : stores.values()) {
  Collection<HStoreFile> storeFiles = store.getStorefiles();
  if (storeFiles == null) {
   continue;
  }
  List<Path> storeFileNames = new ArrayList<>();
  for (HStoreFile storeFile : storeFiles) {
   storeFileNames.add(storeFile.getPath());
  }
  allStoreFiles.put(store.getColumnFamilyDescriptor().getName(), storeFileNames);
 }
 return allStoreFiles;
}

Code example source: apache/hbase

@Override
 public StoreFileWriter createWriter() throws IOException {
  StoreFileWriter writer = store.createWriterInTmp(kvCount,
    store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false);
  return writer;
 }
};

Code example source: apache/hbase

void throwException(String title, String regionName) {
 StringBuilder buf = new StringBuilder();
 buf.append(title + ", ");
 buf.append(getRegionInfo().toString());
 buf.append(getRegionInfo().isMetaRegion() ? " meta region " : " ");
 buf.append("stores: ");
 for (HStore s : stores.values()) {
  buf.append(s.getColumnFamilyDescriptor().getNameAsString());
  buf.append(" size: ");
  buf.append(s.getMemStoreSize().getDataSize());
  buf.append(" ");
 }
 buf.append("end-of-stores");
 buf.append(", memstore size ");
 buf.append(getMemStoreDataSize());
 if (getRegionInfo().getRegionNameAsString().startsWith(regionName)) {
  throw new RuntimeException(buf.toString());
 }
}

Code example source: apache/hbase

/**
  * Generate a name for throttling, to prevent name conflict when multiple IO operation running
  * parallel on the same store.
  * @param store the Store instance on which IO operation is happening
  * @param opName Name of the IO operation, e.g. "flush", "compaction", etc.
  * @return The name for throttling
  */
 public static String getNameForThrottling(HStore store, String opName) {
  int counter;
  for (;;) {
   counter = NAME_COUNTER.get();
   int next = counter == Integer.MAX_VALUE ? 0 : counter + 1;
   if (NAME_COUNTER.compareAndSet(counter, next)) {
    break;
   }
  }
  return store.getRegionInfo().getEncodedName() + NAME_DELIMITER +
    store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName +
    NAME_DELIMITER + counter;
 }
}

Code example source: apache/hbase

@Override
 public Pair<byte[], Collection<HStoreFile>> call() throws IOException {
  return new Pair<>(store.getColumnFamilyDescriptor().getName(), store.close());
 }
});

Code example source: apache/hbase

public DefaultMobStoreFlusher(Configuration conf, HStore store) throws IOException {
 super(conf, store);
 if (!(store instanceof HMobStore)) {
  throw new IllegalArgumentException("The store " + store + " is not a HMobStore");
 }
 mobCellValueSizeThreshold = store.getColumnFamilyDescriptor().getMobThreshold();
 this.targetPath = MobUtils.getMobFamilyPath(conf, store.getTableName(),
   store.getColumnFamilyName());
 if (!this.store.getFileSystem().exists(targetPath)) {
  this.store.getFileSystem().mkdirs(targetPath);
 }
 this.mobStore = (HMobStore) store;
}

Code example source: apache/hbase

/**
 * This functionality should be resolved in the higher level which is
 * MemStoreScanner, currently returns true as default. Doesn't throw
 * IllegalStateException in order not to change the signature of the
 * overridden method
 */
@Override
public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {
 return getSegment().shouldSeek(scan.getColumnFamilyTimeRange()
     .getOrDefault(store.getColumnFamilyDescriptor().getName(), scan.getTimeRange()), oldestUnexpiredTS);
}

Code example source: apache/hbase

@Override
public boolean shouldUseScanner(Scan scan, HStore store, long oldestUnexpiredTS) {
 // if the file has no entries, no need to validate or create a scanner.
 byte[] cf = store.getColumnFamilyDescriptor().getName();
 TimeRange timeRange = scan.getColumnFamilyTimeRange().get(cf);
 if (timeRange == null) {
  timeRange = scan.getTimeRange();
 }
 return reader.passesTimerangeFilter(timeRange, oldestUnexpiredTS) && reader
   .passesKeyRangeFilter(scan) && reader.passesBloomFilter(scan, scan.getFamilyMap().get(cf));
}

Code example source: apache/hbase

/**
 * Writes the compaction WAL record.
 * @param filesCompacted Files compacted (input).
 * @param newFiles Files from compaction.
 */
private void writeCompactionWalRecord(Collection<HStoreFile> filesCompacted,
  Collection<HStoreFile> newFiles) throws IOException {
 if (region.getWAL() == null) {
  return;
 }
 List<Path> inputPaths =
   filesCompacted.stream().map(HStoreFile::getPath).collect(Collectors.toList());
 List<Path> outputPaths =
   newFiles.stream().map(HStoreFile::getPath).collect(Collectors.toList());
 RegionInfo info = this.region.getRegionInfo();
 CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
   family.getName(), inputPaths, outputPaths, fs.getStoreDir(getColumnFamilyDescriptor().getNameAsString()));
 // Fix reaching into Region to get the maxWaitForSeqId.
 // Does this method belong in Region altogether given it is making so many references up there?
 // Could be Region#writeCompactionMarker(compactionDescriptor);
 WALUtil.writeCompactionMarker(this.region.getWAL(), this.region.getReplicationScope(),
   this.region.getRegionInfo(), compactionDescriptor, this.region.getMVCC());
}

Code example source: apache/hbase

/**
 * @param delta If we are doing delta changes -- e.g. increment/append -- then this flag will be
 *          set; when set we will run operations that make sense in the increment/append scenario
 *          but that do not make sense otherwise.
 * @see #applyToMemStore(HStore, Cell, MemStoreSizing)
 */
private void applyToMemStore(HStore store, List<Cell> cells, boolean delta,
  MemStoreSizing memstoreAccounting) throws IOException {
 // Any change in how we update Store/MemStore needs to also be done in other applyToMemStore!!!!
 boolean upsert = delta && store.getColumnFamilyDescriptor().getMaxVersions() == 1;
 if (upsert) {
  store.upsert(cells, getSmallestReadPoint(), memstoreAccounting);
 } else {
  store.add(cells, memstoreAccounting);
 }
}

Code example source: apache/hbase

private boolean isInBloom(StoreFileScanner scanner, byte[] row,
  byte[] qualifier) {
 Scan scan = new Scan().withStartRow(row).withStopRow(row, true);
 scan.addColumn(Bytes.toBytes(RandomKeyValueUtil.COLUMN_FAMILY_NAME), qualifier);
 HStore store = mock(HStore.class);
 when(store.getColumnFamilyDescriptor())
   .thenReturn(ColumnFamilyDescriptorBuilder.of(RandomKeyValueUtil.COLUMN_FAMILY_NAME));
 return scanner.shouldUseScanner(scan, store, Long.MIN_VALUE);
}

Code example source: apache/hbase

/**
 * Do a small get/scan against one store. This is required because store
 * has no actual methods of querying itself, and relies on StoreScanner.
 */
public static List<Cell> getFromStoreFile(HStore store,
                       byte [] row,
                       NavigableSet<byte[]> columns
                       ) throws IOException {
 Get get = new Get(row);
 Map<byte[], NavigableSet<byte[]>> s = get.getFamilyMap();
 s.put(store.getColumnFamilyDescriptor().getName(), columns);
 return getFromStoreFile(store,get);
}

Code example source: apache/hbase

@Test
public void testEmptyStoreFileRestrictKeyRanges() throws Exception {
 StoreFileReader reader = mock(StoreFileReader.class);
 HStore store = mock(HStore.class);
 byte[] cf = Bytes.toBytes("ty");
 ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.of(cf);
 when(store.getColumnFamilyDescriptor()).thenReturn(cfd);
 StoreFileScanner scanner =
   new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, true);
 Scan scan = new Scan();
 scan.setColumnFamilyTimeRange(cf, 0, 1);
 assertFalse(scanner.shouldUseScanner(scan, store, 0));
}

Code example source: apache/hbase

@VisibleForTesting
protected void addRegion(final HRegion region, RegionVisitor visitor) throws IOException {
 // 1. dump region meta info into the snapshot directory
 LOG.debug("Storing '" + region + "' region-info for snapshot.");
 Object regionData = visitor.regionOpen(region.getRegionInfo());
 monitor.rethrowException();
 // 2. iterate through all the stores in the region
 LOG.debug("Creating references for hfiles");
 for (HStore store : region.getStores()) {
  // 2.1. build the snapshot reference for the store
  Object familyData = visitor.familyOpen(regionData,
    store.getColumnFamilyDescriptor().getName());
  monitor.rethrowException();
  List<HStoreFile> storeFiles = new ArrayList<>(store.getStorefiles());
  if (LOG.isDebugEnabled()) {
   LOG.debug("Adding snapshot references for " + storeFiles  + " hfiles");
  }
  // 2.2. iterate through all the store's files and create "references".
  for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
   HStoreFile storeFile = storeFiles.get(i);
   monitor.rethrowException();
   // create "reference" to this store file.
   LOG.debug("Adding reference for file (" + (i+1) + "/" + sz + "): " + storeFile.getPath());
   visitor.storeFile(regionData, familyData, storeFile.getFileInfo());
  }
  visitor.familyClose(regionData, familyData);
 }
 visitor.regionClose(regionData);
}

Code example source: apache/hbase

private StripeCompactor createCompactor() throws Exception {
 HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
 StoreFileWritersCapture writers = new StoreFileWritersCapture();
 HStore store = mock(HStore.class);
 HRegionInfo info = mock(HRegionInfo.class);
 when(info.getRegionNameAsString()).thenReturn("testRegion");
 when(store.getColumnFamilyDescriptor()).thenReturn(col);
 when(store.getRegionInfo()).thenReturn(info);
 when(
  store.createWriterInTmp(anyLong(), any(), anyBoolean(),
   anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
 Configuration conf = HBaseConfiguration.create();
 conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
 final Scanner scanner = new Scanner();
 return new StripeCompactor(conf, store) {
  @Override
  protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
    List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
    byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
   return scanner;
  }
  @Override
  protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
    List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
    long earliestPutTs) throws IOException {
   return scanner;
  }
 };
}

Code example source: apache/hbase

// Truncated excerpt from region flush-decision logic; the first statement is
// reconstructed from context, since the original article cut it off mid-expression.
long earliest = this.wal.getEarliestMemStoreSeqNum(getRegionInfo().getEncodedNameAsBytes(),
  store.getColumnFamilyDescriptor().getName()) - 1;
if (earliest > 0 && earliest + flushPerChanges < mvcc.getReadPoint()) {
 if (LOG.isDebugEnabled()) {
  // ... (debug logging and the rest of the check are omitted in the original excerpt)

Code example source: apache/hbase

/**
 * Do a small get/scan against one store. This is required because store
 * has no actual methods of querying itself, and relies on StoreScanner.
 */
public static List<Cell> getFromStoreFile(HStore store,
                       Get get) throws IOException {
 Scan scan = new Scan(get);
 InternalScanner scanner = (InternalScanner) store.getScanner(scan,
   scan.getFamilyMap().get(store.getColumnFamilyDescriptor().getName()),
   // originally MultiVersionConcurrencyControl.resetThreadReadPoint() was called to set
   // readpoint 0.
   0);
 List<Cell> result = new ArrayList<>();
 scanner.next(result);
 if (!result.isEmpty()) {
  // verify that we are on the row we want:
  Cell kv = result.get(0);
  if (!CellUtil.matchingRows(kv, get.getRow())) {
   result.clear();
  }
 }
 scanner.close();
 return result;
}
