本文整理了Java中org.apache.hadoop.hbase.regionserver.HStore.triggerMajorCompaction()
方法的一些代码示例,展示了HStore.triggerMajorCompaction()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HStore.triggerMajorCompaction()
方法的具体详情如下:
包路径:org.apache.hadoop.hbase.regionserver.HStore
类名称:HStore
方法名:triggerMajorCompaction
暂无
代码示例来源:origin: apache/hbase
/**
 * Requests a compaction on the store backing {@code family}. When {@code major} is set,
 * the store is flagged so the next selected compaction is forced to be major before the
 * request is handed off to the region server's compaction requestor.
 *
 * @throws NoSuchColumnFamilyException if this region has no store for the given family
 */
@Override
public void requestCompaction(byte[] family, String why, int priority, boolean major,
    CompactionLifeCycleTracker tracker) throws IOException {
  HStore targetStore = stores.get(family);
  if (targetStore == null) {
    // Fail fast with the family and region identity so the caller can diagnose the request.
    String message = "column family " + Bytes.toString(family) +
        " does not exist in region " + getRegionInfo().getRegionNameAsString();
    throw new NoSuchColumnFamilyException(message);
  }
  if (major) {
    targetStore.triggerMajorCompaction();
  }
  rsServices.getCompactionRequestor().requestCompaction(this, targetStore, why, priority,
      tracker, RpcServer.getRequestUser().orElse(null));
}
代码示例来源:origin: apache/hbase
" family=" + familyName);
if (major) {
store.triggerMajorCompaction();
代码示例来源:origin: apache/hbase
getName() + " requests compaction");
} else if (s.shouldPerformMajorCompaction()) {
s.triggerMajorCompaction();
if (majorCompactPriority == DEFAULT_PRIORITY ||
majorCompactPriority > hr.getCompactPriority()) {
代码示例来源:origin: apache/hbase
/**
 * Test for HBASE-5920 - Test user requested major compactions always occurring
 */
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
  HStore store = r.getStore(COLUMN_FAMILY);
  // Create one more store file than the compaction policy will select at once
  // (MAX_FILES_TO_COMPACT + 2 total), so a system-requested major is downgraded.
  int totalFiles = MAX_FILES_TO_COMPACT + 2;
  for (int created = 0; created < totalFiles; created++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();
  CompactionRequestImpl request = store.requestCompaction().get().getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  assertEquals(
      "System-requested major compaction should not occur if there are too many store files",
      false,
      request.isMajor());
}
代码示例来源:origin: apache/hbase
store.triggerMajorCompaction();
Optional<CompactionContext> cc = store.requestCompaction();
assertTrue(cc.isPresent());
代码示例来源:origin: apache/hbase
/**
 * Test for HBASE-5920
 */
@Test
public void testUserMajorCompactionRequest() throws IOException {
  HStore store = r.getStore(COLUMN_FAMILY);
  // Exceed the per-compaction file limit: MAX_FILES_TO_COMPACT + 2 files in total.
  int totalFiles = MAX_FILES_TO_COMPACT + 2;
  for (int created = 0; created < totalFiles; created++) {
    createStoreFile(r);
  }
  store.triggerMajorCompaction();
  // A PRIORITY_USER request must stay major even past the file-count limit.
  CompactionRequestImpl request =
      store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get()
          .getRequest();
  assertNotNull("Expected to receive a compaction request", request);
  assertEquals(
      "User-requested major compaction should always occur, even if there are too many store files",
      true,
      request.isMajor());
}
代码示例来源:origin: apache/hbase
store.triggerMajorCompaction();
代码示例来源:origin: apache/hbase
System.out.println("Compacting");
assertEquals(2, store.getStorefilesCount());
store.triggerMajorCompaction();
region.compact(true);
waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
代码示例来源:origin: apache/hbase
System.out.println("Compacting");
assertEquals(3, store.getStorefilesCount());
store.triggerMajorCompaction();
region.compact(true);
waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
代码示例来源:origin: apache/hbase
store.triggerMajorCompaction();
region.compact(true);
waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
代码示例来源:origin: apache/hbase
store.triggerMajorCompaction();
region.compact(true);
store.closeAndArchiveCompactedFiles();
代码示例来源:origin: apache/phoenix
@Test(timeout=120000)
public void testCompactNonPhoenixTable() throws Exception {
  try (Connection conn = getConnection()) {
    // create a vanilla HBase table (non-Phoenix)
    String tableName = generateUniqueName();
    TableName hbaseTableName = TableName.valueOf(tableName);
    byte[] family = Bytes.toBytes("fam");
    Table table = getUtility().createTable(hbaseTableName, family);
    TestUtil.addCoprocessor(conn, tableName, UngroupedAggregateRegionObserver.class);
    byte[] cellValue = new byte[1];
    Bytes.random(cellValue);
    Put put = new Put(Bytes.toBytes("row"));
    put.addColumn(family, Bytes.toBytes("colQ"), cellValue);
    table.put(put);
    // major compaction shouldn't cause a timeout or RS abort
    HRegion region = getUtility().getHBaseCluster().getRegions(hbaseTableName).get(0);
    flushAndMajorCompact(region, family);
    // we should be able to compact syscat itself as well
    HRegion syscatRegion = getUtility().getHBaseCluster()
        .getRegions(TableName.valueOf(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME)).get(0);
    flushAndMajorCompact(syscatRegion, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES);
  }
}

/** Flushes the region, then forces and runs a major compaction on the given family's store. */
private static void flushAndMajorCompact(HRegion region, byte[] family) throws Exception {
  region.flush(true);
  HStore store = region.getStore(family);
  store.triggerMajorCompaction();
  store.compactRecentForTestingAssumingDefaultPolicy(1);
}
代码示例来源:origin: org.apache.hbase/hbase-mapreduce
" family=" + familyName);
if (major) {
store.triggerMajorCompaction();
代码示例来源:origin: com.aliyun.hbase/alihbase-mapreduce
" family=" + familyName);
if (major) {
store.triggerMajorCompaction();
代码示例来源:origin: harbby/presto-connectors
/**
 * Execute the actual compaction job.
 * If the compact once flag is not specified, execute the compaction until
 * no more compactions are needed. Uses the Configuration settings provided.
 */
private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
    final HRegionInfo hri, final String familyName, final boolean compactOnce,
    final boolean major) throws IOException {
  HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
  LOG.info("Compact table=" + htd.getTableName() +
      " region=" + hri.getRegionNameAsString() +
      " family=" + familyName);
  if (major) {
    // Flag the store so the next selection is forced to be a major compaction.
    store.triggerMajorCompaction();
  }
  for (;;) {
    CompactionContext context = store.requestCompaction(Store.PRIORITY_USER, null);
    if (context == null) {
      // Nothing left to compact.
      break;
    }
    List<StoreFile> compacted =
        store.compact(context, NoLimitCompactionThroughputController.INSTANCE);
    if (compacted != null && !compacted.isEmpty()
        && keepCompactedFiles && deleteCompacted) {
      for (StoreFile compactedFile : compacted) {
        fs.delete(compactedFile.getPath(), false);
      }
    }
    // Single-shot mode, or the store no longer needs compacting: stop.
    if (compactOnce || !store.needsCompaction()) {
      break;
    }
  }
}
代码示例来源:origin: org.apache.hbase/hbase-server
store.triggerMajorCompaction();
Optional<CompactionContext> cc = store.requestCompaction();
assertTrue(cc.isPresent());
代码示例来源:origin: org.apache.hbase/hbase-server
/**
* Test for HBASE-5920 - Test user requested major compactions always occurring
*/
@Test
public void testNonUserMajorCompactionRequest() throws Exception {
HStore store = r.getStore(COLUMN_FAMILY);
createStoreFile(r);
for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
createStoreFile(r);
}
store.triggerMajorCompaction();
CompactionRequestImpl request = store.requestCompaction().get().getRequest();
assertNotNull("Expected to receive a compaction request", request);
assertEquals(
"System-requested major compaction should not occur if there are too many store files",
false,
request.isMajor());
}
代码示例来源:origin: org.apache.hbase/hbase-server
/**
* Test for HBASE-5920
*/
@Test
public void testUserMajorCompactionRequest() throws IOException{
HStore store = r.getStore(COLUMN_FAMILY);
createStoreFile(r);
for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
createStoreFile(r);
}
store.triggerMajorCompaction();
CompactionRequestImpl request =
store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, null).get()
.getRequest();
assertNotNull("Expected to receive a compaction request", request);
assertEquals(
"User-requested major compaction should always occur, even if there are too many store files",
true,
request.isMajor());
}
代码示例来源:origin: org.apache.hbase/hbase-server
store.triggerMajorCompaction();
代码示例来源:origin: org.apache.hbase/hbase-server
store.triggerMajorCompaction();
region.compact(true);
waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max
内容来源于网络,如有侵权,请联系作者删除!