This article collects Java code examples for the org.apache.hadoop.hbase.TableName.toString() method and shows how TableName.toString() is used in practice. The examples are drawn mainly from platforms such as GitHub, Stack Overflow, and Maven, and were extracted from selected open-source projects, so they should serve as useful references. The details of TableName.toString() are as follows:
Package path: org.apache.hadoop.hbase.TableName
Class name: TableName
Method name: toString
Method description: none available
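As a quick orientation before the collected examples, here is a minimal, self-contained sketch of what toString() returns. The class and methods used below are the real HBase client API; the table and namespace names are made up for illustration. The string form is the fully qualified table name, namespace:qualifier, with the default namespace omitted.

import org.apache.hadoop.hbase.TableName;

public class TableNameToStringExample {
  public static void main(String[] args) {
    // Table in the default namespace: toString() returns just the qualifier.
    TableName plain = TableName.valueOf("mytable");
    System.out.println(plain.toString()); // prints "mytable"

    // Table in an explicit namespace: toString() returns "namespace:qualifier".
    TableName qualified = TableName.valueOf("myns", "mytable");
    System.out.println(qualified.toString()); // prints "myns:mytable"
  }
}

Note that TableName is a client-side value object, so this snippet needs only the hbase-client dependency and no running cluster.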
Code example source: apache/hbase — converting an array of TableName objects into a String array:
private String[] toStringArray(TableName[] list) {
  String[] arr = new String[list.length];
  for (int i = 0; i < list.length; i++) {
    arr[i] = list[i].toString();
  }
  return arr;
}
Code example source: apache/hbase — building Delete operations keyed by each table's string name:
public static List<Delete> createDeleteForOrigBulkLoad(List<TableName> lst) {
  List<Delete> lstDels = new ArrayList<>(lst.size());
  for (TableName table : lst) {
    Delete del = new Delete(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM));
    del.addFamily(BackupSystemTable.META_FAMILY);
    lstDels.add(del);
  }
  return lstDels;
}
Code example source: apache/hbase — rendering an owner (user, table, namespace, or region server) as a string:
protected String ownerToString() {
  StringBuilder builder = new StringBuilder();
  if (userName != null) {
    builder.append("USER => '");
    builder.append(userName);
    builder.append("', ");
  }
  if (tableName != null) {
    builder.append("TABLE => '");
    builder.append(tableName.toString());
    builder.append("', ");
  }
  if (namespace != null) {
    builder.append("NAMESPACE => '");
    builder.append(namespace);
    builder.append("', ");
  }
  if (regionServer != null) {
    builder.append("REGIONSERVER => ").append(regionServer).append(", ");
  }
  return builder.toString();
}
Code example source: apache/hbase — exposing per-table space quota utilization keyed by the table's string name:
@Override
public Map<String, Entry<Long, Long>> getTableSpaceUtilization() {
  QuotaObserverChore quotaChore = master.getQuotaObserverChore();
  if (quotaChore == null) {
    return Collections.emptyMap();
  }
  Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
  Map<String, Entry<Long, Long>> convertedData = new HashMap<>();
  for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {
    convertedData.put(entry.getKey().toString(), convertSnapshot(entry.getValue()));
  }
  return convertedData;
}
Code example source: apache/hbase — building a Get for a snapshot's size entry in the quota table:
/**
 * Creates a {@link Get} for the HBase snapshot's size against the given table.
 */
static Get makeGetForSnapshotSize(TableName tn, String snapshot) {
  Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString())));
  g.addColumn(
      QUOTA_FAMILY_USAGE,
      Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot)));
  return g;
}
Code example source: apache/hbase — recording committed bulk-load files, with row keys built from table.toString():
static List<Put> createPutForCommittedBulkload(TableName table, byte[] region,
    Map<byte[], List<Path>> finalPaths) {
  List<Put> puts = new ArrayList<>();
  for (Map.Entry<byte[], List<Path>> entry : finalPaths.entrySet()) {
    for (Path path : entry.getValue()) {
      String file = path.toString();
      int lastSlash = file.lastIndexOf("/");
      String filename = file.substring(lastSlash + 1);
      Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
          Bytes.toString(region), BLK_LD_DELIM, filename));
      put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
      put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, entry.getKey());
      put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
      put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_COMMIT);
      puts.add(put);
      LOG.debug(
          "writing done bulk path " + file + " for " + table + " " + Bytes.toString(region));
    }
  }
  return puts;
}
Code example source: apache/hbase — recording prepared bulk-load files, again keying rows by table.toString():
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region, final byte[] family,
    final List<Pair<Path, Path>> pairs) {
  List<Put> puts = new ArrayList<>(pairs.size());
  for (Pair<Path, Path> pair : pairs) {
    Path path = pair.getSecond();
    String file = path.toString();
    int lastSlash = file.lastIndexOf("/");
    String filename = file.substring(lastSlash + 1);
    Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
        Bytes.toString(region), BLK_LD_DELIM, filename));
    put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
    put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
    put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
    put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
    puts.add(put);
    LOG.debug("writing raw bulk path " + file + " for " + table + " " + Bytes.toString(region));
  }
  return puts;
}
Code example source: apache/hbase — building a Scan over all rows prefixed with a table's bulk-load key:
static Scan createScanForOrigBulkLoadedFiles(TableName table) {
  Scan scan = new Scan();
  byte[] startRow = rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM);
  byte[] stopRow = Arrays.copyOf(startRow, startRow.length);
  // Incrementing the last byte of the start row yields an exclusive stop row,
  // limiting the scan to rows that share this table's key prefix.
  stopRow[stopRow.length - 1] = (byte) (stopRow[stopRow.length - 1] + 1);
  scan.withStartRow(startRow);
  scan.withStopRow(stopRow);
  scan.addFamily(BackupSystemTable.META_FAMILY);
  scan.setMaxVersions(1);
  return scan;
}
Code example source: apache/hbase (fragment — keying a map by the table name's string form):
tds.put(entry.getKey().toString(), entry.getValue());
Code example source: apache/hbase — a test helper that passes tableName.toString() to LoadIncrementalHFiles:
private void loadAndValidateHFileReplication(String testName, byte[] row, byte[] fam,
    Table source, byte[][][] hfileRanges, int numOfRows) throws Exception {
  Path dir = utility1.getDataTestDirOnTestFS(testName);
  FileSystem fs = utility1.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(fam));
  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(utility1.getConfiguration(), fs,
        new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
  }
  final TableName tableName = source.getName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(utility1.getConfiguration());
  String[] args = { dir.toString(), tableName.toString() };
  loader.run(args);
}
Code example source: apache/hbase — a multi-cluster variant of the same test helper:
private void loadAndValidateHFileReplication(String testName, int masterNumber,
    int[] slaveNumbers, byte[] row, byte[] fam, Table[] tables, byte[][][] hfileRanges,
    int numOfRows, int[] expectedCounts, boolean toValidate) throws Exception {
  HBaseTestingUtility util = utilities[masterNumber];
  Path dir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = util.getTestFileSystem();
  dir = dir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(dir, Bytes.toString(fam));
  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(util.getConfiguration(), fs,
        new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
  }
  Table source = tables[masterNumber];
  final TableName tableName = source.getName();
  LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());
  String[] args = { dir.toString(), tableName.toString() };
  loader.run(args);
  if (toValidate) {
    for (int slaveClusterNumber : slaveNumbers) {
      wait(slaveClusterNumber, tables[slaveClusterNumber], expectedCounts[slaveClusterNumber]);
    }
  }
}
Code example source: apache/hbase — validating a create-table request, using toString() in the failure message:
private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
  final TableName tableName = getTableName();
  if (MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
    setFailure("master-create-table", new TableExistsException(getTableName()));
    return false;
  }
  // check that we have at least 1 CF
  if (tableDescriptor.getColumnFamilyCount() == 0) {
    setFailure("master-create-table", new DoNotRetryIOException("Table " +
        getTableName().toString() + " should have at least one column family."));
    return false;
  }
  return true;
}
Code example source: apache/hbase — test setup that passes tableName.toString() to AccessControlClient.getUserPermissions():
@Before
public void setUp() throws Exception {
  createNamespace(TEST_UTIL, NamespaceDescriptor.create(namespace).build());
  try (Table table = createTable(TEST_UTIL, tableName,
      new byte[][] { TEST_FAMILY, TEST_FAMILY_2 })) {
    TEST_UTIL.waitTableEnabled(tableName);
    // Ingesting test data.
    table.put(Arrays.asList(new Put(TEST_ROW).addColumn(TEST_FAMILY, Q1, value1),
        new Put(TEST_ROW_2).addColumn(TEST_FAMILY, Q2, value2),
        new Put(TEST_ROW_3).addColumn(TEST_FAMILY_2, Q1, value1)));
  }
  assertEquals(1, AccessControlLists.getTablePermissions(conf, tableName).size());
  try {
    assertEquals(1, AccessControlClient.getUserPermissions(systemUserConnection,
        tableName.toString()).size());
  } catch (Throwable e) {
    LOG.error("Error during call of AccessControlClient.getUserPermissions. ", e);
  }
}
Code example source: apache/hbase (fragment):
String[] args = { dir.toString(), tableName.toString() };
loader.run(args);
Code example source: apache/hbase (fragment):
throw new DoNotRetryIOException("Table " + getTableName().toString() +
" should have at least one column family.");
Code example source: apache/hbase — asserting that a rejection message contains TABLE_NAME.toString():
protected final void verifyReplicationRequestRejection(HBaseTestingUtility utility,
    boolean expectedRejection) throws Exception {
  HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
  ClusterConnection connection = regionServer.getClusterConnection();
  Entry[] entries = new Entry[10];
  for (int i = 0; i < entries.length; i++) {
    entries[i] =
        new Entry(new WALKeyImpl(HConstants.EMPTY_BYTE_ARRAY, TABLE_NAME, 0), new WALEdit());
  }
  if (!expectedRejection) {
    ReplicationProtbufUtil.replicateWALEntry(connection.getAdmin(regionServer.getServerName()),
        entries, null, null, null);
  } else {
    try {
      ReplicationProtbufUtil.replicateWALEntry(connection.getAdmin(regionServer.getServerName()),
          entries, null, null, null);
      fail("Should throw IOException when sync-replication state is in A or DA");
    } catch (DoNotRetryIOException e) {
      assertTrue(e.getMessage().contains("Reject to apply to sink cluster"));
      assertTrue(e.getMessage().contains(TABLE_NAME.toString()));
    }
  }
}
Code example source: apache/hbase — a replication test that derives a table descriptor from tableName.toString():
public void testRegionReplicaReplication(int regionReplication) throws Exception {
  // test region replica replication. Create a table with single region, write some data
  // ensure that data is replicated to the secondary region
  TableName tableName = TableName.valueOf("testRegionReplicaReplicationWithReplicas_"
      + regionReplication);
  HTableDescriptor htd = HTU.createTableDescriptor(tableName.toString());
  htd.setRegionReplication(regionReplication);
  HTU.getAdmin().createTable(htd);
  TableName tableNameNoReplicas =
      TableName.valueOf("testRegionReplicaReplicationWithReplicas_NO_REPLICAS");
  HTU.deleteTableIfAny(tableNameNoReplicas);
  HTU.createTable(tableNameNoReplicas, HBaseTestingUtility.fam1);
  Connection connection = ConnectionFactory.createConnection(HTU.getConfiguration());
  Table table = connection.getTable(tableName);
  Table tableNoReplicas = connection.getTable(tableNameNoReplicas);
  try {
    // load some data to the non-replicated table
    HTU.loadNumericRows(tableNoReplicas, HBaseTestingUtility.fam1, 6000, 7000);
    // load the data to the table
    HTU.loadNumericRows(table, HBaseTestingUtility.fam1, 0, 1000);
    verifyReplication(tableName, regionReplication, 0, 1000);
  } finally {
    table.close();
    tableNoReplicas.close();
    HTU.deleteTableIfAny(tableNameNoReplicas);
    connection.close();
  }
}
Code example source: apache/hbase — comparing two BufferedMutatorParams instances by their table names' string form:
/**
 * Confirm all fields are equal.
 * @param some some instance
 * @param clone a clone of that instance, but not the same instance.
 */
private void cloneTest(BufferedMutatorParams some,
    BufferedMutatorParams clone) {
  assertFalse(some == clone);
  assertEquals(some.getTableName().toString(),
      clone.getTableName().toString());
  assertEquals(some.getWriteBufferSize(), clone.getWriteBufferSize());
  assertEquals(some.getWriteBufferPeriodicFlushTimeoutMs(),
      clone.getWriteBufferPeriodicFlushTimeoutMs());
  assertEquals(some.getWriteBufferPeriodicFlushTimerTickMs(),
      clone.getWriteBufferPeriodicFlushTimerTickMs());
  assertEquals(some.getMaxKeyValueSize(), clone.getMaxKeyValueSize());
  assertTrue(some.getListener() == clone.getListener());
  assertTrue(some.getPool() == clone.getPool());
  assertEquals(some.getImplementationClassName(), clone.getImplementationClassName());
}
Code example source: apache/hbase — verifying that a cloned BufferedMutatorParams preserves the table name:
@Test
public void testClone() {
  ExecutorService pool = new MockExecutorService();
  final String tableName = name.getMethodName();
  BufferedMutatorParams bmp = new BufferedMutatorParams(TableName.valueOf(tableName));
  BufferedMutator.ExceptionListener listener = new MockExceptionListener();
  bmp
      .writeBufferSize(17)
      .setWriteBufferPeriodicFlushTimeoutMs(123)
      .setWriteBufferPeriodicFlushTimerTickMs(456)
      .maxKeyValueSize(13)
      .pool(pool)
      .listener(listener);
  bmp.implementationClassName("someClassName");
  BufferedMutatorParams clone = bmp.clone();
  // Confirm some literals
  assertEquals(tableName, clone.getTableName().toString());
  assertEquals(17, clone.getWriteBufferSize());
  assertEquals(123, clone.getWriteBufferPeriodicFlushTimeoutMs());
  assertEquals(456, clone.getWriteBufferPeriodicFlushTimerTickMs());
  assertEquals(13, clone.getMaxKeyValueSize());
  assertEquals("someClassName", clone.getImplementationClassName());
  cloneTest(bmp, clone);
  BufferedMutatorParams cloneWars = clone.clone();
  cloneTest(clone, cloneWars);
  cloneTest(bmp, cloneWars);
  // Mutating the clone leaves the original unaffected.
  clone.implementationClassName(null);
  assertEquals("someClassName", bmp.getImplementationClassName());
}