本文整理了Java中org.apache.hadoop.hive.ql.metadata.Hive.getAllDatabases()
方法的一些代码示例,展示了Hive.getAllDatabases()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Hive.getAllDatabases()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Hive
类名称:Hive
方法名:getAllDatabases
[英]Get all existing database names.
[中]获取所有现有数据库名称。
代码示例来源:origin: apache/hive
/**
 * Resolves the set of database names matching the given pattern.
 *
 * @param db the Hive client used to query the metastore
 * @param dbPattern the database name pattern; {@code null} means "all databases"
 * @return all database names when the pattern is null, otherwise only the matching ones
 * @throws HiveException if the metastore lookup fails
 */
public static Iterable<String> matchesDb(Hive db, String dbPattern) throws HiveException {
  // A null pattern is the "no filter" case.
  return dbPattern == null
      ? db.getAllDatabases()
      : db.getDatabasesByPattern(dbPattern);
}
代码示例来源:origin: apache/drill
/**
 * Resolves the database names matching {@code dbPattern} via the enclosing
 * instance's Hive client ({@code db}).
 *
 * @param dbPattern the database name pattern; {@code null} selects every database
 * @return the matching database names
 * @throws HiveException if the metastore lookup fails
 */
private Iterable<? extends String> matchesDb(String dbPattern) throws HiveException {
  // No pattern supplied -> return everything; otherwise filter in the metastore.
  return dbPattern == null
      ? db.getAllDatabases()
      : db.getDatabasesByPattern(dbPattern);
}
}
代码示例来源:origin: apache/drill
/**
 * Initialize the registry for the given database. It will extract the materialized views
 * that are enabled for rewriting from the metastore for the current user, parse them,
 * and register them in this cache.
 *
 * The loading process runs on the background; the method returns in the moment that the
 * runnable task is created, thus the views will still not be loaded in the cache when
 * it does.
 *
 * @param db the Hive client used to enumerate databases and their tables
 */
public void init(final Hive db) {
  try {
    List<Table> tables = new ArrayList<Table>();
    for (String dbName : db.getAllDatabases()) {
      // TODO: We should enhance metastore API such that it returns only
      // materialized views instead of all tables
      tables.addAll(db.getAllTableObjects(dbName));
    }
    pool.submit(new Loader(tables));
  } catch (HiveException e) {
    // Fix: include the exception so the failure cause (and stack trace) is not lost.
    LOG.error("Problem connecting to the metastore when initializing the view registry", e);
  }
}
代码示例来源:origin: apache/hive
/**
 * Background loader: starts a HiveServer2 session state and registers every
 * materialized view found in the metastore, then marks the registry initialized.
 */
@Override
public void run() {
  try {
    SessionState sessionState = new SessionState(db.getConf());
    sessionState.setIsHiveServerQuery(true); // All is served from HS2, we do not need e.g. Tez sessions
    SessionState.start(sessionState);
    String registryImpl = db.getConf()
        .get(HiveConf.ConfVars.HIVE_SERVER2_MATERIALIZED_VIEWS_REGISTRY_IMPL.varname);
    // The DUMMY implementation disables caching of parsed views.
    final boolean cache = !registryImpl.equals("DUMMY");
    for (String dbName : db.getAllDatabases()) {
      for (Table materializedView : db.getAllMaterializedViewObjects(dbName)) {
        addMaterializedView(db.getConf(), materializedView, OpType.LOAD, cache);
      }
    }
    initialized.set(true);
    LOG.info("Materialized views registry has been initialized");
  } catch (HiveException e) {
    LOG.error("Problem connecting to the metastore when initializing the view registry", e);
  }
}
}
代码示例来源:origin: apache/hive
/**
 * Write a list of the available databases to a file.
 *
 * @param db the Hive client used to query the metastore
 * @param showDatabasesDesc
 *          These are the databases we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException {
  // Resolve the database list, filtered by pattern when one was supplied.
  String pattern = showDatabasesDesc.getPattern();
  List<String> databases;
  if (pattern == null) {
    databases = db.getAllDatabases();
  } else {
    LOG.debug("pattern: {}", pattern);
    databases = db.getDatabasesByPattern(pattern);
  }
  LOG.info("Found {} database(s) matching the SHOW DATABASES statement.", databases.size());
  // Stream the result into the requested output file; always close the stream.
  DataOutputStream outStream = getOutputStream(showDatabasesDesc.getResFile());
  try {
    formatter.showDatabases(outStream, databases);
  } catch (Exception e) {
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show databases");
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
代码示例来源:origin: apache/drill
/**
 * Write a list of the available databases to a file.
 *
 * @param db the Hive client used to query the metastore
 * @param showDatabasesDesc
 *          These are the databases we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showDatabases(Hive db, ShowDatabasesDesc showDatabasesDesc) throws HiveException {
  // get the databases for the desired pattern - populate the output stream
  List<String> databases = null;
  if (showDatabasesDesc.getPattern() != null) {
    // Fix: parameterized logging avoids building the message when the level is disabled.
    LOG.info("pattern: {}", showDatabasesDesc.getPattern());
    databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern());
  } else {
    databases = db.getAllDatabases();
  }
  LOG.info("results : {}", databases.size());
  // write the results in the file
  DataOutputStream outStream = getOutputStream(showDatabasesDesc.getResFile());
  try {
    formatter.showDatabases(outStream, databases);
  } catch (Exception e) {
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "show databases");
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
代码示例来源:origin: apache/hive
/**
 * Removes all databases and tables from the metastore.
 * Every non-default database is dropped entirely; the default database is kept
 * but emptied of its tables.
 *
 * @param hive the Hive client used to enumerate and drop databases/tables
 * @param wh the warehouse used to resolve database paths
 * @param defaultPerm the filesystem permission applied to each database path before dropping
 * @throws HiveException if a metastore operation fails
 * @throws MetaException if resolving a database path fails
 * @throws NoSuchObjectException if a referenced object disappears mid-cleanup
 */
public static void cleanupHMS(Hive hive, Warehouse wh, FsPermission defaultPerm)
    throws HiveException, MetaException, NoSuchObjectException {
  for (String dbName : hive.getAllDatabases()) {
    if ("default".equals(dbName)) {
      continue;
    }
    try {
      // Reset permissions so the drop below cannot fail on access rights.
      Path dbPath = getDbPath(hive, wh, dbName);
      FileSystem warehouseFs = dbPath.getFileSystem(hive.getConf());
      warehouseFs.setPermission(dbPath, defaultPerm);
    } catch (IOException ignored) {
      // Best-effort: a permission failure must not abort the overall cleanup.
    }
    hive.dropDatabase(dbName, true, true, true);
  }
  // The default database cannot be dropped; drop its tables individually.
  for (String tableName : hive.getAllTables("default")) {
    hive.dropTable("default", tableName, true, true);
  }
}
代码示例来源:origin: apache/hive
hm.getAllDatabases();
hm.dumpAndClearMetaCallTiming("test");
String logStr = appender.getOutput();
代码示例来源:origin: apache/phoenix
for (String dbName : db.getAllDatabases()) {
SessionState.get().setCurrentDatabase(dbName);
for (String tblName : db.getAllTables()) {
for (String dbName : db.getAllDatabases()) {
SessionState.get().setCurrentDatabase(dbName);
for (String tblName : db.getAllTables()) {
代码示例来源:origin: apache/incubator-atlas
/**
 * Walks every database known to the Hive client, registers each one, and
 * imports the tables of every successfully registered database.
 *
 * @param failOnError propagated to the table import so it can abort or continue on failures
 * @throws Exception if listing databases or importing tables fails
 */
private void importDatabases(boolean failOnError) throws Exception {
  for (String databaseName : hiveClient.getAllDatabases()) {
    Referenceable dbReference = registerDatabase(databaseName);
    // Skip table import when registration produced no reference.
    if (dbReference == null) {
      continue;
    }
    importTables(dbReference, databaseName, failOnError);
  }
}
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern());
} else {
databases = db.getAllDatabases();
代码示例来源:origin: apache/lens
/**
 * Gets all database names visible in the given session's metastore.
 *
 * @param sessionid the Lens session whose Hive configuration is used to reach the metastore
 * @return the names of all existing databases
 * @throws LensException if the underlying Hive metastore call fails
 */
@Override
public List<String> getAllDatabases(LensSessionHandle sessionid) throws LensException {
  // SessionContext is only needed for its scope side effects, hence "ignored".
  try (SessionContext ignored = new SessionContext(sessionid)) {
    return Hive.get(getSession(sessionid).getHiveConf()).getAllDatabases();
  } catch (HiveException e) {
    // Wrap with the cause preserved so callers see the original failure.
    throw new LensException(e);
  }
}
代码示例来源:origin: com.facebook.presto.hive/hive-apache
databases = db.getDatabasesByPattern(showDatabasesDesc.getPattern());
} else {
databases = db.getAllDatabases();
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Re-registers every permanent function stored in the metastore with the
 * in-process FunctionRegistry. Registration failures for individual functions
 * are logged and skipped so one bad function cannot block the rest.
 *
 * @throws HiveException if the metastore cannot be reached to list databases/functions
 */
public static void reloadFunctions() throws HiveException {
  Hive db = Hive.get();
  for (String dbName : db.getAllDatabases()) {
    for (String functionName : db.getFunctions(dbName, "*")) {
      Function function = db.getFunction(dbName, functionName);
      try {
        FunctionRegistry.registerPermanentFunction(
            FunctionUtils.qualifyFunctionName(functionName, dbName), function.getClassName(),
            false, FunctionTask.toFunctionResource(function.getResourceUris()));
      } catch (Exception e) {
        // Fix: pass the exception to the logger so the stack trace is not swallowed.
        LOG.warn("Failed to register persistent function " +
            functionName + ":" + function.getClassName() + ". Ignore and continue.", e);
      }
    }
  }
}
代码示例来源:origin: io.snappydata/snappydata-core
List<String> schemas = hmc.getAllDatabases();
ArrayList<ExternalTableMetaData> externalTables = new ArrayList<>();
for (String schema : schemas) {
List<String> schemas = hmc.getAllDatabases();
ArrayList<PolicyTableData> policyData = new ArrayList<>();
for (String schema : schemas) {
List<String> dbList = hmc.getAllDatabases();
HashMap<String, List<String>> dbTablesMap = new HashMap<>();
for (String db : dbList) {
内容来源于网络,如有侵权,请联系作者删除!