本文整理了Java中org.apache.hadoop.hive.ql.metadata.Hive.alterPartition()
方法的一些代码示例,展示了Hive.alterPartition()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Hive.alterPartition()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Hive
类名称:Hive
方法名:alterPartition
[英]Updates the existing partition metadata with the new metadata.
[中]使用新元数据更新现有分区元数据。
代码示例来源:origin: apache/drill
/**
 * Updates the existing partition metadata with the new metadata.
 *
 * @param tblName
 *          name of the existing table, optionally qualified as "db.table"
 *          (split via {@code Utilities.getDbTableName})
 * @param newPart
 *          new partition whose metadata replaces the existing one
 * @param environmentContext
 *          extra context forwarded to the metastore operation; may be
 *          {@code null} (several callers in this file pass null)
 * @throws InvalidOperationException
 *           if the changes in metadata is not acceptable
 * @throws HiveException
 *           if the underlying metastore update fails
 */
public void alterPartition(String tblName, Partition newPart, EnvironmentContext environmentContext)
    throws InvalidOperationException, HiveException {
  // Delegate to the (db, table) overload after splitting the qualified name.
  String[] names = Utilities.getDbTableName(tblName);
  alterPartition(names[0], names[1], newPart, environmentContext);
}
代码示例来源:origin: apache/drill
/**
 * Applies the given partition spec to {@code tpart} in memory and then
 * persists the updated partition metadata through {@code alterPartition}.
 */
private void alterPartitionSpec(Table tbl,
    Map<String, String> partSpec,
    org.apache.hadoop.hive.metastore.api.Partition tpart,
    boolean inheritTableSpecs,
    String partPath) throws HiveException, InvalidOperationException {
  alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath);
  // Qualify the table name with its database only when one is set.
  String qualifiedName = org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())
      ? tbl.getTableName()
      : tbl.getDbName() + "." + tbl.getTableName();
  alterPartition(qualifiedName, new Partition(tbl, tpart), null);
}
代码示例来源:origin: apache/hive
/**
 * Updates the existing partition metadata with the new metadata.
 *
 * @param tblName
 *          name of the existing table, optionally qualified as "db.table"
 * @param newPart
 *          new partition whose metadata replaces the existing one
 * @param environmentContext
 *          extra context forwarded to the metastore operation; may be
 *          {@code null}
 * @param transactional
 *          forwarded unchanged to the catalog-aware overload
 * @throws InvalidOperationException
 *           if the changes in metadata is not acceptable
 * @throws HiveException
 *           if the underlying metastore update fails
 * @deprecated use the overload that also takes an explicit catalog name
 *             (this method forwards with a {@code null} catalog)
 */
@Deprecated
public void alterPartition(String tblName, Partition newPart,
    EnvironmentContext environmentContext, boolean transactional)
    throws InvalidOperationException, HiveException {
  String[] names = Utilities.getDbTableName(tblName);
  alterPartition(null, names[0], names[1], newPart, environmentContext, transactional);
}
代码示例来源:origin: apache/hive
/**
 * Applies the given partition spec to {@code tpart} in memory and then
 * persists the updated partition metadata through the catalog-aware
 * {@code alterPartition} overload.
 *
 * @param tbl the table owning the partition
 * @param partSpec partition spec to apply
 * @param tpart metastore-level partition object updated in place
 * @param inheritTableSpecs whether the partition inherits the table's specs
 * @param partPath new partition data location
 * @throws HiveException if the metastore update fails
 * @throws InvalidOperationException if the metadata change is not acceptable
 */
private void alterPartitionSpec(Table tbl,
    Map<String, String> partSpec,
    org.apache.hadoop.hive.metastore.api.Partition tpart,
    boolean inheritTableSpecs,
    String partPath) throws HiveException, InvalidOperationException {
  alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath);
  // The fully-qualified name previously computed here was never used by the
  // catalog-aware alterPartition call below, so that dead code was removed.
  alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
      new Partition(tbl, tpart), null, true);
}
代码示例来源:origin: apache/hive
db.alterPartition(table.getCatalogName(), table.getDbName(), table.getTableName(),
partn, null, true);
代码示例来源:origin: apache/drill
db.alterPartition(table.getDbName(), table.getTableName(), partn, null);
代码示例来源:origin: apache/drill
db.alterPartition(tbl.getTableName(), part, null);
} else {
Path url = new Path(tbl.getPath().toString());
代码示例来源:origin: apache/hive
db.alterPartition(simpleDesc.getTableName(), p, null, true);
代码示例来源:origin: apache/hive
try {
db.alterPartition(simpleDesc.getTableName(), p, null, true);
} catch (InvalidOperationException e) {
throw new HiveException(e);
代码示例来源:origin: apache/drill
/**
 * Collects the data locations for the given table, refreshing basic-stats
 * metadata in the metastore along the way when it is found to be stale.
 *
 * @param db       hive client used to read partitions and write back metadata
 * @param table    table whose locations are collected
 * @param partSpec restricts the scan to matching partitions; when {@code null},
 *                 all partitions (or the table path itself, if unpartitioned)
 *                 are used
 * @return list of data locations found
 * @throws HiveException if reading or updating metastore metadata fails
 * @throws InvalidOperationException if a metadata update is not acceptable
 */
private List<Path> getLocations(Hive db, Table table, Map<String, String> partSpec)
    throws HiveException, InvalidOperationException {
  List<Path> locations = new ArrayList<Path>();
  if (partSpec == null) {
    if (table.isPartitioned()) {
      collectPartitionLocations(db, table, db.getPartitions(table), locations);
    } else {
      locations.add(table.getPath());
      EnvironmentContext environmentContext = new EnvironmentContext();
      if (needToUpdateStats(table.getParameters(), environmentContext)) {
        db.alterTable(table.getDbName()+"."+table.getTableName(), table, environmentContext);
      }
    }
  } else {
    collectPartitionLocations(db, table, db.getPartitionsByNames(table, partSpec), locations);
  }
  return locations;
}

/**
 * Records each partition's data location and pushes updated stats metadata
 * for any partition whose stats are stale. Extracted because this loop was
 * duplicated verbatim for the all-partitions and by-names cases.
 */
private void collectPartitionLocations(Hive db, Table table, List<Partition> partitions,
    List<Path> locations) throws HiveException, InvalidOperationException {
  for (Partition partition : partitions) {
    locations.add(partition.getDataLocation());
    EnvironmentContext environmentContext = new EnvironmentContext();
    if (needToUpdateStats(partition.getParameters(), environmentContext)) {
      db.alterPartition(table.getDbName(), table.getTableName(), partition, environmentContext);
    }
  }
}
代码示例来源:origin: apache/drill
db.alterPartition(simpleDesc.getTableName(), p, null);
代码示例来源:origin: apache/drill
setUnArchived(p);
try {
db.alterPartition(simpleDesc.getTableName(), p, null);
} catch (InvalidOperationException e) {
throw new HiveException(e);
代码示例来源:origin: apache/hive
db.alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
part, environmentContext, true);
} catch (InvalidOperationException e) {
代码示例来源:origin: apache/drill
db.alterPartition(touchDesc.getTableName(), part, environmentContext);
} catch (InvalidOperationException e) {
throw new HiveException(e);
代码示例来源:origin: apache/hive
p = db.getPartition(t, p.getSpec(), false);
p.setLastAccessTime(lastAccessTime);
db.alterPartition(null, dbName, tblName, p, null, false);
t.setLastAccessTime(lastAccessTime);
db.alterTable(dbName + "." + tblName, t, false, null, false);
代码示例来源:origin: apache/drill
p = db.getPartition(t, p.getSpec(), false);
p.setLastAccessTime(lastAccessTime);
db.alterPartition(t.getTableName(), p, null);
t.setLastAccessTime(lastAccessTime);
db.alterTable(t.getDbName() + "." + t.getTableName(), t, null);
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Updates the existing partition metadata with the new metadata.
 *
 * @param tblName
 *          name of the existing table, optionally qualified as "db.table"
 *          (split via {@code Utilities.getDbTableName})
 * @param newPart
 *          new partition whose metadata replaces the existing one
 * @throws InvalidOperationException
 *           if the changes in metadata is not acceptable
 * @throws HiveException
 *           if the underlying metastore update fails
 */
public void alterPartition(String tblName, Partition newPart)
    throws InvalidOperationException, HiveException {
  // Delegate to the (db, table) overload after splitting the qualified name.
  String[] names = Utilities.getDbTableName(tblName);
  alterPartition(names[0], names[1], newPart);
}
代码示例来源:origin: apache/lens
/**
 * Pushes updated metadata for an existing partition of this entity's
 * storage table.
 *
 * @param client    hive client instance
 * @param fact      fact name
 * @param partition partition to be updated
 * @throws InvalidOperationException if the metadata change is rejected
 * @throws HiveException on any other metastore failure
 */
public void updatePartition(Hive client, String fact, Partition partition)
    throws InvalidOperationException, HiveException {
  String storageTable = MetastoreUtil.getFactOrDimtableStorageTableName(fact, getName());
  client.alterPartition(storageTable, partition, null);
}
代码示例来源:origin: org.apache.lens/lens-cube
/**
 * Update existing partition
 * @param client hive client instance
 * @param fact fact name
 * @param partition partition to be updated
 * @throws InvalidOperationException if the metadata change is rejected
 * @throws HiveException on any other metastore failure
 */
public void updatePartition(Hive client, String fact, Partition partition)
    throws InvalidOperationException, HiveException {
  // Resolve the storage table backing this fact/dim table, then alter
  // the partition on it (no extra environment context).
  client.alterPartition(MetastoreUtil.getFactOrDimtableStorageTableName(fact, getName()), partition, null);
}
代码示例来源:origin: apache/lens
/**
 * Registers a partition for the given event, keyed by date.
 *
 * @param eventName the event name
 * @param key       the key stored under the "dt" partition column
 * @param finalPath the final path used as the partition location
 * @param className the class name
 * @return true if the partition was added; false on any failure (logged)
 */
private boolean addPartition(String eventName, String key, Path finalPath, String className) {
  try {
    Table table = getTable(eventName, className);
    HashMap<String, String> spec = new HashMap<String, String>();
    spec.put("dt", key);
    Partition partition = client.createPartition(table, spec);
    partition.setLocation(finalPath.toString());
    client.alterPartition(database, eventName, partition, null);
    return true;
  } catch (Exception e) {
    // Best-effort: failures are reported to the caller via the return value.
    LOG.warn("Unable to add the partition ", e);
    return false;
  }
}
内容来源于网络,如有侵权,请联系作者删除!