Usage and Code Examples of the javax.jdo.Query.closeAll() Method

This article compiles a number of Java code examples for the javax.jdo.Query.closeAll method and shows how Query.closeAll is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they serve as useful references. Details of the Query.closeAll method are as follows:
Package: javax.jdo
Class: Query
Method: closeAll

Overview of Query.closeAll

Close all query results associated with this Query instance, and release all resources associated with them. The query results might have iterators open on them. Iterators associated with the query results are invalidated: they return false from hasNext() and throw NoSuchElementException from next().
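
Before the project excerpts, here is a minimal sketch of the usual try/finally pattern around closeAll(). It assumes a PersistenceManager named pm and a hypothetical persistence-capable class Product; neither comes from the examples quoted below.

import java.util.ArrayList;
import java.util.List;
import javax.jdo.PersistenceManager;
import javax.jdo.Query;

// Minimal sketch only: "Product" is a hypothetical persistence-capable class
// used to illustrate the pattern; it does not come from the projects below.
List<Product> findCheapProducts(PersistenceManager pm, double limit) {
 Query query = pm.newQuery(Product.class, "price < priceLimit");
 query.declareParameters("double priceLimit");
 try {
  @SuppressWarnings("unchecked")
  List<Product> results = (List<Product>) query.execute(limit);
  // Copy the managed result list before closing the query, so the caller
  // does not end up holding an invalidated collection.
  return new ArrayList<>(results);
 } finally {
  // Releases every result set of this Query; any iterators still open on
  // them become invalid (hasNext() returns false, next() throws).
  query.closeAll();
 }
}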

Code examples

Code example source: apache/hive

/**
  * Explicitly closes the query object to release the resources
  */
 @Override
 public void close() {
  if (query != null) {
   query.closeAll();
   query = null;
  }
 }
}

Code example source: apache/hive

public void closeAllQueries() {
 for (Query q : queries) {
  try {
   q.closeAll();
  } catch (Throwable t) {
   LOG.error("Failed to close a query", t);
  }
 }
}

Code example source: apache/hive

/**
 * This is a cleanup method which is used to roll back an active transaction
 * if the success flag is false and close the associated Query object. This method is used
 * internally and is visible for testing purposes only.
 * @param success Rollback the current active transaction if false
 * @param query Query object which needs to be closed
 */
@VisibleForTesting
void rollbackAndCleanup(boolean success, Query query) {
 try {
  if (!success) {
   rollbackTransaction();
  }
 } finally {
  if (query != null) {
   query.closeAll();
  }
 }
}

Code example source: apache/hive

@Override
 public List<Object[]> run(List<String> input) throws MetaException {
  String queryText = queryText0 + makeParams(input.size()) + ")";
  Object[] params = new Object[input.size() + 3];
  params[0] = catName;
  params[1] = dbName;
  params[2] = tableName;
  for (int i = 0; i < input.size(); ++i) {
   params[i + 3] = input.get(i);
  }
  long start = doTrace ? System.nanoTime() : 0;
  Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
  Object qResult = executeWithArray(query, params, queryText);
  MetastoreDirectSqlUtils.timingTrace(doTrace, queryText0 + "...)", start, (doTrace ? System.nanoTime() : 0));
  if (qResult == null) {
   query.closeAll();
   return null;
  }
  addQueryAfterUse(query);
  return MetastoreDirectSqlUtils.ensureList(qResult);
 }
};

Code example source: apache/hive

private boolean isViewTable(String catName, String dbName, String tblName) throws MetaException {
 Query query = null;
 try {
  String queryText = "select \"TBL_TYPE\" from " + TBLS + "" +
    " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " +
    " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?";
  Object[] params = new Object[] { tblName, dbName, catName };
  query = pm.newQuery("javax.jdo.query.SQL", queryText);
  query.setUnique(true);
  Object result = executeWithArray(query, params, queryText);
  return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString());
 } finally {
  if (query != null) {
   query.closeAll();
  }
 }
}

Code example source: apache/hive

query.closeAll();

Code example source: apache/hive

private void dropPartitionsNoTxn(String catName, String dbName, String tblName, List<String> partNames) {
 ObjectPair<Query, Map<String, String>> queryWithParams =
   getPartQueryWithParams(catName, dbName, tblName, partNames);
 Query query = queryWithParams.getFirst();
 query.setClass(MPartition.class);
 long deleted = query.deletePersistentAll(queryWithParams.getSecond());
 LOG.debug("Deleted {} partition from store", deleted);
 query.closeAll();
}

Code example source: apache/hive

qResult = executeWithArray(query, new Object[] { dbName, catName }, queryText);
if (qResult == null) {
 query.closeAll();
 return colStatsForDB;
}
// ... processing of qResult elided in the original excerpt ...
query.closeAll();

Code example source: apache/hive

private MDelegationToken getTokenFrom(String tokenId) {
 Query query = pm.newQuery(MDelegationToken.class, "tokenIdentifier == tokenId");
 query.declareParameters("java.lang.String tokenId");
 query.setUnique(true);
 MDelegationToken delegationToken = (MDelegationToken) query.execute(tokenId);
 if (query != null) {
  query.closeAll();
 }
 return delegationToken;
}

Code example source: apache/hive

private void getStatsTableListResult(
   String queryText, List<org.apache.hadoop.hive.common.TableName> result) throws MetaException {
  LOG.debug("Running {}", queryText);
  Query<?> query = pm.newQuery("javax.jdo.query.SQL", queryText);
  try {
   List<Object[]> sqlResult = MetastoreDirectSqlUtils
     .ensureList(executeWithArray(query, STATS_TABLE_TYPES, queryText));
   for (Object[] line : sqlResult) {
    result.add(new org.apache.hadoop.hive.common.TableName(
      MetastoreDirectSqlUtils.extractSqlString(line[2]), MetastoreDirectSqlUtils
      .extractSqlString(line[1]), MetastoreDirectSqlUtils.extractSqlString(line[0])));
   }
  } finally {
   query.closeAll();
  }
 }
}

Code example source: apache/hive

private MSerDeInfo getMSerDeInfo(String serDeName) throws MetaException {
 Query query = null;
 try {
  query = pm.newQuery(MSerDeInfo.class, "name == serDeName");
  query.declareParameters("java.lang.String serDeName");
  query.setUnique(true);
  MSerDeInfo mSerDeInfo = (MSerDeInfo)query.execute(serDeName);
  pm.retrieve(mSerDeInfo);
  return mSerDeInfo;
 } finally {
  if (query != null) {
   query.closeAll();
  }
 }
}

Code example source: apache/hive

private Integer getNumPartitionsViaOrmFilter(Table table, ExpressionTree tree, boolean isValidatedFilter)
 throws MetaException {
 Map<String, Object> params = new HashMap<>();
 String jdoFilter = makeQueryFilterString(table.getCatName(), table.getDbName(), table, tree,
   params, isValidatedFilter);
 if (jdoFilter == null) {
  assert !isValidatedFilter;
  return null;
 }
 Query query = pm.newQuery(
   "select count(partitionName) from org.apache.hadoop.hive.metastore.model.MPartition"
 );
 query.setFilter(jdoFilter);
 String parameterDeclaration = makeParameterDeclarationStringObj(params);
 query.declareParameters(parameterDeclaration);
 Long result = (Long) query.executeWithMap(params);
 query.closeAll();
 return result.intValue();
}

Code example source: apache/hive

private List<String> getPartitionNamesNoTxn(String catName, String dbName, String tableName, short max) {
 List<String> pns = new ArrayList<>();
 if (max == 0) {
  return pns;
 }
 catName = normalizeIdentifier(catName);
 dbName = normalizeIdentifier(dbName);
 tableName = normalizeIdentifier(tableName);
 Query query =
   pm.newQuery("select partitionName from org.apache.hadoop.hive.metastore.model.MPartition "
     + "where table.database.name == t1 && table.tableName == t2 && table.database.catalogName == t3 "
     + "order by partitionName asc");
 query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
 query.setResult("partitionName");
 if (max > 0) {
  query.setRange(0, max);
 }
 Collection<String> names = (Collection<String>) query.execute(dbName, tableName, catName);
 pns.addAll(names);
 if (query != null) {
  query.closeAll();
 }
 return pns;
}

Code example source: apache/hive

/**
 * Gets partition names from the table via ORM (JDOQL) name filter.
 * @param dbName Database name.
 * @param tblName Table name.
 * @param partNames Partition names to get the objects for.
 * @return Resulting partitions.
 */
private List<Partition> getPartitionsViaOrmFilter(String catName,
  String dbName, String tblName, List<String> partNames) throws MetaException {
 if (partNames.isEmpty()) {
  return new ArrayList<>();
 }
 ObjectPair<Query, Map<String, String>> queryWithParams =
   getPartQueryWithParams(catName, dbName, tblName, partNames);
 Query query = queryWithParams.getFirst();
 query.setResultClass(MPartition.class);
 query.setClass(MPartition.class);
 query.setOrdering("partitionName ascending");
 @SuppressWarnings("unchecked")
 List<MPartition> mparts = (List<MPartition>)query.executeWithMap(queryWithParams.getSecond());
 List<Partition> partitions = convertToParts(catName, dbName, tblName, mparts);
 if (query != null) {
  query.closeAll();
 }
 return partitions;
}

Code example source: apache/hive

private void lockForUpdate() throws MetaException {
 String selectQuery = "select \"NEXT_EVENT_ID\" from \"NOTIFICATION_SEQUENCE\"";
 String selectForUpdateQuery = sqlGenerator.addForUpdateClause(selectQuery);
 new RetryingExecutor(conf, () -> {
  prepareQuotes();
  Query query = pm.newQuery("javax.jdo.query.SQL", selectForUpdateQuery);
  query.setUnique(true);
  // only need to execute it to get db Lock
  query.execute();
  query.closeAll();
 }).run();
}

Code example source: apache/hive

/**
 * Detaches column descriptors from storage descriptors; returns the set of unique CDs
 * thus detached. This is done before dropping partitions because CDs are reused between
 * SDs; so, we remove the links to delete SDs and then check the returned CDs to see if
 * they are referenced by other SDs.
 */
private HashSet<MColumnDescriptor> detachCdsFromSdsNoTxn(
  String catName, String dbName, String tblName, List<String> partNames) {
 ObjectPair<Query, Map<String, String>> queryWithParams =
   getPartQueryWithParams(catName, dbName, tblName, partNames);
 Query query = queryWithParams.getFirst();
 query.setClass(MPartition.class);
 query.setResult("sd");
 @SuppressWarnings("unchecked")
 List<MStorageDescriptor> sds = (List<MStorageDescriptor>)query.executeWithMap(
   queryWithParams.getSecond());
 HashSet<MColumnDescriptor> candidateCds = new HashSet<>();
 for (MStorageDescriptor sd : sds) {
  if (sd != null && sd.getCD() != null) {
   candidateCds.add(sd.getCD());
   sd.setCD(null);
  }
 }
 if (query != null) {
  query.closeAll();
 }
 return candidateCds;
}

Code example source: apache/hive

query.closeAll();
return results;

Code example source: apache/hive

private boolean runTestQuery() {
 Transaction tx = pm.currentTransaction();
 boolean doCommit = false;
 if (!tx.isActive()) {
  tx.begin();
  doCommit = true;
 }
 Query query = null;
 // Run a self-test query. If it doesn't work, we will self-disable. What a PITA...
 String selfTestQuery = "select \"DB_ID\" from " + DBS + "";
 try {
  prepareTxn();
  query = pm.newQuery("javax.jdo.query.SQL", selfTestQuery);
  query.execute();
  return true;
 } catch (Throwable t) {
  doCommit = false;
  LOG.warn("Self-test query [" + selfTestQuery + "] failed; direct SQL is disabled", t);
  tx.rollback();
  return false;
 } finally {
  if (doCommit) {
   tx.commit();
  }
  if (query != null) {
   query.closeAll();
  }
 }
}

Code example source: apache/hive

private MISchema getMISchema(String catName, String dbName, String name) {
 Query query = null;
 try {
  name = normalizeIdentifier(name);
  dbName = normalizeIdentifier(dbName);
  catName = normalizeIdentifier(catName);
  query = pm.newQuery(MISchema.class,
    "name == schemaName && db.name == dbname && db.catalogName == cat");
  query.declareParameters(
    "java.lang.String schemaName, java.lang.String dbname, java.lang.String cat");
  query.setUnique(true);
  MISchema mSchema = (MISchema)query.execute(name, dbName, catName);
  pm.retrieve(mSchema);
  return mSchema;
 } finally {
  if (query != null) {
   query.closeAll();
  }
 }
}

Code example source: apache/hive

private MSchemaVersion getMSchemaVersion(String catName, String dbName, String schemaName, int version) {
 Query query = null;
 try {
  dbName = normalizeIdentifier(dbName);
  schemaName = normalizeIdentifier(schemaName);
  query = pm.newQuery(MSchemaVersion.class,
    "iSchema.name == schemaName && iSchema.db.name == dbName &&" +
      "iSchema.db.catalogName == cat && version == schemaVersion");
  query.declareParameters( "java.lang.String schemaName, java.lang.String dbName," +
    "java.lang.String cat, java.lang.Integer schemaVersion");
  query.setUnique(true);
  MSchemaVersion mSchemaVersion =
    (MSchemaVersion)query.executeWithArray(schemaName, dbName, catName, version);
  pm.retrieve(mSchemaVersion);
  if (mSchemaVersion != null) {
   pm.retrieveAll(mSchemaVersion.getCols());
   if (mSchemaVersion.getSerDe() != null) {
    pm.retrieve(mSchemaVersion.getSerDe());
   }
  }
  return mSchemaVersion;
 } finally {
  if (query != null) {
   query.closeAll();
  }
 }
}
