This article collects Java code examples for the org.elasticsearch.client.Client.prepareSearch() method and shows how Client.prepareSearch() is used in practice. The examples are taken from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they are representative and should serve as useful references. Details of the Client.prepareSearch() method:
Package path: org.elasticsearch.client.Client
Class name: Client
Method name: prepareSearch
Description: Search across one or more indices and one or more types with a query.
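Before the project excerpts below, here is a minimal sketch of a typical prepareSearch() call chain against the legacy transport Client API. The index name, field, and value are placeholder assumptions for illustration only and do not come from any of the quoted projects.

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public class PrepareSearchSketch {

    /** Runs a simple term query against one index and returns the total number of matching documents. */
    static long countByTerm(Client client) {
        SearchResponse response = client.prepareSearch("my-index")         // placeholder index name
                .setQuery(QueryBuilders.termQuery("status", "ACTIVE"))     // placeholder field and value
                .setFrom(0)                                                // offset of the first hit
                .setSize(10)                                               // number of hits to fetch
                .execute()
                .actionGet();                                              // block until the response arrives
        return response.getHits().getTotalHits();
    }
}

Most of the excerpts below follow this same pattern, varying only the query builders, sort orders, and aggregations attached to the resulting SearchRequestBuilder.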
Code example source: origin: loklak/loklak_server

/**
 * Get the number of documents in the search index for a given search query
 *
 * @param q the query
 * @return the count of all documents in the index which matches with the query
 */
private long count(final QueryBuilder q, final String indexName) {
    SearchResponse response =
            elasticsearchClient.prepareSearch(indexName).setQuery(q).setSize(0).execute().actionGet();
    return response.getHits().getTotalHits();
}
Code example source: origin: stagemonitor/stagemonitor

@Test
public void testCollectElasticsearchQueries() throws Exception {
    CallStackElement total = Profiler.activateProfiling("total");
    client.prepareSearch().setQuery(QueryBuilders.matchAllQuery()).get();
    client.prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).get();
    Profiler.stop();
    Assert.assertEquals(total.toString(), "POST /_search\n" +
            "{\"query\":{\"match_all\":{\"boost\":1.0}}} ", total.getChildren().get(0).getSignature());
    Assert.assertEquals(total.toString(), "POST /_search?search_type=dfs_query_then_fetch\n" +
            "{\"query\":{\"match_all\":{\"boost\":1.0}}} ", total.getChildren().get(1).getSignature());
}
Code example source: origin: unchartedsoftware/aperture-tiles

private SearchRequestBuilder baseQuery(FilterBuilder filter) {
    return this.client.prepareSearch(this.index)
            .setTypes("datum")
            .setSearchType(SearchType.COUNT)
            .setQuery(QueryBuilders.filteredQuery(
                    QueryBuilders.matchAllQuery(),
                    filter
            ));
}
Code example source: origin: loklak/loklak_server

public long countLocal(final String index, final String provider_hash) {
    try {
        SearchResponse response = elasticsearchClient.prepareSearch(index)
                .setSize(0)
                .setQuery(QueryBuilders.matchQuery("provider_hash", provider_hash))
                .execute()
                .actionGet();
        return response.getHits().getTotalHits();
    } catch (Throwable e) {
        DAO.severe(e);
        return 0;
    }
}
Code example source: origin: loklak/loklak_server

public long count(final String index, final String histogram_timefield, final long millis) {
    try {
        SearchResponse response = elasticsearchClient.prepareSearch(index)
                .setSize(0)
                .setQuery(millis <= 0
                        ? QueryBuilders.constantScoreQuery(QueryBuilders.matchAllQuery())
                        : QueryBuilders.rangeQuery(histogram_timefield).from(new Date(System.currentTimeMillis() - millis)))
                .execute()
                .actionGet();
        return response.getHits().getTotalHits();
    } catch (Throwable e) {
        DAO.severe(e);
        return 0;
    }
}
Code example source: origin: brianfrankcooper/YCSB

try {
    final RangeQueryBuilder rangeQuery = rangeQuery("_id").gte(startkey);
    final SearchResponse response = client.prepareSearch(indexKey)
            .setTypes(table)
            .setQuery(rangeQuery)
            .setSize(recordcount)
            .execute()
            .actionGet();
    for (SearchHit hit : response.getHits()) {
        entry = new HashMap<>(fields.size());
        for (String field : fields) {
Code example source: origin: loklak/loklak_server

/**
 * Get a search response for predefined aggregation task on a specific index.
 * @param index Name of ES index
 * @param aggr Pre-configured AggregationBuilder object
 * @return HashMap with parsed aggregations
 */
private SearchResponse getAggregationResponse(String index, @SuppressWarnings("rawtypes") AggregationBuilder aggr) {
    return this.elasticsearchClient.prepareSearch(index)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.matchAllQuery())
            .setFrom(0)
            .setSize(0)
            .addAggregation(aggr)
            .execute().actionGet();
}
Code example source: origin: loklak/loklak_server

private Query(final String indexName, QueryBuilder queryBuilder, String order_field, int resultCount) {
    // TODO: sort data using order_field
    // prepare request
    SearchRequestBuilder request = elasticsearchClient.prepareSearch(indexName)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(queryBuilder)
            .setFrom(0)
            .setSize(resultCount);
    request.clearRescorers();
    // get response
    SearchResponse response = request.execute().actionGet();
    hitCount = (int) response.getHits().getTotalHits();
    // evaluate search result
    SearchHit[] hits = response.getHits().getHits();
    this.result = new ArrayList<Map<String, Object>>(hitCount);
    for (SearchHit hit : hits) {
        Map<String, Object> map = hit.getSource();
        this.result.add(map);
    }
}
Code example source: origin: Netflix/conductor

@Override
public List<String> searchArchivableWorkflows(String indexName, long archiveTtlDays) {
    QueryBuilder q = QueryBuilders.boolQuery()
            .should(QueryBuilders.termQuery("status", "COMPLETED"))
            .should(QueryBuilders.termQuery("status", "FAILED"))
            .mustNot(QueryBuilders.existsQuery("archived"))
            .minimumShouldMatch(1);
    SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName)
            .setTypes("workflow")
            .setQuery(q)
            .addSort("endTime", SortOrder.ASC)
            .setSize(archiveSearchBatchSize);
    SearchResponse response = s.execute().actionGet();
    SearchHits hits = response.getHits();
    logger.info("Archive search totalHits - {}", hits.getTotalHits());
    return Arrays.stream(hits.getHits())
            .map(hit -> hit.getId())
            .collect(Collectors.toCollection(LinkedList::new));
}
Code example source: origin: thinkaurelius/titan

@Override
public Iterable<RawQuery.Result<String>> query(RawQuery query, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    SearchRequestBuilder srb = client.prepareSearch(indexName);
    srb.setTypes(query.getStore());
    srb.setQuery(QueryBuilders.queryStringQuery(query.getQuery()));
    srb.setFrom(query.getOffset());
    if (query.hasLimit()) srb.setSize(query.getLimit());
    else srb.setSize(maxResultsSize);
    srb.setNoFields();
    //srb.setExplain(true);
    SearchResponse response = srb.execute().actionGet();
    log.debug("Executed query [{}] in {} ms", query.getQuery(), response.getTookInMillis());
    SearchHits hits = response.getHits();
    if (!query.hasLimit() && hits.totalHits() >= maxResultsSize)
        log.warn("Query result set truncated to first [{}] elements for query: {}", maxResultsSize, query);
    List<RawQuery.Result<String>> result = new ArrayList<RawQuery.Result<String>>(hits.hits().length);
    for (SearchHit hit : hits) {
        result.add(new RawQuery.Result<String>(hit.id(), hit.getScore()));
    }
    return result;
}
Code example source: origin: Netflix/conductor

@Override
public List<String> searchRecentRunningWorkflows(int lastModifiedHoursAgoFrom,
                                                 int lastModifiedHoursAgoTo) {
    DateTime dateTime = new DateTime();
    QueryBuilder q = QueryBuilders.boolQuery()
            .must(QueryBuilders.rangeQuery("updateTime")
                    .gt(dateTime.minusHours(lastModifiedHoursAgoFrom)))
            .must(QueryBuilders.rangeQuery("updateTime")
                    .lt(dateTime.minusHours(lastModifiedHoursAgoTo)))
            .must(QueryBuilders.termQuery("status", "RUNNING"));
    SearchRequestBuilder s = elasticSearchClient.prepareSearch(indexName)
            .setTypes("workflow")
            .setQuery(q)
            .setSize(5000)
            .addSort("updateTime", SortOrder.ASC);
    SearchResponse response = s.execute().actionGet();
    return StreamSupport.stream(response.getHits().spliterator(), false)
            .map(hit -> hit.getId())
            .collect(Collectors.toCollection(LinkedList::new));
}
Code example source: origin: loklak/loklak_server

public Map<String, Object> query(final String indexName, final String fieldKey, final String fieldValue) {
    if (fieldKey == null || fieldValue.length() == 0) return null;
    // prepare request
    BoolQueryBuilder query = QueryBuilders.boolQuery();
    query.filter(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery(fieldKey, fieldValue)));
    SearchRequestBuilder request = elasticsearchClient.prepareSearch(indexName)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(query)
            .setFrom(0)
            .setSize(1)
            .setTerminateAfter(1);
    // get response
    SearchResponse response = request.execute().actionGet();
    // evaluate search result
    SearchHit[] hits = response.getHits().getHits();
    if (hits.length == 0) return null;
    assert hits.length == 1;
    Map<String, Object> map = hits[0].getSource();
    return map;
}
Code example source: origin: loklak/loklak_server

/**
 * Delete documents using a query. Check what would be deleted first with a normal search query!
 * Elasticsearch once provided a native prepareDeleteByQuery method, but this was removed
 * in later versions. Instead, there is a plugin which iterates over search results,
 * see https://www.elastic.co/guide/en/elasticsearch/plugins/current/plugins-delete-by-query.html
 * We simulate the same behaviour here without the need of that plugin.
 *
 * @param q
 * @return delete document count
 */
public int deleteByQuery(String indexName, final QueryBuilder q) {
    Map<String, String> ids = new TreeMap<>();
    // FIXME: deprecated, "will be removed in 3.0, you should do a regular scroll instead, ordered by `_doc`"
    @SuppressWarnings("deprecation")
    SearchResponse response = elasticsearchClient.prepareSearch(indexName).setSearchType(SearchType.SCAN)
            .setScroll(new TimeValue(60000)).setQuery(q).setSize(100).execute().actionGet();
    while (true) {
        // accumulate the ids here, don't delete them right now to prevent an interference of the delete with the
        // scroll
        for (SearchHit hit : response.getHits().getHits()) {
            ids.put(hit.getId(), hit.getType());
        }
        response = elasticsearchClient.prepareSearchScroll(response.getScrollId()).setScroll(new TimeValue(600000))
                .execute().actionGet();
        // termination
        if (response.getHits().getHits().length == 0)
            break;
    }
    return deleteBulk(indexName, ids);
}
Code example source: origin: Netflix/conductor

final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*")
        .setQuery(fq)
        .setTypes(LOG_DOC_TYPE)
        .addSort(sortBuilder);
SearchResponse response = srb.execute().actionGet();
return Arrays.stream(response.getHits().getHits())
        .map(hit -> {
            String source = hit.getSourceAsString();
Code example source: origin: loklak/loklak_server

public List<Map<String, Object>> queryWithConstraints(final String indexName, final String fieldName, final String fieldValue, final Map<String, String> constraints, boolean latest) throws IOException {
    SearchRequestBuilder request = this.elasticsearchClient.prepareSearch(indexName)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setFrom(0);
    BoolQueryBuilder bFilter = QueryBuilders.boolQuery();
    bFilter.filter(QueryBuilders.constantScoreQuery(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery(fieldName, fieldValue))));
    for (Object o : constraints.entrySet()) {
        @SuppressWarnings("rawtypes")
        Map.Entry entry = (Map.Entry) o;
        bFilter.filter(QueryBuilders.constantScoreQuery(QueryBuilders.termQuery((String) entry.getKey(), ((String) entry.getValue()).toLowerCase())));
    }
    request.setQuery(bFilter);
    // get response
    SearchResponse response = request.execute().actionGet();
    // evaluate search result
    ArrayList<Map<String, Object>> result = new ArrayList<Map<String, Object>>();
    SearchHit[] hits = response.getHits().getHits();
    for (SearchHit hit : hits) {
        Map<String, Object> map = hit.getSource();
        result.add(map);
    }
    return result;
}
Code example source: origin: loklak/loklak_server

public LinkedHashMap<String, Long> fullDateHistogram(final String indexName, int timezoneOffset, String histogram_timefield) {
    // prepare request
    SearchRequestBuilder request = elasticsearchClient.prepareSearch(indexName)
            .setSearchType(SearchType.QUERY_THEN_FETCH)
            .setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchAllQuery()))
            .setFrom(0)
            .setSize(0);
    request.clearRescorers();
    request.addAggregation(AggregationBuilders.dateHistogram(histogram_timefield)
            .field(histogram_timefield).timeZone("UTC").minDocCount(1).interval(DateHistogramInterval.DAY));
    // get response
    SearchResponse response = request.execute().actionGet();
    // evaluate date histogram:
    InternalHistogram<InternalHistogram.Bucket> dateCounts = response.getAggregations().get(histogram_timefield);
    LinkedHashMap<String, Long> list = new LinkedHashMap<>();
    for (InternalHistogram.Bucket bucket : dateCounts.getBuckets()) {
        Calendar cal = Calendar.getInstance(DateParser.UTCtimeZone);
        org.joda.time.DateTime k = (org.joda.time.DateTime) bucket.getKey();
        cal.setTime(k.toDate());
        cal.add(Calendar.MINUTE, -timezoneOffset);
        long docCount = bucket.getDocCount();
        list.put(DateParser.dayDateFormat.format(cal.getTime()), docCount);
    }
    return list;
}
Code example source: origin: thinkaurelius/titan

@Override
public List<String> query(IndexQuery query, KeyInformation.IndexRetriever informations, BaseTransaction tx) throws BackendException {
    SearchRequestBuilder srb = client.prepareSearch(indexName);
    srb.setTypes(query.getStore());
    srb.setQuery(QueryBuilders.matchAllQuery());
    srb.setPostFilter(getFilter(query.getCondition(), informations.get(query.getStore())));
    if (!query.getOrder().isEmpty()) {
        if (query.hasLimit()) srb.setSize(query.getLimit());
        else srb.setSize(maxResultsSize);
        srb.setNoFields();
        SearchResponse response = srb.execute().actionGet();
        log.debug("Executed query [{}] in {} ms", query.getCondition(), response.getTookInMillis());
        SearchHits hits = response.getHits();
        if (!query.hasLimit() && hits.totalHits() >= maxResultsSize)
            log.warn("Query result set truncated to first [{}] elements for query: {}", maxResultsSize, query);
Code example source: origin: Netflix/conductor

@Override
public List<EventExecution> getEventExecutions(String event) {
    try {
        Expression expression = Expression.fromString("event='" + event + "'");
        QueryBuilder queryBuilder = expression.getFilterBuilder();
        BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder);
        QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery("*");
        BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);
        final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*")
                .setQuery(fq).setTypes(EVENT_DOC_TYPE)
                .addSort(SortBuilders.fieldSort("created")
                        .order(SortOrder.ASC));
        return mapEventExecutionsResponse(srb.execute().actionGet());
    } catch (Exception e) {
        logger.error("Failed to get executions for event: {}", event, e);
        throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e);
    }
}
Code example source: origin: Netflix/conductor

@Override
public List<Message> getMessages(String queue) {
    try {
        Expression expression = Expression.fromString("queue='" + queue + "'");
        QueryBuilder queryBuilder = expression.getFilterBuilder();
        BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder);
        QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery("*");
        BoolQueryBuilder fq = QueryBuilders.boolQuery().must(stringQuery).must(filterQuery);
        final SearchRequestBuilder srb = elasticSearchClient.prepareSearch(logIndexPrefix + "*")
                .setQuery(fq)
                .setTypes(MSG_DOC_TYPE)
                .addSort(SortBuilders.fieldSort("created").order(SortOrder.ASC));
        return mapGetMessagesResponse(srb.execute().actionGet());
    } catch (Exception e) {
        logger.error("Failed to get messages for queue: {}", queue, e);
        throw new ApplicationException(Code.BACKEND_ERROR, e.getMessage(), e);
    }
}
Code example source: origin: apache/usergrid

/**
 * Get the search request builder
 */
public SearchRequestBuilder getBuilder( final SearchEdge searchEdge, final SearchTypes searchTypes,
                                        final QueryVisitor visitor, final int limit, final int from,
                                        final List<SortPredicate> sortPredicates,
                                        final Map<String, Class> fieldsWithType ) {
    Preconditions
        .checkArgument( limit <= EntityIndex.MAX_LIMIT, "limit is greater than max " + EntityIndex.MAX_LIMIT );
    Preconditions.checkNotNull( visitor, "query visitor cannot be null");

    SearchRequestBuilder srb =
        esProvider.getClient().prepareSearch( alias.getReadAlias() ).setTypes( IndexingUtils.ES_ENTITY_TYPE )
                  .setSearchType( SearchType.QUERY_THEN_FETCH );

    final Optional<QueryBuilder> queryBuilder = visitor.getQueryBuilder();
    if ( queryBuilder.isPresent() ) {
        srb.setQuery( queryBuilder.get() );
    }

    srb.setPostFilter( createFilterBuilder( searchEdge, visitor, searchTypes ) );
    srb = srb.setFrom( from ).setSize( limit );

    // if we have a geo field, sort by closest to farthest by default
    final GeoSortFields geoFields = visitor.getGeoSorts();

    // no sort predicates, sort by edge time descending, entity id second
    if ( sortPredicates.size() == 0 ) {
        applyDefaultSortPredicates( srb, geoFields );
    }
    else {
        applySortPredicates( srb, sortPredicates, geoFields, fieldsWithType );
    }

    return srb;
}