This article collects a number of code examples for the Java class org.apache.druid.query.Query and shows how the class is used in practice. The examples come mainly from GitHub, Stack Overflow, Maven, and similar platforms, and were extracted from selected projects, so they should make useful references. Details of the Query class:
Full path: org.apache.druid.query.Query
Class name: Query
Description: none available
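Before the excerpts, here is a minimal self-contained sketch of how a concrete Query is usually built and inspected. It uses the Druids timeseries builder from the same module; the datasource name, interval, and aggregator name are placeholder values chosen for illustration.

import java.util.Collections;

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.Druids;
import org.apache.druid.query.Query;
import org.apache.druid.query.aggregation.CountAggregatorFactory;

public class QueryExample
{
  public static void main(String[] args)
  {
    // Build a concrete Query implementation (a timeseries query).
    // "wikipedia" and the interval are illustrative placeholders.
    Query<?> query = Druids.newTimeseriesQueryBuilder()
                           .dataSource("wikipedia")
                           .intervals("2013-01-01/2013-01-02")
                           .granularity(Granularities.ALL)
                           .aggregators(Collections.singletonList(new CountAggregatorFactory("rows")))
                           .build();

    // The Query interface exposes the metadata the excerpts below rely on.
    System.out.println(query.getType());       // "timeseries"
    System.out.println(query.getDataSource()); // the "wikipedia" table datasource
    System.out.println(query.getIntervals());  // the parsed query interval
  }
}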
Code example source: apache/incubator-druid
try {
  final Query query = requestLogLine.getQuery();
  MDC.put("queryId", query.getId());
  MDC.put("sqlQueryId", StringUtils.nullToEmptyNonDruidDataString(query.getSqlQueryId()));
  MDC.put("dataSource", findInnerDatasource(query).toString());
  MDC.put("queryType", query.getType());
  MDC.put("isNested", String.valueOf(!(query.getDataSource() instanceof TableDataSource)));
  MDC.put("hasFilters", Boolean.toString(query.hasFilters()));
  MDC.put("remoteAddr", requestLogLine.getRemoteAddr());
  MDC.put("duration", query.getDuration().toString());
  MDC.put("descending", Boolean.toString(query.isDescending()));
  if (setContextMDC) {
    final Iterable<Map.Entry<String, Object>> entries = query.getContext() == null
        ? ImmutableList.of()
        : query.getContext().entrySet();
    for (Map.Entry<String, Object> entry : entries) {
      MDC.put(entry.getKey(), entry.getValue() == null ? "NULL" : entry.getValue().toString());
    }
  }
  // ... (the rest of the logging logic is elided in the source)
}
finally {
  // Assumption: the upstream logger clears the MDC once the request is logged.
  MDC.clear();
}
Code example source: apache/incubator-druid
default List<Interval> getIntervalsOfInnerMostQuery()
{
  if (getDataSource() instanceof QueryDataSource) {
    //noinspection unchecked
    return ((QueryDataSource) getDataSource()).getQuery().getIntervalsOfInnerMostQuery();
  } else {
    return getIntervals();
  }
}
Code example source: apache/incubator-druid
@Override
public <ContextType> ContextType getContextValue(String key, ContextType defaultValue)
{
  return (ContextType) query.getContextValue(key, defaultValue);
}
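This delegating override is typical of wrapper queries such as MaterializedViewQuery. As a hedged usage sketch, reading a context value with a fallback looks like this (query stands for any Query instance):

// Context values are per-query hints supplied by the client; the default is
// returned when the key is absent from the context map.
final Integer priority = query.getContextValue(QueryContexts.PRIORITY_KEY, 0);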
Code example source: apache/incubator-druid
public static <T> Query<T> withMaxScatterGatherBytes(Query<T> query, long maxScatterGatherBytesLimit)
{
  Object obj = query.getContextValue(MAX_SCATTER_GATHER_BYTES_KEY);
  if (obj == null) {
    return query.withOverriddenContext(ImmutableMap.of(MAX_SCATTER_GATHER_BYTES_KEY, maxScatterGatherBytesLimit));
  } else {
    long curr = ((Number) obj).longValue();
    if (curr > maxScatterGatherBytesLimit) {
      throw new IAE(
          "configured [%s = %s] is more than enforced limit of [%s].",
          MAX_SCATTER_GATHER_BYTES_KEY,
          curr,
          maxScatterGatherBytesLimit
      );
    } else {
      return query;
    }
  }
}
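This helper lives in QueryContexts upstream. A hedged usage sketch, with an arbitrary 10 MB limit chosen purely for illustration:

// Called on the broker before fan-out: stamps the enforced limit into the
// query context, or fails fast if the client asked for a larger budget.
Query<T> limited = QueryContexts.withMaxScatterGatherBytes(query, 10_000_000L);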
Code example source: apache/incubator-druid
/**
 * Initializes this object to execute a specific query. Does not actually execute the query.
 *
 * @param baseQuery the query
 */
@SuppressWarnings("unchecked")
public void initialize(final Query baseQuery)
{
  transition(State.NEW, State.INITIALIZED);
  String queryId = baseQuery.getId();
  if (Strings.isNullOrEmpty(queryId)) {
    queryId = UUID.randomUUID().toString();
  }
  this.baseQuery = baseQuery.withId(queryId);
  this.toolChest = warehouse.getToolChest(baseQuery);
}
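The null-or-empty check plus withId is the standard way to guarantee every query carries an ID. A hedged one-liner under the same assumption (query is any Query instance):

// Query objects are immutable; withId returns a copy carrying the generated ID.
Query<?> identified = Strings.isNullOrEmpty(query.getId())
    ? query.withId(UUID.randomUUID().toString())
    : query;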
Code example source: apache/incubator-druid
@Override
public DataSource getDataSource()
{
  return query.getDataSource();
}
Code example source: apache/incubator-druid
// Excerpt from the materialized-view query optimizer: the surrounding method
// builds a list of queries that read pre-aggregated derivative tables where
// possible and fall back to the base table for the remaining intervals.
if (/* ... || */ !(query.getDataSource() instanceof TableDataSource)) {
  return Collections.singletonList(query);
}
String datasourceName = ((TableDataSource) query.getDataSource()).getName();
List<Interval> remainingQueryIntervals = (List<Interval>) query.getIntervals();
// ...
queries.add(
    query.withDataSource(new TableDataSource(derivativeDataSource.getName()))
         .withQuerySegmentSpec(new MultipleIntervalSegmentSpec(derivativeIntervals))
);
derivativesHitCount.get(derivativeDataSource.getName()).incrementAndGet();
// ...
queries.add(query.withQuerySegmentSpec(new MultipleIntervalSegmentSpec(remainingQueryIntervals)));
Code example source: apache/incubator-druid
// Excerpt from a caching cluster client: the query's datasource selects the
// server/segment timeline, the context carries the If-None-Match etag, and
// the query's result ordering drives the final merge.
Sequence<T> run(final UnaryOperator<TimelineLookup<String, ServerSelector>> timelineConverter)
{
  TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
  if (timeline == null) {
    return Sequences.empty();
  }
  // ...
  @Nullable
  final byte[] queryCacheKey = computeQueryCacheKey();
  if (query.getContext().get(QueryResource.HEADER_IF_NONE_MATCH) != null) {
    @Nullable
    final String prevEtag = (String) query.getContext().get(QueryResource.HEADER_IF_NONE_MATCH);
    @Nullable
    final String currentEtag = computeCurrentEtag(segments, queryCacheKey);
    // ...
  }
  // ... (sequencesByInterval is assembled per-server earlier in the method)
  return Sequences
      .simple(sequencesByInterval)
      .flatMerge(seq -> seq, query.getResultOrdering());
}
Code example source: apache/incubator-druid
@Override
public List<Interval> getIntervals()
{
  return query.getIntervals();
}
Code example source: apache/incubator-druid
@Override
public Ordering<T> getResultOrdering()
{
  return query.getResultOrdering();
}
Code example source: apache/incubator-druid
@Test
public void testSerde() throws Exception
{
  String queryStr = "{\n"
                    + " \"queryType\":\"segmentMetadata\",\n"
                    + " \"dataSource\":\"test_ds\",\n"
                    + " \"intervals\":[\"2013-12-04T00:00:00.000Z/2013-12-05T00:00:00.000Z\"],\n"
                    + " \"analysisTypes\":[\"cardinality\",\"size\"]\n"
                    + "}";
  EnumSet<SegmentMetadataQuery.AnalysisType> expectedAnalysisTypes = EnumSet.of(
      SegmentMetadataQuery.AnalysisType.CARDINALITY,
      SegmentMetadataQuery.AnalysisType.SIZE
  );
  Query query = MAPPER.readValue(queryStr, Query.class);
  Assert.assertTrue(query instanceof SegmentMetadataQuery);
  Assert.assertEquals("test_ds", Iterables.getOnlyElement(query.getDataSource().getNames()));
  Assert.assertEquals(
      Intervals.of("2013-12-04T00:00:00.000Z/2013-12-05T00:00:00.000Z"),
      query.getIntervals().get(0)
  );
  Assert.assertEquals(expectedAnalysisTypes, ((SegmentMetadataQuery) query).getAnalysisTypes());
  // test serialize and deserialize
  Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class));
}
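One thing the test leaves implicit: MAPPER is a Druid-configured Jackson ObjectMapper (the upstream tests use DefaultObjectMapper), and the "queryType" property declared on the Query interface routes deserialization to SegmentMetadataQuery. A minimal sketch, where queryJson stands for any such query string:

// Polymorphic deserialization: the "queryType" field selects the Query subclass.
ObjectMapper mapper = new DefaultObjectMapper();
Query query = mapper.readValue(queryJson, Query.class);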
Code example source: apache/incubator-druid
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
  DataSource dataSource = queryPlus.getQuery().getDataSource();
  boolean forcePushDownNestedQuery = queryPlus.getQuery()
                                              .getContextBoolean(
                                                  GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY,
                                                  false
                                              );
  if (dataSource instanceof QueryDataSource && !forcePushDownNestedQuery) {
    return run(queryPlus.withQuery((Query<T>) ((QueryDataSource) dataSource).getQuery()), responseContext);
  } else {
    QueryPlus newQuery = queryPlus;
    if (forcePushDownNestedQuery) {
      // Disable any more push downs before firing off the query. But do let the historical know
      // that it is executing the complete nested query and not just the inner most part of it
      newQuery = queryPlus.withQuery(
          queryPlus.getQuery()
                   .withOverriddenContext(
                       ImmutableMap.of(
                           GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY, false,
                           GroupByQueryConfig.CTX_KEY_EXECUTING_NESTED_QUERY, true
                       )
                   )
      );
    }
    return baseRunner.run(newQuery, responseContext);
  }
}
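For completeness, a hedged sketch of how a caller opts in to the forced push-down path handled above; GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY is the constant used in the excerpt, and query stands for any nested GroupBy query:

// Request that the whole nested query be pushed down to data nodes instead of
// being unwrapped and re-run layer by layer.
Query<?> pushedDown = query.withOverriddenContext(
    ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY, true)
);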
Code example source: apache/incubator-druid
// Excerpt: rejecting a query whose datasource does not match the served table.
if (!(query.getDataSource() instanceof TableDataSource)
    || !dataSource.equals(((TableDataSource) query.getDataSource()).getName())) {
  log.makeAlert("Received query for unknown dataSource")
     .addData("dataSource", query.getDataSource())
     .emit();
  return new NoopQueryRunner<>();
}
final boolean skipIncrementalSegment = query.getContextValue(CONTEXT_SKIP_INCREMENTAL_SEGMENT, false);
final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
Code example source: apache/incubator-druid
public static <T> Query<T> withDefaultTimeout(Query<T> query, long defaultTimeout)
{
  return query.withOverriddenContext(ImmutableMap.of(QueryContexts.DEFAULT_TIMEOUT_KEY, defaultTimeout));
}
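A hedged usage sketch for this timeout counterpart (the 30-second value is illustrative):

// Adds QueryContexts.DEFAULT_TIMEOUT_KEY to the context so that a client query
// without an explicit "timeout" still gets bounded execution.
Query<T> bounded = QueryContexts.withDefaultTimeout(query, 30_000L);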
Code example source: apache/incubator-druid
@Override
public void registerQuery(Query query, final ListenableFuture future)
{
  final String id = query.getId();
  final List<String> datasources = query.getDataSource().getNames();
  queries.put(id, future);
  queryDatasources.putAll(id, datasources);
  future.addListener(
      new Runnable()
      {
        @Override
        public void run()
        {
          queries.remove(id, future);
          for (String datasource : datasources) {
            queryDatasources.remove(id, datasource);
          }
        }
      },
      Execs.directExecutor()
  );
}
Code example source: apache/incubator-druid
@Override
public Sequence<T> run(QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
  Query<T> query = queryPlus.getQuery();
  List<TimelineObjectHolder> segments = new ArrayList<>();
  for (Interval interval : query.getIntervals()) {
    segments.addAll(timeline.lookup(interval));
  }
  List<Sequence<T>> sequences = new ArrayList<>();
  for (TimelineObjectHolder<String, Segment> holder : toolChest.filterSegments(query, segments)) {
    Segment segment = holder.getObject().getChunk(0).getObject();
    QueryPlus queryPlusRunning = queryPlus.withQuerySegmentSpec(
        new SpecificSegmentSpec(
            new SegmentDescriptor(
                holder.getInterval(),
                holder.getVersion(),
                0
            )
        )
    );
    sequences.add(factory.createRunner(segment).run(queryPlusRunning, responseContext));
  }
  return new MergeSequence<>(query.getResultOrdering(), Sequences.simple(sequences));
}
Code example source: apache/incubator-druid
@Override
public MaterializedViewQuery withQuerySegmentSpec(QuerySegmentSpec spec)
{
  return new MaterializedViewQuery(query.withQuerySegmentSpec(spec), optimizer);
}
Code example source: apache/incubator-druid
// Excerpt: merging the outer query's context into a GroupBy subquery.
if (((QueryDataSource) dataSource).getQuery().getContext() != null) {
  subqueryContext.putAll(((QueryDataSource) dataSource).getQuery().getContext());
}
subquery = (GroupByQuery) ((QueryDataSource) dataSource).getQuery().withOverriddenContext(subqueryContext);
Code example source: apache/incubator-druid
@Override
public String getId()
{
  return query.getId();
}
Code example source: apache/incubator-druid
// ... (tail of a serde test: typed reads from a deserialized query's context)
Assert.assertEquals(1, serdeQuery.getContextValue(QueryContexts.PRIORITY_KEY));
Assert.assertEquals(true, serdeQuery.getContextValue("useCache"));
Assert.assertEquals("true", serdeQuery.getContextValue("populateCache"));
Assert.assertEquals(true, serdeQuery.getContextValue("finalize"));
Assert.assertEquals(true, serdeQuery.getContextBoolean("useCache", false));
Assert.assertEquals(true, serdeQuery.getContextBoolean("populateCache", false));
Assert.assertEquals(true, serdeQuery.getContextBoolean("finalize", false));
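These assertions expose a subtlety worth remembering: getContextValue returns whatever type Jackson produced (a Boolean for useCache but a String for populateCache), while getContextBoolean accepts both representations. A minimal sketch with a hand-built context:

// The value is stored as the String "true", yet getContextBoolean reads it as
// boolean true; getContextValue would hand back the raw String instead.
Query<?> q = query.withOverriddenContext(ImmutableMap.of("populateCache", "true"));
boolean populate = q.getContextBoolean("populateCache", false); // true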