本文整理了 Java 中 io.druid.query.Query.getDataSource 方法的一些代码示例,展示了 Query.getDataSource 的具体用法。这些代码示例主要来源于 Github/Stackoverflow/Maven 等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Query.getDataSource 方法的具体详情如下:
包路径:io.druid.query.Query
类名称:Query
方法名:getDataSource
方法描述:暂无
代码示例来源:origin: com.n3twork.druid/druid-server
/**
 * Extracts the name of the table backing the given query's data source.
 *
 * @param query the query whose backing table name is wanted
 * @return the name of the underlying {@code TableDataSource}
 * @throws UnsupportedOperationException if the data source is not a plain table
 *                                       (e.g. a subquery data source)
 */
private <T> String getDataSourceName(Query<T> query)
{
  DataSource dataSource = query.getDataSource();
  if (!(dataSource instanceof TableDataSource)) {
    throw new UnsupportedOperationException("data source type '" + dataSource.getClass().getName() + "' unsupported");
  }
  // The instanceof check above guarantees this cast succeeds; the original
  // re-fetched the data source and caught an unreachable ClassCastException.
  return ((TableDataSource) dataSource).getName();
}
代码示例来源:origin: io.druid/druid-processing
@Override
public List<String> getNames()
{
  // Delegate straight to the wrapped query's data source.
  final DataSource source = query.getDataSource();
  return source.getNames();
}
代码示例来源:origin: com.n3twork.druid/druid-processing
@Override
public String getName()
{
  // The name is whatever the wrapped query's data source reports.
  final DataSource source = query.getDataSource();
  return source.getName();
}
代码示例来源:origin: com.n3twork.druid/druid-server
@Override
public VersionedIntervalTimeline<String, ServerSelector> getTimeline(DataSource dataSource)
{
  // Peel off any layers of query (subquery) data sources until a concrete one remains.
  DataSource current = dataSource;
  while (current instanceof QueryDataSource) {
    current = ((QueryDataSource) current).getQuery().getDataSource();
  }
  if (!(current instanceof TableDataSource)) {
    throw new UnsupportedOperationException("Unsupported data source type: " + current.getClass().getSimpleName());
  }
  final String table = ((TableDataSource) current).getName();
  // Timeline map access is guarded by the shared lock.
  synchronized (lock) {
    return timelines.get(table);
  }
}
代码示例来源:origin: io.druid/druid-processing
@Override
public void dataSource(QueryType query)
{
  // Record the query's datasource as a metric dimension under its canonical metric name.
  final String metricName = DataSourceUtil.getMetricName(query.getDataSource());
  setDimension(DruidMetrics.DATASOURCE, metricName);
}
代码示例来源:origin: io.druid/druid-processing
/**
 * Computes the number of merge buffers a broker needs for a possibly nested groupBy.
 *
 * A broker requires merge buffers only for groupBy layers beyond the inner-most one.
 * For example, a nested groupBy (groupBy -> groupBy -> table) needs 1 buffer. When an
 * outer groupBy reads from an inner groupBy, two buffers are needed so the inner
 * result can be kept until the outer processing completes. The same holds for deeper
 * layers, so at most 2 buffers are ever required, which is why the recursion stops
 * once {@code foundNum} reaches {@code MAX_MERGE_BUFFER_NUM + 1}.
 *
 * @param query    the (possibly nested) query being planned; was a raw {@code Query},
 *                 now wildcarded to avoid unchecked raw-type usage
 * @param foundNum the 1-based depth of the current layer; callers start at 1
 * @return the number of merge buffers required, capped at MAX_MERGE_BUFFER_NUM
 */
private static int countRequiredMergeBufferNum(Query<?> query, int foundNum)
{
  final DataSource dataSource = query.getDataSource();
  if (foundNum == MAX_MERGE_BUFFER_NUM + 1 || !(dataSource instanceof QueryDataSource)) {
    return foundNum - 1;
  } else {
    return countRequiredMergeBufferNum(((QueryDataSource) dataSource).getQuery(), foundNum + 1);
  }
}
代码示例来源:origin: com.n3twork.druid/druid-processing
@Override
public Sequence<T> run(final Query<T> query)
{
  // Iteratively unwrap nested query data sources down to the innermost query,
  // then hand it to the base runner (the original did this via recursion).
  Query<T> current = query;
  DataSource source = current.getDataSource();
  while (source instanceof QueryDataSource) {
    current = (Query<T>) ((QueryDataSource) source).getQuery();
    source = current.getDataSource();
  }
  return baseRunner.run(current);
}
}
代码示例来源:origin: io.druid/druid-server
@POST
@Path("/candidates")
@Produces({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE})
@Consumes({MediaType.APPLICATION_JSON, SmileMediaTypes.APPLICATION_JACKSON_SMILE, APPLICATION_SMILE})
@ResourceFilters(StateResourceFilter.class)
// Deserializes a query from the request body and returns the candidate server
// locations that would serve it, without actually running the query.
public Response getQueryTargets(
    InputStream in,
    @QueryParam("pretty") String pretty,
    @QueryParam("numCandidates") @DefaultValue("-1") int numCandidates,
    @Context final HttpServletRequest req
) throws IOException
{
  // The content type picks the object mapper (JSON vs Smile); any non-null
  // "pretty" query parameter enables pretty-printing.
  final ResponseContext context = createContext(req.getContentType(), pretty != null);
  try {
    Query<?> query = context.getObjectMapper().readValue(in, Query.class);
    // numCandidates of -1 presumably means "no limit" — TODO confirm against ServerViewUtil.
    return context.ok(
        ServerViewUtil.getTargetLocations(
            brokerServerView,
            query.getDataSource(),
            query.getIntervals(),
            numCandidates
        )
    );
  }
  catch (Exception e) {
    // Parse or lookup failures are mapped to an error response by the context.
    return context.gotError(e);
  }
}
}
代码示例来源:origin: io.druid/druid-server
@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(final Query<T> query, Iterable<Interval> intervals)
{
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  // Realtime queries address exactly one datasource; look up its partition chiefs.
  final String dataSourceName = Iterables.getOnlyElement(query.getDataSource().getNames());
  final Map<Integer, FireChief> partitionChiefs = chiefs.get(dataSourceName);
  if (partitionChiefs == null) {
    return new NoopQueryRunner<T>();
  }
  // Chaining query runners which wait on submitted chain query runners can make executor pools deadlock
  return factory.getToolchest().mergeResults(
      factory.mergeRunners(
          MoreExecutors.sameThreadExecutor(),
          Iterables.transform(
              partitionChiefs.values(),
              fireChief -> fireChief.getQueryRunner(query)
          )
      )
  );
}
代码示例来源:origin: io.druid/druid-server
// Buckets each segment descriptor under the concrete server its selector picks.
private SortedMap<DruidServer, List<SegmentDescriptor>> groupSegmentsByServer(Set<ServerToSegment> segments)
{
  final SortedMap<DruidServer, List<SegmentDescriptor>> serverSegments = Maps.newTreeMap();
  for (ServerToSegment serverToSegment : segments) {
    final QueryableDruidServer picked = serverToSegment.getServer().pick();
    if (picked == null) {
      // No live server owns this segment: raise an alert and keep grouping the rest.
      log.makeAlert(
          "No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!",
          serverToSegment.getSegmentDescriptor(),
          query.getDataSource()
      ).emit();
      continue;
    }
    serverSegments
        .computeIfAbsent(picked.getServer(), s -> new ArrayList<>())
        .add(serverToSegment.getSegmentDescriptor());
  }
  return serverSegments;
}
代码示例来源:origin: io.druid/druid-server
@Override
public void registerQuery(Query query, final ListenableFuture future)
{
  final String id = query.getId();
  final List<String> datasources = query.getDataSource().getNames();
  // Track the in-flight query and the datasources it touches.
  queries.put(id, future);
  queryDatasources.putAll(id, datasources);
  // Once the future completes (success or failure), drop the bookkeeping entries.
  future.addListener(
      () -> {
        queries.remove(id, future);
        for (String datasource : datasources) {
          queryDatasources.remove(id, datasource);
        }
      },
      MoreExecutors.sameThreadExecutor()
  );
}
代码示例来源:origin: io.druid/druid-processing
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
  // Guard clause: anything that is not a subquery goes straight to the base runner.
  final DataSource source = queryPlus.getQuery().getDataSource();
  if (!(source instanceof QueryDataSource)) {
    return baseRunner.run(queryPlus, responseContext);
  }
  // Unwrap one subquery layer and recurse until a concrete data source is reached.
  final Query<T> innerQuery = (Query<T>) ((QueryDataSource) source).getQuery();
  return run(queryPlus.withQuery(innerQuery), responseContext);
}
}
代码示例来源:origin: io.druid/druid-server
/**
 * Authorize the query. Will return an Access object denoting whether the query is authorized or not.
 *
 * @param authenticationResult authentication result indicating the identity of the requester
 *
 * @return authorization result
 */
public Access authorize(final AuthenticationResult authenticationResult)
{
  transition(State.INITIALIZED, State.AUTHORIZING);
  // Every datasource the query reads must be authorized as a read resource-action.
  final List<String> dataSourceNames = queryPlus.getQuery().getDataSource().getNames();
  return doAuthorize(
      authenticationResult,
      AuthorizationUtils.authorizeAllResourceActions(
          authenticationResult,
          Iterables.transform(dataSourceNames, AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR),
          authorizerMapper
      )
  );
}
代码示例来源:origin: io.druid/druid-server
// Plans and lazily executes the query against the cluster: resolves the segment
// timeline, short-circuits on ETag match, splits work into cached results and
// per-server fetches, and merges everything by the query's result ordering.
Sequence<T> run(final UnaryOperator<TimelineLookup<String, ServerSelector>> timelineConverter)
{
  // No timeline means no segments are served for this datasource — empty result.
  @Nullable TimelineLookup<String, ServerSelector> timeline = serverView.getTimeline(query.getDataSource());
  if (timeline == null) {
    return Sequences.empty();
  }
  // Let the caller rewrite or filter the timeline before segment selection.
  timeline = timelineConverter.apply(timeline);
  if (uncoveredIntervalsLimit > 0) {
    computeUncoveredIntervals(timeline);
  }
  final Set<ServerToSegment> segments = computeSegmentsToQuery(timeline);
  @Nullable final byte[] queryCacheKey = computeQueryCacheKey();
  // If the client sent an If-None-Match header and the current etag matches it,
  // return an empty sequence (the not-modified response is presumably produced
  // upstream from the empty result — verify against QueryResource).
  if (query.getContext().get(QueryResource.HEADER_IF_NONE_MATCH) != null) {
    @Nullable final String prevEtag = (String) query.getContext().get(QueryResource.HEADER_IF_NONE_MATCH);
    @Nullable final String currentEtag = computeCurrentEtag(segments, queryCacheKey);
    if (currentEtag != null && currentEtag.equals(prevEtag)) {
      return Sequences.empty();
    }
  }
  // Segments whose results are already cached are pruned from the fetch set.
  final List<Pair<Interval, byte[]>> alreadyCachedResults = pruneSegmentsWithCachedResults(queryCacheKey, segments);
  final SortedMap<DruidServer, List<SegmentDescriptor>> segmentsByServer = groupSegmentsByServer(segments);
  // Defer execution: the per-server sequences are only built when first consumed.
  return new LazySequence<>(() -> {
    List<Sequence<T>> sequencesByInterval = new ArrayList<>(alreadyCachedResults.size() + segmentsByServer.size());
    addSequencesFromCache(sequencesByInterval, alreadyCachedResults);
    addSequencesFromServer(sequencesByInterval, segmentsByServer);
    return Sequences
        .simple(sequencesByInterval)
        .flatMerge(seq -> seq, query.getResultOrdering());
  });
}
代码示例来源:origin: io.druid/druid-server
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs)
{
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  // Realtime queries address exactly one datasource; find its partition chiefs.
  final Map<Integer, FireChief> partitionChiefs =
      chiefs.get(Iterables.getOnlyElement(query.getDataSource().getNames()));
  if (partitionChiefs == null) {
    return new NoopQueryRunner<T>();
  }
  return factory.getToolchest().mergeResults(
      factory.mergeRunners(
          MoreExecutors.sameThreadExecutor(),
          Iterables.transform(
              specs,
              spec -> {
                // A partition with no chief contributes no results rather than failing.
                final FireChief chief = partitionChiefs.get(spec.getPartitionNumber());
                return chief == null
                       ? new NoopQueryRunner<T>()
                       : chief.getQueryRunner(query.withQuerySegmentSpec(new SpecificSegmentSpec(spec)));
              }
          )
      )
  );
}
代码示例来源:origin: com.n3twork.druid/druid-indexing-service
/**
 * Finds the query runner of the running task that serves the query's table datasource.
 *
 * @param query the query to route; must target a plain table datasource
 * @return the matching task's runner, or a no-op runner when no task matches
 * @throws IllegalArgumentException if the datasource is not a TableDataSource (e.g. a subquery)
 */
private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query)
{
  final DataSource dataSource = query.getDataSource();
  // Explicit type check instead of the original's catch of ClassCastException,
  // which used exceptions for control flow; the thrown exception is unchanged.
  if (!(dataSource instanceof TableDataSource)) {
    throw new IllegalArgumentException("Subqueries are not welcome here");
  }
  final String queryDataSource = ((TableDataSource) dataSource).getName();
  QueryRunner<T> queryRunner = null;
  // Snapshot runningItems to iterate safely while tasks come and go.
  for (final ThreadPoolTaskRunnerWorkItem taskRunnerWorkItem : ImmutableList.copyOf(runningItems)) {
    final Task task = taskRunnerWorkItem.getTask();
    if (task.getDataSource().equals(queryDataSource)) {
      final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);
      if (taskQueryRunner != null) {
        if (queryRunner == null) {
          queryRunner = taskQueryRunner;
        } else {
          // Multiple running tasks claim the same datasource: keep the first, alert.
          log.makeAlert("Found too many query runners for datasource")
             .addData("dataSource", queryDataSource)
             .emit();
        }
      }
    }
  }
  return queryRunner == null ? new NoopQueryRunner<T>() : queryRunner;
}
代码示例来源:origin: io.druid/druid-server
/**
 * Authorize the query. Will return an Access object denoting whether the query is authorized or not.
 *
 * @param req HTTP request object of the request. If provided, the auth-related fields in the HTTP request
 *            will be automatically set.
 *
 * @return authorization result
 */
public Access authorize(HttpServletRequest req)
{
  transition(State.INITIALIZED, State.AUTHORIZING);
  // Each datasource the query reads becomes a read resource-action to authorize.
  final List<String> dataSourceNames = queryPlus.getQuery().getDataSource().getNames();
  return doAuthorize(
      AuthorizationUtils.authenticationResultFromRequest(req),
      AuthorizationUtils.authorizeAllResourceActions(
          req,
          Iterables.transform(dataSourceNames, AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR),
          authorizerMapper
      )
  );
}
代码示例来源:origin: io.druid/druid-indexing-service
// Routes the query to the runner of the single running task serving its datasource,
// wrapping the result so the query context is set and verified.
private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query)
{
  // A query must name exactly one datasource here.
  final String targetDataSource = Iterables.getOnlyElement(query.getDataSource().getNames());
  QueryRunner<T> matchedRunner = null;
  for (final ThreadPoolTaskRunnerWorkItem workItem : ImmutableList.copyOf(runningItems)) {
    final Task task = workItem.getTask();
    if (!task.getDataSource().equals(targetDataSource)) {
      continue;
    }
    final QueryRunner<T> candidate = task.getQueryRunner(query);
    if (candidate == null) {
      continue;
    }
    if (matchedRunner == null) {
      matchedRunner = candidate;
    } else {
      // Multiple tasks claim the same datasource: keep the first runner, alert.
      log.makeAlert("Found too many query runners for datasource")
         .addData("dataSource", targetDataSource)
         .emit();
    }
  }
  return new SetAndVerifyContextQueryRunner(
      serverConfig,
      matchedRunner == null ? new NoopQueryRunner<T>() : matchedRunner
  );
}
代码示例来源:origin: io.druid/druid-processing
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext)
{
  final Query<T> query = queryPlus.getQuery();
  final DataSource dataSource = query.getDataSource();
  // Non-union data sources pass straight through to the base runner.
  if (!(dataSource instanceof UnionDataSource)) {
    return baseRunner.run(queryPlus, responseContext);
  }
  // Run the query once per member datasource and merge results by the query's ordering.
  return new MergeSequence<>(
      query.getResultOrdering(),
      Sequences.simple(
          Lists.transform(
              ((UnionDataSource) dataSource).getDataSources(),
              singleSource -> baseRunner.run(
                  queryPlus.withQuery(query.withDataSource(singleSource)),
                  responseContext
              )
          )
      )
  );
}
代码示例来源:origin: com.n3twork.druid/druid-server
.setUser2(theQuery.getDataSource().getName())
.setUser4(theQuery.getType())
.setUser5(COMMA_JOIN.join(theQuery.getIntervals()))
内容来源于网络,如有侵权,请联系作者删除!