Usage and code examples of org.apache.druid.query.Query.getDataSource()


This article collects some Java code examples of the org.apache.druid.query.Query.getDataSource method and shows how Query.getDataSource is used in practice. The examples were extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Query.getDataSource method are as follows:
Package: org.apache.druid.query
Class: Query
Method: getDataSource

About Query.getDataSource

No description is provided upstream. In short, getDataSource() returns the DataSource the query reads from: a plain table (TableDataSource), a union of tables (UnionDataSource), or a nested query (QueryDataSource), as the examples below illustrate.
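
Before the collected examples, here is a minimal sketch (not from any of the sources below) that combines the calls seen throughout this article, namely getDataSource(), QueryDataSource.getQuery(), and DataSource.getNames(), to unwrap nested query datasources and read the underlying table name(s):

import org.apache.druid.query.DataSource;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryDataSource;

// Minimal sketch: unwrap nested QueryDataSources and return the
// comma-joined name(s) of the innermost datasource.
public static String innermostNames(Query<?> query)
{
 DataSource ds = query.getDataSource();
 while (ds instanceof QueryDataSource) {
  // A QueryDataSource wraps another query; keep drilling down.
  ds = ((QueryDataSource) ds).getQuery().getDataSource();
 }
 // getNames() returns the underlying table name(s); most queries have exactly one.
 return String.join(",", ds.getNames());
}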

Code examples

Code example origin: apache/incubator-druid

@Override
public DataSource getDataSource()
{
 return query.getDataSource();
}

Code example origin: apache/incubator-druid

private Query findInnerMostQuery(Query outerQuery)
{
 Query query = outerQuery;
 while (query.getDataSource() instanceof QueryDataSource) {
  query = ((QueryDataSource) query.getDataSource()).getQuery();
 }
 return query;
}
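
As a usage illustration (not from the article's sources), the sketch below nests two groupBy layers with QueryDataSource and unwraps them again. The GroupByQuery.builder() setters and the "wikipedia" datasource name are assumptions from the standard Druid API, not taken from the snippet above:

// Assumed builder API; "wikipedia" is a hypothetical datasource name.
GroupByQuery inner = GroupByQuery.builder()
                  .setDataSource("wikipedia")
                  .setInterval("2020-01-01/2020-01-02")
                  .setGranularity(Granularities.ALL)
                  .build();
GroupByQuery outer = GroupByQuery.builder()
                  .setDataSource(new QueryDataSource(inner))
                  .setInterval("2020-01-01/2020-01-02")
                  .setGranularity(Granularities.ALL)
                  .build();
// findInnerMostQuery(outer) steps through one QueryDataSource and returns `inner`,
// whose datasource is a plain table.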

Code example origin: apache/incubator-druid

@Override
public List<String> getNames()
{
 return query.getDataSource().getNames();
}

Code example origin: apache/incubator-druid

default List<Interval> getIntervalsOfInnerMostQuery()
{
 if (getDataSource() instanceof QueryDataSource) {
  //noinspection unchecked
  return ((QueryDataSource) getDataSource()).getQuery().getIntervalsOfInnerMostQuery();
 } else {
  return getIntervals();
 }
}

Code example origin: apache/incubator-druid

@Override
public void dataSource(QueryType query)
{
 setDimension(DruidMetrics.DATASOURCE, DataSourceUtil.getMetricName(query.getDataSource()));
}

Code example origin: apache/incubator-druid

private DataSource getInnerMostDataSource(DataSource dataSource)
{
 if (dataSource instanceof QueryDataSource) {
  return getInnerMostDataSource(((QueryDataSource) dataSource).getQuery().getDataSource());
 }
 return dataSource;
}

Code example origin: apache/incubator-druid

private Object findInnerDatasource(Query query)
{
 DataSource _ds = query.getDataSource();
 if (_ds instanceof TableDataSource) {
  return ((TableDataSource) _ds).getName();
 }
 if (_ds instanceof QueryDataSource) {
  return findInnerDatasource(((QueryDataSource) _ds).getQuery());
 }
 if (_ds instanceof UnionDataSource) {
  return Joiner.on(",")
         .join(
           ((UnionDataSource) _ds)
             .getDataSources()
             .stream()
             .map(TableDataSource::getName)
             .collect(Collectors.toList())
         );
 } else {
  // should not come here
  return query.getDataSource();
 }
}

Code example origin: apache/incubator-druid

private static int countRequiredMergeBufferNum(Query query, int foundNum)
{
 // Note: A broker requires merge buffers for processing the groupBy layers beyond the inner-most one.
 // For example, the number of required merge buffers for a nested groupBy (groupBy -> groupBy -> table) is 1.
 // If the broker processes an outer groupBy which reads input from an inner groupBy,
 // it requires two merge buffers for inner and outer groupBys to keep the intermediate result of inner groupBy
 // until the outer groupBy processing completes.
 // This is same for subsequent groupBy layers, and thus the maximum number of required merge buffers becomes 2.
 final DataSource dataSource = query.getDataSource();
 if (foundNum == MAX_MERGE_BUFFER_NUM + 1 || !(dataSource instanceof QueryDataSource)) {
  return foundNum - 1;
 } else {
  return countRequiredMergeBufferNum(((QueryDataSource) dataSource).getQuery(), foundNum + 1);
 }
}
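
To make the arithmetic concrete, here is a worked trace; it assumes the initial call passes foundNum = 1 (the caller is not shown in this snippet) and that MAX_MERGE_BUFFER_NUM is 2, as the comment above implies:

// groupBy -> table:                       datasource is a table, return 1 - 1 = 0 buffers
// groupBy -> groupBy -> table:            recurse once (foundNum 1 -> 2), return 2 - 1 = 1 buffer
// groupBy -> groupBy -> groupBy -> table: recurse twice (foundNum reaches 3 = MAX + 1), return 3 - 1 = 2
// Any deeper nesting is also capped at 2 merge buffers.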

Code example origin: apache/incubator-druid

private <T> VersionedIntervalTimeline<String, Segment> getTimelineForTableDataSource(Query<T> query)
{
 if (query.getDataSource() instanceof TableDataSource) {
  return timelines.get(((TableDataSource) query.getDataSource()).getName());
 } else {
  throw new UOE("DataSource type[%s] unsupported", query.getDataSource().getClass().getName());
 }
}

Code example origin: apache/incubator-druid

/**
 * Authorize the query. Will return an Access object denoting whether the query is authorized or not.
 *
 * @param authenticationResult authentication result indicating the identity of the requester
 *
 * @return authorization result
 */
public Access authorize(final AuthenticationResult authenticationResult)
{
 transition(State.INITIALIZED, State.AUTHORIZING);
 return doAuthorize(
   authenticationResult,
   AuthorizationUtils.authorizeAllResourceActions(
     authenticationResult,
     Iterables.transform(
       baseQuery.getDataSource().getNames(),
       AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR
     ),
     authorizerMapper
   )
 );
}

Code example origin: apache/incubator-druid

@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(final Query<T> query, Iterable<Interval> intervals)
{
 final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
 final Map<Integer, FireChief> partitionChiefs = chiefs.get(Iterables.getOnlyElement(query.getDataSource()
                                              .getNames()));
 return partitionChiefs == null ? new NoopQueryRunner<T>() : factory.getToolchest().mergeResults(
   factory.mergeRunners(
     Execs.directExecutor(),
     // Chaining query runners which wait on submitted chain query runners can make executor pools deadlock
     Iterables.transform(
       partitionChiefs.values(), new Function<FireChief, QueryRunner<T>>()
       {
        @Override
        public QueryRunner<T> apply(FireChief fireChief)
        {
         return fireChief.getQueryRunner(query);
        }
       }
     )
   )
 );
}

Code example origin: apache/incubator-druid

@Override
public Sequence run(QueryPlus queryPlus, Map responseContext)
{
 // verify that table datasource is passed to baseQueryRunner
 Assert.assertTrue(queryPlus.getQuery().getDataSource() instanceof TableDataSource);
 String dsName = Iterables.getOnlyElement(queryPlus.getQuery().getDataSource().getNames());
 if ("ds1".equals(dsName)) {
  responseContext.put("ds1", "ds1");
  return Sequences.simple(Arrays.asList(1, 2, 3));
 } else if ("ds2".equals(dsName)) {
  responseContext.put("ds2", "ds2");
  return Sequences.simple(Arrays.asList(4, 5, 6));
 } else {
  throw new AssertionError("Unexpected DataSource");
 }
}

Code example origin: apache/incubator-druid

private SortedMap<DruidServer, List<SegmentDescriptor>> groupSegmentsByServer(Set<ServerToSegment> segments)
{
 final SortedMap<DruidServer, List<SegmentDescriptor>> serverSegments = new TreeMap<>();
 for (ServerToSegment serverToSegment : segments) {
  final QueryableDruidServer queryableDruidServer = serverToSegment.getServer().pick();
  if (queryableDruidServer == null) {
   log.makeAlert(
     "No servers found for SegmentDescriptor[%s] for DataSource[%s]?! How can this be?!",
     serverToSegment.getSegmentDescriptor(),
     query.getDataSource()
   ).emit();
  } else {
   final DruidServer server = queryableDruidServer.getServer();
   serverSegments.computeIfAbsent(server, s -> new ArrayList<>()).add(serverToSegment.getSegmentDescriptor());
  }
 }
 return serverSegments;
}

Code example origin: apache/incubator-druid

/**
 * Authorize the query. Will return an Access object denoting whether the query is authorized or not.
 *
 * @param req HTTP request object of the request. If provided, the auth-related fields in the HTTP request
 *            will be automatically set.
 *
 * @return authorization result
 */
public Access authorize(HttpServletRequest req)
{
 transition(State.INITIALIZED, State.AUTHORIZING);
 return doAuthorize(
   AuthorizationUtils.authenticationResultFromRequest(req),
   AuthorizationUtils.authorizeAllResourceActions(
     req,
     Iterables.transform(
       baseQuery.getDataSource().getNames(),
       AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR
     ),
     authorizerMapper
   )
 );
}

Code example origin: apache/incubator-druid

@Override
public void registerQuery(Query query, final ListenableFuture future)
{
 final String id = query.getId();
 final List<String> datasources = query.getDataSource().getNames();
 queries.put(id, future);
 queryDatasources.putAll(id, datasources);
 future.addListener(
   new Runnable()
   {
    @Override
    public void run()
    {
     queries.remove(id, future);
     for (String datasource : datasources) {
      queryDatasources.remove(id, datasource);
     }
    }
   },
   Execs.directExecutor()
 );
}

Code example origin: apache/incubator-druid

@Override
public Sequence<Result<TimeseriesResultValue>> run(
  QueryPlus<Result<TimeseriesResultValue>> queryPlus,
  Map<String, Object> responseContext
)
{
 if (queryPlus.getQuery().getDataSource().equals(new TableDataSource("ds1"))) {
  return Sequences.simple(descending ? Lists.reverse(ds1) : ds1);
 } else {
  return Sequences.simple(descending ? Lists.reverse(ds2) : ds2);
 }
}

Code example origin: apache/incubator-druid

private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query)
{
 QueryRunner<T> queryRunner = null;
 final String queryDataSource = Iterables.getOnlyElement(query.getDataSource().getNames());
 if (runningItem != null) {
  final Task task = runningItem.getTask();
  if (task.getDataSource().equals(queryDataSource)) {
   final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);
   if (taskQueryRunner != null) {
    if (queryRunner == null) {
     queryRunner = taskQueryRunner;
    } else {
     log.makeAlert("Found too many query runners for datasource")
       .addData("dataSource", queryDataSource)
       .emit();
    }
   }
  }
 }
 return new SetAndVerifyContextQueryRunner<>(
   serverConfig,
   queryRunner == null ? new NoopQueryRunner<>() : queryRunner
 );
}

Code example origin: apache/incubator-druid

@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, Map<String, Object> responseContext)
{
 DataSource dataSource = queryPlus.getQuery().getDataSource();
 boolean forcePushDownNestedQuery = queryPlus.getQuery()
                       .getContextBoolean(
                         GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY,
                         false
                       );
 if (dataSource instanceof QueryDataSource && !forcePushDownNestedQuery) {
  return run(queryPlus.withQuery((Query<T>) ((QueryDataSource) dataSource).getQuery()), responseContext);
 } else {
  QueryPlus newQuery = queryPlus;
  if (forcePushDownNestedQuery) {
   // Disable any more push downs before firing off the query. But do let the historical know
   // that it is executing the complete nested query and not just the inner most part of it
   newQuery = queryPlus.withQuery(
     queryPlus.getQuery()
          .withOverriddenContext(
            ImmutableMap.of(
              GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY, false,
              GroupByQueryConfig.CTX_KEY_EXECUTING_NESTED_QUERY, true
            )
          )
   );
  }
  return baseRunner.run(newQuery, responseContext);
 }
}

Code example origin: apache/incubator-druid

@Test
public void testSerde() throws Exception
{
 String queryStr = "{\n"
          + "  \"queryType\":\"segmentMetadata\",\n"
          + "  \"dataSource\":\"test_ds\",\n"
          + "  \"intervals\":[\"2013-12-04T00:00:00.000Z/2013-12-05T00:00:00.000Z\"],\n"
          + "  \"analysisTypes\":[\"cardinality\",\"size\"]\n"
          + "}";
 EnumSet<SegmentMetadataQuery.AnalysisType> expectedAnalysisTypes = EnumSet.of(
   SegmentMetadataQuery.AnalysisType.CARDINALITY,
   SegmentMetadataQuery.AnalysisType.SIZE
 );
 Query query = MAPPER.readValue(queryStr, Query.class);
 Assert.assertTrue(query instanceof SegmentMetadataQuery);
 Assert.assertEquals("test_ds", Iterables.getOnlyElement(query.getDataSource().getNames()));
 Assert.assertEquals(
   Intervals.of("2013-12-04T00:00:00.000Z/2013-12-05T00:00:00.000Z"),
   query.getIntervals().get(0)
 );
 Assert.assertEquals(expectedAnalysisTypes, ((SegmentMetadataQuery) query).getAnalysisTypes());
 // test serialize and deserialize
 Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class));
}

Code example origin: apache/incubator-druid

@Test
public void testSerdeWithDefaultInterval() throws Exception
{
 String queryStr = "{\n"
          + "  \"queryType\":\"segmentMetadata\",\n"
          + "  \"dataSource\":\"test_ds\"\n"
          + "}";
 Query query = MAPPER.readValue(queryStr, Query.class);
 Assert.assertTrue(query instanceof SegmentMetadataQuery);
 Assert.assertEquals("test_ds", Iterables.getOnlyElement(query.getDataSource().getNames()));
 Assert.assertEquals(Intervals.ETERNITY, query.getIntervals().get(0));
 Assert.assertTrue(((SegmentMetadataQuery) query).isUsingDefaultInterval());
 // test serialize and deserialize
 Assert.assertEquals(query, MAPPER.readValue(MAPPER.writeValueAsString(query), Query.class));
 // test copy
 Assert.assertEquals(query, Druids.SegmentMetadataQueryBuilder.copy((SegmentMetadataQuery) query).build());
}
