This article collects a number of code examples of the Java method org.apache.druid.java.util.common.logger.Logger.info(), illustrating how Logger.info() is used in practice. The examples are drawn mainly from GitHub, Stack Overflow, Maven, and similar platforms, and are extracted from selected projects, so they should serve as useful references. Details of the Logger.info() method are as follows:
Package path: org.apache.druid.java.util.common.logger
Class name: Logger
Method name: info
Method description: not available.
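Before the individual examples, a minimal sketch of how the method is typically invoked may help. The class MyIngestionTask, its fields, and the log messages below are made up for illustration; the two overloads shown (a format message with varargs, and a Throwable followed by a message) are the ones that appear in the examples that follow.

import org.apache.druid.java.util.common.logger.Logger;

// Hypothetical class, for illustration only.
public class MyIngestionTask
{
  // A Logger is usually created once per class, keyed by the class itself.
  private static final Logger log = new Logger(MyIngestionTask.class);

  public void run(String dataSource, long rowCount)
  {
    // Message + format arguments, using java.util.Formatter-style placeholders
    // such as "%s" and "%,d", as seen in the examples below.
    log.info("Starting ingestion for dataSource[%s] with [%,d] rows", dataSource, rowCount);

    try {
      // ... do the work ...
    }
    catch (Exception e) {
      // Throwable-first overload: the exception is logged together with the message.
      log.info(e, "Ingestion for dataSource[%s] failed", dataSource);
    }
  }
}

Note that, as the examples below confirm, the format string uses java.util.Formatter-style placeholders such as %s and %,d rather than SLF4J-style {} markers.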
Code example source: apache/incubator-druid

@Override
public void killAll()
{
  log.info("Noop: No task logs are deleted.");
}
Code example source: apache/incubator-druid

@Inject
public MetadataStoragePollingBasicAuthenticatorCacheManager(
    BasicAuthenticatorMetadataStorageUpdater storageUpdater
)
{
  this.storageUpdater = storageUpdater;
  log.info("Starting coordinator basic authenticator cache manager.");
}
Code example source: apache/incubator-druid

public static String runTask(String[] args) throws Exception
{
  String workingPath = args[0];
  log.info("Deleting indexing hadoop working path [%s].", workingPath);
  Path p = new Path(workingPath);
  FileSystem fs = p.getFileSystem(new Configuration());
  fs.delete(p, true);
  return null;
}
Code example source: apache/incubator-druid

@Override
public void start()
{
  try {
    log.info("Starting Derby Metadata Storage");
    server.start(null);
  }
  catch (Exception e) {
    // Guava's Throwables.propagate rethrows the exception, wrapping checked exceptions in a RuntimeException.
    throw Throwables.propagate(e);
  }
}
Code example source: apache/incubator-druid

@Override
public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable)
{
  log.info("Adding SegmentHandoffCallback for dataSource[%s] Segment[%s]", dataSource, descriptor);
  Pair<Executor, Runnable> prev = handOffCallbacks.putIfAbsent(
      descriptor,
      new Pair<>(exec, handOffRunnable)
  );
  return prev == null;
}
Code example source: apache/incubator-druid

private DatabaseReader openGeoIpDb(File geoDb)
{
  try {
    DatabaseReader reader = new DatabaseReader(geoDb);
    LOG.info("Using geo ip database at [%s].", geoDb);
    return reader;
  }
  catch (IOException e) {
    throw new RuntimeException("Could not open geo db at [" + geoDb.getAbsolutePath() + "].", e);
  }
}
Code example source: apache/incubator-druid

@Inject
public void configure(Properties properties)
{
  this.properties = properties;
  beOverlord = isOverlord(properties);
  if (beOverlord) {
    log.info("Coordinator is configured to act as Overlord as well.");
  }
}
Code example source: apache/incubator-druid

public Collection<Server> getAll()
{
  try {
    return Collections2.transform(serviceProvider.getAllInstances(), TO_SERVER);
  }
  catch (Exception e) {
    // Throwable-first overload: the exception is logged together with the message.
    log.info(e, "Unable to get all instances");
    return Collections.emptyList();
  }
}
Code example source: apache/incubator-druid

public void register(String serviceName, EventReceiverFirehoseMetric metric)
{
  log.info("Registering EventReceiverFirehoseMetric for service [%s]", serviceName);
  if (metrics.putIfAbsent(serviceName, metric) != null) {
    throw new ISE("Service [%s] is already registered!", serviceName);
  }
}
Code example source: apache/incubator-druid

@Override
public void close()
{
  log.info("CLOSE twitterstream");
  twitterStream.shutdown(); // invokes twitterStream.cleanUp()
}
Code example source: apache/incubator-druid

@Override
public void becomeLeader()
{
  logger.info("listener3.becomeLeader().");
  currLeader.set("h3:8080");
}
Code example source: apache/incubator-druid

@Override
public ByteBuffer get()
{
  log.info(
      "Allocating new %s buffer[%,d] of size[%,d]",
      description,
      count.getAndIncrement(),
      computationBufferSize
  );
  return ByteBuffer.allocateDirect(computationBufferSize);
}
Code example source: apache/incubator-druid

@Override
public void pushTaskLog(final String taskid, final File logFile) throws IOException
{
  final String taskKey = getTaskLogKey(taskid, "log");
  log.info("Pushing task log %s to: %s", logFile, taskKey);
  pushTaskFile(logFile, taskKey);
}
Code example source: apache/incubator-druid

@Override
public void pushTaskLog(final String taskid, final File logFile)
{
  final String taskKey = getTaskLogKey(taskid);
  log.info("Pushing task log %s to: %s", logFile, taskKey);
  pushTaskFile(taskid, logFile, taskKey);
}
Code example source: apache/incubator-druid

@Override
public void pushTaskReports(String taskid, File reportFile) throws IOException
{
  final String taskKey = getTaskReportsKey(taskid);
  log.info("Pushing task reports %s to: %s", reportFile, taskKey);
  pushTaskFile(taskid, reportFile, taskKey);
}
Code example source: apache/incubator-druid

@LifecycleStop
public void stop()
{
  log.info("Stopping WorkerCuratorCoordinator for worker[%s]", worker.getHost());
  synchronized (lock) {
    if (!started) {
      return;
    }
    announcer.stop();
    started = false;
  }
}
Code example source: apache/incubator-druid

@AfterClass
public static void cleanupAfterClass()
{
  List<Class<? extends BitmapSerdeFactory>[]> classes = factoryClasses();
  for (int i = 0; i < classes.size(); ++i) {
    log.info("Entry [%d] is %s", i, classes.get(i)[0].getCanonicalName());
  }
}
Code example source: apache/incubator-druid

public static IncrementalIndex makeRealtimeIndex(final String resourceFilename, boolean rollup)
{
  final URL resource = TestIndex.class.getClassLoader().getResource(resourceFilename);
  if (resource == null) {
    throw new IllegalArgumentException("cannot find resource " + resourceFilename);
  }
  log.info("Realtime loading index file[%s]", resource);
  CharSource stream = Resources.asByteSource(resource).asCharSource(StandardCharsets.UTF_8);
  return makeRealtimeIndex(stream, rollup);
}
Code example source: apache/incubator-druid

private void submitTaskAndWait(String taskSpec, String dataSourceName)
{
  final String taskID = indexer.submitTask(taskSpec);
  LOG.info("TaskID for loading index task %s", taskID);
  indexer.waitUntilTaskCompletes(taskID);
  RetryUtil.retryUntilTrue(
      () -> coordinator.areSegmentsLoaded(dataSourceName), "Segment Load"
  );
}