Usage and code examples of the org.apache.kafka.clients.consumer.Consumer.metrics() method


This article collects Java code examples of the org.apache.kafka.clients.consumer.Consumer.metrics() method and shows how Consumer.metrics() is typically used. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow and Maven, so they should serve as a useful reference. Details of the Consumer.metrics() method:
Package: org.apache.kafka.clients.consumer
Class: Consumer
Method: metrics

Consumer.metrics overview

Returns the metrics kept by the consumer as a Map<MetricName, ? extends Metric>. Each entry is identified by a MetricName (metric name, group and tags such as client-id); the concrete set of metrics depends on the implementation, typically KafkaConsumer.
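
As a quick orientation before the project examples, here is a minimal self-contained sketch (not taken from any of the projects below; the broker address, group id and topic name are placeholders) that polls a KafkaConsumer once and prints every metric it currently exposes, assuming a recent kafka-clients version:

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerMetricsExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "metrics-demo");            // placeholder group id
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
      consumer.poll(Duration.ofSeconds(1)); // fetch once so fetch-related metrics get populated

      // metrics() returns a snapshot keyed by MetricName (name, group, tags)
      Map<MetricName, ? extends Metric> metrics = consumer.metrics();
      for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
        MetricName name = entry.getKey();
        System.out.println(name.group() + " / " + name.name()
            + " " + name.tags() + " = " + entry.getValue().metricValue());
      }
    }
  }
}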

Code examples

Code example from: openzipkin/brave (the same delegating wrapper is also published as the io.zipkin.brave/brave-instrumentation-kafka-clients Maven artifact)

@Override public Map<MetricName, ? extends Metric> metrics() {
 return delegate.metrics();
}

Code example from: spring-projects/spring-kafka

@Override
public Map<String, Map<MetricName, ? extends Metric>> metrics() {
  ListenerConsumer listenerConsumerForMetrics = this.listenerConsumer;
  if (listenerConsumerForMetrics != null) {
    Map<MetricName, ? extends Metric> metrics = listenerConsumerForMetrics.consumer.metrics();
    Iterator<MetricName> metricIterator = metrics.keySet().iterator();
    if (metricIterator.hasNext()) {
      String clientId = metricIterator.next().tags().get("client-id");
      return Collections.singletonMap(clientId, metrics);
    }
  }
  return Collections.emptyMap();
}
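
The Spring Kafka override above keys the result by the consumer's client-id. From application code one would typically reach these metrics through the listener container rather than calling this override directly; a minimal hedged sketch, where the listener id "myListener" and the constructor injection of the registry are assumptions, not part of the snippet above:

import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.springframework.kafka.config.KafkaListenerEndpointRegistry;
import org.springframework.kafka.listener.MessageListenerContainer;

public class ContainerMetricsPrinter {

  private final KafkaListenerEndpointRegistry registry;

  public ContainerMetricsPrinter(KafkaListenerEndpointRegistry registry) {
    this.registry = registry;
  }

  public void printMetrics() {
    // "myListener" is a hypothetical @KafkaListener id
    MessageListenerContainer container = registry.getListenerContainer("myListener");
    if (container == null) {
      return;
    }
    // Outer key: client-id, inner map: that consumer's metrics
    Map<String, Map<MetricName, ? extends Metric>> metrics = container.metrics();
    metrics.forEach((clientId, clientMetrics) ->
        clientMetrics.forEach((name, metric) ->
            System.out.println(clientId + " " + name.name() + " = " + metric.metricValue())));
  }
}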

Code example from: io.opentracing.contrib/opentracing-kafka-client (identical code also appears under the opentracing-contrib/java-kafka-client GitHub origin)

@Override
public Map<MetricName, ? extends Metric> metrics() {
 return consumer.metrics();
}

Code example from: rayokota/kafka-graphs

@Override
public Map<MetricName, ? extends Metric> metrics() {
  return kafkaConsumer.metrics();
}

Code example from: linkedin/li-apache-kafka-clients

@Override
public Map<MetricName, ? extends Metric> metrics() {
 return _kafkaConsumer.metrics();
}

Code example from: org.apache.kafka/kafka-streams

public Map<MetricName, Metric> consumerMetrics() {
  return Collections.unmodifiableMap(globalConsumer.metrics());
}

Code example from: org.apache.kafka/kafka-streams

public Map<MetricName, Metric> consumerMetrics() {
  final Map<MetricName, ? extends Metric> consumerMetrics = consumer.metrics();
  final Map<MetricName, ? extends Metric> restoreConsumerMetrics = restoreConsumer.metrics();
  final LinkedHashMap<MetricName, Metric> result = new LinkedHashMap<>();
  result.putAll(consumerMetrics);
  result.putAll(restoreConsumerMetrics);
  return result;
}
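
The two kafka-streams snippets above are internal helpers that expose (and, in the second case, merge) the metrics of the embedded consumers. In application code the usual entry point is KafkaStreams#metrics(), which returns the combined metrics of all embedded clients; a minimal sketch filtering for the consumer groups, assuming an already started KafkaStreams instance:

import java.util.Map;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.streams.KafkaStreams;

public class StreamsConsumerMetrics {

  // Prints the consumer-related metrics of an already started KafkaStreams instance.
  public static void printConsumerMetrics(KafkaStreams streams) {
    Map<MetricName, ? extends Metric> metrics = streams.metrics();
    metrics.forEach((name, metric) -> {
      // consumer metrics appear in groups such as "consumer-fetch-manager-metrics"
      // and "consumer-coordinator-metrics"
      if (name.group().startsWith("consumer")) {
        System.out.println(name.group() + " / " + name.name() + " = " + metric.metricValue());
      }
    });
  }
}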

Code example from: apache/samza (the same code is also published in the org.apache.samza/samza-kafka and org.apache.samza/samza-kafka_2.11 Maven artifacts)

private void populateCurrentLags(Set<SystemStreamPartition> ssps) {
 Map<MetricName, ? extends Metric> consumerMetrics = kafkaConsumer.metrics();
 // populate the MetricNames first time
 if (perPartitionMetrics.isEmpty()) {
  HashMap<String, String> tags = new HashMap<>();
  tags.put("client-id", clientId); // this is required by the KafkaConsumer to get the metrics
  for (SystemStreamPartition ssp : ssps) {
   TopicPartition tp = KafkaSystemConsumer.toTopicPartition(ssp);
   perPartitionMetrics.put(ssp, new MetricName(tp + ".records-lag", "consumer-fetch-manager-metrics", "", tags));
  }
 }
 for (SystemStreamPartition ssp : ssps) {
  MetricName mn = perPartitionMetrics.get(ssp);
  Metric currentLagMetric = consumerMetrics.get(mn);
  // High watermark is fixed to be the offset of last available message,
  // so the lag is now at least 0, which is the same as Samza's definition.
  // If the lag is not 0, then isAtHead is not true, and kafkaClient keeps polling.
  long currentLag = (currentLagMetric != null) ? (long) currentLagMetric.value() : -1L;
  latestLags.put(ssp, currentLag);
  // calls the setIsAtHead for the BlockingEnvelopeMap
  sink.setIsAtHighWatermark(ssp, currentLag == 0);
 }
}
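
The Samza helper above pre-builds one MetricName per partition using the older "<topic>-<partition>.records-lag" naming plus the client-id tag, and reads it with Metric.value(). In recent kafka-clients versions value() is deprecated in favour of metricValue(), and (per KIP-225) the per-partition lag is reported as a metric named records-lag tagged with topic and partition instead. As a hedged alternative for those newer clients, the lag can be found by scanning the metrics map rather than hard-coding the name format; LagLookup and currentLag below are illustrative names, not part of Samza:

import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicPartition;

public class LagLookup {

  // Scans the consumer's metrics for the records-lag metric of a single partition.
  // Assumes newer clients where the lag metric is named "records-lag" and tagged
  // with topic and partition; returns -1 if the metric is not (yet) reported.
  public static long currentLag(Consumer<?, ?> consumer, TopicPartition tp) {
    Map<MetricName, ? extends Metric> metrics = consumer.metrics();
    for (Map.Entry<MetricName, ? extends Metric> entry : metrics.entrySet()) {
      MetricName name = entry.getKey();
      if ("records-lag".equals(name.name())
          && tp.topic().equals(name.tags().get("topic"))
          && String.valueOf(tp.partition()).equals(name.tags().get("partition"))) {
        Object value = entry.getValue().metricValue();
        if (value instanceof Number) {
          return ((Number) value).longValue();
        }
      }
    }
    return -1L;
  }
}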

Code example from: reactor/reactor-kafka

@Test
public void consumerMethods() throws Exception {
  testConsumerMethod(c -> assertEquals(this.assignedPartitions, c.assignment()));
  testConsumerMethod(c -> assertEquals(Collections.singleton(topic), c.subscription()));
  testConsumerMethod(c -> assertEquals(2, c.partitionsFor(topics.get(2)).size()));
  testConsumerMethod(c -> assertEquals(topics.size(), c.listTopics().size()));
  testConsumerMethod(c -> assertEquals(0, c.metrics().size()));
  testConsumerMethod(c -> {
    Collection<TopicPartition> partitions = Collections.singleton(new TopicPartition(topic, 1));
    c.pause(partitions);
    assertEquals(partitions, c.paused());
    c.resume(partitions);
  });
  testConsumerMethod(c -> {
    TopicPartition partition = new TopicPartition(topic, 1);
    Collection<TopicPartition> partitions = Collections.singleton(partition);
    long position = c.position(partition);
    c.seekToBeginning(partitions);
    assertEquals(0, c.position(partition));
    c.seekToEnd(partitions);
    assertTrue("Did not seek to end", c.position(partition) > 0);
    c.seek(partition, position);
  });
}
