本文整理了Java中org.apache.kafka.clients.consumer.Consumer.assign()
方法的一些代码示例,展示了Consumer.assign()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Consumer.assign()
方法的具体详情如下:
包路径:org.apache.kafka.clients.consumer.Consumer
类名称:Consumer
方法名:assign
暂无
代码示例来源:origin: openzipkin/brave
/**
 * Delegates partition assignment to the wrapped consumer.
 * This is a pass-through decorator method (from a tracing wrapper around the
 * Kafka {@code Consumer}); it adds no behavior of its own.
 */
@Override public void assign(Collection<TopicPartition> partitions) {
delegate.assign(partitions);
}
代码示例来源:origin: apache/storm
/**
 * Assign partitions to the KafkaConsumer, firing the rebalance-listener
 * callbacks only when the assignment actually changes.
 *
 * @param <K> The consumer key type
 * @param <V> The consumer value type
 * @param consumer The Kafka consumer to assign partitions to
 * @param newAssignment The partitions to assign.
 * @param listener The rebalance listener to call back on when the assignment changes
 */
public <K, V> void assignPartitions(Consumer<K, V> consumer, Set<TopicPartition> newAssignment,
    ConsumerRebalanceListener listener) {
    final Set<TopicPartition> previousAssignment = consumer.assignment();
    // No-op when the consumer already owns exactly the requested partitions.
    if (newAssignment.equals(previousAssignment)) {
        return;
    }
    // Mirror the normal rebalance protocol: revoke the old set, switch, then announce the new set.
    listener.onPartitionsRevoked(previousAssignment);
    consumer.assign(newAssignment);
    listener.onPartitionsAssigned(newAssignment);
}
代码示例来源:origin: confluentinc/ksql
/**
 * Creates the command-topic wrapper and pins the consumer to partition 0 of
 * the command topic via manual assignment.
 *
 * @param commandTopicName name of the Kafka topic holding the commands; must be non-null
 * @param commandConsumer  consumer used to read commands; must be non-null
 * @param commandProducer  producer used to write commands; must be non-null
 */
CommandTopic(
    final String commandTopicName,
    final Consumer<CommandId, Command> commandConsumer,
    final Producer<CommandId, Command> commandProducer
) {
  // Validate all arguments BEFORE using them. The original used commandTopicName
  // (in new TopicPartition) and commandConsumer (in assign) before their
  // requireNonNull checks ran, so a null argument produced a bare NPE instead
  // of the intended descriptive message.
  this.commandTopicName = Objects.requireNonNull(commandTopicName, "commandTopicName");
  this.commandConsumer = Objects.requireNonNull(commandConsumer, "commandConsumer");
  this.commandProducer = Objects.requireNonNull(commandProducer, "commandProducer");
  // The command topic is read from a single fixed partition (0).
  this.commandTopicPartition = new TopicPartition(commandTopicName, 0);
  commandConsumer.assign(Collections.singleton(commandTopicPartition));
}
代码示例来源:origin: apache/metron
/**
 * Fetches a single sample message from the given topic.
 *
 * @param topic the topic to sample
 * @return the value of one recent record, or {@code null} when the topic does
 *         not exist or yields no records within the poll timeout
 */
@Override
public String getSampleMessage(final String topic) {
  if (!listTopics().contains(topic)) {
    return null;
  }
  try (Consumer<String, String> kafkaConsumer = kafkaConsumerFactory.createConsumer()) {
    // Manually assign every partition of the topic to this throwaway consumer.
    kafkaConsumer.assign(kafkaConsumer.partitionsFor(topic).stream()
        .map(info -> new TopicPartition(topic, info.partition()))
        .collect(Collectors.toList()));
    // Rewind one record on each partition that has data, so poll() can return something.
    for (TopicPartition tp : kafkaConsumer.assignment()) {
      final long position = kafkaConsumer.position(tp);
      if (position - 1 >= 0) {
        kafkaConsumer.seek(tp, position - 1);
      }
    }
    final ConsumerRecords<String, String> records = kafkaConsumer.poll(KAFKA_CONSUMER_TIMEOUT);
    final String message = records.isEmpty() ? null : records.iterator().next().value();
    kafkaConsumer.unsubscribe();
    return message;
  }
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Looks up the earliest available offset for the given partition by assigning
 * the consumer to it, seeking to the beginning, and reading the position.
 *
 * @param partition the Kafka partition to inspect
 * @return the earliest offset currently retained for the partition
 */
@Override
public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
  final TopicPartition target = new TopicPartition(partition.getTopicName(), partition.getId());
  this.consumer.assign(Collections.singletonList(target));
  this.consumer.seekToBeginning(target);
  return this.consumer.position(target);
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Looks up the latest offset for the given partition by assigning the
 * consumer to it, seeking to the end, and reading the position.
 *
 * @param partition the Kafka partition to inspect
 * @return the log-end offset of the partition
 */
@Override
public long getLatestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
  final TopicPartition target = new TopicPartition(partition.getTopicName(), partition.getId());
  this.consumer.assign(Collections.singletonList(target));
  this.consumer.seekToEnd(target);
  return this.consumer.position(target);
}
代码示例来源:origin: confluentinc/ksql
@Test
public void shouldAssignCorrectPartitionToConsumer() {
  // The command consumer must be pinned to exactly partition 0 of the command topic.
  final TopicPartition expected = new TopicPartition(COMMAND_TOPIC_NAME, 0);
  verify(commandConsumer).assign(eq(Collections.singleton(expected)));
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Consumes records for the given partition starting at {@code nextOffset}.
 *
 * @param partition  the Kafka partition to read from
 * @param nextOffset the offset to start consuming at
 * @param maxOffset  the highest offset of interest
 * @return an iterator over the fetched records wrapped as {@code KafkaConsumerRecord},
 *         or {@code null} when {@code nextOffset} is already past {@code maxOffset}
 */
@Override
public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
  if (nextOffset > maxOffset) {
    return null;
  }
  // Build the TopicPartition once; the original constructed an identical
  // instance twice (once for assign, once for seek).
  final TopicPartition topicPartition = new TopicPartition(partition.getTopicName(), partition.getId());
  this.consumer.assign(Lists.newArrayList(topicPartition));
  this.consumer.seek(topicPartition, nextOffset);
  ConsumerRecords<K, V> consumerRecords = consumer.poll(super.fetchTimeoutMillis);
  // Lazily wrap each fetched record in the Kafka-0.9 record adapter.
  return Iterators.transform(consumerRecords.iterator(), new Function<ConsumerRecord<K, V>, KafkaConsumerRecord>() {
    @Override
    public KafkaConsumerRecord apply(ConsumerRecord<K, V> input) {
      return new Kafka09ConsumerRecord<>(input);
    }
  });
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Builds a streaming extractor bound to a single Kafka topic partition derived
 * from the work unit state, and wires up the optional schema registry.
 *
 * @param state work unit state carrying Kafka connection properties, the target
 *              topic/partition, and (optionally) schema-registry configuration
 */
public KafkaSimpleStreamingExtractor(WorkUnitState state) {
super(state);
// Consumer lifecycle is tied to this extractor: the closer will close it on shutdown.
_consumer = KafkaSimpleStreamingSource.getKafkaConsumer(ConfigUtils.propertiesToConfig(state.getProperties()));
closer.register(_consumer);
_partition = new TopicPartition(KafkaSimpleStreamingSource.getTopicNameFromState(state),
KafkaSimpleStreamingSource.getPartitionIdFromState(state));
// Manual assignment of exactly one partition; no consumer-group rebalancing is involved.
_consumer.assign(Collections.singletonList(_partition));
// Schema registry is optional: only constructed when the registry class is configured.
this._schemaRegistry = state.contains(KafkaSchemaRegistry.KAFKA_SCHEMA_REGISTRY_CLASS) ? Optional
.of(KafkaSchemaRegistry.<String, S>get(state.getProperties()))
: Optional.<KafkaSchemaRegistry<String, S>>absent();
this.fetchTimeOut = state.getPropAsLong(AbstractBaseKafkaConsumerClient.CONFIG_KAFKA_FETCH_TIMEOUT_VALUE,
AbstractBaseKafkaConsumerClient.CONFIG_KAFKA_FETCH_TIMEOUT_VALUE_DEFAULT);
}
代码示例来源:origin: apache/kylin
/**
 * Initializes the record reader for one Kafka input split: resolves the
 * brokers/topic/partition and offset range from the split, creates a consumer,
 * and manually assigns it the split's single partition.
 *
 * @param split the input split; must be a {@code KafkaInputSplit}
 * @param conf  Hadoop configuration carrying Kafka client settings
 * @throws IOException          declared by the reader contract
 * @throws InterruptedException declared by the reader contract
 */
public void initialize(InputSplit split, Configuration conf) throws IOException, InterruptedException {
this.conf = conf;
this.split = (KafkaInputSplit) split;
brokers = this.split.getBrokers();
topic = this.split.getTopic();
partition = this.split.getPartition();
// Reading starts at the split's start offset; "watermark" tracks consumption progress.
watermark = this.split.getOffsetStart();
// Poll timeout is optional config; keep the field's default when unset.
if (conf.get(KafkaFlatTableJob.CONFIG_KAFKA_TIMEOUT) != null) {
timeOut = Long.parseLong(conf.get(KafkaFlatTableJob.CONFIG_KAFKA_TIMEOUT));
}
String consumerGroup = conf.get(KafkaFlatTableJob.CONFIG_KAFKA_CONSUMER_GROUP);
Properties kafkaProperties = KafkaConsumerProperties.extractKafkaConfigToProperties(conf);
consumer = org.apache.kylin.source.kafka.util.KafkaClient.getKafkaConsumer(brokers, consumerGroup,
kafkaProperties);
earliestOffset = this.split.getOffsetStart();
latestOffset = this.split.getOffsetEnd();
// Manual assignment: consume exactly this split's partition, bypassing group rebalancing.
TopicPartition topicPartition = new TopicPartition(topic, partition);
consumer.assign(Arrays.asList(topicPartition));
log.info("Split {} Topic: {} Broker: {} Partition: {} Start: {} End: {}",
new Object[] { this.split, topic, this.split.getBrokers(), partition, earliestOffset, latestOffset });
}
代码示例来源:origin: apache/hive
final List<TopicPartition> topicPartitionList = Collections.singletonList(topicPartition);
consumer.assign(topicPartitionList);
代码示例来源:origin: spring-projects/spring-kafka
assertThat(smallOffsetCommitted.get()).isFalse();
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
assertThat(consumer.position(new TopicPartition(topic9, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic9, 1))).isEqualTo(2);
代码示例来源:origin: spring-projects/spring-kafka
consumer.assign(Arrays.asList(new TopicPartition(topic8, 0), new TopicPartition(topic8, 1)));
assertThat(consumer.position(new TopicPartition(topic8, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic8, 1))).isEqualTo(2);
代码示例来源:origin: spring-projects/spring-kafka
consumer.assign(Arrays.asList(new TopicPartition(topic7, 0), new TopicPartition(topic7, 1)));
assertThat(consumer.position(new TopicPartition(topic7, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic7, 1))).isEqualTo(2);
代码示例来源:origin: spring-projects/spring-kafka
consumer.assign(Arrays.asList(new TopicPartition(topic, 0), new TopicPartition(topic, 0)));
代码示例来源:origin: spring-projects/spring-kafka
container.stop();
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic9, 0), new TopicPartition(topic9, 1)));
代码示例来源:origin: spring-projects/spring-kafka
assertThat(commitLatch.await(60, TimeUnit.SECONDS)).isTrue();
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic10, 0), new TopicPartition(topic10, 1)));
assertThat(consumer.position(new TopicPartition(topic10, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic10, 1))).isEqualTo(2);
代码示例来源:origin: spring-projects/spring-kafka
container2.stop();
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic18, 0), new TopicPartition(topic18, 1)));
代码示例来源:origin: spring-projects/spring-kafka
assertThat(latch.await(60, TimeUnit.SECONDS)).isTrue();
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic6, 0), new TopicPartition(topic6, 1)));
assertThat(consumer.position(new TopicPartition(topic6, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic6, 1))).isEqualTo(2);
代码示例来源:origin: spring-projects/spring-kafka
container2.stop();
Consumer<Integer, String> consumer = cf.createConsumer();
consumer.assign(Arrays.asList(new TopicPartition(topic14, 0), new TopicPartition(topic14, 1)));
assertThat(consumer.position(new TopicPartition(topic14, 0))).isEqualTo(2);
assertThat(consumer.position(new TopicPartition(topic14, 1))).isEqualTo(2);
内容来源于网络,如有侵权,请联系作者删除!