This article collects Java code examples of the kafka.message.Message class and shows how the class is used in practice. The examples are taken from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, so they carry real-world reference value and should be useful as a guide. Details of the Message class:

Package path: kafka.message.Message
Class name: Message
Class description: none available
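Before the project excerpts, here is a minimal, self-contained sketch of the core accessors those excerpts rely on. It assumes the legacy Scala client artifact (kafka_2.x, before the new-consumer era) is on the classpath; the class name MessageDemo and the sample bytes are illustrative only.

import java.nio.ByteBuffer;
import kafka.message.Message;

public class MessageDemo {
    public static void main(String[] args) {
        byte[] payload = "hello".getBytes();
        byte[] key = "k1".getBytes();

        // Message(byte[] bytes, byte[] key) wraps payload and key together;
        // the single-argument form Message(byte[] bytes) creates a key-less message.
        Message message = new Message(payload, key);

        System.out.println("hasKey:   " + message.hasKey());   // true
        System.out.println("isNull:   " + message.isNull());   // true only for a null payload
        System.out.println("checksum: " + message.checksum()); // CRC stored in the message header

        // payload() and key() return ByteBuffer views; copy them out with
        // remaining()/get() rather than assuming a conveniently sized backing array.
        ByteBuffer buf = message.payload();
        byte[] copy = new byte[buf.remaining()];
        buf.get(copy);
        System.out.println("payload:  " + new String(copy));
    }
}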
Code example source: origin: Graylog2/graylog2-server

final MessageAndOffset messageAndOffset = iterator.next();

if (firstOffset == Long.MIN_VALUE) {
    firstOffset = messageAndOffset.offset();
}
lastOffset = messageAndOffset.offset();

final byte[] payloadBytes = ByteBufferUtils.readBytes(messageAndOffset.message().payload());
if (LOG.isTraceEnabled()) {
    final byte[] keyBytes = ByteBufferUtils.readBytes(messageAndOffset.message().key());
    LOG.trace("Read message {} contains {}", bytesToHex(keyBytes), bytesToHex(payloadBytes));
}
Code example source: origin: Graylog2/graylog2-server

// Two-argument constructor: Message(payload bytes, key bytes).
final Message newMessage = new Message(messageBytes, idBytes);
Code example source: origin: prestodb/presto

private boolean nextRow(MessageAndOffset messageAndOffset)
{
    cursorOffset = messageAndOffset.offset() + 1; // Cursor now points to the next message.
    totalBytes += messageAndOffset.message().payloadSize();
    totalMessages++;

    ByteBuffer key = messageAndOffset.message().key();
    if (key != null) {
        keyData = new byte[key.remaining()];
        key.get(keyData);
    }

    ByteBuffer message = messageAndOffset.message().payload();
    if (message != null) {
        messageData = new byte[message.remaining()];
        message.get(messageData);
    }
    // ... decode keyData/messageData into row fields in the original ...
    return true;
}
Code example source: origin: pinterest/secor

// The original excerpt begins mid-call; the receiver of messageSet() below is reconstructed.
MessageAndOffset messageAndOffset = fetchResponse.messageSet(
        topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();

byte[] keyBytes = null;
if (messageAndOffset.message().hasKey()) {
    ByteBuffer key = messageAndOffset.message().key();
    keyBytes = new byte[key.limit()];
    key.get(keyBytes);
}

byte[] payloadBytes = null;
if (!messageAndOffset.message().isNull()) {
    ByteBuffer payload = messageAndOffset.message().payload();
    payloadBytes = new byte[payload.limit()];
    payload.get(payloadBytes);
}
Code example source: origin: alibaba/jstorm
public EmitState emit(SpoutOutputCollector collector) {
    if (emittingMessages.isEmpty()) {
        fillMessages();
    }

    int count = 0;
    while (true) {
        MessageAndOffset toEmitMsg = emittingMessages.pollFirst();
        if (toEmitMsg == null) {
            return EmitState.EMIT_END;
        }
        count++;
        Iterable<List<Object>> tups = generateTuples(toEmitMsg.message());
        if (tups != null) {
            for (List<Object> tuple : tups) {
                LOG.debug("emit message {}", new String(Utils.toByteArray(toEmitMsg.message().payload())));
                collector.emit(tuple, new KafkaMessageId(partition, toEmitMsg.offset()));
            }
            if (count >= config.batchSendCount) {
                break;
            }
        } else {
            ack(toEmitMsg.offset());
        }
    }

    if (emittingMessages.isEmpty()) {
        return EmitState.EMIT_END;
    } else {
        return EmitState.EMIT_MORE;
    }
}
Code example source: origin: apache/flink

// Excerpt from the fetch loop; the enclosing loop and some statements are omitted in the source,
// and the skip branch below is reconstructed around the log statement.
if (running) {
    messagesInFetch++;
    final ByteBuffer payload = msg.message().payload();
    final long offset = msg.offset();

    if (offset <= currentPartition.getOffset()) {
        // Already-consumed message: skip it.
        LOG.info("Skipping message with offset " + msg.offset()
                + " because we have seen messages until (including) "
                + currentPartition.getOffset() + " already");
        continue;
    }

    int keySize = msg.message().keySize();
    ByteBuffer keyPayload = msg.message().key();
    keyBytes = new byte[keySize];
    keyPayload.get(keyBytes);
    // ... deserialization and emission of the record follow in the original ...
}
Code example source: origin: apache/incubator-pinot
public byte[] getMessageAtIndex(int index) {
    return messageList.get(index).message().payload().array();
}
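One caution on the Pinot excerpt above: ByteBuffer.array() returns the buffer's entire backing array, so when the payload buffer is a slice with a non-zero array offset the returned bytes can include data beyond the payload itself. The sketch below (the helper name copyPayload is made up for illustration) shows the copy-out pattern the Presto and Secor excerpts use instead:

import java.nio.ByteBuffer;
import kafka.message.Message;

final class PayloadBytes {
    // Copies exactly the readable bytes of the payload view, regardless of
    // how the underlying buffer is backed or sliced.
    static byte[] copyPayload(Message message) {
        ByteBuffer payload = message.payload();
        byte[] bytes = new byte[payload.remaining()];
        payload.get(bytes);
        return bytes;
    }
}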
Code example source: origin: linkedin/camus

/**
 * Fetches the next Kafka message and stores the result in the given key
 * and in the returned value.
 *
 * @param etlKey the key to populate with offset and checksum metadata
 * @return the next KafkaMessage, or null if there are no more events
 * @throws IOException
 */
public KafkaMessage getNext(EtlKey etlKey) throws IOException {
    if (hasNext()) {
        MessageAndOffset msgAndOffset = messageIter.next();
        Message message = msgAndOffset.message();

        byte[] payload = getBytes(message.payload());
        byte[] key = getBytes(message.key());

        if (payload == null) {
            log.warn("Received message with null message.payload(): " + msgAndOffset);
        }

        etlKey.clear();
        etlKey.set(kafkaRequest.getTopic(), kafkaRequest.getLeaderId(), kafkaRequest.getPartition(), currentOffset,
                msgAndOffset.offset() + 1, message.checksum());
        etlKey.setMessageSize(msgAndOffset.message().size());

        currentOffset = msgAndOffset.offset() + 1; // increase offset
        currentCount++; // increase count

        return new KafkaMessage(payload, key, kafkaRequest.getTopic(), kafkaRequest.getPartition(),
                msgAndOffset.offset(), message.checksum());
    } else {
        return null;
    }
}
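Note the offset convention in getNext(): the EtlKey and currentOffset record msgAndOffset.offset() + 1, the offset of the next message to fetch, while the returned KafkaMessage carries the offset of the message itself. The Presto excerpt above maintains its cursorOffset the same way.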
Code example source: origin: apache/apex-malhar
private void initializeLastProcessingOffset()
{
    // read last received kafka message
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(
            Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)),
            this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }

    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);

    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.')
                + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(),
                kafka.api.OffsetRequest.LatestTime(), clientName);

        FetchRequest req = new FetchRequestBuilder().clientId(clientName)
                .addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(req);

        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
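One caveat about this excerpt: in the legacy client, Message.key() returns null for messages produced without a key (the Presto excerpt checks for exactly this, and Secor and Kafdrop guard with hasKey()), so the unguarded key.limit() call here assumes every partition's latest message is keyed.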
Code example source: origin: apache/incubator-gobblin
public Kafka08ConsumerRecord(MessageAndOffset messageAndOffset) {
    super(messageAndOffset.offset(), messageAndOffset.message().size());
    this.messageAndOffset = messageAndOffset;
}
Code example source: origin: alibaba/jstorm
@SuppressWarnings("unchecked")
public Iterable<List<Object>> generateTuples(Message msg) {
    Iterable<List<Object>> tups = null;
    ByteBuffer payload = msg.payload();
    if (payload == null) {
        return null;
    }
    tups = Arrays.asList(Utils.tuple(Utils.toByteArray(payload)));
    return tups;
}
Code example source: origin: apache/incubator-gobblin
@Override
public byte[] getKeyBytes() {
    return getBytes(this.messageAndOffset.message().key());
}
Code example source: origin: org.apache.storm/storm-kafka
public static Iterable<List<Object>> generateTuples(KafkaConfig kafkaConfig, Message msg, String topic) {
    Iterable<List<Object>> tups;
    ByteBuffer payload = msg.payload();
    if (payload == null) {
        return null;
    }

    ByteBuffer key = msg.key();
    if (key != null && kafkaConfig.scheme instanceof KeyValueSchemeAsMultiScheme) {
        tups = ((KeyValueSchemeAsMultiScheme) kafkaConfig.scheme).deserializeKeyAndValue(key, payload);
    } else {
        if (kafkaConfig.scheme instanceof StringMultiSchemeWithTopic) {
            tups = ((StringMultiSchemeWithTopic) kafkaConfig.scheme).deserializeWithTopic(topic, payload);
        } else {
            tups = kafkaConfig.scheme.deserialize(payload);
        }
    }
    return tups;
}
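The design point of this excerpt is that deserialization is delegated to a pluggable scheme: KeyValueSchemeAsMultiScheme is consulted only when the message actually carries a key, StringMultiSchemeWithTopic attaches the topic name to each tuple, and any other scheme simply decodes the payload. Switching wire formats therefore means swapping the scheme in KafkaConfig rather than editing the spout.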
Code example source: origin: HomeAdvisor/Kafdrop
private MessageVO createMessage(Message message, MessageDeserializer deserializer)
{
    MessageVO vo = new MessageVO();
    if (message.hasKey())
    {
        vo.setKey(ByteUtils.readString(message.key()));
    }
    if (!message.isNull())
    {
        final String messageString = deserializer.deserializeMessage(message.payload());
        vo.setMessage(messageString);
    }

    vo.setValid(message.isValid());
    vo.setCompressionCodec(message.compressionCodec().name());
    vo.setChecksum(message.checksum());
    vo.setComputedChecksum(message.computeChecksum());

    return vo;
}
Code example source: origin: apache/incubator-pinot
public int getMessageLengthAtIndex(int index) {
    return messageList.get(index).message().payloadSize();
}
Code example source: origin: linkedin/camus

public void validate() throws IOException {
    // Check the checksum of the message.
    Message readMessage;
    if (key == null) {
        readMessage = new Message(payload);
    } else {
        readMessage = new Message(payload, key);
    }
    if (checksum != readMessage.checksum()) {
        throw new ChecksumException("Invalid message checksum : " + readMessage.checksum() + ". Expected " + checksum,
                offset);
    }
}
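validate() rebuilds a Message from the stored payload and key purely so the client recomputes the CRC checksum that Message stores in its header; comparing it with the recorded checksum catches corruption introduced after the message was first read. The Kafdrop example earlier performs the equivalent check with isValid() and computeChecksum().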
Code example source: origin: org.apache.apex/malhar-contrib

protected final void putMessage(KafkaPartition partition, Message msg, long offset) throws InterruptedException {
    // Blocks when the holding buffer is full, preventing receipt of more messages.
    holdingBuffer.put(new KafkaMessage(partition, msg, offset));
    statsSnapShot.mark(partition, msg.payloadSize());
}
Code example source: origin: apache/apex-malhar

if ((emitCount > 0) && ((maxTotalMsgSizePerWindow - emitTotalMsgSize) < message.msg.size())) {
    // Hold this message for the next window: emitting it would exceed the per-window size budget.
    pendingMessage = message;
    break;
}
emitTotalMsgSize += message.msg.size();
offsetStats.put(message.kafkaPart, message.offSet);
MutablePair<Long, Integer> offsetAndCount = currentWindowRecoveryState.get(message.kafkaPart);
Code example source: origin: co.cask.cdap/cdap-watchdog

/**
 * Fetches Kafka messages starting from the given offset.
 * @param offset the message offset to start from.
 * @param callback the callback to handle each fetched message.
 * @return the number of messages fetched.
 */
public int fetchMessages(long offset, Callback callback) throws OffsetOutOfRangeException {
    ByteBufferMessageSet messageSet = fetchMessageSet(offset);
    int msgCount = 0;
    for (MessageAndOffset msg : messageSet) {
        ++msgCount;
        callback.handle(msg.offset(), msg.message().payload());
    }
    return msgCount;
}