java.nio.ByteBuffer.rewind()方法的使用及代码示例

x33g5p2x  于2022-01-16 转载在 其他  
字(8.6k)|赞(0)|评价(0)|浏览(322)

本文整理了Java中java.nio.ByteBuffer.rewind()方法的一些代码示例,展示了ByteBuffer.rewind()的具体用法。这些代码示例主要来源于GitHub/Stack Overflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ByteBuffer.rewind()方法的具体详情如下:
包路径:java.nio.ByteBuffer
类名称:ByteBuffer
方法名:rewind

ByteBuffer.rewind介绍

rewind() 将缓冲区的 position 重置为 0 并丢弃 mark,limit 保持不变,便于从头重新读取(或重新写出)缓冲区中已有的数据。

代码示例

代码示例来源:origin: apache/kafka

/**
 * Encodes {@code size} as a 4-byte big-endian integer.
 *
 * @param size the value to encode
 * @return a buffer holding the encoded size, rewound to position 0 so the
 *         caller can immediately read it or hand it to a channel write
 */
private static ByteBuffer sizeBuffer(int size) {
    // Cast needed because Buffer.rewind() returns Buffer on pre-9 JDKs.
    return (ByteBuffer) ByteBuffer.allocate(Integer.BYTES).putInt(size).rewind();
}

代码示例来源:origin: apache/incubator-druid

/**
 * Builds a cache key by concatenating the cache-id prefix bytes with the
 * key bytes.
 *
 * @param idBytes identifier prefix bytes placed first
 * @param key     key bytes appended after the prefix
 * @return a buffer containing idBytes followed by key, rewound to position 0
 */
private ByteBuffer computeKey(byte[] idBytes, byte[] key)
{
 // Size by the actual prefix length rather than the hard-coded 4 the code
 // previously assumed: this cannot overflow (or leave zero padding) if the
 // id prefix is ever a length other than 4 bytes, and is byte-identical
 // for the 4-byte case.
 final ByteBuffer retVal = ByteBuffer.allocate(idBytes.length + key.length).put(idBytes).put(key);
 retVal.rewind();
 return retVal;
}

代码示例来源:origin: apache/kafka

/**
 * Converts the full contents of {@code data} (from position 0 to its limit)
 * into a byte array.
 *
 * The buffer is rewound first, so whatever the current position is, the whole
 * content is serialized. When the buffer is heap-backed and its backing array
 * coincides exactly with the content, that array is returned directly to avoid
 * a copy; otherwise the bytes are copied out and the buffer is rewound again
 * so the caller observes position 0 either way.
 *
 * @param topic topic associated with the data (not used here)
 * @param data  buffer to serialize; may be {@code null}
 * @return the serialized bytes, or {@code null} when {@code data} is null
 */
public byte[] serialize(String topic, ByteBuffer data) {
    if (data == null) {
        return null;
    }
    data.rewind();

    // Fast path: the backing array is exactly the content — hand it back.
    if (data.hasArray() && data.arrayOffset() == 0 && data.array().length == data.remaining()) {
        return data.array();
    }

    // Slow path (direct buffer, offset view, or shorter limit): copy out,
    // then restore position 0.
    final byte[] copy = new byte[data.remaining()];
    data.get(copy, 0, copy.length);
    data.rewind();
    return copy;
}

代码示例来源:origin: apache/incubator-druid

/**
 * Reads a HyperLogLogCollector out of the shared aggregation buffer at
 * {@code position} without disturbing that buffer's position or limit.
 */
@Override
public Object get(ByteBuffer buf, int position)
{
 // Work on a duplicate so the shared buffer's cursor state is untouched;
 // restrict the window to exactly one dense collector.
 final int denseSize = HyperLogLogCollector.getLatestNumBytesForDenseStorage();
 final ByteBuffer window = buf.duplicate();
 window.position(position);
 window.limit(position + denseSize);
 // Snapshot the dense representation into a private buffer, rewound so the
 // collector reads it from the start.
 final ByteBuffer dense = ByteBuffer.allocate(denseSize);
 dense.put(window);
 dense.rewind();
 return HyperLogLogCollector.makeCollector(dense);
}

代码示例来源:origin: apache/incubator-druid

/**
 * Serializes this collector to a read-only ByteBuffer.
 *
 * If the collector is currently stored densely but has few enough non-zero
 * registers (below DENSE_THRESHOLD), it is re-encoded sparsely as a header
 * followed by (short bucket-offset, byte register-value) pairs — 3 bytes per
 * non-zero register. Otherwise a read-only view of the existing storage is
 * returned as-is.
 */
public ByteBuffer toByteBuffer()
{
 final short numNonZeroRegisters = getNumNonZeroRegisters();
 // store sparsely
 if (storageBuffer.remaining() == getNumBytesForDenseStorage() && numNonZeroRegisters < DENSE_THRESHOLD) {
  // 3 bytes per sparse entry (2-byte offset + 1-byte value) plus the header.
  final ByteBuffer retVal = ByteBuffer.wrap(new byte[numNonZeroRegisters * 3 + getNumHeaderBytes()]);
  setVersion(retVal);
  setRegisterOffset(retVal, getRegisterOffset());
  setNumNonZeroRegisters(retVal, numNonZeroRegisters);
  setMaxOverflowValue(retVal, getMaxOverflowValue());
  setMaxOverflowRegister(retVal, getMaxOverflowRegister());
  final int startPosition = getPayloadBytePosition();
  retVal.position(getPayloadBytePosition(retVal));
  final byte[] zipperBuffer = new byte[NUM_BYTES_FOR_BUCKETS];
  // Read the payload through a read-only copy so storageBuffer's own
  // position is never moved.
  ByteBuffer roStorageBuffer = storageBuffer.asReadOnlyBuffer();
  roStorageBuffer.position(startPosition);
  roStorageBuffer.get(zipperBuffer);
  for (int i = 0; i < NUM_BYTES_FOR_BUCKETS; ++i) {
   if (zipperBuffer[i] != 0) {
    // Offset is stored relative to initPosition, truncated to 16 bits.
    final short val = (short) (0xffff & (i + startPosition - initPosition));
    retVal.putShort(val);
    retVal.put(zipperBuffer[i]);
   }
  }
  // Rewind so the consumer reads the encoding from byte 0.
  retVal.rewind();
  return retVal.asReadOnlyBuffer();
 }
 return storageBuffer.asReadOnlyBuffer();
}

代码示例来源:origin: jmxtrans/jmxtrans

/**
 * Sends everything buffered so far to the channel and resets the buffer for
 * reuse. A no-op when nothing has been written into the buffer yet.
 *
 * NOTE(review): channel.write may write fewer bytes than remaining on a
 * non-blocking channel; any unwritten remainder is discarded by the reset —
 * this preserves the original behavior, confirm it is intended.
 *
 * @throws IOException if the channel write fails
 */
@Override
public void flush() throws IOException {
    synchronized (lock) {
        // Nothing buffered — nothing to send.
        if (buffer.position() <= 0) {
            return;
        }
        // Switch the buffer to read mode and push the bytes out.
        buffer.flip();
        channel.write(buffer);
        // Back to a fresh write state: position 0, limit = capacity
        // (clear() is exactly limit(capacity) followed by rewind()).
        buffer.clear();
    }
}

代码示例来源:origin: sannies/mp4parser

/**
 * Writes this box to the channel: the standard box header, then an 8-byte
 * block whose first six bytes are the zeros left by allocation (reserved)
 * and whose last two bytes hold the data-reference index, followed by the
 * raw payload bytes.
 *
 * @throws IOException if any channel write fails
 */
@Override
public void getBox(WritableByteChannel writableByteChannel) throws IOException {
    writableByteChannel.write(getHeader());
    ByteBuffer byteBuffer = ByteBuffer.allocate(8);
    // Skip the 6 leading zero bytes; write the index into bytes 6-7.
    byteBuffer.position(6);
    IsoTypeWriter.writeUInt16(byteBuffer, dataReferenceIndex);
    // Rewind so the whole 8-byte block (zeros + index) is written.
    byteBuffer.rewind();
    writableByteChannel.write(byteBuffer);
    writableByteChannel.write(ByteBuffer.wrap(data));
}

代码示例来源:origin: apache/hive

/**
 * Extracts the contents of {@code byteBuffer} from position 0 up to its limit
 * as a fresh byte array.
 *
 * Note: this rewinds the buffer first (so the current position is ignored)
 * and leaves the buffer's position at its limit afterwards.
 *
 * @param byteBuffer source buffer; its limit defines how many bytes are read
 * @return a new array holding the buffer's content
 */
public static byte[] getBytesFromByteBuffer(ByteBuffer byteBuffer) {
 // Always read from the very beginning, whatever the current position is.
 byteBuffer.rewind();
 final byte[] contents = new byte[byteBuffer.limit()];
 byteBuffer.get(contents);
 return contents;
}

代码示例来源:origin: apache/incubator-druid

/**
 * Re-encodes the collector from sparse to dense storage in a fresh buffer,
 * then swaps that buffer in as the backing storage.
 *
 * Sparse payload entries are (short index, byte value) pairs; each pair is
 * written into the dense buffer at the absolute index carried by the short.
 */
private void convertToDenseStorage()
{
 ByteBuffer tmpBuffer = ByteBuffer.allocate(getNumBytesForDenseStorage());
 // put header
 setVersion(tmpBuffer);
 setRegisterOffset(tmpBuffer, getRegisterOffset());
 setNumNonZeroRegisters(tmpBuffer, getNumNonZeroRegisters());
 setMaxOverflowValue(tmpBuffer, getMaxOverflowValue());
 setMaxOverflowRegister(tmpBuffer, getMaxOverflowRegister());
 storageBuffer.position(getPayloadBytePosition());
 tmpBuffer.position(getPayloadBytePosition(tmpBuffer));
 // put payload
 while (storageBuffer.hasRemaining()) {
  // Absolute put: the short read from sparse storage is the dense index,
  // the following byte is the register value to place there.
  tmpBuffer.put(storageBuffer.getShort(), storageBuffer.get());
 }
 // Rewind so the new storage is readable from byte 0 before swapping it in.
 tmpBuffer.rewind();
 storageBuffer = tmpBuffer;
 initPosition = 0;
}

代码示例来源:origin: ethereum/ethereumj

/**
 * Stores the low 32 bits of {@code val} into {@code arr} at word index
 * {@code wordOff} (4 bytes per word), least-significant byte first.
 *
 * @param arr     destination array, written at [wordOff*4, wordOff*4+4)
 * @param wordOff zero-based 32-bit word offset into the array
 * @param val     value whose low 32 bits are stored little-endian
 */
static void setWord(byte[] arr, int wordOff, long val) {
    // Encode little-endian into a scratch buffer, rewind it, then bulk-copy
    // the four bytes into the target word slot.
    final ByteBuffer word = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
    word.putInt((int) val);
    word.rewind();
    word.get(arr, wordOff * 4, 4);
}

代码示例来源:origin: apache/kafka

/**
 * Fully reads {@code size} bytes from the file channel at the current
 * {@code position} and wraps them as an in-memory record batch.
 *
 * @param size        exact number of bytes the batch occupies on disk
 * @param description human-readable label used in read-failure messages
 * @return the loaded batch
 * @throws KafkaException if the read fails or is short (wraps the IOException)
 */
private RecordBatch loadBatchWithSize(int size, String description) {
    FileChannel channel = fileRecords.channel();
    try {
        ByteBuffer buffer = ByteBuffer.allocate(size);
        // Positional read; fails rather than returning a partial batch.
        Utils.readFullyOrFail(channel, buffer, position, description);
        // Rewind so the batch parser reads from byte 0.
        buffer.rewind();
        return toMemoryRecordBatch(buffer);
    } catch (IOException e) {
        throw new KafkaException("Failed to load record batch at position " + position + " from " + fileRecords, e);
    }
}

代码示例来源:origin: apache/kafka

/**
 * A STRING field whose encoded length prefix is negative must be rejected
 * by Type.STRING.read rather than silently accepted.
 */
@Test
public void testReadNegativeStringSize() {
    byte[] payload = "foo".getBytes();
    // Length prefix of -1 followed by raw bytes: structurally invalid input.
    ByteBuffer corrupt = ByteBuffer.allocate(2 + payload.length);
    corrupt.putShort((short) -1);
    corrupt.put(payload);
    corrupt.rewind();
    try {
        Type.STRING.read(corrupt);
        fail("String size not validated");
    } catch (SchemaException e) {
        // Expected exception
    }
}

代码示例来源:origin: apache/kafka

/**
 * Read a size-delimited byte buffer starting at the given offset.
 *
 * @param buffer Buffer containing the size and data
 * @param start Offset in the buffer to read from
 * @return A slice of the buffer containing only the delimited data
 *         (excluding the size), or null when the stored size is negative
 */
public static ByteBuffer sizeDelimited(ByteBuffer buffer, int start) {
    // Absolute read of the 4-byte size prefix: the caller's buffer position
    // is never disturbed.
    final int size = buffer.getInt(start);
    if (size < 0) {
        return null;
    }
    // Slice off everything after the prefix, then cap the view at `size`
    // bytes and rewind it so the data reads from position 0.
    final ByteBuffer tail = buffer.duplicate();
    tail.position(start + 4);
    final ByteBuffer delimited = tail.slice();
    delimited.limit(size);
    delimited.rewind();
    return delimited;
}

代码示例来源:origin: com.h2database/h2

/**
 * Reads one byte from the channel at the current stream position.
 *
 * Lazily allocates a reusable 1-byte scratch buffer, rewinds it for each
 * call, and does a positional channel read at {@code pos}, advancing the
 * position afterwards.
 *
 * NOTE(review): a zero-length read (len == 0, possible on some channels)
 * would return the scratch buffer's previous byte instead of retrying —
 * confirm the underlying channel never returns 0 here.
 *
 * @return the next byte as an unsigned value (0-255), or -1 at end of stream
 * @throws IOException if the channel read fails
 */
@Override
public int read() throws IOException {
    if (buffer == null) {
        buffer = ByteBuffer.allocate(1);
    }
    // Reset the scratch buffer so the read fills byte 0 again.
    buffer.rewind();
    int len = channel.read(buffer, pos++);
    if (len < 0) {
        return -1;
    }
    // Mask to return the byte as an unsigned int per InputStream contract.
    return buffer.get(0) & 0xff;
}

代码示例来源:origin: spring-projects/spring-framework

/**
 * Verifies that a FileSystemResource exposes a readable channel whose
 * content can be read into a buffer sized by contentLength().
 *
 * NOTE(review): rewind() does not change the limit, so the assertion checks
 * that the buffer's limit (== the resource's contentLength) is positive, not
 * how many bytes the single read() call actually transferred.
 */
@Test
public void testReadableChannel() throws IOException {
    Resource resource = new FileSystemResource(getClass().getResource("Resource.class").getFile());
    ReadableByteChannel channel = null;
    try {
        channel = resource.readableChannel();
        ByteBuffer buffer = ByteBuffer.allocate((int) resource.contentLength());
        channel.read(buffer);
        buffer.rewind();
        assertTrue(buffer.limit() > 0);
    }
    finally {
        // Close the channel even if an assertion or read fails.
        if (channel != null) {
            channel.close();
        }
    }
}

代码示例来源:origin: com.h2database/h2

/**
 * Stores a new entry at {@code pos}: snapshots the remaining bytes of
 * {@code src} into a direct buffer of exactly the right size and records
 * write statistics.
 *
 * @param pos key under which the entry is stored
 * @param src source buffer; its remaining bytes become the entry's content
 */
private void writeNewEntry(long pos, ByteBuffer src) {
    // Account for this write before copying the payload.
    final int length = src.remaining();
    writeCount.incrementAndGet();
    writeBytes.addAndGet(length);
    // Copy into an off-heap buffer, rewound so readers start at byte 0.
    final ByteBuffer entry = ByteBuffer.allocateDirect(length);
    entry.put(src);
    entry.rewind();
    memory.put(pos, entry);
}

代码示例来源:origin: apache/kafka

/**
 * Utils.toArray must handle direct (non-heap-backed) buffers: copying the
 * whole buffer, copying an explicit (offset, length) range, and copying from
 * a non-zero position — all without moving the buffer's position.
 */
@Test
public void toArrayDirectByteBuffer() {
    byte[] contents = {0, 1, 2, 3, 4};
    ByteBuffer direct = ByteBuffer.allocateDirect(5);
    direct.put(contents);
    direct.rewind();

    // Whole-buffer copy leaves the position untouched.
    assertArrayEquals(contents, Utils.toArray(direct));
    assertEquals(0, direct.position());

    // Explicit (offset, length) copy also leaves the position untouched.
    assertArrayEquals(new byte[] {1, 2}, Utils.toArray(direct, 1, 2));
    assertEquals(0, direct.position());

    // From a non-zero position only the remaining bytes are copied.
    direct.position(2);
    assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(direct));
    assertEquals(2, direct.position());
}

代码示例来源:origin: apache/kafka

/**
 * Reads the header of the next record batch from the file channel and
 * returns a lazy batch view over it, or null when no complete batch can
 * remain before {@code end}.
 *
 * @return the next batch, or null at (or too близко to) the end of the range
 * @throws IOException            if the header read fails
 * @throws CorruptRecordException if the stored size is impossibly small
 */
@Override
public FileChannelRecordBatch nextBatch() throws IOException {
    FileChannel channel = fileRecords.channel();
    // Not even a minimal header fits before `end` — no more batches.
    if (position >= end - HEADER_SIZE_UP_TO_MAGIC)
        return null;
    // Reuse the shared header buffer: rewind before filling and again before
    // the absolute reads below.
    logHeaderBuffer.rewind();
    Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header");
    logHeaderBuffer.rewind();
    long offset = logHeaderBuffer.getLong(OFFSET_OFFSET);
    int size = logHeaderBuffer.getInt(SIZE_OFFSET);
    // V0 has the smallest overhead, stricter checking is done later
    if (size < LegacyRecord.RECORD_OVERHEAD_V0)
        throw new CorruptRecordException(String.format("Found record size %d smaller than minimum record " +
                "overhead (%d) in file %s.", size, LegacyRecord.RECORD_OVERHEAD_V0, fileRecords.file()));
    // The full batch would run past `end` — treat it as absent (truncated).
    if (position > end - LOG_OVERHEAD - size)
        return null;
    // Magic byte selects between the legacy and the v2+ batch format.
    byte magic = logHeaderBuffer.get(MAGIC_OFFSET);
    final FileChannelRecordBatch batch;
    if (magic < RecordBatch.MAGIC_VALUE_V2)
        batch = new LegacyFileChannelRecordBatch(offset, magic, fileRecords, position, size);
    else
        batch = new DefaultFileChannelRecordBatch(offset, magic, fileRecords, position, size);
    // Advance past this batch for the next call.
    position += batch.sizeInBytes();
    return batch;
}

代码示例来源:origin: apache/incubator-druid

/**
 * Appends the lowest bit of {@code value} to the bit stream.
 *
 * Bits are packed into {@code curByte} by shifting left, so earlier bits end
 * up in higher positions. A completed byte is emitted into {@code buffer} at
 * the start of the NEXT call (when count has reached 8); when the buffer
 * fills and an output stream is attached, its backing array is written out
 * and the buffer is rewound for reuse.
 *
 * @param value only bit 0 is used
 * @throws IOException if flushing the full buffer to the output fails
 */
@Override
public void write(long value) throws IOException
{
 if (count == 8) {
  // Previous byte is complete — move it into the buffer.
  buffer.put(curByte);
  count = 0;
  if (!buffer.hasRemaining() && output != null) {
   // Buffer full: drain its backing array and start over from byte 0.
   output.write(buffer.array());
   buffer.rewind();
  }
 }
 // Shift the new bit into the least significant position.
 curByte = (byte) ((curByte << 1) | (value & 1));
 count++;
}

代码示例来源:origin: sannies/mp4parser

/**
 * Serializes this box's payload: version/flags, the 16-byte system id
 * (UUID written as two unsigned 64-bit halves, most significant first),
 * then the protection-specific header data prefixed by its 32-bit length.
 */
@Override
protected void getContent(ByteBuffer byteBuffer) {
    writeVersionAndFlags(byteBuffer);
    IsoTypeWriter.writeUInt64(byteBuffer, systemId.getMostSignificantBits());
    IsoTypeWriter.writeUInt64(byteBuffer, systemId.getLeastSignificantBits());
    ByteBuffer data = protectionSpecificHeader.getData();
    // Rewind so the length prefix (limit) and the copy both cover the whole
    // payload regardless of the buffer's current position.
    data.rewind();
    IsoTypeWriter.writeUInt32(byteBuffer, data.limit());
    byteBuffer.put(data);
}

相关文章