本文整理了Java中java.nio.ByteBuffer.arrayOffset()
方法的一些代码示例,展示了ByteBuffer.arrayOffset()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。ByteBuffer.arrayOffset()
方法的具体详情如下:
包路径:java.nio.ByteBuffer
类名称:ByteBuffer
方法名:arrayOffset
[英]Returns the offset of the byte array which this buffer is based on, if there is one.
The offset is the index of the array which corresponds to the zero position of the buffer.
[中]返回此缓冲区所基于的字节数组的偏移量(如果有)。
偏移量是对应于缓冲区零位置的数组索引。
代码示例来源:origin: google/guava
/**
 * Feeds the remaining bytes of {@code b} into this hasher.
 *
 * <p>Heap buffers are consumed in one bulk call against the backing array; buffers
 * without an accessible array are drained one byte at a time. Either way the buffer's
 * position ends up at its limit.
 */
@Override
public Hasher putBytes(ByteBuffer b) {
  if (!b.hasArray()) {
    // No accessible backing array (e.g. direct or read-only buffer): read byte by byte.
    while (b.hasRemaining()) {
      putByte(b.get());
    }
  } else {
    putBytes(b.array(), b.arrayOffset() + b.position(), b.remaining());
    b.position(b.limit());
  }
  return this;
}
代码示例来源:origin: alibaba/jstorm
/**
 * Returns a new byte array, copied from the passed ByteBuffer.
 *
 * <p>Copies bytes {@code [0, limit)} of the buffer; the position and the mark are
 * ignored and left unchanged. Unlike the previous array-only implementation, this
 * also supports direct and read-only buffers, which have no accessible backing array
 * and used to make {@code bb.array()} throw.
 *
 * @param bb A ByteBuffer
 * @return the byte array
 */
public static byte[] toBytes(ByteBuffer bb) {
    int length = bb.limit();
    byte[] result = new byte[length];
    if (bb.hasArray()) {
        // Fast path: bulk-copy straight out of the backing array.
        System.arraycopy(bb.array(), bb.arrayOffset(), result, 0, length);
    } else {
        // Direct/read-only buffers: read through a duplicate so the caller's
        // position and mark are not disturbed.
        ByteBuffer dup = bb.duplicate();
        dup.position(0);
        dup.get(result, 0, length);
    }
    return result;
}
代码示例来源:origin: netty/netty
/**
 * Create a copy of {@code value} into this instance using the encoding type of {@code charset}.
 * The copy will start at index {@code start} and copy {@code length} bytes.
 *
 * @param value   the characters to encode
 * @param charset the charset used to encode {@code value}
 * @param start   index of the first character of {@code value} to copy
 * @param length  number of characters to copy
 */
public AsciiString(CharSequence value, Charset charset, int start, int length) {
CharBuffer cbuf = CharBuffer.wrap(value, start, start + length);
CharsetEncoder encoder = CharsetUtil.encoder(charset);
// Worst-case sizing. NOTE(review): the (int) cast truncates, so a charset with a
// fractional maxBytesPerChar could under-allocate by a byte — confirm upstream.
ByteBuffer nativeBuffer = ByteBuffer.allocate((int) (encoder.maxBytesPerChar() * length));
// NOTE(review): the CoderResult is not checked, so unmappable/overflowing input may be
// silently truncated rather than reported.
encoder.encode(cbuf, nativeBuffer, true);
// allocate() always yields a heap buffer, so array()/arrayOffset() are safe here;
// position() is the number of bytes actually encoded.
final int offset = nativeBuffer.arrayOffset();
this.value = Arrays.copyOfRange(nativeBuffer.array(), offset, offset + nativeBuffer.position());
this.offset = 0;
this.length = this.value.length;
}
代码示例来源:origin: apache/incubator-druid
/**
 * Decodes a Confluent-framed Avro message: a single magic byte, a 4-byte schema registry
 * id, then the Avro-encoded payload.
 *
 * @param bytes buffer positioned at the start of the framed message
 *              (NOTE(review): must be heap/array-backed — {@code array()} throws for
 *              direct buffers; confirm with callers)
 * @return the decoded record
 * @throws ParseException if the schema lookup or Avro decoding fails
 */
@Override
public GenericRecord parse(ByteBuffer bytes)
{
  try {
    bytes.get(); // ignore first \0 byte
    int id = bytes.getInt(); // extract schema registry id
    // Bug fix: "limit() - 1 - 4" silently assumed the buffer's initial position was 0.
    // After consuming the 5 header bytes, remaining() is exactly the payload length,
    // regardless of where the message started in the buffer.
    int length = bytes.remaining();
    int offset = bytes.position() + bytes.arrayOffset();
    Schema schema = registry.getByID(id);
    DatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
    return reader.read(null, DecoderFactory.get().binaryDecoder(bytes.array(), offset, length, null));
  }
  catch (Exception e) {
    throw new ParseException(e, "Fail to decode avro message!");
  }
}
代码示例来源:origin: netty/netty
/**
 * Reads up to {@code length} bytes from {@code in} and stores them in this buffer
 * starting at {@code index}.
 *
 * @return the value returned by {@code in.read} on the fast path; otherwise the number
 *         of bytes read, or a value {@code <= 0} if nothing was read / end of stream
 * @throws IOException if reading from the stream fails
 */
@Override
public int setBytes(int index, InputStream in, int length) throws IOException {
ensureAccessible();
if (buffer.hasArray()) {
// Heap buffer: let the stream write straight into the backing array.
return in.read(buffer.array(), buffer.arrayOffset() + index, length);
} else {
// Direct buffer: stage the bytes in a reusable temporary array first.
byte[] tmp = ByteBufUtil.threadLocalTempArray(length);
int readBytes = in.read(tmp, 0, length);
if (readBytes <= 0) {
// Nothing read (0) or end of stream (-1): leave the buffer untouched.
return readBytes;
}
ByteBuffer tmpBuf = internalNioBuffer();
tmpBuf.clear().position(index);
tmpBuf.put(tmp, 0, readBytes);
return readBytes;
}
}
代码示例来源:origin: apache/kafka
/** Verifies that Checksums.updateInt produces the same CRC as feeding the int's 4 bytes directly. */
@Test
public void testUpdateInt() {
  final int value = 1000;

  Checksum viaHelper = Crc32C.create();
  Checksums.updateInt(viaHelper, value);

  final ByteBuffer encoded = ByteBuffer.allocate(4);
  encoded.putInt(value);
  Checksum viaArray = Crc32C.create();
  viaArray.update(encoded.array(), encoded.arrayOffset(), 4);

  assertEquals("Crc values should be the same", viaHelper.getValue(), viaArray.getValue());
}
代码示例来源:origin: google/guava
/** Updates this hasher with bytes from the given buffer. */
protected void update(ByteBuffer b) {
  if (!b.hasArray()) {
    // No accessible backing array: consume the buffer one byte at a time.
    while (b.hasRemaining()) {
      update(b.get());
    }
  } else {
    update(b.array(), b.arrayOffset() + b.position(), b.remaining());
    b.position(b.limit());
  }
}
代码示例来源:origin: alibaba/jstorm
/**
 * Renders the buffer's binary contents, from its array offset to its limit, as a string.
 * The position and the mark are ignored.
 *
 * @param buf a byte buffer, may be {@code null}
 * @return a string representation of the buffer's binary contents, or {@code "null"}
 */
public static String toStringBinary(ByteBuffer buf) {
  return buf == null
      ? "null"
      : toStringBinary(buf.array(), buf.arrayOffset(), buf.limit());
}
代码示例来源:origin: netty/netty
/**
 * Creates a copy of {@code length} characters of {@code value}, beginning at index
 * {@code start}, encoded into this instance with {@code charset}.
 */
public AsciiString(char[] value, Charset charset, int start, int length) {
  CharBuffer chars = CharBuffer.wrap(value, start, length);
  CharsetEncoder charsetEncoder = CharsetUtil.encoder(charset);
  // allocate() returns a heap buffer, so array()/arrayOffset() below are safe.
  ByteBuffer encoded = ByteBuffer.allocate((int) (charsetEncoder.maxBytesPerChar() * length));
  charsetEncoder.encode(chars, encoded, true);
  // position() is the number of bytes the encoder actually produced.
  final int base = encoded.arrayOffset();
  this.value = Arrays.copyOfRange(encoded.array(), base, base + encoded.position());
  this.offset = 0;
  this.length = this.value.length;
}
代码示例来源:origin: apache/kafka
/** Verifies that Checksums.updateLong produces the same CRC as feeding the long's 8 bytes directly. */
@Test
public void testUpdateLong() {
    // Bug fix: "Integer.MAX_VALUE + 1" is evaluated in int arithmetic and overflows to
    // Integer.MIN_VALUE (-2147483648). Use a long literal so the value genuinely exceeds
    // the int range, which is the point of a *long* checksum test.
    final long value = Integer.MAX_VALUE + 1L;
    final ByteBuffer buffer = ByteBuffer.allocate(8);
    buffer.putLong(value);
    Checksum crc1 = new Crc32();
    Checksum crc2 = new Crc32();
    Checksums.updateLong(crc1, value);
    crc2.update(buffer.array(), buffer.arrayOffset(), 8);
    assertEquals("Crc values should be the same", crc1.getValue(), crc2.getValue());
}
代码示例来源:origin: apache/incubator-dubbo
/**
 * Writes {@code length} bytes of this buffer, starting at {@code index}, to {@code out}.
 * A zero {@code length} is a no-op.
 *
 * @throws IOException if writing to the stream fails
 */
@Override
public void getBytes(int index, OutputStream out, int length) throws IOException {
  if (length == 0) {
    return;
  }
  if (!buffer.hasArray()) {
    // No accessible backing array: copy through a temporary array, using a
    // duplicate so this buffer's position is left untouched.
    byte[] tmp = new byte[length];
    ((ByteBuffer) buffer.duplicate().position(index)).get(tmp);
    out.write(tmp);
  } else {
    out.write(buffer.array(), index + buffer.arrayOffset(), length);
  }
}
代码示例来源:origin: apache/hbase
/**
 * Builds a KeyValue from a byte buffer holding only the key part, covering the buffer
 * from its array offset to its limit. Needed to convert hfile index members to KeyValues.
 *
 * @param bb heap buffer containing the key bytes
 * @return a key-only KeyValue
 */
public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
  final byte[] backing = bb.array();
  return createKeyValueFromKey(backing, bb.arrayOffset(), bb.limit());
}
代码示例来源:origin: redisson/redisson
/**
 * Creates a copy of {@code length} characters of {@code value}, beginning at index
 * {@code start}, encoded into this instance with {@code charset}.
 */
public AsciiString(CharSequence value, Charset charset, int start, int length) {
  CharBuffer chars = CharBuffer.wrap(value, start, start + length);
  CharsetEncoder charsetEncoder = CharsetUtil.encoder(charset);
  // allocate() returns a heap buffer, so array()/arrayOffset() below are safe.
  ByteBuffer encoded = ByteBuffer.allocate((int) (charsetEncoder.maxBytesPerChar() * length));
  charsetEncoder.encode(chars, encoded, true);
  // position() is the number of bytes the encoder actually produced.
  final int base = encoded.arrayOffset();
  this.value = Arrays.copyOfRange(encoded.array(), base, base + encoded.position());
  this.offset = 0;
  this.length = this.value.length;
}
代码示例来源:origin: apache/incubator-dubbo
/**
 * Writes {@code length} bytes of this buffer, beginning at {@code index}, to the stream.
 *
 * @throws IOException if writing to the stream fails
 */
@Override
public void getBytes(int index, OutputStream out, int length) throws IOException {
  if (length == 0) {
    return;
  }
  if (buffer.hasArray()) {
    // Heap buffer: hand the backing array to the stream directly.
    out.write(buffer.array(), index + buffer.arrayOffset(), length);
    return;
  }
  // Direct buffer: stage through a temporary array via a duplicate so this
  // buffer's own position is not disturbed.
  byte[] staged = new byte[length];
  ((ByteBuffer) buffer.duplicate().position(index)).get(staged);
  out.write(staged);
}
代码示例来源:origin: apache/hbase
/**
 * Serializes both cache blocks and compares the resulting bytes.
 *
 * @return a negative, zero, or positive value as {@code left} sorts before, equal to,
 *         or after {@code right}
 */
private static int compareCacheBlock(Cacheable left, Cacheable right,
    boolean includeNextBlockMetadata) {
  ByteBuffer leftBytes = ByteBuffer.allocate(left.getSerializedLength());
  ByteBuffer rightBytes = ByteBuffer.allocate(right.getSerializedLength());
  left.serialize(leftBytes, includeNextBlockMetadata);
  right.serialize(rightBytes, includeNextBlockMetadata);
  return Bytes.compareTo(leftBytes.array(), leftBytes.arrayOffset(), leftBytes.limit(),
      rightBytes.array(), rightBytes.arrayOffset(), rightBytes.limit());
}
代码示例来源:origin: apache/storm
/**
 * Returns an Object which is created using Kryo deserialization of given {@code byteBuffer} instance.
 *
 * <p>NOTE(review): passes {@code arrayOffset()} and {@code position()} to Kryo's
 * {@code Input.setBuffer(bytes, offset, count)}. This appears to assume the buffer was
 * filled and NOT flipped (so position() marks the end of the data — a flipped buffer
 * would yield nothing), and that the buffer is heap-backed ({@code array()} throws for
 * direct buffers). Confirm against callers and the Kryo setBuffer contract.
 *
 * @param byteBuffer byte buffer to be deserialized into an Object
 */
public Object deserialize(ByteBuffer byteBuffer) {
input.setBuffer(byteBuffer.array(), byteBuffer.arrayOffset(), byteBuffer.position());
return kryo.readClassAndObject(input);
}
}
代码示例来源:origin: apache/kafka
/**
 * Feeds {@code length} bytes of {@code buffer}, starting {@code offset} bytes past the
 * buffer's current position, into {@code checksum}. The buffer's position, limit, and
 * mark are left unchanged.
 */
public static void update(Checksum checksum, ByteBuffer buffer, int offset, int length) {
  if (!buffer.hasArray()) {
    // No accessible backing array: use absolute gets, which do not move the position.
    final int begin = buffer.position() + offset;
    final int end = begin + length;
    for (int i = begin; i < end; i++) {
      checksum.update(buffer.get(i));
    }
  } else {
    checksum.update(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, length);
  }
}
代码示例来源:origin: apache/kafka
/**
 * Add a legacy record without doing offset/magic validation (this should only be used in testing).
 *
 * @param offset The offset of the record
 * @param record The record to add
 * @throws KafkaException if writing to the append stream fails
 */
public void appendUncheckedWithOffset(long offset, LegacyRecord record) {
ensureOpenForRecordAppend();
try {
int size = record.sizeInBytes();
AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size);
// Duplicate so the record's own buffer position/limit are not disturbed.
ByteBuffer buffer = record.buffer().duplicate();
// NOTE(review): writes bytes [arrayOffset, arrayOffset + limit) — this assumes the
// record's buffer is heap-backed and its payload starts at position 0; confirm with
// LegacyRecord.buffer().
appendStream.write(buffer.array(), buffer.arrayOffset(), buffer.limit());
recordWritten(offset, record.timestamp(), size + Records.LOG_OVERHEAD);
} catch (IOException e) {
throw new KafkaException("I/O exception when writing to the append stream, closing", e);
}
}
代码示例来源:origin: apache/incubator-dubbo
/**
 * Wraps {@code buffer} in a ChannelBuffer without copying its contents.
 *
 * <p>A buffer with nothing remaining maps to {@code EMPTY_BUFFER}; a heap buffer is
 * wrapped around its backing array; any other buffer is adapted in place.
 */
public static ChannelBuffer wrappedBuffer(ByteBuffer buffer) {
  if (!buffer.hasRemaining()) {
    return EMPTY_BUFFER;
  }
  return buffer.hasArray()
      ? wrappedBuffer(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining())
      : new ByteBufferBackedChannelBuffer(buffer);
}
代码示例来源:origin: apache/hbase
/**
 * Asserts that {@code arr} equals the contents of {@code buf} from its array offset to
 * its limit (position and mark are ignored). On mismatch, the failure message is
 * {@code msgPrefix} plus both values rendered with Bytes.toStringBinary.
 */
private void assertArrayEqualsBuffer(String msgPrefix, byte[] arr,
ByteBuffer buf) {
assertEquals(msgPrefix + ": expected " + Bytes.toStringBinary(arr)
+ ", actual " + Bytes.toStringBinary(buf), 0, Bytes.compareTo(arr, 0,
arr.length, buf.array(), buf.arrayOffset(), buf.limit()));
}
内容来源于网络,如有侵权,请联系作者删除!