Usage of org.apache.flink.util.FileUtils.writeCompletely() with Code Examples


This article collects code examples of the Java method org.apache.flink.util.FileUtils.writeCompletely() to show how it is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the FileUtils.writeCompletely() method are as follows:
Package path: org.apache.flink.util.FileUtils
Class name: FileUtils
Method name: writeCompletely

About FileUtils.writeCompletely

No description is provided in the source. Judging from the examples below, the method writes the remaining bytes of a ByteBuffer to a file channel in full, continuing until the buffer has been completely drained.
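Since the source gives no description, here is a minimal, hypothetical usage sketch based on the patterns in the examples below. It assumes the signature FileUtils.writeCompletely(WritableByteChannel channel, ByteBuffer src); the class name WriteCompletelyExample and the file path example.bin are made up for illustration.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

import org.apache.flink.util.FileUtils;

public class WriteCompletelyExample {

  public static void main(String[] args) throws IOException {
    // Hypothetical output file used only for this sketch.
    Path path = Paths.get("example.bin");

    try (FileChannel channel = FileChannel.open(path,
        StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {

      // Fill a buffer, then flip it so its remaining bytes are readable.
      ByteBuffer data = ByteBuffer.allocate(64);
      data.putInt(42);
      data.put("hello".getBytes());
      data.flip();

      // writeCompletely() writes until all remaining bytes of the buffer
      // have reached the channel, whereas a single FileChannel.write()
      // call may write fewer bytes than requested.
      FileUtils.writeCompletely(channel, data);
    }
  }
}

The value of writeCompletely() over a plain FileChannel.write() call is exactly that looping behavior: a single write() may return after transferring only part of the buffer, so callers that need the whole buffer on disk would otherwise have to write the retry loop themselves.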

Code Examples

Code example source: apache/flink

private static void writeBuffer(FileChannel fileChannel, int size, int channelIndex) throws IOException {
  ByteBuffer data = ByteBuffer.allocate(size + 9);
  data.order(ByteOrder.LITTLE_ENDIAN);
  data.putInt(channelIndex);
  data.putInt(size);
  data.put((byte) 0);
  for (int i = 0; i < size; i++) {
    data.put((byte) i);
  }
  data.flip();
  FileUtils.writeCompletely(fileChannel, data);
}

Code example source: apache/flink

FileUtils.writeCompletely(currentChannel, headBuffer);
FileUtils.writeCompletely(currentChannel, contents);

Code example source: apache/flink

private static BufferOrEvent generateAndWriteEvent(FileChannel fileChannel, Random rnd, int numberOfChannels) throws IOException {
  long magicNumber = rnd.nextLong();
  byte[] data = new byte[rnd.nextInt(1000)];
  rnd.nextBytes(data);
  TestEvent evt = new TestEvent(magicNumber, data);
  int channelIndex = rnd.nextInt(numberOfChannels);
  ByteBuffer serializedEvent = EventSerializer.toSerializedEvent(evt);
  ByteBuffer header = ByteBuffer.allocate(9);
  header.order(ByteOrder.LITTLE_ENDIAN);
  header.putInt(channelIndex);
  header.putInt(serializedEvent.remaining());
  header.put((byte) 1);
  header.flip();
  FileUtils.writeCompletely(fileChannel, header);
  FileUtils.writeCompletely(fileChannel, serializedEvent);
  return new BufferOrEvent(evt, channelIndex);
}

Code example source: apache/flink

@Test
public void testCleanup() {
  try {
    ByteBuffer data = ByteBuffer.allocate(157);
    data.order(ByteOrder.LITTLE_ENDIAN);
    FileUtils.writeCompletely(fileChannel, data);
    fileChannel.position(54);
    SpilledBufferOrEventSequence seq = new SpilledBufferOrEventSequence(tempFile, fileChannel, buffer, pageSize);
    seq.open();
    seq.cleanup();
    assertFalse(fileChannel.isOpen());
    assertFalse(tempFile.exists());
  }
  catch (Exception e) {
    e.printStackTrace();
    fail(e.getMessage());
  }
}

Code example source: apache/flink

@Test
public void testIncompleteHeaderOnFirstElement() {
  try {
    ByteBuffer buf = ByteBuffer.allocate(7);
    buf.order(ByteOrder.LITTLE_ENDIAN);
    FileUtils.writeCompletely(fileChannel, buf);
    fileChannel.position(0);
    SpilledBufferOrEventSequence seq = new SpilledBufferOrEventSequence(tempFile, fileChannel, buffer, pageSize);
    seq.open();
    try {
      seq.getNext();
      fail("should fail with an exception");
    }
    catch (IOException e) {
      // expected
    }
  }
  catch (Exception e) {
    e.printStackTrace();
    fail(e.getMessage());
  }
}

Code example source: apache/flink

data.position(0);
data.limit(312);
FileUtils.writeCompletely(fileChannel, data);
fileChannel.position(0L);

Code example source: com.alibaba.blink/flink-runtime

public void flush() throws IOException {
  if (!closed) {
    // now flush
    if (writeBuffer.position() > 0) {
      writeBuffer.flip();
      FileUtils.writeCompletely(fileChannel, writeBuffer);
      writeBuffer.clear();
    }
  } else {
    throw new IOException("closed");
  }
}

Code example source: org.apache.flink/flink-runtime_2.11

@Override
public void write() throws IOException {
  ByteBuffer nioBufferReadable = buffer.getNioBufferReadable();
  final ByteBuffer header = ByteBuffer.allocateDirect(8);
  header.putInt(buffer.isBuffer() ? 1 : 0);
  header.putInt(nioBufferReadable.remaining());
  header.flip();
  FileUtils.writeCompletely(channel.fileChannel, header);
  FileUtils.writeCompletely(channel.fileChannel, nioBufferReadable);
}

Code example source: org.apache.flink/flink-runtime

@Override
public void write() throws IOException {
  ByteBuffer nioBufferReadable = buffer.getNioBufferReadable();
  final ByteBuffer header = ByteBuffer.allocateDirect(8);
  header.putInt(buffer.isBuffer() ? 1 : 0);
  header.putInt(nioBufferReadable.remaining());
  header.flip();
  FileUtils.writeCompletely(channel.fileChannel, header);
  FileUtils.writeCompletely(channel.fileChannel, nioBufferReadable);
}

Code example source: org.apache.flink/flink-runtime_2.11

@Override
public void write() throws IOException {
  try {
    FileUtils.writeCompletely(this.channel.fileChannel, this.segment.wrap(0, this.segment.size()));
  }
  catch (NullPointerException npex) {
    throw new IOException("Memory segment has been released.");
  }
}

Code example source: org.apache.flink/flink-runtime

@Override
public void write() throws IOException {
  try {
    FileUtils.writeCompletely(this.channel.fileChannel, this.segment.wrap(0, this.segment.size()));
  }
  catch (NullPointerException npex) {
    throw new IOException("Memory segment has been released.");
  }
}

Code example source: com.alibaba.blink/flink-runtime

@Override
public void write() throws IOException {
  ByteBuffer nioBufferReadable = buffer.getNioBufferReadable();
  final ByteBuffer header = ByteBuffer.allocate(8);
  header.putInt(buffer.isBuffer() ? 1 : 0);
  header.putInt(nioBufferReadable.remaining());
  header.flip();
  if (bufferSize == -1) {
    FileUtils.writeCompletely(channel.fileChannel, header);
    FileUtils.writeCompletely(channel.fileChannel, nioBufferReadable);
  } else {
    NioBufferedFileOutputStream out = channel.getBufferedOutputStream(bufferSize);
    out.write(header);
    out.write(nioBufferReadable);
  }
}

Code example source: com.alibaba.blink/flink-runtime

@Override
public void write() throws IOException {
  try {
    if (bufferSize == -1) {
      FileUtils.writeCompletely(channel.fileChannel, segment.wrap(0, segment.size()));
    } else {
      channel.getBufferedOutputStream(bufferSize).write(segment, 0, segment.size());
    }
  } catch (NullPointerException npex) {
    throw new IOException("Memory segment has been released.");
  }
}

Code example source: org.apache.flink/flink-runtime

FileUtils.writeCompletely(this.spillingChannel, toWrite);
} else {
  segment.get(segmentPosition, buffer, this.accumulatedRecordBytes, toCopy);

Code example source: org.apache.flink/flink-streaming-java_2.11

FileUtils.writeCompletely(currentChannel, headBuffer);
FileUtils.writeCompletely(currentChannel, contents);

Code example source: org.apache.flink/flink-streaming-java

FileUtils.writeCompletely(currentChannel, headBuffer);
FileUtils.writeCompletely(currentChannel, contents);

Code example source: com.alibaba.blink/flink-runtime

@Override
public void write() throws IOException {
  checkArgument(buffer.isBuffer(), "Cannot write event buffer in StreamWriteRequest.");
  if (bufferSize == -1) {
    FileUtils.writeCompletely(channel.fileChannel, buffer.getNioBufferReadable());
  } else {
    NioBufferedFileOutputStream out = channel.getBufferedOutputStream(bufferSize);
    out.write(buffer.getNioBufferReadable());
  }
}

Code example source: com.alibaba.blink/flink-runtime

protected void initializeWithPartialRecord(NonSpanningWrapper partial, int nextRecordLength) throws IOException {
  // set the length and copy what is available to the buffer
  this.recordLength = nextRecordLength;
  final int numBytesChunk = partial.remaining();
  if (nextRecordLength > THRESHOLD_FOR_SPILLING) {
    // create a spilling channel and put the data there
    this.spillingChannel = createSpillingChannel();
    ByteBuffer toWrite = partial.segment.wrap(partial.position, numBytesChunk);
    FileUtils.writeCompletely(spillingChannel, toWrite);
  }
  else {
    // collect in memory
    ensureBufferCapacity(numBytesChunk);
    partial.segment.get(partial.position, buffer, 0, numBytesChunk);
  }
  this.accumulatedRecordBytes = numBytesChunk;
}

Code example source: org.apache.flink/flink-runtime_2.11

private void initializeWithPartialRecord(NonSpanningWrapper partial, int nextRecordLength) throws IOException {
  // set the length and copy what is available to the buffer
  this.recordLength = nextRecordLength;
  final int numBytesChunk = partial.remaining();
  if (nextRecordLength > THRESHOLD_FOR_SPILLING) {
    // create a spilling channel and put the data there
    this.spillingChannel = createSpillingChannel();
    ByteBuffer toWrite = partial.segment.wrap(partial.position, numBytesChunk);
    FileUtils.writeCompletely(this.spillingChannel, toWrite);
  }
  else {
    // collect in memory
    ensureBufferCapacity(nextRecordLength);
    partial.segment.get(partial.position, buffer, 0, numBytesChunk);
  }
  this.accumulatedRecordBytes = numBytesChunk;
}

Code example source: org.apache.flink/flink-runtime

private void initializeWithPartialRecord(NonSpanningWrapper partial, int nextRecordLength) throws IOException {
  // set the length and copy what is available to the buffer
  this.recordLength = nextRecordLength;
  final int numBytesChunk = partial.remaining();
  if (nextRecordLength > THRESHOLD_FOR_SPILLING) {
    // create a spilling channel and put the data there
    this.spillingChannel = createSpillingChannel();
    ByteBuffer toWrite = partial.segment.wrap(partial.position, numBytesChunk);
    FileUtils.writeCompletely(this.spillingChannel, toWrite);
  }
  else {
    // collect in memory
    ensureBufferCapacity(nextRecordLength);
    partial.segment.get(partial.position, buffer, 0, numBytesChunk);
  }
  this.accumulatedRecordBytes = numBytesChunk;
}
