This article collects code examples of the Java method org.apache.hadoop.mapreduce.RecordReader.nextKeyValue and shows how it is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they make useful references. Details of the RecordReader.nextKeyValue method:
Package path: org.apache.hadoop.mapreduce.RecordReader
Class name: RecordReader
Method name: nextKeyValue
Read the next key, value pair.
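Before the individual examples, here is a minimal sketch of the canonical consumption pattern most of them follow: create the reader from an InputFormat, initialize it, loop on nextKeyValue(), and read the current pair via getCurrentKey()/getCurrentValue(). The readSplit helper and the LongWritable/Text key-value types are assumptions for illustration, not code from any of the projects below.

import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical helper showing the canonical nextKeyValue() loop.
static void readSplit(InputFormat<LongWritable, Text> format, InputSplit split,
        TaskAttemptContext context) throws IOException, InterruptedException {
    RecordReader<LongWritable, Text> reader = format.createRecordReader(split, context);
    reader.initialize(split, context);
    try {
        // nextKeyValue() advances the reader and returns false at end of split.
        while (reader.nextKeyValue()) {
            LongWritable key = reader.getCurrentKey();
            Text value = reader.getCurrentValue();
            // process (key, value) here
        }
    } finally {
        reader.close(); // always release the underlying resources
    }
}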
Code example source: apache/flink

private void fetchNext() throws IOException {
    try {
        this.hasNext = this.recordReader.nextKeyValue();
    } catch (InterruptedException e) {
        throw new IOException("Could not fetch next KeyValue pair.", e);
    } finally {
        this.fetched = true;
    }
}
Code example source: apache/flink

protected void fetchNext() throws IOException {
    try {
        this.hasNext = this.recordReader.nextKeyValue();
    } catch (InterruptedException e) {
        throw new IOException("Could not fetch next KeyValue pair.", e);
    } finally {
        this.fetched = true;
    }
}
Code example source: apache/ignite

/** {@inheritDoc} */
@Override public boolean nextKeyValue() throws IOException, InterruptedException {
    if (cancelled)
        throw new HadoopTaskCancelledException("Task cancelled.");

    return reader.nextKeyValue();
}
Code example source: apache/hive

@Override
public boolean hasNext() {
    try {
        boolean retVal = curRecReader.nextKeyValue();
        if (retVal) {
            return true;
        }
        // if it's false, we need to close the recordReader.
        curRecReader.close();
        return false;
    } catch (IOException e) {
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
Code example source: apache/incubator-gobblin

/**
 * {@inheritDoc}.
 *
 * This method will throw a {@link ClassCastException} if type {@link #<D>} is not compatible
 * with type {@link #<K>} if keys are supposed to be read, or if it is not compatible with type
 * {@link #<V>} if values are supposed to be read.
 */
@Override
@SuppressWarnings("unchecked")
public D readRecord(@Deprecated D reuse) throws DataRecordException, IOException {
    try {
        if (this.recordReader.nextKeyValue()) {
            return this.readKeys ? (D) this.recordReader.getCurrentKey() : (D) this.recordReader.getCurrentValue();
        }
    } catch (InterruptedException ie) {
        throw new IOException(ie);
    }
    return null;
}
Code example source: apache/hive

@Override public boolean next(ImmutableBytesWritable rowKey, ResultWritable value) throws IOException {
    boolean next = false;
    try {
        next = recordReader.nextKeyValue();
        if (next) {
            rowKey.set(recordReader.getCurrentValue().getRow());
            value.setResult(recordReader.getCurrentValue());
        }
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
    return next;
}
};
Code example source: apache/hive

if (firstRecord) { // key & value are already read.
    firstRecord = false;
} else if (!realReader.nextKeyValue()) {
    eof = true; // strictly not required, just for consistency
    return false;
Code example source: apache/drill

if (firstRecord) { // key & value are already read.
    firstRecord = false;
} else if (!realReader.nextKeyValue()) {
    eof = true; // strictly not required, just for consistency
    return false;
Code example source: thinkaurelius/titan

@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    while (reader.nextKeyValue()) {
        // TODO titan05 integration -- the duplicate() call may be unnecessary
        final TinkerVertex maybeNullTinkerVertex =
                deser.readHadoopVertex(reader.getCurrentKey(), reader.getCurrentValue());
        if (null != maybeNullTinkerVertex) {
            vertex = new VertexWritable(maybeNullTinkerVertex);
            //vertexQuery.filterRelationsOf(vertex); // TODO reimplement vertexquery filtering
            return true;
        }
    }
    return false;
}
Code example source: elastic/elasticsearch-hadoop

public Tuple getNext() throws IOException {
    try {
        if (!reader.nextKeyValue()) {
            return null;
Code example source: apache/hive

@Override
public Tuple getNext() throws IOException {
    try {
        HCatRecord hr = (HCatRecord) (reader.nextKeyValue() ? reader.getCurrentValue() : null);
        Tuple t = PigHCatUtil.transformToTuple(hr, outputSchema);
        // TODO : we were discussing an iter interface, and also a LazyTuple
        // change this when plans for that solidifies.
        return t;
    } catch (ExecException e) {
        int errCode = 6018;
        String errMsg = "Error while reading input";
        throw new ExecException(errMsg, errCode, PigException.REMOTE_ENVIRONMENT, e);
    } catch (Exception eOther) {
        int errCode = 6018;
        String errMsg = "Error converting read value to tuple";
        throw new ExecException(errMsg, errCode, PigException.REMOTE_ENVIRONMENT, eOther);
    }
}
Code example source: apache/incubator-gobblin

@Test
public void testRecordReader() throws Exception {
    List<String> paths = Lists.newArrayList("/path1", "/path2");
    GobblinWorkUnitsInputFormat.GobblinSplit split = new GobblinWorkUnitsInputFormat.GobblinSplit(paths);
    GobblinWorkUnitsInputFormat inputFormat = new GobblinWorkUnitsInputFormat();
    RecordReader<LongWritable, Text> recordReader =
        inputFormat.createRecordReader(split, new TaskAttemptContextImpl(new Configuration(),
            new TaskAttemptID("a", 1, TaskType.MAP, 1, 1)));
    recordReader.nextKeyValue();
    Assert.assertEquals(recordReader.getCurrentKey().get(), 0);
    Assert.assertEquals(recordReader.getCurrentValue().toString(), "/path1");
    recordReader.nextKeyValue();
    Assert.assertEquals(recordReader.getCurrentKey().get(), 1);
    Assert.assertEquals(recordReader.getCurrentValue().toString(), "/path2");
    Assert.assertFalse(recordReader.nextKeyValue());
}
Code example source: apache/hive

if (realReader.nextKeyValue()) {
    firstRecord = true;
    valueObj = realReader.getCurrentValue();
Code example source: apache/drill

if (realReader.nextKeyValue()) {
    firstRecord = true;
    valueObj = realReader.getCurrentValue();
Code example source: apache/hive

RecordReader<LongWritable, BytesRefArrayWritable> rr = inputFormat.createRecordReader(splits.get(i), tac);
rr.initialize(splits.get(i), tac);
while (rr.nextKeyValue()) {
    readCount++;
Code example source: apache/avro

assertTrue("Expected at least one record", recordReader.nextKeyValue());
key = recordReader.getCurrentKey();
value = recordReader.getCurrentValue();
assertTrue("Expected to read a second record", recordReader.nextKeyValue());
key = recordReader.getCurrentKey();
value = recordReader.getCurrentValue();
assertFalse("Expected only 2 records", recordReader.nextKeyValue());
Code example source: apache/hbase

while (rr.nextKeyValue()) {
    byte[] row = rr.getCurrentKey().get();
    verifyRowFromMap(rr.getCurrentKey(), rr.getCurrentValue());
Code example source: apache/tinkerpop

@Override
public Edge next() {
    try {
        while (true) {
            if (this.edgeIterator.hasNext())
                return new HadoopEdge(this.edgeIterator.next(), this.graph);
            if (this.readers.isEmpty())
                throw FastNoSuchElementException.instance();
            if (this.readers.peek().nextKeyValue()) {
                this.edgeIterator = this.readers.peek().getCurrentValue().get().edges(Direction.OUT);
            } else {
                this.readers.remove().close();
            }
        }
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
}
Code example source: apache/tinkerpop

@Override
public Vertex next() {
    try {
        if (this.nextVertex != null) {
            final Vertex temp = this.nextVertex;
            this.nextVertex = null;
            return temp;
        } else {
            while (!this.readers.isEmpty()) {
                if (this.readers.peek().nextKeyValue())
                    return new HadoopVertex(this.readers.peek().getCurrentValue().get(), this.graph);
                else
                    this.readers.remove().close();
            }
        }
        throw FastNoSuchElementException.instance();
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
}
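All of the examples above sit on the consuming side of the API. For completeness, here is a minimal sketch of the implementing side of the nextKeyValue() contract: position the reader on a new pair and return true, or return false once the input is exhausted. LineListRecordReader is a hypothetical reader over an in-memory list of strings, not taken from any of the projects above.

import java.util.List;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

// Hypothetical RecordReader serving records from an in-memory list. It
// illustrates the contract the examples above rely on: each successful
// nextKeyValue() call repositions the reader on a fresh key/value pair.
public class LineListRecordReader extends RecordReader<LongWritable, Text> {
    private final List<String> lines;
    private int index = -1;
    private final LongWritable key = new LongWritable();
    private final Text value = new Text();

    public LineListRecordReader(List<String> lines) {
        this.lines = lines;
    }

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) {
        // Nothing to open for an in-memory source.
    }

    @Override
    public boolean nextKeyValue() {
        if (index + 1 >= lines.size()) {
            return false;                // end of input
        }
        index++;
        key.set(index);                  // reuse the same Writable instances,
        value.set(lines.get(index));     // as real readers typically do
        return true;
    }

    @Override
    public LongWritable getCurrentKey() {
        return key;
    }

    @Override
    public Text getCurrentValue() {
        return value;
    }

    @Override
    public float getProgress() {
        return lines.isEmpty() ? 1.0f : (index + 1) / (float) lines.size();
    }

    @Override
    public void close() {
        // No underlying resources to release.
    }
}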