This article collects Java code examples for the org.apache.parquet.column.Dictionary.decodeToFloat() method and shows how Dictionary.decodeToFloat() is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Dictionary.decodeToFloat() method:
Package: org.apache.parquet.column
Class: Dictionary
Method: decodeToFloat
Description: none provided
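Before the project examples, here is a minimal sketch of calling the method directly. It assumes a Dictionary instance (dict) that has already been initialized from the dictionary page of a FLOAT column; the class name FloatDictionaryExample and the materialize helper are illustrative only and are not part of the Parquet API.

import org.apache.parquet.column.Dictionary;

public class FloatDictionaryExample {
  // Hypothetical helper: expand a FLOAT dictionary into a plain array of values.
  static float[] materialize(Dictionary dict) {
    // getMaxId() is the largest valid dictionary id, so the table holds maxId + 1 entries.
    float[] values = new float[dict.getMaxId() + 1];
    for (int id = 0; id <= dict.getMaxId(); id++) {
      // decodeToFloat(id) returns the float stored under this dictionary id; the base
      // Dictionary class throws UnsupportedOperationException unless the underlying
      // dictionary actually holds FLOAT values.
      values[id] = dict.decodeToFloat(id);
    }
    return values;
  }
}

The project examples below follow the same pattern: a dictionary id is looked up (often from a column of dictionary ids) and decodeToFloat() converts it back to the original float value.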
Code example source: apache/hive
@Override
public double readDouble(int id) {
  return dict.decodeToFloat(id);
}

Code example source: apache/hive
@Override
public float readFloat(int id) {
  return dict.decodeToFloat(id);
}

Code example source: apache/hive
@Override
public long readLong(int id) {
  return (long)(super.validatedDouble(dict.decodeToFloat(id), serdeConstants.BIGINT_TYPE_NAME));
}

Code example source: apache/hive
@Override
public byte[] readDecimal(int id) {
  return super.validatedDecimal(dict.decodeToFloat(id));
}

Code example source: apache/hive
@Override
public long readTinyInt(int id) {
  return (long)super.validatedDouble(dict.decodeToFloat(id), serdeConstants.TINYINT_TYPE_NAME);
}

Code example source: apache/hive
@Override
public byte[] readString(int id) {
  return convertToBytes(dict.decodeToFloat(id));
}

Code example source: apache/hive
@Override
public long readSmallInt(int id) {
  return (long)super.validatedDouble(dict.decodeToFloat(id), serdeConstants.SMALLINT_TYPE_NAME);
}

Code example source: apache/hive
@Override
public long readInteger(int id) {
  return (long)(super.validatedDouble(dict.decodeToFloat(id), serdeConstants.INT_TYPE_NAME));
}

Code example source: apache/hive
@Override
public byte[] readVarchar(int id) {
  String value = enforceMaxLength(
      convertToString(dict.decodeToFloat(id)));
  return convertToBytes(value);
}

Code example source: apache/hive
@Override
public byte[] readChar(int id) {
  String value = enforceMaxLength(
      convertToString(dict.decodeToFloat(id)));
  return convertToBytes(value);
}

Code example source: org.apache.spark/spark-sql
@Override
public float decodeToFloat(int id) {
  return dictionary.decodeToFloat(id);
}

Code example source: org.apache.spark/spark-sql_2.11
@Override
public float decodeToFloat(int id) {
  return dictionary.decodeToFloat(id);
}

Code example source: org.apache.spark/spark-sql_2.10
@Override
public float getFloat(int rowId) {
  if (dictionary == null) {
    return floatData[rowId];
  } else {
    return dictionary.decodeToFloat(dictionaryIds.getDictId(rowId));
  }
}

Code example source: org.apache.spark/spark-sql_2.10
@Override
public float getFloat(int rowId) {
  if (dictionary == null) {
    return Platform.getFloat(null, data + rowId * 4);
  } else {
    return dictionary.decodeToFloat(dictionaryIds.getDictId(rowId));
  }
}

Code example source: io.snappydata/snappy-spark-sql
@Override
public float getFloat(int rowId) {
  if (dictionary == null) {
    return floatData[rowId];
  } else {
    return dictionary.decodeToFloat(dictionaryIds.getDictId(rowId));
  }
}

Code example source: io.snappydata/snappy-spark-sql
@Override
public float getFloat(int rowId) {
  if (dictionary == null) {
    return Platform.getFloat(null, data + rowId * 4);
  } else {
    return dictionary.decodeToFloat(dictionaryIds.getDictId(rowId));
  }
}

Code example source: org.apache.parquet/parquet-column
@Override
public float readFloat() {
  try {
    return dictionary.decodeToFloat(decoder.readInt());
  } catch (IOException e) {
    throw new ParquetDecodingException(e);
  }
}

Code example source: org.apache.spark/spark-sql_2.10
for (int i = rowId; i < rowId + num; ++i) {
  if (!column.isNullAt(i)) {
    column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getDictId(i)));
  }
}

Code example source: org.apache.spark/spark-sql
for (int i = rowId; i < rowId + num; ++i) {
  if (!column.isNullAt(i)) {
    column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getDictId(i)));
  }
}

Code example source: org.apache.spark/spark-sql_2.11
for (int i = rowId; i < rowId + num; ++i) {
  if (!column.isNullAt(i)) {
    column.putFloat(i, dictionary.decodeToFloat(dictionaryIds.getDictId(i)));
  }
}