Usage of the org.apache.spark.unsafe.Platform.putLong() method, with code examples


This article collects Java code examples of the org.apache.spark.unsafe.Platform.putLong() method, extracted from selected open-source projects published on GitHub and Maven, to show how the method is used in practice. The method's details are as follows:

Package path: org.apache.spark.unsafe.Platform
Class name: Platform
Method name: putLong

Platform.putLong overview

Platform.putLong(Object base, long offset, long value) writes an 8-byte long at the given offset, delegating to sun.misc.Unsafe.putLong. When base is an on-heap object (for example a byte[] combined with Platform.BYTE_ARRAY_OFFSET), the offset is interpreted relative to that object's data; when base is null, the offset is treated as an absolute off-heap memory address. No bounds checking is performed.
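
As a starting point, here is a minimal self-contained sketch (not taken from the projects below; the class name is invented for the demo) that round-trips values through Platform.putLong and Platform.getLong on an on-heap byte[]:

import org.apache.spark.unsafe.Platform;

public class PutLongDemo {
  public static void main(String[] args) {
    byte[] buf = new byte[16];
    // BYTE_ARRAY_OFFSET is the offset of the first data byte inside a byte[].
    long offset = Platform.BYTE_ARRAY_OFFSET;
    Platform.putLong(buf, offset, 42L);      // first 8-byte slot
    Platform.putLong(buf, offset + 8, -1L);  // second 8-byte slot
    System.out.println(Platform.getLong(buf, offset));      // 42
    System.out.println(Platform.getLong(buf, offset + 8));  // -1
  }
}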

Code examples

Code example source: org.apache.spark/spark-core_2.10 (the org.apache.spark/spark-core and spark-core_2.11 artifacts ship identical code)

/**
 * Specialization of sortAtByte() for key-prefix arrays, where each record is
 * a 16-byte (key, prefix) pair and the sort byte is taken from the prefix.
 */
private static void sortKeyPrefixArrayAtByte(
    LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex,
    boolean desc, boolean signed) {
  assert counts.length == 256;
  long[] offsets = transformCountsToOffsets(
    counts, numRecords, array.getBaseOffset() + outIndex * 8L, 16, desc, signed);
  Object baseObject = array.getBaseObject();
  long baseOffset = array.getBaseOffset() + inIndex * 8L;
  long maxOffset = baseOffset + numRecords * 16L;
  for (long offset = baseOffset; offset < maxOffset; offset += 16) {
    long key = Platform.getLong(baseObject, offset);
    long prefix = Platform.getLong(baseObject, offset + 8);
    // Bucket is the value of the byte being sorted on, as an unsigned int.
    int bucket = (int)((prefix >>> (byteIdx * 8)) & 0xff);
    // Scatter the pair to its bucket's next free slot, then advance the slot.
    long dest = offsets[bucket];
    Platform.putLong(baseObject, dest, key);
    Platform.putLong(baseObject, dest + 8, prefix);
    offsets[bucket] += 16;
  }
}

Code example source: org.apache.spark/spark-core_2.11 (the org.apache.spark/spark-core and spark-core_2.10 artifacts ship identical code)

/**
 * Performs a partial sort by copying data into destination offsets for each byte value at the
 * specified byte offset.
 *
 * @param array array to partially sort.
 * @param numRecords number of data records in the array.
 * @param counts counts for each byte value. This routine destructively modifies this array.
 * @param byteIdx the byte in a long to sort at, counting from the least significant byte.
 * @param inIndex the starting index in the array where input data is located.
 * @param outIndex the starting index where sorted output data should be written.
 * @param desc whether this is a descending (binary-order) sort.
 * @param signed whether this is a signed (two's complement) sort (only applies to last byte).
 */
private static void sortAtByte(
  LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex,
  boolean desc, boolean signed) {
 assert counts.length == 256;
 long[] offsets = transformCountsToOffsets(
  counts, numRecords, array.getBaseOffset() + outIndex * 8L, 8, desc, signed);
 Object baseObject = array.getBaseObject();
 long baseOffset = array.getBaseOffset() + inIndex * 8L;
 long maxOffset = baseOffset + numRecords * 8L;
 for (long offset = baseOffset; offset < maxOffset; offset += 8) {
  long value = Platform.getLong(baseObject, offset);
  int bucket = (int)((value >>> (byteIdx * 8)) & 0xff);
  Platform.putLong(baseObject, offsets[bucket], value);
  offsets[bucket] += 8;
 }
}

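Both methods above implement one scatter pass of an LSD (least-significant-byte-first) radix sort: sortAtByte moves 8-byte records, while sortKeyPrefixArrayAtByte moves 16-byte (key, prefix) pairs. The following plain-array sketch (hypothetical helper, no Unsafe) shows the same pass for the pair case, including the counting step that transformCountsToOffsets performs in the ascending unsigned case:

import java.util.Arrays;

public class RadixPassSketch {
  // One radix pass over (key, prefix) pairs stored flat in a long[]:
  // in[2*i] = key, in[2*i+1] = prefix. Sorts by byte byteIdx of the prefix.
  static void radixPassOnPairs(long[] in, long[] out, int numRecords, int byteIdx) {
    // Count how many prefixes fall into each of the 256 byte buckets.
    int[] counts = new int[256];
    for (int i = 0; i < numRecords; i++) {
      counts[(int) ((in[2 * i + 1] >>> (byteIdx * 8)) & 0xff)]++;
    }
    // Turn counts into starting slots (ascending, unsigned byte order).
    int[] starts = new int[256];
    for (int b = 1; b < 256; b++) starts[b] = starts[b - 1] + counts[b - 1];
    // Scatter each pair to its bucket's next free slot; the pass is stable.
    for (int i = 0; i < numRecords; i++) {
      int bucket = (int) ((in[2 * i + 1] >>> (byteIdx * 8)) & 0xff);
      int dest = starts[bucket]++;
      out[2 * dest] = in[2 * i];
      out[2 * dest + 1] = in[2 * i + 1];
    }
  }

  public static void main(String[] args) {
    long[] in = {10, 0x0201L, 20, 0x0102L, 30, 0x0201L};
    long[] out = new long[in.length];
    radixPassOnPairs(in, out, 3, 0);           // sort on the least significant byte
    System.out.println(Arrays.toString(out));  // [10, 513, 30, 513, 20, 258]
  }
}
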
Code example source: org.apache.spark/spark-sql (identical in org.apache.spark/spark-sql_2.11)

@Override
public void putLongs(int rowId, int count, long value) {
 long offset = data + 8L * rowId;
 for (int i = 0; i < count; ++i, offset += 8) {
  Platform.putLong(null, offset, value);
 }
}

Code example source: org.apache.spark/spark-sql_2.10. Note that this older version computes the offset as 8 * rowId, an int multiplication that can overflow for very large row indices; the later versions above use the long literal 8L.

@Override
public void putLongs(int rowId, int count, long value) {
 long offset = data + 8 * rowId;
 for (int i = 0; i < count; ++i, offset += 8) {
  Platform.putLong(null, offset, value);
 }
}

Code example source: org.apache.spark/spark-sql (identical in org.apache.spark/spark-sql_2.11)

@Override
public void putLong(int rowId, long value) {
 Platform.putLong(null, data + 8L * rowId, value);
}

Code example source: org.apache.spark/spark-sql_2.10 (again with the int-width 8 * rowId offset arithmetic)

@Override
public void putLong(int rowId, long value) {
 Platform.putLong(null, data + 8 * rowId, value);
}

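In these column-vector setters the base object is null, so data + 8L * rowId is an absolute off-heap address. Here is a self-contained sketch of the same pattern (Platform.allocateMemory and Platform.freeMemory are existing helpers on the same Platform class; the demo flow and class name are invented for illustration):

import org.apache.spark.unsafe.Platform;

public class OffHeapLongDemo {
  public static void main(String[] args) {
    int count = 4;
    long data = Platform.allocateMemory(8L * count);  // raw off-heap buffer
    try {
      // A null base object makes the offset an absolute address.
      for (int rowId = 0; rowId < count; rowId++) {
        Platform.putLong(null, data + 8L * rowId, rowId * 100L);
      }
      System.out.println(Platform.getLong(null, data + 8L * 3));  // 300
    } finally {
      Platform.freeMemory(data);  // off-heap memory is never garbage-collected
    }
  }
}
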
Code example source: org.apache.spark/spark-sql_2.11 (identical in org.apache.spark/spark-sql)

@Override
public void putLongsLittleEndian(int rowId, int count, byte[] src, int srcIndex) {
 if (!bigEndianPlatform) {
  Platform.copyMemory(src, srcIndex + Platform.BYTE_ARRAY_OFFSET,
    null, data + 8L * rowId, count * 8L);
 } else {
  int srcOffset = srcIndex + Platform.BYTE_ARRAY_OFFSET;
  long offset = data + 8L * rowId;
  for (int i = 0; i < count; ++i, offset += 8, srcOffset += 8) {
   Platform.putLong(null, offset,
     java.lang.Long.reverseBytes(Platform.getLong(src, srcOffset)));
  }
 }
}

Code example source: org.apache.spark/spark-sql_2.10 (with the int-width offset arithmetic noted above)

@Override
public void putLongsLittleEndian(int rowId, int count, byte[] src, int srcIndex) {
 if (!bigEndianPlatform) {
  Platform.copyMemory(src, srcIndex + Platform.BYTE_ARRAY_OFFSET,
    null, data + 8 * rowId, count * 8);
 } else {
  int srcOffset = srcIndex + Platform.BYTE_ARRAY_OFFSET;
  long offset = data + 8 * rowId;
  for (int i = 0; i < count; ++i, offset += 8, srcOffset += 8) {
   Platform.putLong(null, offset,
     java.lang.Long.reverseBytes(Platform.getLong(src, srcOffset)));
  }
 }
}
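
The little-endian branch can bulk-copy because memory on a little-endian JVM already matches the source byte order; on a big-endian JVM each value must be byte-swapped with Long.reverseBytes. A small sketch of the swap itself (plain JDK; the demo class name is invented):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class ReverseBytesDemo {
  public static void main(String[] args) {
    byte[] src = {1, 0, 0, 0, 0, 0, 0, 0};  // little-endian encoding of 1L
    // Reading these bytes in big-endian order (as a big-endian JVM would)
    // yields the wrong value; reversing the bytes recovers the right one.
    long bigEndianRead = ByteBuffer.wrap(src).order(ByteOrder.BIG_ENDIAN).getLong();
    System.out.println(bigEndianRead);                     // 72057594037927936
    System.out.println(Long.reverseBytes(bigEndianRead));  // 1
  }
}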

Code example source: org.apache.spark/spark-core (identical in spark-core_2.10 and spark-core_2.11)

// Fragment of a larger method: after advancing past the value bytes, write
// one trailing 8-byte word, taken from the backing LongArray when the entry
// is defined and zero otherwise.
offset += vlen;
Platform.putLong(base, offset, isDefined ? longArray.get(pos * 2) : 0);

Code example source: shunfei/indexr

@Override
public void setLong(int ordinal, long value) {
  assertIndexIsValid(ordinal);
  Platform.putLong(baseObject, getFieldOffset(ordinal), value);
}

Code example source: org.apache.spark/spark-catalyst_2.10

public void write(int ordinal, boolean value) {
 final long offset = getFieldOffset(ordinal);
 // Zero the whole 8-byte field slot first so the 7 unused bytes are
 // deterministic, then write the 1-byte boolean into the low byte.
 Platform.putLong(holder.buffer, offset, 0L);
 Platform.putBoolean(holder.buffer, offset, value);
}
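
Zeroing before the narrow write matters because rows written this way are compared and hashed byte by byte. A sketch of the failure mode without it (the demo class name is invented; Platform.putBoolean is the same class's 1-byte setter):

import org.apache.spark.unsafe.Platform;
import java.util.Arrays;

public class ZeroSlotDemo {
  public static void main(String[] args) {
    // Two 8-byte field slots that will hold the same boolean value `true`.
    byte[] a = new byte[8];
    byte[] b = new byte[8];
    Arrays.fill(b, (byte) 0x7f);              // b starts with garbage bytes
    Platform.putBoolean(a, Platform.BYTE_ARRAY_OFFSET, true);
    Platform.putBoolean(b, Platform.BYTE_ARRAY_OFFSET, true);
    System.out.println(Arrays.equals(a, b));  // false: stale bytes differ
    // Zeroing the whole word first (as the writer above does) fixes this.
    Platform.putLong(b, Platform.BYTE_ARRAY_OFFSET, 0L);
    Platform.putBoolean(b, Platform.BYTE_ARRAY_OFFSET, true);
    System.out.println(Arrays.equals(a, b));  // true
  }
}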
