Usage of org.apache.spark.unsafe.Platform.getLong() with code examples


This article collects Java code examples for the org.apache.spark.unsafe.Platform.getLong() method and shows how Platform.getLong() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Platform.getLong():
Package: org.apache.spark.unsafe
Class: Platform
Method: getLong

About Platform.getLong

Platform.getLong(Object object, long offset) is a thin wrapper over sun.misc.Unsafe.getLong: it reads an 8-byte long from memory. With an on-heap base object (for example a long[] combined with Platform.LONG_ARRAY_OFFSET), the offset is relative to that object; with a null base object, the offset is interpreted as an absolute off-heap address. Spark's unsafe map and sort code uses it to read record words without going through Java object references.
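
As a minimal sketch of the API (assuming the spark-unsafe artifact is on the classpath), getLong can read either from an on-heap array, addressed relative to its base offset, or from off-heap memory, addressed absolutely with a null base object:

import org.apache.spark.unsafe.Platform;

public class PlatformGetLongSketch {
  public static void main(String[] args) {
    // On-heap: offsets are relative to the array's base offset.
    long[] onHeap = {42L, 7L};
    long first = Platform.getLong(onHeap, Platform.LONG_ARRAY_OFFSET);       // 42
    long second = Platform.getLong(onHeap, Platform.LONG_ARRAY_OFFSET + 8);  // 7

    // Off-heap: with a null base object, the offset is an absolute address.
    long address = Platform.allocateMemory(8);
    try {
      Platform.putLong(null, address, 123L);
      long offHeap = Platform.getLong(null, address);                        // 123
      System.out.println(first + " " + second + " " + offHeap);
    } finally {
      Platform.freeMemory(address);
    }
  }
}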

Code examples

Code example source: org.apache.spark/spark-core_2.10, spark-core_2.11, spark-core (identical in all three artifacts)

/**
 * Find the next pair that has the same key as the current one.
 */
public boolean nextValue() {
  assert isDefined;
  long nextAddr = Platform.getLong(baseObject, valueOffset + valueLength);
  if (nextAddr == 0) {
    return false;
  } else {
    updateAddressesAndSizes(nextAddr);
    return true;
  }
}
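
This method comes from BytesToBytesMap.Location: the 8 bytes immediately after a value hold the address of the next pair with the same key, and 0 terminates the chain. Below is a rough, self-contained illustration of that pointer-walking pattern, using hand-built off-heap records rather than Spark's real map pages (the layout is a simplification, not the map's actual record format):

import org.apache.spark.unsafe.Platform;

public class ChainWalkSketch {
  public static void main(String[] args) {
    // Two fake "records", each: [value (8 bytes)][address of next record (8 bytes)].
    long rec2 = Platform.allocateMemory(16);
    Platform.putLong(null, rec2, 20L);       // value
    Platform.putLong(null, rec2 + 8, 0L);    // 0 = end of chain

    long rec1 = Platform.allocateMemory(16);
    Platform.putLong(null, rec1, 10L);       // value
    Platform.putLong(null, rec1 + 8, rec2);  // points at rec2

    // Walk the chain the same way nextValue() does.
    for (long addr = rec1; addr != 0; addr = Platform.getLong(null, addr + 8)) {
      System.out.println(Platform.getLong(null, addr)); // prints 10, then 20
    }
    Platform.freeMemory(rec1);
    Platform.freeMemory(rec2);
  }
}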

Code example source: org.apache.spark/spark-core_2.10, spark-core_2.11, spark-core (identical in all three artifacts)

/**
 * Specialization of getCounts() for key-prefix arrays. We could probably combine this with
 * getCounts with some added parameters but that seems to hurt in benchmarks.
 */
private static long[][] getKeyPrefixArrayCounts(
    LongArray array, long startIndex, long numRecords, int startByteIndex, int endByteIndex) {
  long[][] counts = new long[8][];
  long bitwiseMax = 0;
  long bitwiseMin = -1L;
  long baseOffset = array.getBaseOffset() + startIndex * 8L;
  long limit = baseOffset + numRecords * 16L;
  Object baseObject = array.getBaseObject();
  for (long offset = baseOffset; offset < limit; offset += 16) {
    long value = Platform.getLong(baseObject, offset + 8);
    bitwiseMax |= value;
    bitwiseMin &= value;
  }
  long bitsChanged = bitwiseMin ^ bitwiseMax;
  for (int i = startByteIndex; i <= endByteIndex; i++) {
    if (((bitsChanged >>> (i * 8)) & 0xff) != 0) {
      counts[i] = new long[256];
      for (long offset = baseOffset; offset < limit; offset += 16) {
        counts[i][(int)((Platform.getLong(baseObject, offset + 8) >>> (i * 8)) & 0xff)]++;
      }
    }
  }
  return counts;
}
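
The method only builds a 256-entry histogram for byte positions that actually vary: ORing all values collects every bit that is ever 1, ANDing collects every bit that is always 1, and their XOR (bitsChanged) marks exactly the bits that differ somewhere. A minimal sketch of that skip-byte trick on plain longs:

public class ChangedBytesSketch {
  public static void main(String[] args) {
    long[] data = {0x0000000012345600L, 0x00000000ABCDEF00L};
    long bitwiseMax = 0;
    long bitwiseMin = -1L;
    for (long v : data) {
      bitwiseMax |= v;  // bits set in at least one value
      bitwiseMin &= v;  // bits set in every value
    }
    long bitsChanged = bitwiseMin ^ bitwiseMax;  // bits that vary across values
    for (int i = 0; i < 8; i++) {
      boolean needsPass = ((bitsChanged >>> (i * 8)) & 0xff) != 0;
      // Here only bytes 1-3 vary; byte 0 and bytes 4-7 are identical everywhere,
      // so a radix sort can skip them entirely.
      System.out.println("byte " + i + " needs a sort pass: " + needsPass);
    }
  }
}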

Code example source: org.apache.spark/spark-core_2.10, spark-core_2.11, spark-core (identical in all three artifacts)

/**
 * Specialization of sortAtByte() for key-prefix arrays.
 */
private static void sortKeyPrefixArrayAtByte(
    LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex,
    boolean desc, boolean signed) {
  assert counts.length == 256;
  long[] offsets = transformCountsToOffsets(
    counts, numRecords, array.getBaseOffset() + outIndex * 8L, 16, desc, signed);
  Object baseObject = array.getBaseObject();
  long baseOffset = array.getBaseOffset() + inIndex * 8L;
  long maxOffset = baseOffset + numRecords * 16L;
  for (long offset = baseOffset; offset < maxOffset; offset += 16) {
    long key = Platform.getLong(baseObject, offset);
    long prefix = Platform.getLong(baseObject, offset + 8);
    int bucket = (int)((prefix >>> (byteIdx * 8)) & 0xff);
    long dest = offsets[bucket];
    Platform.putLong(baseObject, dest, key);
    Platform.putLong(baseObject, dest + 8, prefix);
    offsets[bucket] += 16;
  }
}
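
Each call is one stable counting-sort pass over 16-byte (key, prefix) pairs: offsets[bucket] holds the destination address for the next pair whose prefix byte equals bucket, advancing by 16 after each write. As a stand-alone sketch of the same pass over a plain long[] (a hypothetical helper, not Spark code):

public class SortPassSketch {
  // One stable counting-sort pass, ordering values by the byte at position byteIdx.
  static long[] sortPassAtByte(long[] in, int byteIdx) {
    int[] counts = new int[256];
    for (long v : in) {
      counts[(int) ((v >>> (byteIdx * 8)) & 0xff)]++;
    }
    // Turn counts into starting positions (ascending, unsigned byte order).
    int[] pos = new int[256];
    int acc = 0;
    for (int b = 0; b < 256; b++) {
      pos[b] = acc;
      acc += counts[b];
    }
    long[] out = new long[in.length];
    for (long v : in) {
      out[pos[(int) ((v >>> (byteIdx * 8)) & 0xff)]++] = v;
    }
    return out;
  }
}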

Code example source: org.apache.spark/spark-core_2.10, spark-core_2.11, spark-core (identical in all three artifacts)

/**
 * Performs a partial sort by copying data into destination offsets for each byte value at the
 * specified byte offset.
 *
 * @param array array to partially sort.
 * @param numRecords number of data records in the array.
 * @param counts counts for each byte value. This routine destructively modifies this array.
 * @param byteIdx the byte in a long to sort at, counting from the least significant byte.
 * @param inIndex the starting index in the array where input data is located.
 * @param outIndex the starting index where sorted output data should be written.
 * @param desc whether this is a descending (binary-order) sort.
 * @param signed whether this is a signed (two's complement) sort (only applies to last byte).
 */
private static void sortAtByte(
    LongArray array, long numRecords, long[] counts, int byteIdx, long inIndex, long outIndex,
    boolean desc, boolean signed) {
  assert counts.length == 256;
  long[] offsets = transformCountsToOffsets(
    counts, numRecords, array.getBaseOffset() + outIndex * 8L, 8, desc, signed);
  Object baseObject = array.getBaseObject();
  long baseOffset = array.getBaseOffset() + inIndex * 8L;
  long maxOffset = baseOffset + numRecords * 8L;
  for (long offset = baseOffset; offset < maxOffset; offset += 8) {
    long value = Platform.getLong(baseObject, offset);
    int bucket = (int)((value >>> (byteIdx * 8)) & 0xff);
    Platform.putLong(baseObject, offsets[bucket], value);
    offsets[bucket] += 8;
  }
}
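
Chaining such passes from the least significant changed byte upward yields a full LSD radix sort, since each pass is stable; desc and signed only change how transformCountsToOffsets lays out the destination buckets (reversed order for descending, and the sign-bit buckets placed first for the final signed byte). A hypothetical driver built on the sortPassAtByte sketch above, for unsigned ascending order:

static long[] radixSortUnsigned(long[] data) {
  long[] cur = data;
  for (int byteIdx = 0; byteIdx < 8; byteIdx++) {
    cur = SortPassSketch.sortPassAtByte(cur, byteIdx);  // stability preserves earlier passes
  }
  return cur;
}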

Code example source: org.apache.spark/spark-core_2.10, spark-core_2.11, spark-core (test fragment, identical in all three artifacts)

// Fragment of a spill test (the surrounding method and the forced-spill step
// between the two loops are elided in this excerpt; the first loop's bounds are
// inferred from the second). Each record's first long must equal its index.
for (int i = 0; i < n / 3; i++) {
  iter.hasNext();
  iter.loadNext();
  assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == i);
  lastv = i;
}
// After spilling, the iterator still exposes the last record that was read.
assertTrue(Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()) == lastv);
for (int i = n / 3; i < n; i++) {
  iter.hasNext();
  iter.loadNext();
  assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
}

Code example source: org.apache.spark/spark-core_2.10, spark-core_2.11, spark-core (identical in all three artifacts)

@Test
public void forcedSpillingWithNotReadIterator() throws Exception {
  final UnsafeExternalSorter sorter = newSorter();
  long[] record = new long[100];
  int recordSize = record.length * 8;
  int n = (int) pageSizeBytes / recordSize * 3;
  for (int i = 0; i < n; i++) {
    record[0] = (long) i;
    sorter.insertRecord(record, Platform.LONG_ARRAY_OFFSET, recordSize, 0, false);
  }
  assertTrue(sorter.getNumberOfAllocatedPages() >= 2);
  UnsafeExternalSorter.SpillableIterator iter =
    (UnsafeExternalSorter.SpillableIterator) sorter.getSortedIterator();
  assertTrue(iter.spill() > 0);
  assertEquals(0, iter.spill());
  for (int i = 0; i < n; i++) {
    iter.hasNext();
    iter.loadNext();
    assertEquals(i, Platform.getLong(iter.getBaseObject(), iter.getBaseOffset()));
  }
  sorter.cleanupResources();
  assertSpillFilesWereCleanedUp();
}
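
The assertion works because insertRecord copies the long[] verbatim into a page, so the iterator's base object and base offset point at the first byte of the current record, whose first 8 bytes are record[0]. The same pattern reads any field of the current record (a sketch, assuming iter is a loaded UnsafeSorterIterator):

Object base = iter.getBaseObject();
long off = iter.getBaseOffset();
long field0 = Platform.getLong(base, off);      // record[0]: the insertion index
long field1 = Platform.getLong(base, off + 8);  // record[1]: zero in this test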
