This article collects code examples of the Java method org.apache.spark.unsafe.Platform.throwException() and shows how it is used in practice. The examples were extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as a useful reference.
Details of Platform.throwException():
Package: org.apache.spark.unsafe
Class: Platform
Method: throwException
Description: Raises an exception bypassing compiler checks for checked exceptions.
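Before the project examples, here is a minimal, hypothetical sketch (not taken from any of the projects below; the LineIterator class and its fields are invented for illustration) of the typical situation: an override of java.util.Iterator cannot declare the checked IOException raised by the underlying I/O, so Platform.throwException() rethrows it unwrapped rather than wrapping it in a RuntimeException.

import java.io.BufferedReader;
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;

import org.apache.spark.unsafe.Platform;

// Hypothetical illustration, not Spark code: Iterator.next()/hasNext() declare no
// checked exceptions, so an IOException from the underlying reader cannot be thrown
// directly. Platform.throwException(e) rethrows the original exception as-is,
// bypassing the compiler's checked-exception analysis.
public class LineIterator implements Iterator<String> {

  private final BufferedReader reader;
  private String nextLine;

  public LineIterator(BufferedReader reader) {
    this.reader = reader;
    advance();
  }

  private void advance() {
    try {
      nextLine = reader.readLine();
    } catch (IOException e) {
      // The caller still observes the original IOException, even though neither
      // this method nor Iterator.next() declares it.
      Platform.throwException(e);
    }
  }

  @Override
  public boolean hasNext() {
    return nextLine != null;
  }

  @Override
  public String next() {
    if (nextLine == null) {
      throw new NoSuchElementException();
    }
    String current = nextLine;
    advance();
    return current;
  }
}

The pattern is the same in all of the examples that follow: catch the checked exception at the boundary whose signature cannot declare it, then rethrow it via Platform.throwException().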
Code example from: org.apache.spark/spark-core_2.11
@Override
public void close() {
  // Need to implement this because DiskObjectWriter uses it to close the compression stream
  try {
    s.close();
  } catch (IOException e) {
    Platform.throwException(e);
  }
}
};
Code example from: org.apache.spark/spark-core_2.11
@Override
public void flush() {
  // Need to implement this because DiskObjectWriter uses it to flush the compression stream
  try {
    s.flush();
  } catch (IOException e) {
    Platform.throwException(e);
  }
}
Code example from: org.apache.spark/spark-core
@Override
public void close() {
  // Need to implement this because DiskObjectWriter uses it to close the compression stream
  try {
    s.close();
  } catch (IOException e) {
    Platform.throwException(e);
  }
}
};
Code example from: org.apache.spark/spark-core_2.10
@Override
public void flush() {
  // Need to implement this because DiskObjectWriter uses it to flush the compression stream
  try {
    s.flush();
  } catch (IOException e) {
    Platform.throwException(e);
  }
}
Code example from: org.apache.spark/spark-core_2.10
@Override
public void close() {
  // Need to implement this because DiskObjectWriter uses it to close the compression stream
  try {
    s.close();
  } catch (IOException e) {
    Platform.throwException(e);
  }
}
};
Code example from: org.apache.spark/spark-core
@Override
public void flush() {
  // Need to implement this because DiskObjectWriter uses it to flush the compression stream
  try {
    s.flush();
  } catch (IOException e) {
    Platform.throwException(e);
  }
}
Code example from: org.apache.spark/spark-core_2.10
} catch (IOException e) {
  Platform.throwException(e);
Code example from: org.apache.spark/spark-core
private void advanceToNextPage() {
  synchronized (this) {
    int nextIdx = dataPages.indexOf(currentPage) + 1;
    if (destructive && currentPage != null) {
      dataPages.remove(currentPage);
      freePage(currentPage);
      nextIdx--;
    }
    if (dataPages.size() > nextIdx) {
      currentPage = dataPages.get(nextIdx);
      pageBaseObject = currentPage.getBaseObject();
      offsetInPage = currentPage.getBaseOffset();
      recordsInPage = UnsafeAlignedOffset.getSize(pageBaseObject, offsetInPage);
      offsetInPage += UnsafeAlignedOffset.getUaoSize();
    } else {
      currentPage = null;
      if (reader != null) {
        handleFailedDelete();
      }
      try {
        Closeables.close(reader, /* swallowIOException = */ false);
        reader = spillWriters.getFirst().getReader(serializerManager);
        recordsInPage = -1;
      } catch (IOException e) {
        // Scala iterator does not handle exception
        Platform.throwException(e);
      }
    }
  }
}
Code example from: org.apache.spark/spark-core_2.11
private void advanceToNextPage() {
  synchronized (this) {
    int nextIdx = dataPages.indexOf(currentPage) + 1;
    if (destructive && currentPage != null) {
      dataPages.remove(currentPage);
      freePage(currentPage);
      nextIdx--;
    }
    if (dataPages.size() > nextIdx) {
      currentPage = dataPages.get(nextIdx);
      pageBaseObject = currentPage.getBaseObject();
      offsetInPage = currentPage.getBaseOffset();
      recordsInPage = UnsafeAlignedOffset.getSize(pageBaseObject, offsetInPage);
      offsetInPage += UnsafeAlignedOffset.getUaoSize();
    } else {
      currentPage = null;
      if (reader != null) {
        handleFailedDelete();
      }
      try {
        Closeables.close(reader, /* swallowIOException = */ false);
        reader = spillWriters.getFirst().getReader(serializerManager);
        recordsInPage = -1;
      } catch (IOException e) {
        // Scala iterator does not handle exception
        Platform.throwException(e);
      }
    }
  }
}
Code example from: org.apache.spark/spark-core
Platform.throwException(e);
Code example from: org.apache.spark/spark-core_2.11
Platform.throwException(e);
Code example from: org.apache.spark/spark-core_2.10
Platform.throwException(e);
Code example from: org.apache.spark/spark-unsafe
/**
 * Uses internal JDK APIs to allocate a DirectByteBuffer while ignoring the JVM's
 * MaxDirectMemorySize limit (the default limit is too low and we do not want to require users
 * to increase it).
 */
@SuppressWarnings("unchecked")
public static ByteBuffer allocateDirectBuffer(int size) {
  try {
    Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
    Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
    constructor.setAccessible(true);
    Field cleanerField = cls.getDeclaredField("cleaner");
    cleanerField.setAccessible(true);
    long memory = allocateMemory(size);
    ByteBuffer buffer = (ByteBuffer) constructor.newInstance(memory, size);
    Cleaner cleaner = Cleaner.create(buffer, () -> freeMemory(memory));
    cleanerField.set(buffer, cleaner);
    return buffer;
  } catch (Exception e) {
    throwException(e);
  }
  throw new IllegalStateException("unreachable");
}
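One observation about the pattern above (not stated in the original article): throwException() always throws, but it is declared to return void, so the compiler cannot see that control never reaches the end of the method. The trailing throw new IllegalStateException("unreachable") exists only to satisfy the compiler's flow analysis; the next() examples later in this article end with a throw new RuntimeException(...) for the same reason.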
Code example from: io.snappydata/snappy-spark-unsafe
/**
 * Uses internal JDK APIs to allocate a DirectByteBuffer while ignoring the JVM's
 * MaxDirectMemorySize limit (the default limit is too low and we do not want to require users
 * to increase it).
 */
@SuppressWarnings("unchecked")
public static ByteBuffer allocateDirectBuffer(int size) {
  try {
    Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
    Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
    constructor.setAccessible(true);
    Field cleanerField = cls.getDeclaredField("cleaner");
    cleanerField.setAccessible(true);
    final long memory = allocateMemory(size);
    ByteBuffer buffer = (ByteBuffer) constructor.newInstance(memory, size);
    Cleaner cleaner = Cleaner.create(buffer, new Runnable() {
      @Override
      public void run() {
        freeMemory(memory);
      }
    });
    cleanerField.set(buffer, cleaner);
    return buffer;
  } catch (Exception e) {
    throwException(e);
  }
  throw new IllegalStateException("unreachable");
}
Code example from: org.apache.spark/spark-unsafe_2.11
/**
 * Uses internal JDK APIs to allocate a DirectByteBuffer while ignoring the JVM's
 * MaxDirectMemorySize limit (the default limit is too low and we do not want to require users
 * to increase it).
 */
@SuppressWarnings("unchecked")
public static ByteBuffer allocateDirectBuffer(int size) {
  try {
    Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
    Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
    constructor.setAccessible(true);
    Field cleanerField = cls.getDeclaredField("cleaner");
    cleanerField.setAccessible(true);
    long memory = allocateMemory(size);
    ByteBuffer buffer = (ByteBuffer) constructor.newInstance(memory, size);
    Cleaner cleaner = Cleaner.create(buffer, () -> freeMemory(memory));
    cleanerField.set(buffer, cleaner);
    return buffer;
  } catch (Exception e) {
    throwException(e);
  }
  throw new IllegalStateException("unreachable");
}
Code example from: org.apache.spark/spark-unsafe_2.10
/**
 * Uses internal JDK APIs to allocate a DirectByteBuffer while ignoring the JVM's
 * MaxDirectMemorySize limit (the default limit is too low and we do not want to require users
 * to increase it).
 */
@SuppressWarnings("unchecked")
public static ByteBuffer allocateDirectBuffer(int size) {
  try {
    Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
    Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
    constructor.setAccessible(true);
    Field cleanerField = cls.getDeclaredField("cleaner");
    cleanerField.setAccessible(true);
    long memory = allocateMemory(size);
    ByteBuffer buffer = (ByteBuffer) constructor.newInstance(memory, size);
    Cleaner cleaner = Cleaner.create(buffer, () -> freeMemory(memory));
    cleanerField.set(buffer, cleaner);
    return buffer;
  } catch (Exception e) {
    throwException(e);
  }
  throw new IllegalStateException("unreachable");
}
Code example from: shunfei/indexr
@Override
public UnsafeRow next() {
  try {
    sortedIterator.loadNext();
    row.pointTo(
      sortedIterator.getBaseObject(),
      sortedIterator.getBaseOffset(),
      sortedIterator.getRecordLength());
    if (!hasNext()) {
      UnsafeRow copy = row.copy(); // so that we don't have dangling pointers to freed page
      row = null; // so that we don't keep references to the base object
      cleanupResources();
      return copy;
    } else {
      return row;
    }
  } catch (IOException e) {
    cleanupResources();
    // Scala iterators don't declare any checked exceptions, so we need to use this hack
    // to re-throw the exception:
    Platform.throwException(e);
  }
  throw new RuntimeException("Exception should have been re-thrown in next()");
}
Code example from: org.apache.spark/spark-catalyst_2.10
@Override
public UnsafeRow next() {
  try {
    sortedIterator.loadNext();
    row.pointTo(
      sortedIterator.getBaseObject(),
      sortedIterator.getBaseOffset(),
      sortedIterator.getRecordLength());
    if (!hasNext()) {
      UnsafeRow copy = row.copy(); // so that we don't have dangling pointers to freed page
      row = null; // so that we don't keep references to the base object
      cleanupResources();
      return copy;
    } else {
      return row;
    }
  } catch (IOException e) {
    cleanupResources();
    // Scala iterators don't declare any checked exceptions, so we need to use this hack
    // to re-throw the exception:
    Platform.throwException(e);
  }
  throw new RuntimeException("Exception should have been re-thrown in next()");
}
};
Code example from: org.apache.spark/spark-catalyst_2.11
@Override
public UnsafeRow next() {
  try {
    sortedIterator.loadNext();
    row.pointTo(
      sortedIterator.getBaseObject(),
      sortedIterator.getBaseOffset(),
      sortedIterator.getRecordLength());
    if (!hasNext()) {
      UnsafeRow copy = row.copy(); // so that we don't have dangling pointers to freed page
      row = null; // so that we don't keep references to the base object
      cleanupResources();
      return copy;
    } else {
      return row;
    }
  } catch (IOException e) {
    cleanupResources();
    // Scala iterators don't declare any checked exceptions, so we need to use this hack
    // to re-throw the exception:
    Platform.throwException(e);
  }
  throw new RuntimeException("Exception should have been re-thrown in next()");
}
};
Code example from: org.apache.spark/spark-catalyst
@Override
public UnsafeRow next() {
  try {
    sortedIterator.loadNext();
    row.pointTo(
      sortedIterator.getBaseObject(),
      sortedIterator.getBaseOffset(),
      sortedIterator.getRecordLength());
    if (!hasNext()) {
      UnsafeRow copy = row.copy(); // so that we don't have dangling pointers to freed page
      row = null; // so that we don't keep references to the base object
      cleanupResources();
      return copy;
    } else {
      return row;
    }
  } catch (IOException e) {
    cleanupResources();
    // Scala iterators don't declare any checked exceptions, so we need to use this hack
    // to re-throw the exception:
    Platform.throwException(e);
  }
  throw new RuntimeException("Exception should have been re-thrown in next()");
}
};