本文整理了Java中org.apache.hadoop.ipc.RemoteException.getClassName
方法的一些代码示例,展示了RemoteException.getClassName
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。RemoteException.getClassName
方法的具体详情如下:
包路径:org.apache.hadoop.ipc.RemoteException
类名称:RemoteException
方法名:getClassName
暂无
代码示例来源:origin: apache/hbase
/**
 * Decides whether a failed create should be retried based on the remote
 * exception's class name.
 *
 * @param e the remote exception raised by the create attempt
 * @return true if the remote class is RetryStartFileException
 */
public static boolean shouldRetryCreate(RemoteException e) {
  // RetryStartFileException is introduced in HDFS 2.6+, so here we can only
  // compare by class name rather than by type (same approach as
  // DFSOutputStream.newStreamForCreate); any other remote exception is
  // simply propagated by the caller.
  String remoteClassName = e.getClassName();
  return remoteClassName.endsWith("RetryStartFileException");
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
    boolean isIdempotentOrAtMostOnce) throws Exception {
  // Remote failures are routed to a per-exception-class policy, keyed by the
  // remote class name; anything unmapped (or non-remote) uses the default.
  RetryPolicy selected = (e instanceof RemoteException)
      ? exceptionNameToPolicyMap.get(((RemoteException) e).getClassName())
      : null;
  if (selected == null) {
    selected = defaultPolicy;
  }
  return selected.shouldRetry(e, retries, failovers, isIdempotentOrAtMostOnce);
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
/**
 * Instantiate and return the exception wrapped up by this remote exception.
 *
 * <p> Only classes that resolve on the local classpath and are subclasses of
 * <code>IOException</code> can be unwrapped (enforced by the
 * <code>asSubclass</code> call below); construction itself is delegated to
 * <code>instantiateException</code>. In every other case this instance is
 * returned unchanged.
 *
 * @return the unwrapped <code>IOException</code>, or <code>this</code> when
 *         the wrapped class cannot be resolved or instantiated
 */
public IOException unwrapRemoteException() {
try {
// Resolve the remote-side exception class by its fully qualified name.
Class<?> realClass = Class.forName(getClassName());
// asSubclass throws ClassCastException for non-IOException types, which is
// caught below along with any instantiation failure.
return instantiateException(realClass.asSubclass(IOException.class));
} catch(Exception e) {
// cannot instantiate the original exception, just return this
}
return this;
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
/**
 * If this remote exception wraps one of {@code lookupTypes}, instantiate and
 * return that exception; otherwise return this wrapper unchanged.
 *
 * @param lookupTypes the desired exception classes; may be null.
 * @return the matching unwrapped IOException, or this.
 */
public IOException unwrapRemoteException(Class<?>... lookupTypes) {
  if (lookupTypes != null) {
    String wrappedName = getClassName();
    for (Class<?> candidate : lookupTypes) {
      if (!candidate.getName().equals(wrappedName)) {
        continue;
      }
      try {
        return instantiateException(candidate.asSubclass(IOException.class));
      } catch (Exception e) {
        // cannot instantiate the matching class, just return this
        return this;
      }
    }
  }
  // null lookupTypes, or the wrapped class was not among them
  return this;
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
@Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
// A ServiceException is a transport-level wrapper; retry decisions are made
// on its cause when that cause is an Exception.
if (e instanceof ServiceException) {
//unwrap ServiceException
final Throwable cause = e.getCause();
if (cause != null && cause instanceof Exception) {
e = (Exception)cause;
}
}
//see (1) and (2) in the javadoc of this method.
final RetryPolicy p;
// Order matters: the RetriableException checks must run before the generic
// RemoteException/IOException fallbacks below.
if (e instanceof RetriableException
|| RetryPolicies.getWrappedRetriableException(e) != null) {
// RetriableException or RetriableException wrapped
p = multipleLinearRandomRetry;
} else if (e instanceof RemoteException) {
// Retry only the one remote exception class we were configured for;
// every other remote failure gets a single attempt.
final RemoteException re = (RemoteException)e;
p = re.getClassName().equals(remoteExceptionToRetry)
? multipleLinearRandomRetry : RetryPolicies.TRY_ONCE_THEN_FAIL;
} else if (e instanceof IOException || e instanceof ServiceException) {
// NOTE(review): e can still be a ServiceException here when its cause was
// null or not an Exception — the unwrap above is best-effort.
p = multipleLinearRandomRetry;
} else { //non-IOException
p = RetryPolicies.TRY_ONCE_THEN_FAIL;
}
// Trailing exception argument is logged as a throwable by SLF4J, not as a
// format placeholder.
LOG.debug("RETRY {}) policy={}", retries,
p.getClass().getSimpleName(), e);
return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
}
代码示例来源:origin: apache/hbase
if (exception instanceof RemoteException &&
HadoopIllegalArgumentException.class.getName().equals(
((RemoteException)exception).getClassName())) {
if (LOG.isDebugEnabled()) {
LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
代码示例来源:origin: apache/hbase
/**
 * Creates the hbck lock file, writes the local hostname plus an explanatory
 * note into it, and returns the still-open stream.
 *
 * @return the open lock-file stream, or null when another process already
 *         holds the file open for create
 * @throws IOException on any other filesystem failure
 */
@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fileSystem = FSUtils.getCurrentFileSystem(this.conf);
    FsPermission lockPerms = FSUtils.getFilePermissions(fileSystem, this.conf,
        HConstants.DATA_FILE_UMASK_KEY);
    Path lockDir = getTmpDir(conf);
    this.hbckLockPath = new Path(lockDir, HBCK_LOCK_FILE);
    fileSystem.mkdirs(lockDir);
    final FSDataOutputStream lockStream =
        createFileWithRetries(fileSystem, this.hbckLockPath, lockPerms);
    lockStream.writeBytes(InetAddress.getLocalHost().toString());
    // Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.
    lockStream.writeBytes(" Written by an hbase-2.x Master to block an " +
        "attempt by an hbase-1.x HBCK tool making modification to state. " +
        "See 'HBCK must match HBase server version' in the hbase refguide.");
    lockStream.flush();
    return lockStream;
  } catch (RemoteException e) {
    // A concurrent writer already holds the lock file open for create.
    if (!AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
      throw e;
    }
    return null;
  }
}
代码示例来源:origin: apache/hive
if(AlreadyBeingCreatedException.class.getName().equals(eAppend.getClassName())){
inuse = true;
} else if (UnsupportedOperationException.class.getName().equals(eAppend.getClassName())) {
if (AlreadyBeingCreatedException.class.getName().equals(eCreate.getClassName())){
代码示例来源:origin: apache/drill
if(AlreadyBeingCreatedException.class.getName().equals(eAppend.getClassName())){
inuse = true;
} else if (UnsupportedOperationException.class.getName().equals(eAppend.getClassName())) {
if (AlreadyBeingCreatedException.class.getName().equals(eCreate.getClassName())){
代码示例来源:origin: apache/phoenix
/**
 * Tests whether {@code io} is an instance of {@code exception}, either
 * directly or as the class wrapped by a {@link RemoteException}.
 */
private boolean isExceptionInstanceOf(Throwable io, Class<? extends Exception> exception) {
  if (exception.isInstance(io)) {
    return true;
  }
  // A RemoteException carries only the remote class name, so compare textually.
  return io instanceof RemoteException
      && ((RemoteException) io).getClassName().equals(exception.getName());
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
/**
 * Check if any of the responses came back with an AssertionError.
 * If so, it re-throws it, even if there was a quorum of responses.
 * This code only runs if assertions are enabled for this class,
 * otherwise it should JIT itself away.
 *
 * This is done since AssertionError indicates programmer confusion
 * rather than some kind of expected issue, and thus in the context
 * of test cases we'd like to actually fail the test case instead of
 * continuing through.
 */
private synchronized void checkAssertionErrors() {
  boolean assertsEnabled = false;
  assert assertsEnabled = true; // intentional side effect: true only under -ea
  if (!assertsEnabled) {
    return;
  }
  for (Throwable t : exceptions.values()) {
    if (t instanceof AssertionError) {
      throw (AssertionError) t;
    }
    // A remote AssertionError arrives wrapped; detect it by class name.
    boolean remoteAssertion = t instanceof RemoteException
        && ((RemoteException) t).getClassName().equals(AssertionError.class.getName());
    if (remoteAssertion) {
      throw new AssertionError(t);
    }
  }
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
return null;
} else {
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
String reClass = re.getClassName();
if (UnregisteredNodeException.class.getName().equals(reClass) ||
DisallowedDatanodeException.class.getName().equals(reClass) ||
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Marks this balancer as the running one by exclusively creating the balancer
 * id file and writing the local hostname into it.
 *
 * @return the open stream to the id file, or null when another balancer
 *         already holds the file open for create
 * @throws IOException on any other filesystem failure
 */
private OutputStream checkAndMarkRunningBalancer() throws IOException {
  try {
    DataOutputStream out = fs.create(BALANCER_ID_PATH);
    boolean written = false;
    try {
      out.writeBytes(InetAddress.getLocalHost().getHostName());
      out.flush();
      written = true;
    } finally {
      if (!written) {
        // Fix: don't leak the open stream when the hostname write fails.
        try {
          out.close();
        } catch (IOException ignored) {
          // best-effort cleanup; the original write failure propagates
        }
      }
    }
    return out;
  } catch (RemoteException e) {
    // Exclusive create lost the race: another balancer is already running.
    if (AlreadyBeingCreatedException.class.getName().equals(e.getClassName())) {
      return null;
    }
    throw e;
  }
}
代码示例来源:origin: org.apache.hama/hama-core
/** Selects the retry policy for {@code e} and delegates the decision to it. */
public boolean shouldRetry(Exception e, int retries) throws Exception {
  RetryPolicy chosen = null;
  if (e instanceof RemoteException) {
    // Remote failures map to per-exception-class policies by class name.
    chosen = exceptionNameToPolicyMap.get(((RemoteException) e).getClassName());
  }
  return (chosen != null ? chosen : defaultPolicy).shouldRetry(e, retries);
}
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Picks a retry policy for the given exception — the mapped policy for a
 * RemoteException's class name when one exists, otherwise the default —
 * and forwards the retry decision to it.
 */
public boolean shouldRetry(Exception e, int retries) throws Exception {
  RetryPolicy policy = (e instanceof RemoteException)
      ? exceptionNameToPolicyMap.get(((RemoteException) e).getClassName())
      : null;
  if (policy == null) {
    policy = defaultPolicy;
  }
  return policy.shouldRetry(e, retries);
}
}
代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core
/**
 * Delegates the retry decision to the policy registered for a remote
 * exception's class name, falling back to the default policy.
 */
public boolean shouldRetry(Exception e, int retries) throws Exception {
  RetryPolicy selected = defaultPolicy;
  if (e instanceof RemoteException) {
    RetryPolicy mapped =
        exceptionNameToPolicyMap.get(((RemoteException) e).getClassName());
    if (mapped != null) {
      selected = mapped;
    }
  }
  return selected.shouldRetry(e, retries);
}
}
代码示例来源:origin: apache/hama
/**
 * Resolves which RetryPolicy governs {@code e} and asks it whether to retry.
 * Remote exceptions are looked up by their remote class name; all other
 * exceptions (and unmapped remote classes) use the default policy.
 */
public boolean shouldRetry(Exception e, int retries) throws Exception {
  RetryPolicy governing;
  if (e instanceof RemoteException) {
    String remoteClass = ((RemoteException) e).getClassName();
    governing = exceptionNameToPolicyMap.get(remoteClass);
    if (governing == null) {
      governing = defaultPolicy;
    }
  } else {
    governing = defaultPolicy;
  }
  return governing.shouldRetry(e, retries);
}
}
代码示例来源:origin: org.jvnet.hudson.hadoop/hadoop-core
/**
 * Write the object to XML format: a RemoteException element carrying the
 * request path, the remote exception class name, and the first line of the
 * message with its "ClassName:" prefix stripped.
 *
 * @param path the request path to record on the element
 * @param doc the XML writer to append to
 * @throws IOException if the underlying writer fails
 */
public void writeXml(String path, XMLOutputter doc) throws IOException {
  doc.startTag(RemoteException.class.getSimpleName());
  doc.attribute("path", path);
  doc.attribute("class", getClassName());
  String msg = getLocalizedMessage();
  if (msg == null) {
    // Fix: Throwable.getLocalizedMessage() may return null, which previously
    // caused an NPE at indexOf below.
    msg = "";
  }
  int i = msg.indexOf("\n");
  if (i >= 0) {
    // Keep only the first line of a multi-line message.
    msg = msg.substring(0, i);
  }
  // Drop everything through the first ':' (indexOf yields -1 when there is
  // no colon, so +1 makes the substring a no-op in that case).
  doc.attribute("message", msg.substring(msg.indexOf(":") + 1).trim());
  doc.endTag();
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Invokes namenode.getBlocks expecting a remote failure, and asserts that the
 * failure's remote class name contains "HadoopIllegalArgumentException".
 *
 * NOTE(review): the datanode and size parameters are unused — the call below
 * uses a local datanode info and a hard-coded size of 2; confirm whether the
 * parameters were meant to be passed through.
 *
 * @param namenode the protocol proxy to invoke
 * @param datanode unused (see note above)
 * @param size unused (see note above)
 * @throws IOException only for non-RemoteException I/O failures
 */
private void getBlocksWithException(NamenodeProtocol namenode,
DatanodeInfo datanode, long size) throws IOException {
boolean getException = false;
try {
namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
} catch (RemoteException e) {
getException = true;
// RemoteException carries only the remote class name, so match textually.
assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
}
// Fails the test if no RemoteException was raised at all.
assertTrue(getException);
}
内容来源于网络,如有侵权,请联系作者删除!