本文整理了Java中org.apache.hadoop.ipc.RemoteException
类的一些代码示例,展示了RemoteException
类的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。RemoteException
类的具体详情如下:
包路径:org.apache.hadoop.ipc.RemoteException
类名称:RemoteException
暂无
代码示例来源:origin: apache/hbase
/**
 * Verifies that the given file system is reachable; throws if it is not.
 * Non-DFS implementations are assumed to be always available.
 *
 * @param fs filesystem to probe
 * @throws IOException if the file system cannot be reached
 */
public static void checkFileSystemAvailable(final FileSystem fs)
throws IOException {
  // Only a DistributedFileSystem is probed; anything else is trusted.
  if (!(fs instanceof DistributedFileSystem)) {
    return;
  }
  final DistributedFileSystem dfs = (DistributedFileSystem) fs;
  IOException cause = null;
  try {
    // A successful existence check on the root proves the cluster answers.
    if (dfs.exists(new Path("/"))) {
      return;
    }
  } catch (IOException probeError) {
    cause = (probeError instanceof RemoteException)
        ? ((RemoteException) probeError).unwrapRemoteException()
        : probeError;
  }
  // The file system is unusable; release it before reporting the failure.
  try {
    fs.close();
  } catch (Exception closeError) {
    LOG.error("file system close failed: ", closeError);
  }
  IOException unavailable = new IOException("File system is not available");
  unavailable.initCause(cause);
  throw unavailable;
}
代码示例来源:origin: apache/hive
@Override
public void run() {
try {
Path rootHDFSDirPath = new Path(rootHDFSDir);
FileSystem fs = FileSystem.get(rootHDFSDirPath.toUri(), conf);
FileStatus[] userHDFSDirList = fs.listStatus(rootHDFSDirPath);
FileStatus[] scratchDirList = fs.listStatus(userHDFSDir.getPath());
for (FileStatus scratchDir : scratchDirList) {
Path lockFilePath = new Path(scratchDir.getPath(), SessionState.LOCK_FILE_NAME);
if (!fs.exists(lockFilePath)) {
String message = "Skipping " + scratchDir.getPath() + " since it does not contain " +
if(AlreadyBeingCreatedException.class.getName().equals(eAppend.getClassName())){
inuse = true;
} else if (UnsupportedOperationException.class.getName().equals(eAppend.getClassName())) {
IOUtils.closeStream(fs.create(lockFilePath, false));
} catch (RemoteException eCreate) {
if (AlreadyBeingCreatedException.class.getName().equals(eCreate.getClassName())){
consoleMessage("Unexpected error:" + eCreate.getMessage());
consoleMessage("Unexpected error:" + eAppend.getMessage());
代码示例来源:origin: org.apache.hadoop/hadoop-common
/**
 * Builds a RemoteException from a set of XML attributes.
 *
 * @param attrs attribute set carrying "class" and "message" values; may not be null
 * @return a RemoteException populated from the given attributes
 */
public static RemoteException valueOf(Attributes attrs) {
  String remoteClass = attrs.getValue("class");
  String remoteMessage = attrs.getValue("message");
  return new RemoteException(remoteClass, remoteMessage);
}
代码示例来源:origin: org.apache.hadoop/hadoop-common
/**
 * Instantiate and return the exception wrapped up by this remote exception.
 *
 * <p>This unwraps any {@code Throwable} that has a constructor taking a
 * {@code String} as a parameter. If the remote class cannot be loaded,
 * is not an {@code IOException}, or cannot be instantiated, this instance
 * itself is returned instead.
 *
 * @return the unwrapped exception, or {@code this} on failure
 */
public IOException unwrapRemoteException() {
  Class<? extends IOException> wrappedClass;
  try {
    wrappedClass = Class.forName(getClassName()).asSubclass(IOException.class);
  } catch (Exception e) {
    // Remote class unknown locally or not an IOException: keep the wrapper.
    return this;
  }
  try {
    return instantiateException(wrappedClass);
  } catch (Exception e) {
    // No usable constructor: keep the wrapper.
    return this;
  }
}
代码示例来源:origin: apache/storm
/**
 * Verifies HDFS create(..., overwrite=false) semantics for a path that
 * already exists, in three states: still open, closed, and deleted.
 */
@Test
public void testDoubleCreateSemantics() throws Exception {
//1 create an already existing open file w/o override flag
Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
try (FSDataOutputStream os1 = fs.create(file1, false)) {
fs.create(file1, false); // should fail
fail("Create did not throw an exception");
} catch (RemoteException e) {
// while the file is still open for writing, the NameNode reports
// AlreadyBeingCreatedException wrapped inside a RemoteException
Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
}
//2 close file and retry creation
try {
fs.create(file1, false); // should still fail
fail("Create did not throw an exception");
} catch (FileAlreadyExistsException e) {
// expecting this exception: the path exists, just no longer open
}
//3 delete file and retry creation
fs.delete(file1, false);
try (FSDataOutputStream os2 = fs.create(file1, false)) {
// creation succeeds once the old file is gone
Assert.assertNotNull(os2);
}
}
代码示例来源:origin: apache/storm
/**
 * Verifies HDFS append semantics: appending to a file that is still open
 * for writing fails with AlreadyBeingCreatedException, while appending to
 * a closed file succeeds.
 */
@Test
public void testAppendSemantics() throws Exception {
//1 try to append to an open file
Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
try (FSDataOutputStream os1 = fs.create(file1, false)) {
fs.append(file1); // should fail
fail("Append did not throw an exception");
} catch (RemoteException e) {
// expecting AlreadyBeingCreatedException inside RemoteException
Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
}
//2 try to append to a closed file
try (FSDataOutputStream os2 = fs.append(file1)) {
// Hamcrest has no notNull() matcher (it is notNullValue()); use the same
// assertion style as the sibling create test for the non-null check.
Assert.assertNotNull(os2);
}
}
代码示例来源:origin: apache/hbase
/**
 * Verifies overwrite-create semantics: a second create with overwrite=true
 * revokes the first writer's lease, so the file ends up containing only the
 * second writer's data.
 */
@Test
public void testOverwrite() throws IOException {
Path file = new Path("/" + name.getMethodName());
FSDataOutputStream out1 = FS.create(file);
FSDataOutputStream out2 = FS.create(file, true);
out1.write(2);
out2.write(1);
try {
out1.close();
// a successful close is also OK for us so no assertion here, we just need to confirm that the
// data in the file are correct.
} catch (FileNotFoundException fnfe) {
// hadoop3 throws one of these.
} catch (RemoteException e) {
// expected
assertThat(e.unwrapRemoteException(), instanceOf(LeaseExpiredException.class));
}
out2.close();
// only the second writer's byte must be visible
try (FSDataInputStream in = FS.open(file)) {
assertEquals(1, in.read());
assertEquals(-1, in.read());
}
}
}
代码示例来源:origin: apache/hbase
/**
 * Verifies that create with overwrite=false fails while another writer
 * still holds the same path open (AlreadyBeingCreatedException).
 */
@Test
public void testNotOverwrite() throws IOException {
Path file = new Path("/" + name.getMethodName());
try (FSDataOutputStream out1 = FS.create(file)) {
try {
FS.create(file, false);
fail("Should fail as there is a file with the same name which is being written");
} catch (RemoteException e) {
// expected: the open-for-write state surfaces as AlreadyBeingCreatedException
assertThat(e.unwrapRemoteException(), instanceOf(AlreadyBeingCreatedException.class));
}
}
}
代码示例来源:origin: org.apache.hadoop/hadoop-hdfs
/**
 * Disallow snapshot on a directory.
 * Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
 * @param argv List of command line parameters; argv[1] is the directory path.
 * @exception IOException on failure; a SnapshotException is rethrown as a
 *            RemoteException carrying the original class name and message
 */
public void disallowSnapshot(String[] argv) throws IOException {
Path p = new Path(argv[1]);
final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), getConf());
try {
dfs.disallowSnapshot(p);
} catch (SnapshotException e) {
// preserve the snapshot-specific failure details for the caller
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
System.out.println("Disallowing snapshot on " + argv[1] + " succeeded");
}
代码示例来源:origin: apache/hbase
/**
 * Creates the hbck lock file and writes an identifying note into it.
 *
 * @return the open stream on the lock file, or null when another HBCK
 *         already holds the lock (AlreadyBeingCreatedException)
 * @throws IOException for any other failure
 */
@Override
public FSDataOutputStream call() throws IOException {
try {
FileSystem fs = FSUtils.getCurrentFileSystem(this.conf);
FsPermission defaultPerms = FSUtils.getFilePermissions(fs, this.conf,
HConstants.DATA_FILE_UMASK_KEY);
Path tmpDir = getTmpDir(conf);
this.hbckLockPath = new Path(tmpDir, HBCK_LOCK_FILE);
fs.mkdirs(tmpDir);
// creating the lock file is the mutual-exclusion point: it fails with
// AlreadyBeingCreatedException if another process holds it open
final FSDataOutputStream out = createFileWithRetries(fs, this.hbckLockPath, defaultPerms);
out.writeBytes(InetAddress.getLocalHost().toString());
// Add a note into the file we write on why hbase2 is writing out an hbck1 lock file.
out.writeBytes(" Written by an hbase-2.x Master to block an " +
"attempt by an hbase-1.x HBCK tool making modification to state. " +
"See 'HBCK must match HBase server version' in the hbase refguide.");
out.flush();
return out;
} catch(RemoteException e) {
if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
// lock already held elsewhere; signal that with null rather than failing
return null;
} else {
throw e;
}
}
}
代码示例来源:origin: apache/hbase
String rootDir = new Path(FSUtils.getWALRootDir(conf) + "/" + HConstants.HREGION_LOGDIR_NAME +
"/" + targetRs.getServerName().toString()).toUri().getPath();
h.put(p);
DirectoryListing dl = dfs.getClient().listPaths(rootDir, HdfsFileStatus.EMPTY_NAME);
HdfsFileStatus[] hfs = dl.getPartialListing();
LOG.info("Log file found: " + hf.getLocalName() + " in " + rootDir);
String logFile = rootDir + "/" + hf.getLocalName();
FileStatus fsLog = rfs.getFileStatus(new Path(logFile));
"indefinitely you should treat this failure as a symptom.", exception);
} catch (RemoteException exception) {
if (exception.unwrapRemoteException() instanceof FileNotFoundException) {
LOG.debug("Failed to find log file '" + hf.getLocalName() + "'; it probably was " +
"archived out from under us so we'll ignore and retry. If this test hangs " +
代码示例来源:origin: apache/hbase
/**
 * This is important for fencing when recover from RS crash.
 * Creating an output under a missing parent directory must fail with
 * FileNotFoundException (wrapped in a RemoteException) instead of
 * implicitly creating the parent.
 */
@Test
public void testCreateParentFailed() throws IOException {
Path f = new Path("/" + name.getMethodName() + "/test");
EventLoop eventLoop = EVENT_LOOP_GROUP.next();
try {
// createParent=false: the missing parent directory must abort the create
FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3,
FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS);
fail("should fail with parent does not exist");
} catch (RemoteException e) {
LOG.info("expected exception caught", e);
assertThat(e.unwrapRemoteException(), instanceOf(FileNotFoundException.class));
}
}
代码示例来源:origin: ch.cern.hadoop/hadoop-hdfs
/**
 * Disallow snapshot on a directory.
 * Usage: hdfs dfsadmin -disallowSnapshot snapshotDir
 * @param argv List of command line parameters; argv[1] is the directory path.
 * @exception IOException on failure; a SnapshotException is rethrown as a
 *            RemoteException carrying the original class name and message
 */
public void disallowSnapshot(String[] argv) throws IOException {
DistributedFileSystem dfs = getDFS();
try {
dfs.disallowSnapshot(new Path(argv[1]));
} catch (SnapshotException e) {
// preserve the snapshot-specific failure details for the caller
throw new RemoteException(e.getClass().getName(), e.getMessage());
}
// fixed misspelling "snaphot" in the user-facing success message
System.out.println("Disallowing snapshot on " + argv[1] + " succeeded");
}
代码示例来源:origin: apache/hbase
if (path.equals(currentPath)) continue;
try {
in = fs.open(path, bufferSize);
if (pos != 0) in.seek(pos);
assert(in.getPos() == pos) : "Link unable to seek to the right position=" + pos;
IOException ioe = re.unwrapRemoteException(FileNotFoundException.class);
if (!(ioe instanceof FileNotFoundException)) throw re;
代码示例来源:origin: apache/storm
/**
 * Attempts an exclusive create of the given file.
 *
 * @return the open output stream, or null when the file already exists
 *         (including a file still being created by another writer)
 * @throws IOException for any unexpected failure
 */
public static FSDataOutputStream tryCreateFile(FileSystem fs, Path file) throws IOException {
  try {
    return fs.create(file, false);
  } catch (FileAlreadyExistsException existing) {
    return null;
  } catch (RemoteException remote) {
    // An in-progress create by another writer also counts as "already exists".
    if (!(remote.unwrapRemoteException() instanceof AlreadyBeingCreatedException)) {
      throw remote; // unexpected error
    }
    return null;
  }
}
代码示例来源:origin: com.facebook.hadoop/hadoop-core
/**
 * Writes the balancer id file to mark this host as the running balancer.
 *
 * @return the open stream holding the mark, or null when another balancer
 *         already holds the id file
 * @throws IOException on any failure other than the file being held
 */
private OutputStream checkAndMarkRunningBalancer() throws IOException {
  try {
    DataOutputStream markStream = fs.create(BALANCER_ID_PATH);
    markStream.writeBytes(InetAddress.getLocalHost().getHostName());
    markStream.flush();
    return markStream;
  } catch (RemoteException e) {
    String remoteClass = e.getClassName();
    if (!AlreadyBeingCreatedException.class.getName().equals(remoteClass)) {
      throw e;
    }
    // Another balancer instance owns the id file.
    return null;
  }
}
代码示例来源:origin: apache/hbase
/**
 * Delete the given files
 * @param filesToDelete files to delete
 * @return number of deleted files
 */
protected int deleteFiles(Iterable<FileStatus> filesToDelete) {
  int deleted = 0;
  for (FileStatus candidate : filesToDelete) {
    Path candidatePath = candidate.getPath();
    LOG.trace("Removing {} from archive", candidatePath);
    try {
      if (this.fs.delete(candidatePath, false)) {
        deleted++;
      } else {
        LOG.warn("Attempted to delete:" + candidatePath
        + ", but couldn't. Run cleaner chain and attempt to delete on next pass.");
      }
    } catch (IOException deleteError) {
      // Log the root cause rather than the RPC wrapper.
      IOException reported = (deleteError instanceof RemoteException)
          ? ((RemoteException) deleteError).unwrapRemoteException()
          : deleteError;
      LOG.warn("Error while deleting: " + candidatePath, reported);
    }
  }
  return deleted;
}
代码示例来源:origin: apache/hbase
if (splitLogWorkerCoordination != null) {
splitLogWorkerCoordination.markCorrupted(walDir, logfile.getPath().getName(), walFS);
} else {
ZKSplitLog.markCorrupted(walDir, logfile.getPath().getName(), walFS);
e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
throw e;
} finally {
代码示例来源:origin: apache/hbase
if (reader == null || !getCurrentPath().equals(path)) {
closeReader();
reader = WALFactory.createReader(fs, path, conf);
handleFileNotFound(path, fnfe);
} catch (RemoteException re) {
IOException ioe = re.unwrapRemoteException(FileNotFoundException.class);
if (!(ioe instanceof FileNotFoundException)) throw ioe;
handleFileNotFound(path, (FileNotFoundException)ioe);
代码示例来源:origin: apache/hbase
/**
 * Strips the RemoteException wrapper from an IOException, if present.
 *
 * @param e exception to inspect
 * @return the unwrapped cause for a RemoteException, otherwise e itself
 */
private static IOException unwrapException(IOException e) {
  return (e instanceof RemoteException)
      ? ((RemoteException) e).unwrapRemoteException()
      : e;
}
内容来源于网络,如有侵权,请联系作者删除!