Usage and code examples of the org.apache.hadoop.security.AccessControlException.<init>() method


This article collects Java code examples for the org.apache.hadoop.security.AccessControlException.<init>() method and shows how the constructor is used in practice. The examples are drawn mainly from platforms such as GitHub, Stack Overflow, and Maven, and are extracted from selected projects, so they should serve as useful references. Details of the AccessControlException.<init>() method are as follows:
Package path: org.apache.hadoop.security.AccessControlException
Class name: AccessControlException
Method name: <init>

About AccessControlException.<init>

Default constructor is needed for unwrapping from org.apache.hadoop.ipc.RemoteException.
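To make that concrete, here is a minimal, hypothetical sketch of the unwrapping path seen from the caller's side: an operation that fails on the server arrives at the client wrapped in an org.apache.hadoop.ipc.RemoteException, and RemoteException#unwrapRemoteException can re-create the original AccessControlException locally. The doProtectedRpcCall helper is an assumption standing in for any real RPC or FileSystem call.

import java.io.IOException;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.AccessControlException;

public class UnwrapExample {

  static void callWithUnwrap() throws IOException {
    try {
      doProtectedRpcCall();
    } catch (RemoteException re) {
      // Re-create the original exception type on the client; if the wrapped
      // class is not in the lookup list, the RemoteException itself is returned.
      IOException unwrapped = re.unwrapRemoteException(AccessControlException.class);
      throw unwrapped;
    }
  }

  // Hypothetical stand-in for a real RPC call such as FileSystem#delete.
  private static void doProtectedRpcCall() throws IOException {
    throw new RemoteException(AccessControlException.class.getName(),
        "Permission denied: user=alice, access=WRITE, path=/protected");
  }

  public static void main(String[] args) {
    try {
      callWithUnwrap();
    } catch (AccessControlException ace) {
      // After unwrapping, the concrete exception type can be handled directly.
      System.out.println("Caught AccessControlException: " + ace.getMessage());
    } catch (IOException ioe) {
      System.out.println("Caught some other IOException: " + ioe);
    }
  }
}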

Code examples

Code example source: org.apache.hadoop/hadoop-common

static AccessControlException readOnlyMountTable(final String operation,
    final String p) {
  return new AccessControlException(
      "InternalDir of ViewFileSystem is readonly; operation=" + operation +
          "Path=" + p);
}

Code example source: org.apache.hadoop/hadoop-common

throw new AccessControlException(
  "Server does not support SASL " + authMethod);

Code example source: org.apache.hadoop/hadoop-common

ugi = UserGroupInformation.getCurrentUser();
if (serverId.isEmpty()) {
  throw new AccessControlException(
      "Kerberos principal name does NOT have the expected "
          + "hostname part: " + ugi.getUserName());
}
// ...
throw new AccessControlException(
    "Server does not support SASL " + authMethod);
// ...
throw new AccessControlException(
    "Unable to find SASL server implementation for " + mechanism);

Code example source: org.apache.hadoop/hadoop-common

@Override
public boolean delete(final Path f, final boolean recursive)
  throws AccessControlException, FileNotFoundException,
  UnresolvedLinkException, IOException {
 InodeTree.ResolveResult<AbstractFileSystem> res = 
  fsState.resolve(getUriPath(f), true);
 // If internal dir or target is a mount link (ie remainingPath is Slash)
 if (res.isInternalDir() || res.remainingPath == InodeTree.SlashPath) {
  throw new AccessControlException(
    "Cannot delete internal mount table directory: " + f);
 }
 return res.targetFileSystem.delete(res.remainingPath, recursive);
}

Code example source: org.apache.hadoop/hadoop-common

serverAuthMethods.add(authType.getMethod());
// ...
throw new AccessControlException(
    "Client cannot authenticate via:" + serverAuthMethods);

Code example source: org.apache.hadoop/hadoop-common

private AuthProtocol initializeAuthContext(int authType)
  throws IOException {
 AuthProtocol authProtocol = AuthProtocol.valueOf(authType);
 if (authProtocol == null) {
  IOException ioe = new IpcException("Unknown auth protocol:" + authType);
  doSaslReply(ioe);
  throw ioe;        
 }
 boolean isSimpleEnabled = enabledAuthMethods.contains(AuthMethod.SIMPLE);
 switch (authProtocol) {
  case NONE: {
   // don't reply if client is simple and server is insecure
   if (!isSimpleEnabled) {
    IOException ioe = new AccessControlException(
      "SIMPLE authentication is not enabled."
        + "  Available:" + enabledAuthMethods);
    doSaslReply(ioe);
    throw ioe;
   }
   break;
  }
  default: {
   break;
  }
 }
 return authProtocol;
}

Code example source: org.apache.hadoop/hadoop-common

private UserGroupInformation getAuthorizedUgi(String authorizedId)
  throws InvalidToken, AccessControlException {
 if (authMethod == AuthMethod.TOKEN) {
  TokenIdentifier tokenId = SaslRpcServer.getIdentifier(authorizedId,
    secretManager);
  UserGroupInformation ugi = tokenId.getUser();
  if (ugi == null) {
   throw new AccessControlException(
     "Can't retrieve username from tokenIdentifier.");
  }
  ugi.addTokenIdentifier(tokenId);
  return ugi;
 } else {
  return UserGroupInformation.createRemoteUser(authorizedId, authMethod);
 }
}

Code example source: apache/hive

return;
// ...
throw new AccessControlException("action " + action + " not permitted on path "
    + stat.getPath() + " for user " + user);

Code example source: org.apache.hadoop/hadoop-common

// ... (preceding part of the condition elided in this excerpt)
    && (renewer == null || renewer.toString().isEmpty() || !cancelerShortName
        .equals(renewer.toString()))) {
  throw new AccessControlException(canceller
      + " is not authorized to cancel the token " + formatTokenId(id));
}

Code example source: org.apache.hadoop/hadoop-common

new AccessControlException("Authenticated user (" + user
  + ") doesn't match what the client claims to be ("
  + protocolUser + ")"));

Code example source: org.apache.hadoop/hadoop-common

throw new AccessControlException(String.format(
 "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
 stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));

Code example source: org.apache.hadoop/hadoop-common

throw new AccessControlException(renewer +
    " tried to renew a token " + formatTokenId(id)
    + " without a renewer");
// ...
throw new AccessControlException(renewer
    + " tries to renew a token " + formatTokenId(id)
    + " with non-matching renewer " + id.getRenewer());
// ...
throw new AccessControlException(renewer
    + " is trying to renew a token "
    + formatTokenId(id) + " with wrong password");

Code example source: org.apache.hadoop/hadoop-common

@Override
public void renameInternal(final Path src, final Path dst,
  final boolean overwrite) throws IOException, UnresolvedLinkException {
 // Passing resolveLastComponent as false to catch renaming a mount point
 // itself; we need to catch this as an internal operation and fail.
 InodeTree.ResolveResult<AbstractFileSystem> resSrc = 
  fsState.resolve(getUriPath(src), false); 

 if (resSrc.isInternalDir()) {
  throw new AccessControlException(
    "Cannot Rename within internal dirs of mount table: src=" + src
      + " is readOnly");
 }
 InodeTree.ResolveResult<AbstractFileSystem> resDst =
               fsState.resolve(getUriPath(dst), false);
 if (resDst.isInternalDir()) {
  throw new AccessControlException(
    "Cannot Rename within internal dirs of mount table: dest=" + dst
      + " is readOnly");
 }
 //Alternate 1: renames within same file system
 URI srcUri = resSrc.targetFileSystem.getUri();
 URI dstUri = resDst.targetFileSystem.getUri();
 ViewFileSystem.verifyRenameStrategy(srcUri, dstUri,
   resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
 ChRootedFs srcFS = (ChRootedFs) resSrc.targetFileSystem;
 ChRootedFs dstFS = (ChRootedFs) resDst.targetFileSystem;
 srcFS.getMyFs().renameInternal(srcFS.fullPath(resSrc.remainingPath),
   dstFS.fullPath(resDst.remainingPath), overwrite);
}

Code example source: org.apache.hadoop/hadoop-common

if (sentNegotiate) {
  throw new AccessControlException(
      "Client already attempted negotiation");
}
// ...
if (!negotiateResponse.getAuthsList().contains(clientSaslAuth)) {
  if (sentNegotiate) {
    throw new AccessControlException(
        clientSaslAuth.getMethod() + " authentication is not enabled."
            + "  Available:" + enabledAuthMethods);
  }
  // ...
}

Code example source: org.apache.hadoop/hadoop-hdfs

private void checkBlockLocalPathAccess() throws IOException {
 checkKerberosAuthMethod("getBlockLocalPathInfo()");
 String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
 if (!usersWithLocalPathAccess.contains(currentUser)) {
  throw new AccessControlException(
    "Can't continue with getBlockLocalPathInfo() "
      + "authorization. The user " + currentUser
      + " is not configured in "
      + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Verify that the caller has the required permission. This will result in
 * an exception if the caller is not allowed to access the resource.
 */
public void checkSuperuserPrivilege()
  throws AccessControlException {
 if (!isSuperUser()) {
  throw new AccessControlException("Access denied for user " 
    + getUser() + ". Superuser privilege is required");
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

/** Guarded by {@link FSNamesystem#readLock()} */
private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
  throws AccessControlException {
 if (getUser().equals(inodes[i].getUserName())) {
  return;
 }
 throw new AccessControlException(
   "Permission denied. user=" + getUser() +
   " is not the owner of inode=" + getPath(components, 0, i));
}

Code example source: org.apache.hadoop/hadoop-hdfs

/** Guarded by {@link FSNamesystem#readLock()} */
private void check(INodeAttributes[] inodes, byte[][] components, int i,
  FsAction access) throws AccessControlException {
 INodeAttributes inode = (i >= 0) ? inodes[i] : null;
 if (inode != null && !hasPermission(inode, access)) {
  throw new AccessControlException(
    toAccessControlString(inode, getPath(components, 0, i), access));
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

void checkUnreadableBySuperuser(FSPermissionChecker pc, INodesInPath iip)
  throws IOException {
 if (pc.isSuperUser()) {
  if (FSDirXAttrOp.getXAttrByPrefixedName(this, iip,
    SECURITY_XATTR_UNREADABLE_BY_SUPERUSER) != null) {
   throw new AccessControlException(
     "Access is denied for " + pc.getUser() + " since the superuser "
     + "is not allowed to perform this operation.");
  }
 }
}
