Unable to write data to HDFS "datanode" - node added to the excluded list

lokaqttq asked on 2021-05-29 in Hadoop

I am running the "namenode" and the "datanode" in the same JVM. When I try to write data, I get the following exception:
org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy$NotEnoughReplicasException:
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseRandom(BlockPlacementPolicyDefault.java:836)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseRandom(BlockPlacementPolicyDefault.java:724)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseLocalRack(BlockPlacementPolicyDefault.java:631)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseLocalStorage(BlockPlacementPolicyDefault.java:591)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseTargetInOrder(BlockPlacementPolicyDefault.java:490)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseTarget(BlockPlacementPolicyDefault.java:421)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseTarget(BlockPlacementPolicyDefault.java:297)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseTarget(BlockPlacementPolicyDefault.java:148)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault.chooseTarget(BlockPlacementPolicyDefault.java:164)
    at org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.chooseTarget4NewBlock(BlockManager.java:2127)
    at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.chooseTargetForNewBlock(FSDirWriteFileOp.java:294)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2771)
    at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:876)
    at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.addBlock(ClientNamenodeProtocolServerSideTranslatorPB.java:567)
    at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java)
    at org.apache.hadoop.ipc.ProtobufRpcEngine$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine.java:524)
    at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1025)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:876)
    at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:822)
    at java.base/java.security.AccessController.doPrivileged(Native Method)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:423)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
    at org.apache.hadoop.ipc.Server$Handler.run(Server.java:2682)

import java.io.File;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.log4j.BasicConfigurator;

// Local storage directories for the embedded NameNode and DataNode
final File file = new File("C:\\ManageEngine\\test\\data\\namenode");
final File file1 = new File("C:\\ManageEngine\\test\\data\\datanode1");
BasicConfigurator.configure();

// NameNode configuration: filesystem URI, name directory, replication factor
final HdfsConfiguration nameNodeConfiguration = new HdfsConfiguration();
FileSystem.setDefaultUri(nameNodeConfiguration, "hdfs://localhost:5555");
nameNodeConfiguration.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, file.toURI().toString());
nameNodeConfiguration.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
final NameNode nameNode = new NameNode(nameNodeConfiguration);

// DataNode configuration: data directory, transfer address, replication factor
final HdfsConfiguration dataNodeConfiguration1 = new HdfsConfiguration();
dataNodeConfiguration1.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, file1.toURI().toString());
dataNodeConfiguration1.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:5556");
dataNodeConfiguration1.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
FileSystem.setDefaultUri(dataNodeConfiguration1, "hdfs://localhost:5555");
final DataNode dataNode1 = DataNode.instantiateDataNode(new String[]{}, dataNodeConfiguration1);

final FileSystem fs = FileSystem.get(dataNodeConfiguration1);

// fileName and fileContent are defined elsewhere in the original program
Path hdfswritepath = new Path(fileName);
if (!fs.exists(hdfswritepath)) {
    fs.create(hdfswritepath).close(); // close the stream that create() opens
    System.out.println("Path " + hdfswritepath + " created.");
}
System.out.println("Begin Write file into hdfs");

// Classical output stream usage
FSDataOutputStream outputStream = fs.create(hdfswritepath);
outputStream.writeBytes(fileContent);
outputStream.close();
System.out.println("End Write file into hdfs");

[Image in the original post: the request data]

vxqlmq5t1#

The number of replicas cannot be greater than the number of datanodes.
If you want to run on a single node, set dfs.replication to 1 in hdfs-site.xml.
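
For reference, a minimal hdfs-site.xml entry for that setting might look like the following (a sketch in the standard Hadoop configuration format, not taken from the original post):

    <configuration>
      <property>
        <name>dfs.replication</name>
        <value>1</value>
      </property>
    </configuration>

In an embedded setup like the one in the question, the same key can also be set programmatically through DFSConfigKeys.DFS_REPLICATION_KEY, as the posted code already does.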
