Usage of the org.apache.hadoop.fs.FileUtil.fullyDelete() method, with code examples


This article collects code examples of the Java method org.apache.hadoop.fs.FileUtil.fullyDelete() and shows how FileUtil.fullyDelete() is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as a useful reference. Details of FileUtil.fullyDelete() are as follows:
Package path: org.apache.hadoop.fs.FileUtil
Class name: FileUtil
Method name: fullyDelete

About FileUtil.fullyDelete

Deletes a directory and all its contents. If the method returns false, the directory may have been only partially deleted.
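
The javadoc above points at the detail that is easy to miss: fullyDelete reports failure through its boolean return value rather than by throwing an exception. Below is a minimal, self-contained sketch of how that return value can be checked; the class name and the scratch directory used here are hypothetical, chosen only for illustration, and are not taken from any of the projects quoted later.

import java.io.File;

import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteExample {
  public static void main(String[] args) {
    // Hypothetical scratch directory, used only for this sketch.
    File dir = new File(System.getProperty("java.io.tmpdir"), "fully-delete-demo");
    dir.mkdirs();

    // fullyDelete(File) removes the directory and everything under it.
    // It returns true only if everything was deleted; false means the
    // directory may have been left partially deleted.
    boolean deleted = FileUtil.fullyDelete(dir);
    if (!deleted) {
      System.err.println("Directory may be only partially deleted: " + dir);
    }
  }
}

Unlike java.io.File.delete(), which fails on non-empty directories, fullyDelete removes the contents recursively, which is why the test fixtures in the examples below use it to reset their working and output directories.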

Code examples

Code example source: apache/incubator-gobblin

@AfterMethod
 public void tearDown() throws IOException {
  // Clean up the staging and/or output directories if necessary
  File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
  if (testRootDir.exists()) {
   FileUtil.fullyDelete(testRootDir);
  }
 }

Code example source: apache/incubator-gobblin

@AfterClass
public void tearDown()
  throws IOException {
 // Clean up the staging and/or output directories if necessary
 File testRootDir = new File(TestConstants.TEST_ROOT_DIR);
 if (testRootDir.exists()) {
  FileUtil.fullyDelete(testRootDir);
 }
}

Code example source: apache/hive

public static File handleWorkDir() throws IOException {
 testName = "test_hcat_partitionpublish_" + Math.abs(new Random().nextLong());
 String testDir = System.getProperty("test.data.dir", "./");
 testDir = testDir + "/" + testName + "/";
 File workDir = new File(new File(testDir).getCanonicalPath());
 FileUtil.fullyDelete(workDir);
 workDir.mkdirs();
 return workDir;
}

Code example source: apache/hive

private static void createWorkDir() throws IOException {
 String testDir = System.getProperty("test.tmp.dir", "./");
 testDir = testDir + "/test_multiout_" + Math.abs(new Random().nextLong()) + "/";
 workDir = new File(new File(testDir).getCanonicalPath());
 FileUtil.fullyDelete(workDir);
 workDir.mkdirs();
}

Code example source: apache/flink

@BeforeClass
public static void createHDFS() {
  try {
    baseDir = new File("./target/localfs/fs_tests").getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
    localFsURI = "file:///" + baseDir + "/";
    localFs = new org.apache.hadoop.fs.Path(localFsURI).getFileSystem(hdConf);
  } catch (Throwable e) {
    e.printStackTrace();
    Assert.fail("Test failed " + e.getMessage());
  }
}

Code example source: apache/avro

public static void writeLinesTextFile(File dir) throws IOException {
 FileUtil.fullyDelete(dir);
 File fileLines = new File(dir, "lines.avro");
 fileLines.getParentFile().mkdirs();
 try(PrintStream out = new PrintStream(fileLines)) {
  for (String line : LINES) {
   out.println(line);
  }
 }
}

Code example source: apache/hive

@BeforeClass
public static void setUpTestDataDir() throws Exception {
 LOG.info("Using warehouse directory " + TEST_WAREHOUSE_DIR);
 File f = new File(TEST_WAREHOUSE_DIR);
 if (f.exists()) {
  FileUtil.fullyDelete(f);
 }
 Assert.assertTrue(new File(TEST_WAREHOUSE_DIR).mkdirs());
}

Code example source: apache/hbase

@After
public void tearDown() throws Exception {
 Configuration c = TEST_UTIL.getConfiguration();
 FileUtil.fullyDelete(new File(c.get("hadoop.tmp.dir")));
}

Code example source: apache/hbase

@BeforeClass
public static void beforeClass() throws Exception {
 UTIL.startMiniCluster();
 FQ_OUTPUT_DIR = new Path(OUTPUT_DIR).makeQualified(new LocalFileSystem());
 FileUtil.fullyDelete(new File(OUTPUT_DIR));
}

Code example source: apache/avro

public static void writeLinesBytesFile(File dir) throws IOException {
 FileUtil.fullyDelete(dir);
 File fileLines = new File(dir + "/lines.avro");
 fileLines.getParentFile().mkdirs();
 DatumWriter<ByteBuffer> writer = new GenericDatumWriter<>();
 try(DataFileWriter<ByteBuffer> out = new DataFileWriter<>(writer)) {
  out.create(Schema.create(Schema.Type.BYTES), fileLines);
  for (String line : LINES) {
   out.append(ByteBuffer.wrap(line.getBytes(StandardCharsets.UTF_8)));
  }
 }
}

Code example source: apache/kylin

@Test
public void testsJob8D() throws Exception {
  String input = "src/test/resources/data/base_cuboid/";
  String output = "target/test-output/8d_cuboid";
  String cubeName = "test_kylin_cube_with_slr_1_new_segment";
  String segmentName = "20130331080000_20131212080000";
  String jobname = "8d_cuboid";
  String level = "1";
  FileUtil.fullyDelete(new File(output));
  String[] args = { "-input", input, "-cubename", cubeName, "-segmentname", segmentName, "-output", output, "-jobname", jobname, "-level", level };
  assertEquals("Job failed", 0, ToolRunner.run(conf, new NDCuboidJob(), args));
}

Code example source: apache/kylin

@Test
  public void testJob7D() throws Exception {
    final String input = "src/test/resources/data/8d_cuboid/";
    final String output = "target/test-output/7d_cuboid";
    final String cubeName = "test_kylin_cube_with_slr_1_new_segment";
    String segmentName = "20130331080000_20131212080000";
    String jobname = "7d_cuboid";
    String level = "2";

    FileUtil.fullyDelete(new File(output));

    String[] args = { "-input", input, "-cubename", cubeName, "-segmentname", segmentName, "-output", output, "-jobname", jobname, "-level", level };
    assertEquals("Job failed", 0, ToolRunner.run(conf, new NDCuboidJob(), args));
  }

Code example source: apache/flink

FileUtil.fullyDelete(tempConfPathForSecureRun);
tempConfPathForSecureRun = null;
File target = new File("../target" + YARN_CONFIGURATION.get(TEST_CLUSTER_NAME_KEY));
if (!target.mkdirs()) {
  LOG.warn("Error creating dirs to {}", target);
}

Code example source: apache/kylin

@Test
public void test() throws Exception {
  // String input =
  // "src/test/resources/data/base_cuboid,src/test/resources/data/6d_cuboid";
  String output = "target/test-output/merged_cuboid";
  String cubeName = "test_kylin_cube_with_slr_ready";
  String jobname = "merge_cuboid";
  File baseFolder = File.createTempFile("kylin-f24668f6-dcff-4cb6-a89b-77f1119df8fa-", "base");
  FileUtils.forceDelete(baseFolder);
  baseFolder.mkdir();
  FileUtils.copyDirectory(new File("src/test/resources/data/base_cuboid"), baseFolder);
  FileUtils.forceDeleteOnExit(baseFolder);
  File eightFoler = File.createTempFile("kylin-f24668f6-dcff-4cb6-a89b-77f1119df8fa-", "8d");
  FileUtils.forceDelete(eightFoler);
  eightFoler.mkdir();
  FileUtils.copyDirectory(new File("src/test/resources/data/base_cuboid"), eightFoler);
  FileUtils.forceDeleteOnExit(eightFoler);
  FileUtil.fullyDelete(new File(output));
  // CubeManager cubeManager =
  // CubeManager.getInstanceFromEnv(getTestConfig());
  String[] args = { "-input", baseFolder.getAbsolutePath() + "," + eightFoler.getAbsolutePath(), "-cubename", cubeName, "-segmentname", "20130331080000_20131212080000", "-output", output, "-jobname", jobname };
  assertEquals("Job failed", 0, ToolRunner.run(conf, new MergeCuboidJob(), args));
}

Code example source: apache/hive

@BeforeClass
public static void setup() throws Exception {
 System.clearProperty("mapred.job.tracker");
 String testDir = System.getProperty("test.tmp.dir", "./");
 testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
 workDir = new File(new File(testDir).getCanonicalPath());
 FileUtil.fullyDelete(workDir);
 workDir.mkdirs();
 warehousedir = new Path(System.getProperty("test.warehouse.dir"));
 HiveConf metastoreConf = new HiveConf();
 metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());
 // Run hive metastore server
 MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf);
 // Read the warehouse dir, which can be changed so multiple MetaStore tests could be run on
 // the same server
 warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE));
 // LocalJobRunner does not work with mapreduce OutputCommitter. So need
 // to use MiniMRCluster. MAPREDUCE-2350
 Configuration conf = new Configuration(true);
 conf.set("yarn.scheduler.capacity.root.queues", "default");
 conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
 FileSystem fs = FileSystem.get(conf);
 System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
 mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null,
  new JobConf(conf));
 mrConf = mrCluster.createJobConf();
 initializeSetup(metastoreConf);
 warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}

Code example source: apache/hbase

private void runTestOnTable()
throws IOException, InterruptedException, ClassNotFoundException {
 Job job = null;
 try {
  job = new Job(UTIL.getConfiguration(), "test123");
  job.setOutputFormatClass(NullOutputFormat.class);
  job.setNumReduceTasks(0);
  Scan scan = new Scan();
  scan.addColumn(FAMILY_NAME, COLUMN_NAME);
  scan.setTimeRange(MINSTAMP, MAXSTAMP);
  scan.setMaxVersions();
  TableMapReduceUtil.initTableMapperJob(TABLE_NAME,
   scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job);
  job.waitForCompletion(true);
 } catch (IOException e) {
  // TODO Auto-generated catch block
  e.printStackTrace();
 } finally {
  if (job != null) {
   FileUtil.fullyDelete(
    new File(job.getConfiguration().get("hadoop.tmp.dir")));
  }
 }
}

Code example source: apache/hbase

@AfterClass
public static void cleanup() throws Exception {
 server.stop();
 FileUtil.fullyDelete(new File(BASEDIR));
 KeyStoreTestUtil.cleanupSSLConfig(keystoresDir, sslConfDir);
 clientSslFactory.destroy();
}

Code example source: apache/hbase

@Test
public void testMiniClusterWithSSLOn() throws Exception {
 final String BASEDIR = System.getProperty("test.build.dir",
   "target/test-dir") + "/" + TestHBaseTestingUtility.class.getSimpleName();
 String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHBaseTestingUtility.class);
 String keystoresDir = new File(BASEDIR).getAbsolutePath();
 HBaseTestingUtility hbt = new HBaseTestingUtility();
 File base = new File(BASEDIR);
 FileUtil.fullyDelete(base);
 base.mkdirs();
 KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, hbt.getConfiguration(), false);
 hbt.getConfiguration().set("hbase.ssl.enabled", "true");
 hbt.getConfiguration().addResource("ssl-server.xml");
 hbt.getConfiguration().addResource("ssl-client.xml");
 MiniHBaseCluster cluster = hbt.startMiniCluster();
 try {
  assertEquals(1, cluster.getLiveRegionServerThreads().size());
 } finally {
  hbt.shutdownMiniCluster();
 }
}

Code example source: apache/hbase

} finally {
  t.close();
  FileUtil.fullyDelete(new File(OUTPUT_DIR));
}
