This article collects code examples of the Java method org.apache.hadoop.fs.FileSystem.delete() and shows how FileSystem.delete() is used in practice. The examples are taken mainly from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they carry real reference value and should be helpful. Details of the FileSystem.delete() method:
Package path: org.apache.hadoop.fs.FileSystem
Class name: FileSystem
Method name: delete
Description: Delete a file/directory.
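Before the project examples, here is a minimal self-contained sketch of the typical call pattern. It is illustrative only: the class name FileSystemDeleteExample and the path /tmp/example-dir are placeholders, not taken from any project below. The boolean second argument controls recursion: true removes a directory together with all of its contents, while false only succeeds on a file or an empty directory; delete() returns true when the path was actually removed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemDeleteExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder path used only for illustration.
        Path target = new Path("/tmp/example-dir");
        FileSystem fs = target.getFileSystem(conf);

        // true = recursive delete: removes the directory and everything under it.
        // delete() returns true when the path was actually removed.
        if (fs.exists(target) && fs.delete(target, true)) {
            System.out.println("Deleted " + target);
        }
        fs.close();
    }
}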
Code example source: voldemort/voldemort
public static void deletePathIfExists(JobConf conf, String stepOutputPath) throws IOException {
    Path path = new Path(stepOutputPath);
    FileSystem fs = path.getFileSystem(conf);
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
}
Code example source: h2oai/h2o-2
    @Override public Object call() throws Exception {
        FileSystem fs = FileSystem.get(_iceRoot.toUri(), CONF);
        fs.delete(_iceRoot, true);
        return null;
    }
}, false, 0);
Code example source: apache/incubator-druid
    public static String runTask(String[] args) throws Exception
    {
        String workingPath = args[0];
        log.info("Deleting indexing hadoop working path [%s].", workingPath);
        Path p = new Path(workingPath);
        FileSystem fs = p.getFileSystem(new Configuration());
        fs.delete(p, true);
        return null;
    }
}
Code example source: prestodb/presto
private void cleanupFile(Path file)
{
    try {
        fileSystem.delete(file, false);
        if (fileSystem.exists(file)) {
            throw new IOException("Delete failed");
        }
    }
    catch (IOException e) {
        log.warn(e, "Failed to delete temporary file: " + file);
    }
}
Code example source: apache/flink
    @Override
    public void run() {
        LOG.info("Cancelling deployment from Deployment Failure Hook");
        failSessionDuringDeployment(yarnClient, yarnApplication);
        LOG.info("Deleting files in {}.", yarnFilesDir);
        try {
            FileSystem fs = FileSystem.get(yarnConfiguration);
            if (!fs.delete(yarnFilesDir, true)) {
                throw new IOException("Deleting files in " + yarnFilesDir + " was unsuccessful");
            }
            fs.close();
        } catch (IOException e) {
            LOG.error("Failed to delete Flink Jar and configuration files in HDFS", e);
        }
    }
}
Code example source: apache/hbase
@After
public void cleanUp() throws IOException {
    // delete and recreate the test directory, ensuring a clean test dir between tests
    Path testDir = UTIL.getDataTestDir();
    FileSystem fs = UTIL.getTestFileSystem();
    fs.delete(testDir, true);
    if (!fs.mkdirs(testDir)) throw new IOException("Failed mkdir " + testDir);
}
Code example source: apache/kylin
@Override
public void deleteSlice(String workingDir, String sliceFileName) throws IOException {
    Path path = new Path(workingDir, sliceFileName);
    logger.trace("delete slice at {}", path);
    if (fileSystem.exists(path)) {
        fileSystem.delete(path, false);
    }
}
Code example source: h2oai/h2o-2
    @Override public Object call() throws Exception {
        Path p = new Path(_iceRoot, getIceName(v));
        FileSystem fs = FileSystem.get(p.toUri(), CONF);
        fs.delete(p, true);
        return null;
    }
}, false, 0);
Code example source: apache/hive
private void moveUpFiles(Path specPath, Configuration hconf, Logger log)
        throws IOException, HiveException {
    FileSystem fs = specPath.getFileSystem(hconf);
    if (fs.exists(specPath)) {
        FileStatus[] taskOutputDirs = fs.listStatus(specPath);
        if (taskOutputDirs != null) {
            for (FileStatus dir : taskOutputDirs) {
                Utilities.renameOrMoveFiles(fs, dir.getPath(), specPath);
                fs.delete(dir.getPath(), true);
            }
        }
    }
}
Code example source: apache/incubator-gobblin
@Test
public void testFromInstrumentedScheme() throws Exception {
    File tmpDir = Files.createTempDir();
    tmpDir.deleteOnExit();
    FileSystem fs = FileSystem.get(new URI(InstrumentedLocalFileSystem.SCHEME + ":///"), new Configuration());
    Assert.assertTrue(fs instanceof InstrumentedLocalFileSystem);
    Assert.assertTrue(DecoratorUtils.resolveUnderlyingObject(fs) instanceof LocalFileSystem);
    Assert.assertEquals(fs.getFileStatus(new Path("/tmp")).getPath(), new Path("instrumented-file:///tmp"));
    Assert.assertEquals(fs.getUri().getScheme(), "instrumented-file");
    Path basePath = new Path(tmpDir.getAbsolutePath());
    Assert.assertTrue(fs.exists(basePath));
    Path file = new Path(basePath, "file");
    Assert.assertFalse(fs.exists(file));
    fs.create(new Path(basePath, "file"));
    Assert.assertTrue(fs.exists(file));
    Assert.assertEquals(fs.getFileStatus(file).getLen(), 0);
    Assert.assertEquals(fs.listStatus(basePath).length, 1);
    fs.delete(file, false);
    Assert.assertFalse(fs.exists(file));
}
Code example source: apache/hive
public PerformTestRCFileAndSeqFile(boolean local, String file)
        throws IOException {
    if (local) {
        fs = FileSystem.getLocal(conf);
    } else {
        fs = FileSystem.get(conf);
    }
    conf.setInt(RCFile.Writer.COLUMNS_BUFFER_SIZE_CONF_STR, 1 * 1024 * 1024);
    if (file == null) {
        Path dir = new Path(System.getProperty("test.tmp.dir", ".") + "/mapred");
        testRCFile = new Path(dir, "test_rcfile");
        testSeqFile = new Path(dir, "test_seqfile");
    } else {
        testRCFile = new Path(file + "-rcfile");
        testSeqFile = new Path(file + "-seqfile");
    }
    fs.delete(testRCFile, true);
    fs.delete(testSeqFile, true);
    System.out.println("RCFile:" + testRCFile.toString());
    System.out.println("SequenceFile:" + testSeqFile.toString());
}
Code example source: apache/hive
private void corruptDataFile(final String file, final Configuration conf, final int addRemoveBytes)
        throws Exception {
    Path bPath = new Path(file);
    Path cPath = new Path(bPath.getParent(), bPath.getName() + ".corrupt");
    FileSystem fs = bPath.getFileSystem(conf);
    FileStatus fileStatus = fs.getFileStatus(bPath);
    int len = addRemoveBytes == Integer.MIN_VALUE ? 0 : (int) fileStatus.getLen() + addRemoveBytes;
    byte[] buffer = new byte[len];
    FSDataInputStream fdis = fs.open(bPath);
    fdis.readFully(0, buffer, 0, (int) Math.min(fileStatus.getLen(), buffer.length));
    fdis.close();
    FSDataOutputStream fdos = fs.create(cPath, true);
    fdos.write(buffer, 0, buffer.length);
    fdos.close();
    fs.delete(bPath, false);
    fs.rename(cPath, bPath);
}
Code example source: alibaba/mdrill
public static void truncate(FileSystem lfs, Path target) throws IOException
{
    LOG.info("truncate " + target.toString());
    if (lfs.exists(target)) {
        lfs.delete(target, true);
    }
    lfs.mkdirs(target.getParent());
}
Code example source: apache/storm
@Test
public void testDoubleCreateSemantics() throws Exception {
    //1 create an already existing open file w/o override flag
    Path file1 = new Path(dir.toString() + Path.SEPARATOR_CHAR + "file1");
    try (FSDataOutputStream os1 = fs.create(file1, false)) {
        fs.create(file1, false); // should fail
        fail("Create did not throw an exception");
    } catch (RemoteException e) {
        Assert.assertEquals(AlreadyBeingCreatedException.class, e.unwrapRemoteException().getClass());
    }
    //2 close file and retry creation
    try {
        fs.create(file1, false); // should still fail
        fail("Create did not throw an exception");
    } catch (FileAlreadyExistsException e) {
        // expecting this exception
    }
    //3 delete file and retry creation
    fs.delete(file1, false);
    try (FSDataOutputStream os2 = fs.create(file1, false)) {
        Assert.assertNotNull(os2);
    }
}
Code example source: apache/hbase
private String setRootDirAndCleanIt(final HBaseTestingUtility htu, final String subdir)
        throws IOException {
    Path testdir = htu.getDataTestDir(subdir);
    FileSystem fs = FileSystem.get(htu.getConfiguration());
    if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
    FSUtils.setRootDir(htu.getConfiguration(), testdir);
    return FSUtils.getRootDir(htu.getConfiguration()).toString();
}
Code example source: apache/incubator-druid
@Override
public void killAll() throws IOException
{
    log.info("Deleting all segment files from hdfs dir [%s].", storageDirectory.toUri().toString());
    final FileSystem fs = storageDirectory.getFileSystem(config);
    fs.delete(storageDirectory, true);
}
Code example source: prestodb/presto
private void deleteSchemaFile(String type, Path metadataDirectory)
{
    try {
        if (!metadataFileSystem.delete(new Path(metadataDirectory, PRESTO_SCHEMA_FILE_NAME), false)) {
            throw new PrestoException(HIVE_METASTORE_ERROR, "Could not delete " + type + " schema");
        }
    }
    catch (IOException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, "Could not delete " + type + " schema", e);
    }
}
Code example source: apache/kylin
public static void deletePath(Configuration conf, Path path) throws IOException {
    FileSystem fs = FileSystem.get(path.toUri(), conf);
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
}
Code example source: apache/storm
private static void createSeqFile(FileSystem fs, Path file, int rowCount) throws IOException {
    Configuration conf = new Configuration();
    try {
        if (fs.exists(file)) {
            fs.delete(file, false);
        }
        SequenceFile.Writer w = SequenceFile.createWriter(fs, conf, file, IntWritable.class, Text.class);
        for (int i = 0; i < rowCount; i++) {
            // one record per row; the loop body was truncated in the source excerpt
            w.append(new IntWritable(i), new Text("line " + i));
        }
        w.close();
        System.out.println("done");
    } catch (IOException e) {
        e.printStackTrace();
    }
}
Code example source: Alluxio/alluxio
/**
 * Creates the HDFS filesystem to store output files.
 *
 * @param conf Hadoop configuration
 */
private void createHdfsFilesystem(Configuration conf) throws Exception {
    // Inits HDFS file system object
    mFileSystem = FileSystem.get(URI.create(conf.get("fs.defaultFS")), conf);
    mOutputFilePath = new Path("./MapReduceOutputFile");
    if (mFileSystem.exists(mOutputFilePath)) {
        mFileSystem.delete(mOutputFilePath, true);
    }
}