fix egen#58 Remove the hard dependency on HDFS
Modify the condition to evaluate the case where we are NOT on the local FS. Done this way, the code is compatible with wasb(s) or s3 if the cluster is based on that kind of distributed filesystem.
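As a rough sketch of why the negated check is more general: resolving the FileSystem for a path and reading its scheme yields "file" only for the local filesystem, while hdfs, wasb(s), s3a and similar backends report their own schemes, so negating the comparison covers every non-local case. The helper below is purely illustrative (isClusterFileSystem is not part of the patch) and relies only on the standard Hadoop FileSystem API:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

// Illustrative helper (not part of this commit): anything whose scheme is not
// the local "file" scheme (hdfs, wasb, wasbs, s3a, ...) counts as a cluster FS.
def isClusterFileSystem(location: String, hadoopConf: Configuration): Boolean = {
  val fs: FileSystem = new Path(location).getFileSystem(hadoopConf)
  !"file".equalsIgnoreCase(fs.getScheme)
}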
PierreAlexMaury authored and Pierre-Alexandre Maury committed Dec 9, 2019
1 parent 0909175 commit a0b34a7
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions src/main/scala/com/springml/spark/sftp/DefaultSource.scala
@@ -137,7 +137,7 @@ class DefaultSource extends RelationProvider with SchemaRelationProvider with Cr
val hadoopConf = sqlContext.sparkContext.hadoopConfiguration
val hdfsPath = new Path(fileLocation)
val fs = hdfsPath.getFileSystem(hadoopConf)
if ("hdfs".equalsIgnoreCase(fs.getScheme)) {
if (!"file".equalsIgnoreCase(fs.getScheme)) {
fs.copyFromLocalFile(new Path(fileLocation), new Path(hdfsTemp))
val filePath = hdfsTemp + "/" + hdfsPath.getName
fs.deleteOnExit(new Path(filePath))
@@ -152,7 +152,7 @@ class DefaultSource extends RelationProvider with SchemaRelationProvider with Cr
val hadoopConf = sqlContext.sparkContext.hadoopConfiguration
val hdfsPath = new Path(hdfsTemp)
val fs = hdfsPath.getFileSystem(hadoopConf)
if ("hdfs".equalsIgnoreCase(fs.getScheme)) {
if (!"file".equalsIgnoreCase(fs.getScheme)) {
fs.copyToLocalFile(new Path(hdfsTemp), new Path(fileLocation))
fs.deleteOnExit(new Path(hdfsTemp))
return fileLocation
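Read together, the two hunks apply the same staging pattern on the upload and download sides. The condensed sketch below is a hypothetical reconstruction of that flow (function names and the local-FS fallbacks are assumptions, not the library's exact code), built only on the Hadoop FileSystem calls visible in the diff:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

// Upload side: stage a locally written file into the cluster FS temp dir.
def copyToClusterFs(fileLocation: String, hdfsTemp: String, conf: Configuration): String = {
  val srcPath = new Path(fileLocation)
  val fs: FileSystem = srcPath.getFileSystem(conf)
  if (!"file".equalsIgnoreCase(fs.getScheme)) {
    fs.copyFromLocalFile(srcPath, new Path(hdfsTemp))
    val filePath = hdfsTemp + "/" + srcPath.getName
    fs.deleteOnExit(new Path(filePath))  // staged copy is removed when the JVM exits
    filePath
  } else {
    fileLocation  // assumed fallback: already on the local FS, nothing to stage
  }
}

// Download side: pull a file from the cluster FS temp dir back to a local path.
def copyToLocalFs(hdfsTemp: String, fileLocation: String, conf: Configuration): String = {
  val tempPath = new Path(hdfsTemp)
  val fs: FileSystem = tempPath.getFileSystem(conf)
  if (!"file".equalsIgnoreCase(fs.getScheme)) {
    fs.copyToLocalFile(tempPath, new Path(fileLocation))
    fs.deleteOnExit(tempPath)
    fileLocation
  } else {
    hdfsTemp  // assumed fallback when the path is already local
  }
}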
