Spark job to read gzip files, ignoring corrupted files
import java.io._
import scala.io._
import java.util.zip._
// Spark (note: the Logging trait was removed from the public API in Spark 2.0)
import org.slf4j.Logger
import org.apache.spark.{SparkConf, SparkContext, Logging}
// Hadoop
import org.apache.hadoop.io.compress.GzipCodec

object FilterBadGzipFiles extends Logging {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    val sc = new SparkContext(sparkConf)

    // binaryFiles yields (path, PortableDataStream) pairs, so each file is
    // opened inside the task and a corrupt archive can be caught per file
    // instead of failing the whole job.
    val files = sc.binaryFiles(args(0))

    val lines =
      files.flatMap { case (path, stream) =>
        try {
          val is =
            if (path.toLowerCase.endsWith(".gz"))
              new GZIPInputStream(stream.open)
            else
              stream.open
          try {
            // Caution: toList materializes the whole file in executor memory.
            Source.fromInputStream(is).getLines.toList
          } finally {
            try { is.close() } catch { case _: Throwable => }
          }
        } catch {
          case e: Throwable =>
            // Corrupt or unreadable file: log it and contribute no lines.
            log.warn(s"error reading from ${path}: ${e.getMessage}", e)
            List.empty[String]
        }
      }

    lines.saveAsTextFile(args(1), classOf[GzipCodec])
  }
}
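For reference, a launch might look like the following sketch; the jar name, master, and paths are placeholders, with args(0) as the input path and args(1) as the output directory:

spark-submit --class FilterBadGzipFiles --master yarn filter-bad-gzip.jar \
  'hdfs:///data/input/*.gz' hdfs:///data/output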
thanks
Your method has a problem when you deal with big data: getLines.toList materializes each whole file in executor memory.
You can also use this conf to do it:
.set("spark.files.ignoreCorruptFiles", "true")
Hi,
That option works, but is there any way to know which files are corrupted, rather than just ignoring them?
Thanks,
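One option (a sketch in the spirit of the gist above, not from the thread; the input path is a placeholder): keep the binaryFiles approach but invert it, draining each archive and collecting the paths whose decompression fails.

import java.io.IOException
import java.util.zip.GZIPInputStream
import org.apache.spark.{SparkConf, SparkContext}

object ListCorruptGzipFiles {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("ListCorruptGzipFiles"))
    val corrupt =
      sc.binaryFiles("hdfs:///data/input/*.gz").flatMap { case (path, stream) =>
        try {
          val in = new GZIPInputStream(stream.open())
          try {
            val buf = new Array[Byte](8192)
            while (in.read(buf) != -1) {} // drain to force full decompression
            None                          // readable end to end: not corrupt
          } finally {
            try { in.close() } catch { case _: Throwable => }
          }
        } catch {
          case _: IOException => Some(path) // header or body failed to decompress
        }
      }
    corrupt.collect().foreach(println)
    sc.stop()
  }
}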
Works like a charm, thanks!