Created
March 2, 2022 03:57
-
-
Save justinTM/d963c7d99ebc356d534955b88e533ca9 to your computer and use it in GitHub Desktop.
Apache Spark RDD parallelize pipe JSON file through jq multi-core
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os

from pyspark.sql import SparkSession

# Directory holding the input JSON files to be compacted by jq.
DIR_JSONS = '/tmp/in/jsons'

# Create (or reuse) the Spark session. On a cluster this gives us a
# SparkContext whose tasks run across multiple cores/executors, so the
# per-file shell command below is executed in parallel.
SPARK = SparkSession.builder.appName('APP_NAME').getOrCreate()
sc = SPARK.sparkContext
def os_shell_jq(filepath):
    """Compact-format a JSON file in place by piping it through ``jq -c``.

    The original ``os.system(f"jq -c '.' '{path}' > '{path}'")`` is broken:
    the shell opens (and truncates) the output redirection target *before*
    jq reads it, so jq sees an empty file and the data is destroyed. It was
    also shell-injectable via ``filepath``. Fix: run jq with an argument
    list (no shell), write to a temp file in the same directory, then
    atomically replace the original.

    Args:
        filepath: path to the JSON file to rewrite in place.

    Raises:
        subprocess.CalledProcessError: if jq exits non-zero (temp file is
            cleaned up and the original is left untouched).
    """
    import subprocess
    import tempfile

    # Temp file in the same directory so os.replace is an atomic rename
    # (a cross-filesystem move would not be atomic).
    out_dir = os.path.dirname(filepath) or '.'
    tmp = tempfile.NamedTemporaryFile('w', dir=out_dir, delete=False)
    try:
        with tmp:
            subprocess.run(['jq', '-c', '.', filepath], stdout=tmp, check=True)
    except Exception:
        os.unlink(tmp.name)  # don't leave a partial temp file behind
        raise
    os.replace(tmp.name, filepath)
# Collect the path of every entry in the input directory.
# BUG FIX: the original called bare `listdir`, which is a NameError —
# only `os` is imported, so it must be `os.listdir`.
jsons = [os.path.join(DIR_JSONS, f) for f in os.listdir(DIR_JSONS)]

# Distribute the filepaths across the cluster and run the jq command once
# per file on the executors. NOTE: foreach() is an action with no return
# value, so `rdd` is always None; the name is kept for compatibility with
# the original script.
rdd = sc.parallelize(jsons).foreach(os_shell_jq)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment