from pyspark.sql import SparkSession

spark = SparkSession \
    .builder \
    .appName("spark-avro-json-sample") \
    .config('spark.hadoop.avro.mapred.ignore.inputs.without.extension', 'false') \
    .getOrCreate()

in_path = '/mnt/iotsmarthousedatalake/rawdata/sandbox/eventhubiotsmarthouse/eventhubiotsmarthouse/eventhubiotsmarthouse/0/*/*/*/*/*/*.avro'

# storage -> avro: read the captured Avro files from the mounted storage path
avroDf = spark.read.format("com.databricks.spark.avro").load(in_path)

# avro -> json: the Event Hub message payload sits in the binary Body column; cast it to string
jsonRdd = avroDf.select(avroDf.Body.cast("string")).rdd.map(lambda x: x[0])
data = spark.read.json(jsonRdd)  # in the real world it's better to specify a schema for the JSON

# do whatever you want with `data`
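Following the inline note about specifying a schema, here is a minimal sketch of reading the JSON payload with an explicit schema instead of letting Spark infer it. The field names (deviceId, temperature, humidity) are hypothetical placeholders for whatever your IoT messages actually contain.

from pyspark.sql.types import StructType, StructField, StringType, DoubleType

# Hypothetical message schema -- replace the fields with the ones your devices actually send
schema = StructType([
    StructField("deviceId", StringType(), True),
    StructField("temperature", DoubleType(), True),
    StructField("humidity", DoubleType(), True),
])

# Supplying the schema avoids an extra pass over the data to infer it
data = spark.read.schema(schema).json(jsonRdd)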
Where can I get an in_path like the one in the screenshot, if mine contains only storage/container//?
Can you please explain in_path? Is it the location of the file I want to load for reading?
Thank you.