Skip to content

Instantly share code, notes, and snippets.

@tingletech
Created April 15, 2020 23:23
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save tingletech/5b4071dfa4d4021df28119b1d4a79ba0 to your computer and use it in GitHub Desktop.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import json
from urllib.parse import urlparse
import boto3
# https://stackoverflow.com/q/48914324/1763984
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.sql import SparkSession
## @params: [JOB_NAME]
# Module-level AWS Glue bootstrapping: resolve job arguments, create the
# Spark/Glue contexts, and initialize the Job object. Order matters here —
# the SparkContext must exist before the GlueContext, and job.init() must
# run before any transformation.
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
# Get a handle on the JVM-side log4j logger so output lands in the Glue
# job log stream (plain Python logging does not).
log4jLogger = sc._jvm.org.apache.log4j # https://stackoverflow.com/q/48914324/1763984
log = log4jLogger.LogManager.getLogger(__name__)
log.warn('hello logger')
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# transformation, create dataframe with the results from this SQL,
# then .foreach() over that to write metadata summary JSON to S3
# The summary query is assembled in three pieces: a fixed SELECT header
# (SQL_START), one SQL_LOOP fragment .format()-ed per repeating metadata
# field, and a fixed footer (SQL_END) covering the non-repeating fields
# plus the GROUP BY. The `-- ...` lines are SQL comments inside the
# templates, not Python comments.
SQL_START = """SELECT
count(*) as item_count,"""
SQL_LOOP = """
round(sum(case when size({0}) > 0 then 1 else 0 end) * 100 / count(*), 2)
as {0}_percent,
round(approx_count_distinct({0})/count({0}) * 100, 2)
-- round(count(distinct({0}))/count({0}) * 100, 2)
AS {0}_uniq_percent, """ # .format() template; note `{0}` is the solr field name
SQL_END = """
round(sum(case when char_length(rights_uri) > 0 then 1 else 0 end) * 100 / count(*), 2)
as rights_uri_percent,
round(sum(case when char_length(transcription) > 0 then 1 else 0 end) * 100 / count(*), 2)
as transcription_percent,
split(collection_data[0], "::")[0] as collection_url
-- no trailing commas allowed!
FROM calisphere
GROUP BY collection_data[0]
"""
# loop over these fields applying the SQL_LOOP .format() template
# NOTE(review): SQL_LOOP uses size(), so these appear to be repeating
# (array-valued) solr fields — confirm against the source schema.
UCLDC_SCHEMA_LOOP = [
"title",
"alternative_title",
"contributor",
"coverage",
"creator",
"date",
"extent",
"format",
"genre",
"identifier",
"language",
"location",
"publisher",
"relation",
"rights",
"rights_holder",
"rights_note",
# "rights_date", -- not used yet
"source",
"spatial",
"subject",
"temporal",
"type",
"description",
"provenance",
]
# `rights_uri` and `transcription` are not repeating, that loop is "unrolled"
# in the SQL
def main(argv=None):
    """Build the metadata-summary SQL, run it over the catalogued
    Calisphere solr dump, and write one JSON summary per collection.

    Relies on the module-level ``glueContext``, ``spark``, and ``args``
    created at import time. ``argv`` is accepted for interface
    compatibility but unused.
    """
    # Assemble the full query: fixed header, one templated chunk per
    # repeating field, then the non-repeating fields / GROUP BY footer.
    sql_query = SQL_START
    for field in UCLDC_SCHEMA_LOOP:
        sql_query += SQL_LOOP.format(field)
    sql_query += SQL_END
    ## @type: DataSource
    ## @return: calisphereDF
    # NOTE(review): "trasnformation_ctx" is misspelled, but this string is
    # the Glue job-bookmark key — renaming it would reset bookmark state,
    # so it is deliberately left as-is.
    calisphereDF = glueContext.create_dynamic_frame.from_catalog(
        database="gsdn",
        table_name="solr_prod_json_bz2",
        redshift_tmp_dir=args["TempDir"],
        transformation_ctx="trasnformation_ctx",
    )
    # DynamicFrame -> Spark DataFrame so we can register a temp view and
    # run plain Spark SQL against it.
    # https://stackoverflow.com/questions/52822526/dynamicframe-vs-dataframe
    calisphereDF = calisphereDF.toDF()
    calisphereDF.createOrReplaceTempView("calisphere")
    titlesDF = spark.sql(sql_query)
    # One summary JSON per GROUP BY row, written from the executors.
    titlesDF.foreach(
        lambda r: foreach_row_to_json(
            r, "s3://static-ucldc-cdlib-org/metadata_summary/collection/"
        )
    )
def foreach_row_to_json(row, outdir):
    """Reshape one query-result row into nested dicts and write it out
    as a JSON file named after the collection URL.

    Runs once per unique value of GROUP BY collection_data[0]. The
    output shape groups facet columns for the django template:
    {"<facet>": {"percent": 100, "uniq_percent": 1.87}, ...} alongside
    the pass-through non-facet columns.
    """
    record = row.asDict()
    summary = {}
    for column, figure in record.items():
        # *_uniq_percent comes from approx_count_distinct, which can
        # exceed 100% on small collections — clamp it.
        if column.endswith("uniq_percent") and figure and figure > 100.0:
            figure = 100
        facet, kind = substring_key(column)
        if kind:
            # group all the per-facet measurements under one key
            summary.setdefault(facet, {})[kind] = figure
        else:
            # not a metadata facet; copy through unchanged
            summary[column] = figure
    filename = collection_url_to_filename(record["collection_url"])
    write_dict(summary, outdir, filename)
def substring_key(key):
    """Split a result-column name into a (facet, kind) pair.

    "title_percent" -> ("title", "percent"),
    "title_uniq_percent" -> ("title", "uniq_percent"),
    and any column without a percent suffix -> (key, None).
    """
    trimmed = key.split("_percent")[0].split("_uniq")[0]
    if trimmed == key:
        # no suffix was stripped: this is not a metadata facet column
        return key, None
    suffix = key.split("{}_".format(trimmed))[1]
    return trimmed, suffix
def collection_url_to_filename(url):
    """Return "<last path segment>.json" for a collection URL,
    tolerating a trailing slash (normpath strips it first)."""
    last_segment = os.path.basename(os.path.normpath(url))
    return last_segment + ".json"
def write_dict(row, outdir, filename):
    """Dispatch the summary dict to S3 or the local filesystem,
    depending on the scheme of ``outdir``."""
    parsed = urlparse(outdir)
    if parsed.scheme != "s3":
        write_dict_file(row, os.path.join(outdir, filename))
        return
    key = os.path.join(parsed.path.strip('/'), filename)
    write_dict_s3(row, parsed.netloc, key)
def write_dict_s3(row, bucket, key):
    """Serialize ``row`` as JSON, upload it to s3://<bucket>/<key>,
    and mark the object public-read."""
    body = json.dumps(row).encode('utf8')
    target = boto3.resource("s3").Object(bucket, key)
    target.put(Body=body)
    # the summaries are served directly from the static bucket
    target.Acl().put(ACL='public-read')
def write_dict_file(row, filename):
    """Write ``row`` to ``filename`` as JSON.

    Opens the file with an explicit UTF-8 encoding so the output does
    not depend on the platform/locale default encoding.
    """
    with open(filename, "w", encoding="utf-8") as outfile:
        json.dump(row, outfile)
# run the transformation, then tell Glue the job run completed
# (commit advances the job bookmark)
main()
job.commit()
"""
Copyright © 2019, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the University of California nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment