#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import json
from urllib.parse import urlparse
import boto3
from pyspark.sql import SparkSession
from icecream import ic
# transformation, create dataframe with the results from this SQL,
# then .foreach() over that to write metadata summary JSON to S3
SQL_START = """SELECT
count(*) as item_count,"""
SQL_LOOP = """
round(sum(case when size({0}) > 0 then 1 else 0 end) * 100 / count(*), 2)
as {0}_percent,
round(approx_count_distinct({0})/count({0}) * 100, 2)
-- round(count(distinct({0}))/count({0}) * 100, 2)
AS {0}_uniq_percent, """ # .format() template; note `{0}` is the solr field name
SQL_END = """
round(sum(case when char_length(rights_uri) > 0 then 1 else 0 end) * 100 / count(*), 2)
as percent_rights_uri,
round(sum(case when char_length(transcription) > 0 then 1 else 0 end) * 100 / count(*), 2)
as percent_transcription,
split(collection_data[0], "::")[0] as collection_url
-- no trailing commas allowed!
FROM calisphere
GROUP BY collection_data[0]
"""
# loop over these fields applying the SQL_LOOP .format() template
UCLDC_SCHEMA_LOOP = [
"title",
"alternative_title",
"contributor",
"coverage",
"creator",
"date",
"extent",
"format",
"genre",
"identifier",
"language",
"location",
"publisher",
"relation",
"rights",
"rights_holder",
"rights_note",
# "rights_date", -- not used yet
"source",
"spatial",
"subject",
"temporal",
"type",
"description",
"provenance",
]
# `rights_uri` and `transcription` are not repeating, that loop is "unrolled"
# in the SQL


def main(argv=None):
    parser = argparse.ArgumentParser(
        description="run some metadata stats on a solr dump"
    )
    parser.add_argument(
        "path", nargs=1, help="path to spark data file (JSON-L, can be gz or bz2)"
    )
    parser.add_argument(
        "outdir", nargs=1, help="directory to dump the collection summary json files"
    )
    if argv is None:
        argv = parser.parse_args()
    spark = SparkSession.builder.appName(
        "Python Spark SQL data source example"
    ).getOrCreate()
    # build the SQL query from SQL_* strings
    sql_query = SQL_START
    for field in UCLDC_SCHEMA_LOOP:
        sql_query += SQL_LOOP.format(field)
    sql_query += SQL_END
    # where blahDF is hungarian for Data Frame
    calisphereDF = spark.read.json(argv.path)
    calisphereDF.createOrReplaceTempView("calisphere")
    # calisphereDF.printSchema()
    titlesDF = spark.sql(sql_query)
    # titlesDF.show(20, False)
    # titlesDF.coalesce(1).write.format("csv").option("header", "true").save(argv.outfile[0])
    titlesDF.foreach(lambda r: foreach_row_to_json(r, argv.outdir[0]))


def foreach_row_to_json(row, outdir):
    # this gets run once per unique value of GROUP BY collection_data[0]
    row = row.asDict()
    # clean up *_uniq_percent, these use `approx_count_distinct`, and can
    # thus go over 100% in small collections
    for key, value in row.items():
        if key.endswith("uniq_percent") and value and value > 100.0:
            row[key] = 100
    filename = collection_url_to_filename(row["collection_url"])
    write_dict(row, outdir, filename)


def collection_url_to_filename(url):
    return "{}.json".format(os.path.basename(os.path.normpath(url)))


def write_dict(row, outdir, filename):
    url = urlparse(outdir)
    if url.scheme == "s3":
        write_dict_s3(row, url.netloc, os.path.join(url.path, filename))
    else:
        write_dict_file(row, os.path.join(outdir, filename))
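# For example (hypothetical paths): an outdir of "s3://my-bucket/summaries"
# parses to scheme "s3" and netloc "my-bucket", so the row is put to S3 under
# a key built from the URL path plus the filename; any other outdir, e.g.
# "./summaries", falls through to write_dict_file() on the local filesystem.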


def write_dict_s3(row, bucket, key):
    s3 = boto3.resource("s3")
    s3object = s3.Object(bucket, key)
    # json.dumps() returns str; encode to bytes for the S3 put
    s3object.put(Body=json.dumps(row).encode("utf-8"))


def write_dict_file(row, filename):
    with open(filename, "w") as outfile:
        json.dump(row, outfile)


# main() idiom for importing into REPL for debugging
if __name__ == "__main__":
    sys.exit(main())
"""
Copyright © 2019, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the University of California nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
$ pyspark
Python 3.7.1 (default, Dec 12 2018, 15:37:01) 
[Clang 10.0.0 (clang-1000.11.45.5)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
19/10/17 13:27:41 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /__ / .__/\_,_/_/ /_/\_\   version 2.4.4
      /_/

Using Python version 3.7.1 (default, Dec 12 2018 15:37:01)
SparkSession available as 'spark'.
>>> calisphereDF = spark.read.json("/Users/tingle/data/solr-prod.json.bz2").registerTempTable('calisphere')
19/10/17 13:29:55 WARN Utils: Truncated the string representation of a plan since it was too large. This behavior can be adjusted by setting 'spark.debug.maxToStringFields' in SparkEnv.conf.
>>> titles = spark.sql("SELECT title[0] AS title FROM calisphere")
19/10/17 13:30:42 WARN ObjectStore: Failed to get database global_temp, returning NoSuchObjectException
>>> titles.count
<bound method DataFrame.count of DataFrame[title: string]>
>>> titles.count()
1308232                                                                         
>>> titles.distinct().count()
911067                                                                          
>>> 
pyspark==2.4.4
boto3

911,067 / 1,308,232 ≈ 0.6964, so only about 70% of title values are distinct

1,308,232 - 911,067 = 397,165 items share a title with at least one other item and need better titles
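
The same ratio can be computed in a single Spark SQL query from the pyspark
session above (a sketch, assuming the 'calisphere' temp table registered
earlier in that session):

spark.sql(
    "SELECT round(count(DISTINCT title[0]) / count(title[0]) * 100, 2) "
    "AS title_uniq_percent FROM calisphere"
).show()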

$ spark-shell -Dspark.executor.memory=6g
19/10/16 23:51:09 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://10.0.1.6:4040
Spark context available as 'sc' (master = local[*], app id = local-1571295074323).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/  '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.4.4
      /_/
         
Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_172)
Type in expressions to have them evaluated.
Type :help for more information.

scala> val calisphereDF = spark.read.json("/Users/tingle/data/solr-prod.json.bz2").registerTempTable("calisphere")
calisphereDF: Unit = ()

scala> val titles = spark.sql("SELECT title[0] AS title FROM calisphere")
titles: org.apache.spark.sql.DataFrame = [title: string]

scala> titles.count
res6: Long = 1308232                                                            

scala> titles.distinct.count
res7: Long = 911067  

scala> val ratio = spark.sql("SELECT COUNT(DISTINCT title[0]) / COUNT(title[0]) FROM calisphere")
ratio: org.apache.spark.sql.DataFrame = [(CAST(count(DISTINCT title[0]) AS DOUBLE) / CAST(count(title[0]) AS DOUBLE)): double]

scala> ratio.show
+----------------------------------------------------------------------------+  
|(CAST(count(DISTINCT title[0]) AS DOUBLE) / CAST(count(title[0]) AS DOUBLE))|
+----------------------------------------------------------------------------+
|                                                          0.6964108812504204|
+----------------------------------------------------------------------------+
