@dynamicguy, forked from rxin/df.py, created November 28, 2015
DataFrame simple aggregation performance benchmark
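A quick benchmark of a simple per-key average over a Parquet file, computed twice: once with the DataFrame API and once with hand-written RDD code, each in both Python and Scala. The script that generates the test data is included below.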
# df.py: per-key average with the DataFrame API (PySpark)
from pyspark.sql.functions import avg, col
data = sqlContext.load("/home/rxin/ints.parquet")
data.groupBy("a").agg(col("a"), avg("num")).collect()
// df.scala: the same aggregation with the Scala DataFrame API
import org.apache.spark.sql.functions._
val data = sqlContext.load("/home/rxin/ints.parquet")
data.groupBy("a").agg(col("a"), avg("num")).collect()
# gen_data.py: generate the test data and save it as Parquet
import random
from pyspark.sql import Row
data = sc.parallelize(xrange(1000)).flatMap(
    lambda x: [Row(a=random.randint(1, 10),
                   num=random.randint(1, 100),
                   str=("a" * random.randint(1, 30)))
               for i in xrange(10000)])
dataTable = sqlContext.createDataFrame(data)
dataTable.saveAsParquetFile("/home/rxin/ints.parquet")
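Each of the 1,000 seed elements expands into 10,000 rows, so the generated Parquet file holds 10 million rows, with keys a in 1..10 and a variable-length padding string per row.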
# rdd.py: the same average computed by hand on the RDD API
pdata = sqlContext.load("/home/rxin/ints.parquet").select("a", "num")
sum_count = (
    pdata.map(lambda x: (x.a, [x.num, 1]))
    .reduceByKey(lambda x, y: [x[0] + y[0], x[1] + y[1]])
    .collect())
[(x[0], float(x[1][0]) / x[1][1]) for x in sum_count]
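As an aside (not part of the original benchmark), the same reduction can be written with aggregateByKey, which folds each value into a partition-local (sum, count) tuple instead of allocating a two-element list per record. A sketch reusing pdata from above:

# Sketch: aggregateByKey variant of the hand-rolled average
sum_count2 = (
    pdata.map(lambda x: (x.a, x.num))
    .aggregateByKey((0, 0),
                    lambda acc, v: (acc[0] + v, acc[1] + 1),   # fold a value into (sum, count)
                    lambda a, b: (a[0] + b[0], a[1] + b[1]))   # merge two partial (sum, count)
    .collect())
[(k, float(s) / c) for (k, (s, c)) in sum_count2]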
// rdd.scala: the Scala RDD equivalent
val pdata = sqlContext.load("/home/rxin/ints.parquet").select("a", "num")
val sum_count = pdata.map { row => (row.getInt(0), (row.getInt(1), 1)) }
  .reduceByKey { (a, b) => (a._1 + b._1, a._2 + b._2) }
  .collect()
sum_count.foreach { case (a, (sum, count)) =>
  println(s"$a: ${sum.toDouble / count}")  // toDouble avoids integer division, matching the Python float average
}
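The gist does not include the timing code itself. A minimal sketch of how the two Python variants could be timed in the same shell session (single wall-clock run; the helper name timed is illustrative, not part of the original):

import time

def timed(label, f):
    # Run f once and report wall-clock time; a single run is crude,
    # but matches the spirit of this quick benchmark.
    start = time.time()
    f()
    print("%s: %.2f s" % (label, time.time() - start))

timed("DataFrame agg",
      lambda: data.groupBy("a").agg(col("a"), avg("num")).collect())
timed("RDD reduceByKey",
      lambda: pdata.map(lambda x: (x.a, [x.num, 1]))
                   .reduceByKey(lambda x, y: [x[0] + y[0], x[1] + y[1]])
                   .collect())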