Skip to content

Instantly share code, notes, and snippets.

@mvaz
Forked from rxin/df.py
Last active August 29, 2015 14:16
Show Gist options
  • Save mvaz/8a7ecb539a03ae5d4c42 to your computer and use it in GitHub Desktop.
# Load the ints Parquet file and compute the average of `num` for each
# distinct value of `a` using the DataFrame API (Spark 1.3-era SQLContext).
# NOTE(review): assumes `sqlContext` plus `col` and `avg` (from
# pyspark.sql.functions) are already in scope in the driver — confirm.
data = sqlContext.load("/home/rxin/ints.parquet")
# collect() pulls the per-key averages back to the driver as a list of Rows.
data.groupBy("a").agg(col("a"), avg("num")).collect()
// Average of `num` per distinct value of `a` via the Scala DataFrame API
// (Spark 1.3-era SQLContext).
// NOTE(review): assumes `sqlContext` plus `col` and `avg` (from
// org.apache.spark.sql.functions) are already imported — confirm in driver.
val data = sqlContext.load("/home/rxin/ints.parquet")
// collect() materializes the grouped averages on the driver.
data.groupBy("a").agg(col("a"), avg("num")).collect()
import random
from pyspark.sql import Row


# Build 1000 * 10000 random Rows, persist them as Parquet, then compute
# the per-key average of `num` by hand over the underlying RDD.
# NOTE(review): relies on `sc` (SparkContext) and `sqlContext` already
# being in scope, and on Python 2's xrange — confirm in the driver.

def _random_rows(_seed_elem):
    # 10000 rows per input element: a in 1..10, num in 1..100,
    # str is a run of 1..30 'a' characters.
    return [Row(a=random.randint(1, 10),
                num=random.randint(1, 100),
                str="a" * random.randint(1, 30))
            for i in xrange(10000)]


data = sc.parallelize(xrange(1000)).flatMap(_random_rows)
dataTable = sqlContext.createDataFrame(data)
dataTable.saveAsParquetFile("/home/rxin/ints.parquet")

# Re-read only the two columns needed for the aggregation.
pdata = sqlContext.load("/home/rxin/ints.parquet").select("a", "num")


def _to_pair(row):
    # Map each row to (key, [running_sum, running_count]).
    return (row.a, [row.num, 1])


def _merge(left, right):
    # Combine partial [sum, count] accumulators for the same key.
    return [left[0] + right[0], left[1] + right[1]]


sum_count = pdata.map(_to_pair).reduceByKey(_merge).collect()

# Turn each (key, [sum, count]) pair into (key, mean) with float division.
[(key, float(total) / count) for key, (total, count) in sum_count]
// Scala version of the hand-rolled RDD average: load the Parquet data,
// sum values and counts per key, then print each key's mean.
// NOTE(review): assumes `sqlContext` is in scope and that columns 0 and 1
// of the projected rows are Int — confirm against the writer above.
val pdata = sqlContext.load("/home/rxin/ints.parquet").select("a", "num")
val sum_count = pdata.map { row => (row.getInt(0), (row.getInt(1), 1)) }
  .reduceByKey { (a, b) =>
    (a._1 + b._1, a._2 + b._2)
  }.collect()
// BUG FIX: `sum` and `count` are both Int, so `sum / count` silently
// truncated to an integer average. Widen to Double so the printed mean
// matches the Python version's float(sum) / count.
sum_count.foreach { case (a, (sum, count)) => println(s"$a: ${sum.toDouble / count}") }
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment