
-- AWS Glue interactive sessions magics reference:
-- https://docs.aws.amazon.com/glue/latest/dg/interactive-sessions-magics.html
dondrake / RedshiftQueries.sql
Redshift Queries
-- find # rows in each table
select "table", tbl_rows
from svv_table_info
where SCHEMA = 'sh'
order by 1;
-- view history of queries, most recent first
select query, datediff(seconds, starttime, endtime) as duration_seconds, trim(querytxt) as sqlquery
from stl_query
order by starttime desc;
case class C1(f1: String, f2: String, f3: String, f4: String)
case class C2(g1: String, g2: String, g3: String, g4: String)
case class C3(h1: String, h2: String, h3: String, h4: String)

val sqlContext = spark.sqlContext
import sqlContext.implicits._

val c1 = sc.parallelize(Seq(
  C1("h1", "c1a1", "c1b1", "c1c1"),
  C1("h2", "c1a2", "c1b2", "c1c2"),
  C1(null, "c1a3", "c1b3", "c1c3")
))  // Seq truncated in the source; closed here so the snippet compiles
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Encoder,Row}
import org.apache.spark.sql.functions._
import java.util.Calendar
import sqlContext.implicits._
case class C1(f1: String, f2: String, f3: String, f4: java.sql.Date, f5: Double)
case class C2(f2: String, seqC1: Seq[C1])
case class C3(f2: String, seqOfSeqC1: Seq[Seq[C1]])
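The gist is truncated here; as a minimal sketch of what these imports and types support (the names collectC1 and ds are illustrative, not from the source), a typed Aggregator can gather C1 rows into the Seq[C1] that C2 carries:

// Hypothetical Aggregator: folds every C1 of a group into one Seq[C1].
val collectC1 = new Aggregator[C1, Seq[C1], Seq[C1]] {
  def zero: Seq[C1] = Seq.empty
  def reduce(buf: Seq[C1], a: C1): Seq[C1] = buf :+ a
  def merge(b1: Seq[C1], b2: Seq[C1]): Seq[C1] = b1 ++ b2
  def finish(buf: Seq[C1]): Seq[C1] = buf
  def bufferEncoder: Encoder[Seq[C1]] = ExpressionEncoder[Seq[C1]]()
  def outputEncoder: Encoder[Seq[C1]] = ExpressionEncoder[Seq[C1]]()
}.toColumn

// Applied to a Dataset[C1] named ds, grouping by f2 and wrapping each group as a C2:
// val c2s = ds.groupByKey(_.f2).agg(collectC1).map { case (k, rows) => C2(k, rows) }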
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StructType,StructField,StringType,IntegerType}
import sqlContext.implicits._
// The original line enumerates the integer literals 0, 1, 2, ... and is truncated
// mid-line in the source; an equivalent generated wide Row:
val r1 = Row(0 until 195: _*)
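The snippet ends there; a hedged sketch of what the schema imports above point toward (the column names c0..c194 are illustrative, not from the source):

// Build a 195-column IntegerType schema matching r1 and make a one-row DataFrame.
val schema = StructType((0 until 195).map(i => StructField(s"c$i", IntegerType, nullable = false)))
val wideDF = sqlContext.createDataFrame(sc.parallelize(Seq(r1)), schema)
wideDF.printSchema()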
val teams = sc.parallelize(Seq(
  C1("hash1", "NLC", "Cubs", java.sql.Date.valueOf("2016-01-23"), 3253.21)
))  // Seq truncated in the source; closed here so the snippet compiles
# Copyright 2015 Don Drake
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.