hadoopsters / hiveToCsv_2.sh (created August 16, 2017)
Export Hive Table to CSV: Method 2
#!/bin/bash
# Dump the source table into a temporary, comma-delimited table backed by an HDFS directory,
# then merge the resulting part files into a single local CSV.
hive -e "drop table if exists csv_dump;
create table csv_dump ROW FORMAT DELIMITED
FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n'
LOCATION '/temp/storage/path' as
select * from my_data_table;"
hadoop fs -getmerge /temp/storage/path/ /local/path/my.csv
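If you'd rather stay in Spark for the whole export, here's a rough, unofficial sketch of the same idea using a HiveContext like the one set up later in this post. It assumes your columns don't contain commas or newlines that would need real CSV quoting, and the output directory name is just made up for the example.

import org.apache.spark.sql.hive.HiveContext

val hiveContext = new HiveContext(sc)   // sc: an existing SparkContext (see the setup further down)
hiveContext.table("my_data_table")      // same source table as the script above
  .rdd                                  // back to an RDD[Row]
  .map(_.mkString(","))                 // naive CSV: no quoting or escaping
  .coalesce(1)                          // one part file, similar to getmerge
  .saveAsTextFile("/temp/storage/path_csv")

You'd still hadoop fs -get the single part file out of that directory afterwards.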
import org.apache.spark.sql.SaveMode // SaveMode lives here

val mydataframe = ... // put some data in your dataframe, friend
mydataframe
  .write
  .option("orc.compress", "snappy")   // snappy-compressed ORC
  .mode(SaveMode.Append)              // append to whatever is already in the directory
  .orc("/this/is/an/hdfs/directory/")
val mydataframe = ... // put some data in your dataframe, friend
mydataframe
  .write
  .partitionBy("year", "month", "day", "hour")   // writes year=/month=/day=/hour= subdirectories
  .option("orc.compress", "snappy")
  .mode(SaveMode.Append)
  .orc("/this/is/another/hdfs/directory")
val mydstream = ... // these usually come from Spark Streaming apps
// they basically contain a chain of RDDs that you can convert to DFs
mydstream.foreachRDD(rdd => {
  // assumes the RDD holds case class instances, so the schema can be inferred
  hiveContext.createDataFrame(rdd)
    .write
    .option("orc.compress", "snappy")
    .mode(SaveMode.Append)
    .orc("/this/is/an/hdfs/directory/too")
})
// import these guys
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.hive.HiveContext

// this should look familiar
val conf = new SparkConf()
val sc = new SparkContext(conf)

// setup this fella
val hiveContext = new HiveContext(sc)
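Once the HiveContext exists, it can also run HiveQL directly, which is handy for poking at the ORC tables defined below. A quick sketch using the example names from this post:

hiveContext.sql("USE my_database")
val fromHive = hiveContext.sql("SELECT column_1, column_2, column_3 FROM my_table")
fromHive.show(10)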
CREATE TABLE my_database.my_table
(
column_1 string,
column_2 int,
column_3 double
)
STORED AS ORC
TBLPROPERTIES('ORC.COMPRESS'='SNAPPY'); -- ensure SNAPPY is uppercase; lowercase triggers a nasty bug in Hive (fixed in later versions)
CREATE TABLE my_database.my_table
(
column_1 string,
column_2 int,
column_3 double
)
PARTITIONED BY
(
year int,
month smallint,
day smallint
)
STORED AS ORC
TBLPROPERTIES('ORC.COMPRESS'='SNAPPY');
ANALYZE TABLE my_database.my_table COMPUTE STATISTICS;
ANALYZE TABLE my_database.my_table PARTITION (year=2017, month=11, day=30) COMPUTE STATISTICS;