@smartkiwi
Created April 23, 2015 19:01
# Configure the necessary Spark environment
import os
os.environ['SPARK_HOME'] = '/root/spark/'

# And Python path
import sys
sys.path.insert(0, '/root/spark/python')

# Detect the PySpark URL
CLUSTER_URL = open('/root/spark-ec2/cluster-url').read().strip()
print CLUSTER_URL

# <codecell>

from pyspark import SparkContext
sc = SparkContext(CLUSTER_URL, 'pyspark')
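
A quick sanity check once the context is up can confirm the executors accept work before loading real data (this snippet is illustrative, not part of the original gist):

# Distribute a small list and pull a result back -- a cheap way to
# verify the cluster accepts work before loading real data.
nums = sc.parallelize(range(100))
print nums.sum()  # expect 4950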
@smartkiwi (Author) commented:
import os
import sys

# Set the path for spark installation
# this is the path where you have built spark using sbt/sbt assembly
spark_home = "/Users/vvlad/spark/spark-1.2.0-bin-hadoop2.4"
os.environ['SPARK_HOME'] = spark_home

# Append to PYTHONPATH so that pyspark can be found
sys.path.append(spark_home+"/python")
sys.path.append(spark_home+"/python/lib/py4j-0.8.2.1-src.zip")

# Now we are ready to import Spark Modules
try:
    from pyspark import SparkContext
    from pyspark import SparkConf
except ImportError as e:
    print ("Error importing Spark modules", e)
    sys.exit(1)
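
With the imports in place, a local context can be built along these lines (the master and app name below are placeholders, not from the original comment):

# Minimal local SparkContext; 'local[*]' uses all available cores.
conf = SparkConf().setMaster("local[*]").setAppName("pyspark-notebook-test")
sc = SparkContext(conf=conf)
print sc.parallelize([1, 2, 3]).count()  # expect 3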

@smartkiwi (Author) commented:
Based on http://blog.cloudera.com/blog/2014/08/how-to-use-ipython-notebook-with-apache-spark/

Install Spark:
download the latest release
decompress it
install Scala (brew install scala)
compile Spark: sbt/sbt assembly

Setting up the Spark cluster:
1) generate and download an EC2 key pair
2) start the cluster:
./spark-ec2 -k vvlad-ec2 -i ~/.ssh/vvlad-ec2.pem -s 6 launch vvlad-spark-cluster --instance-type=r3.large --zone

# would block loadtest
## ./spark-ec2 -k vvlad-ec2-west1 -i ~/.ssh/vvlad-ec2-west1.pem -s 5 launch vvlad-spark-cluster --instance-type=r3.xlarge --region=us-west-1 -z us-west-1a

##
#./spark-ec2 -k vvlad-ec2-west2 -i ~/.ssh/vvlad-ec2-west2.pem -s 8 launch vvlad-spark-cluster --instance-type=r3.xlarge --region=us-west-2 -z us-west-2a
# ./spark-ec2 -k vvlad-ec2-west2 -i ~/.ssh/vvlad-ec2-west2.pem login vvlad-spark-cluster --region=us-west-2 -z us-west-2a

# ./spark-ec2 -k vvlad-ec2 -i ~/.ssh/vvlad-ec2.pem -s 6 launch vvlad-spark-cluster --instance-type=r3.large

3) open the incoming notebook port (18888, per the config below) in the cluster's security group

start / stop cluster:
./spark-ec2 -k vvlad-ec2 -i ~/.ssh/vvlad-ec2.pem start vvlad-spark-cluster
./spark-ec2 -k vvlad-ec2 -i ~/.ssh/vvlad-ec2.pem stop vvlad-spark-cluster

4) login to cluster
./spark-ec2 -k vvlad-ec2 -i ~/.ssh/vvlad-ec2.pem login vvlad-spark-cluster

5) export AWS key and secret
export AWS_ACCESS_KEY_ID=AKIA...
export AWS_SECRET_ACCESS_KEY=...

6) configure the IPython startup options and start the Python notebook

ipython profile create pyspark

vi ~/.ipython/profile_pyspark/ipython_notebook_config.py
c.NotebookApp.ip = '*'
c.NotebookApp.port = 18888
c.NotebookApp.open_browser = False


vi ~/.ipython/profile_pyspark/startup/00-pyspark-setup.py
import os
import sys

spark_home = os.environ.get('SPARK_HOME', None)
if not spark_home:
    raise ValueError('SPARK_HOME environment variable is not set')
sys.path.insert(0, os.path.join(spark_home, 'python'))
sys.path.insert(0, os.path.join(spark_home, 'python/lib/py4j-0.8.2.1-src.zip'))
execfile(os.path.join(spark_home, 'python/pyspark/shell.py'))
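
Note that execfile() only exists in Python 2; if the notebook kernel is Python 3, a rough equivalent of that last line (an assumption, not from the original write-up) would be:

# Python 3 replacement for execfile()
shell_path = os.path.join(spark_home, 'python/pyspark/shell.py')
with open(shell_path) as f:
    exec(compile(f.read(), shell_path, 'exec'))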



---
export SPARK_HOME=/root/spark

# Either launch pyspark with the notebook as the driver:
#   IPYTHON_OPTS="notebook --pylab inline --port=18888 --ip='*'" ./bin/pyspark
# or start the notebook using the pyspark profile created above:
ipython notebook --profile=pyspark

7) load notebook configuration
8) start SparkContext

# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <codecell>

# Configure the necessary Spark environment
import os
os.environ['SPARK_HOME'] = '/root/spark/'

# And Python path
import sys
sys.path.insert(0, '/root/spark/python')

# Detect the PySpark URL
CLUSTER_URL = open('/root/spark-ec2/cluster-url').read().strip()
print CLUSTER_URL

# <codecell>

from pyspark import SparkContext
sc = SparkContext(CLUSTER_URL, 'pyspark')

# <codecell>

bidlog = sc.textFile("s3n://com.domdex.rtb/2014/07/11/06/1405058*_bid*.smq.mgnt.cc_bids.log.gz")
print bidlog.first()
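
If the executors don't already see the AWS keys exported in step 5, they can also be handed to Hadoop's s3n filesystem from the driver before reading; the config keys below are the standard s3n ones, the rest is an illustrative sketch rather than part of the original notebook:

import os

# Pass the credentials exported in step 5 to the s3n filesystem.
hadoop_conf = sc._jsc.hadoopConfiguration()
hadoop_conf.set("fs.s3n.awsAccessKeyId", os.environ["AWS_ACCESS_KEY_ID"])
hadoop_conf.set("fs.s3n.awsSecretAccessKey", os.environ["AWS_SECRET_ACCESS_KEY"])

# A cheap follow-up on the RDD above: count the lines without collecting them.
print bidlog.count()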

@srykanth commented:
Hi, I have been trying to implement PySpark (actually I took a Python program and customized it to fit the Spark realm, using HDFS to read the input data). The program runs fine locally, but somehow it's not getting distributed across the cluster. Any clue is appreciated.
