Skip to content

Instantly share code, notes, and snippets.

@gbrayut
Last active August 26, 2015 17:41
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save gbrayut/3af4bcd0458d2aa57dd2 to your computer and use it in GitHub Desktop.
HBase setup scripts using Cloudera CDH 5.4.4 on RHEL 6.x
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  hbase-site.xml for a single-host, pseudo-distributed HBase (CDH 5.4.4).
  Fix: the hbase.master.port property was missing its closing </property>
  tag, making the document non-well-formed - Hadoop's Configuration loader
  would fail to parse the file at startup.
-->
<configuration>
  <!-- Run HBase in distributed mode (separate master/regionserver daemons) -->
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <!-- HBase data lives in the local pseudo-distributed HDFS -->
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://localhost/hbase</value>
  </property>
  <!-- Single local ZooKeeper instance.
       NOTE(review): quorum entries are conventionally bare hostnames with
       the client port set separately via
       hbase.zookeeper.property.clientPort - confirm this host:port form is
       accepted by this HBase version. -->
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>0.0.0.0:2181</value>
  </property>
  <!-- Classic pre-HBase-1.0 port assignments -->
  <property>
    <name>hbase.master.port</name>
    <value>60000</value>
  </property>
  <property>
    <name>hbase.master.info.port</name>
    <value>60010</value>
  </property>
  <property>
    <name>hbase.regionserver.port</name>
    <value>60020</value>
  </property>
  <property>
    <name>hbase.regionserver.info.port</name>
    <value>60030</value>
  </property>
</configuration>
#Author: Young Lee (@youngl)
#Transcript: https://bosun.slack.com/archives/general/p1440504748000172
# Purpose: provision a single-node (pseudo-distributed) HBase stack on
# RHEL 6.x using Cloudera CDH 5.4.4 packages. Run as root; assumes yum
# and network access to archive.cloudera.com.
curl http://archive.cloudera.com/cdh5/redhat/6/x86_64/cdh/cloudera-cdh5.repo -o /etc/yum.repos.d/cloudera-cdh5.repo
# Pin the repo to the exact CDH 5.4.4 release instead of the rolling "5" track
sed -i "s|cdh/5/|cdh/5.4.4/|" /etc/yum.repos.d/cloudera-cdh5.repo
yum -y install hadoop-conf-pseudo hadoop-lzo hbase hbase-master hbase-regionserver zookeeper zookeeper-server
#=======================
# Setup zookeeper
#=======================
mkdir -p /var/lib/zookeeper
chown -R zookeeper /var/lib/zookeeper/
# Initialize the ZooKeeper data directory before first start
service zookeeper-server init
# -----------------------------
# Remove limit
# -----------------------------
# ref : https://github.com/ambling/hadoop-docker/commit/cd549b12fc939e12f8afe67cd9050f298e98a4b8
# Drop the packaged ulimit overrides shipped by the CDH packages
rm -f /etc/security/limits.d/hdfs.conf /etc/security/limits.d/mapreduce.conf /etc/security/limits.d/yarn.conf
rm -f /etc/security/limits.d/hbase.conf
# -----------------------------
# Configure HDFS pseudo-distributed
# -----------------------------
# step1: format the namenode (destroys any existing HDFS metadata)
su hdfs -c "hdfs namenode -format"
# step2: start HDFS (stop first so re-running the script stays clean)
for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do service $x stop ; done
for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do service $x start ; done
# step3: Create /tmp, Staging and Log Dir (world-writable /tmp with sticky bit)
su hdfs -c "hadoop fs -mkdir -p /tmp/"
su hdfs -c "hadoop fs -chmod -R 1777 /tmp "
su hdfs -c "hadoop fs -mkdir -p /var/log/"
# step4: Verify the HDFS directory layout created above
su hdfs -c "hadoop fs -ls -R /"
# -----------------------------
# Setup HBase pseudo-distributed
# -----------------------------
# stop services before creating the HBase root directory
service hbase-master stop
service hbase-regionserver stop
# create /hbase dir in HDFS, owned by the hbase user (matches hbase.rootdir)
su hdfs -c "hadoop fs -mkdir /hbase"
su hdfs -c "hadoop fs -chown hbase /hbase"
# -----------------------------
# Cleanup
# -----------------------------
# Stop everything; daemons can be started later once configs are in place
for x in `cd /etc/init.d ; ls hadoop-hdfs-*` ; do service $x stop ; done
service hbase-master stop
service hbase-regionserver stop
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
  core-site.xml for pseudo-distributed Hadoop (CDH 5.4.4 / Hadoop 2.x).
  Fixes:
  - fs.default.name is deprecated since Hadoop 0.23; fs.defaultFS is the
    supported key (the old name still works via the deprecation map, but
    logs warnings on every daemon start).
  - io.compression.codecs is a comma-separated list of class names; the
    original value embedded newlines and leading indentation inside the
    list, which risks whitespace-polluted class names. Kept on one line.
-->
<configuration>
  <!-- Default filesystem: the local pseudo-distributed HDFS namenode -->
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost/</value>
  </property>
  <!-- Single datanode, so keep only one replica per block -->
  <property>
    <name>dfs.replication</name>
    <value>1</value>
  </property>
  <!-- Codec classes available to Hadoop/HBase; LZO comes from hadoop-lzo -->
  <property>
    <name>io.compression.codecs</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
  </property>
</configuration>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment