accumulo config files
#! /usr/bin/env bash
ACCUMULO_CONF_DIR=/opt/accumulo/accumulo-1.5.0/conf
ACCUMULO_HOME=/opt/accumulo/accumulo-1.5.0
JAVA_HOME=/usr/java/default
ACCUMULO_LOG_DIR="/var/log/accumulo/logs"
ZOOKEEPER_HOME=/usr/lib/zookeeper
# Duplicated in lynx-ansible/group_vars/all, but probably fixed there.
HADOOP_PREFIX=/usr/lib/hadoop
HADOOP_CONF_DIR=/etc/hadoop/conf
# ACCUMULO_TSERVER_OPTS is set in accumulo-env.sh below.
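# A quick sanity check (a sketch, not part of the deployment): confirm each
# path exported above actually exists on this host before starting Accumulo.
for dir in "$ACCUMULO_HOME" "$ACCUMULO_CONF_DIR" "$JAVA_HOME" \
           "$ZOOKEEPER_HOME" "$HADOOP_PREFIX" "$HADOOP_CONF_DIR"; do
  [ -e "$dir" ] || echo "missing: $dir" >&2
done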
# examples/3GB/native-standalone/accumulo-env.sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
### Configure these environment variables to point to your local installations.
###
### The functional tests require conditional values, so keep this style:
###
### test -z "$JAVA_HOME" && export JAVA_HOME=/usr/local/lib/jdk-1.6.0
###
###
### Note that the -Xmx -Xms settings below require substantial free memory:
### you may want to use smaller values, especially when running everything
### on a single machine.
###
if [ -z "$HADOOP_HOME" ]
then
test -z "/usr/lib/hadoop" && export HADOOP_PREFIX=/usr/lib/hadoop
else
HADOOP_PREFIX="$HADOOP_HOME"
unset HADOOP_HOME
fi
test -z "/etc/hadoop/conf" && export HADOOP_CONF_DIR="/usr/lib/hadoop/conf"
# hadoop-2.0:
# test -z "/etc/hadoop/conf" && export HADOOP_CONF_DIR="/usr/lib/hadoop/etc/hadoop"
test -z "/usr/java/default" && export JAVA_HOME=/usr/lib/jvm
test -z "/usr/lib/zookeeper" && export ZOOKEEPER_HOME=/usr/lib/zookeeper
test -z "/var/log/accumulo/logs" && export ACCUMULO_LOG_DIR=/opt/accumulo/accumulo-1.5.0/logs
if [ -f /opt/accumulo/accumulo-1.5.0/conf/accumulo.policy ]
then
POLICY="-Djava.security.manager -Djava.security.policy=/opt/accumulo/accumulo-1.5.0/conf/accumulo.policy"
fi
test -z "$ACCUMULO_TSERVER_OPTS" && export ACCUMULO_TSERVER_OPTS="${POLICY} -Xmx1g -Xms1g -XX:NewSize=500m -XX:MaxNewSize=500m "
test -z "$ACCUMULO_MASTER_OPTS" && export ACCUMULO_MASTER_OPTS="${POLICY} -Xmx1g -Xms1g"
test -z "$ACCUMULO_MONITOR_OPTS" && export ACCUMULO_MONITOR_OPTS="${POLICY} -Xmx1g -Xms256m"
test -z "$ACCUMULO_GC_OPTS" && export ACCUMULO_GC_OPTS="-Xmx256m -Xms256m"
test -z "$ACCUMULO_GENERAL_OPTS" && export ACCUMULO_GENERAL_OPTS="-XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75"
test -z "$ACCUMULO_OTHER_OPTS" && export ACCUMULO_OTHER_OPTS="-Xmx1g -Xms256m"
export ACCUMULO_LOG_HOST=`(grep -v '^#' /opt/accumulo/accumulo-1.5.0/conf/monitor ; echo localhost ) 2>/dev/null | head -1`
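# A quick way to confirm this environment resolves before starting services;
# a sketch, assuming bin/accumulo picks up this accumulo-env.sh from $ACCUMULO_CONF_DIR:
cd /opt/accumulo/accumulo-1.5.0
bin/accumulo version   # should print 1.5.0 with no JAVA_HOME or Hadoop errors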
# Cranked the memory settings (tserver.memory.maps.max, tserver.cache.data.size,
# tserver.cache.index.size) way up to 8G, 1G, and 1G for this deployment.
# Typically we use the 2GB accumulo-site.xml unmodified except for the zookeepers.
# accumulo-site.xml:
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Put your site-specific accumulo configurations here. The available configuration
values along with their defaults are documented in docs/config.html. Unless you are
simply testing at your workstation, you will most definitely need to change the three
entries below. -->
<property>
<name>instance.zookeeper.host</name>
<value>10.x.y.66:2181,10.x.y.67:2181,10.x.y.68:2181</value>
<description>comma separated list of zookeeper servers</description>
</property>
<property>
<name>logger.dir.walog</name>
<value>/var/accumulo/walogs</value>
<description>The directory used to store write-ahead logs on the local filesystem. It is possible to specify a comma-separated list of directories.
</description>
</property>
<property>
<name>tserver.memory.maps.max</name>
<value>8G</value>
</property>
<property>
<name>tserver.cache.data.size</name>
<value>1G</value>
</property>
<property>
<name>tserver.cache.index.size</name>
<value>1G</value>
</property>
<property>
<name>instance.secret</name>
<value>DEFAULT</value>
<description>A secret unique to a given instance that all servers must know in order
to communicate with one another. Change it before initialization. To change it later,
use ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret --old [oldpasswd] --new [newpasswd],
and then update this file.
</description>
</property>
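<!-- A sketch of rotating the secret after initialization, following the
     description above (the [oldpasswd]/[newpasswd] placeholders stay placeholders):
       cd /opt/accumulo/accumulo-1.5.0
       ./bin/accumulo org.apache.accumulo.server.util.ChangeSecret --old [oldpasswd] --new [newpasswd]
     then update the value in this file to match. -->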
<property>
<name>trace.token.property.password</name>
<!-- change this to the root user's password, and/or change the user below -->
<value>****secret-password*****</value>
</property>
<property>
<name>trace.user</name>
<value>root</value>
</property>
<property>
<name>general.classpaths</name>
<!--
Add the following for hadoop-2.0
/usr/lib/hadoop/share/hadoop/common/.*.jar,
/usr/lib/hadoop/share/hadoop/common/lib/.*.jar,
/usr/lib/hadoop/share/hadoop/hdfs/.*.jar,
/usr/lib/hadoop/share/hadoop/mapreduce/.*.jar,
/usr/lib/hadoop/share/hadoop/yarn/.*.jar,
-->
<value>
/usr/lib/hadoop/lib/.*.jar,
/usr/lib/hadoop/lib/hadoop-hdfs/.*.jar,
/usr/lib/hadoop/lib/hadoop-yarn/.*.jar,
/usr/lib/hadoop/lib/hadoop-mapreduce/.*.jar,
/usr/lib/hadoop/lib/hadoop/bin/.*.jar,
/usr/lib/hadoop/lib/hadoop/client/.*.jar,
/opt/accumulo/accumulo-1.5.0/server/target/classes/,
/opt/accumulo/accumulo-1.5.0/lib/accumulo-server.jar,
/opt/accumulo/accumulo-1.5.0/core/target/classes/,
/opt/accumulo/accumulo-1.5.0/lib/accumulo-core.jar,
/opt/accumulo/accumulo-1.5.0/start/target/classes/,
/opt/accumulo/accumulo-1.5.0/lib/accumulo-start.jar,
/opt/accumulo/accumulo-1.5.0/fate/target/classes/,
/opt/accumulo/accumulo-1.5.0/lib/accumulo-fate.jar,
/opt/accumulo/accumulo-1.5.0/proxy/target/classes/,
/opt/accumulo/accumulo-1.5.0/lib/accumulo-proxy.jar,
/opt/accumulo/accumulo-1.5.0/lib/[^.].*.jar,
/usr/lib/zookeeper/zookeeper[^.].*.jar,
/etc/hadoop/conf,
/usr/lib/hadoop/[^.].*.jar,
/usr/lib/hadoop/lib/[^.].*.jar,
</value>
<description>Classpaths that accumulo checks for updates and class files.
When using the Security Manager, please remove the ".../target/classes/" values.
</description>
</property>
</configuration>
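# After editing general.classpaths above, a quick way to see what Accumulo will
# actually load; a sketch using "bin/accumulo classpath", which prints the
# resolved classpath entries:
cd /opt/accumulo/accumulo-1.5.0
bin/accumulo classpath | less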
<!-- hdfs-site.xml (generated Thu Feb 6 15:27:11 2014) -->
<configuration>
<property>
<name>dfs.block.access.token.enable</name>
<value>true</value>
</property>
<property>
<name>dfs.umaskmode</name>
<value>077</value>
</property>
<property>
<name>dfs.safemode.threshold.pct</name>
<value>1.0f</value>
</property>
<property>
<name>dfs.cluster.administrators</name>
<value> hdfs</value>
</property>
<property>
<name>dfs.durable.sync</name>
<value>true</value>
</property>
<property>
<name>dfs.replication.max</name>
<value>50</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.block.size</name>
<value>134217728</value>
</property>
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>750</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:50010</value>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>100</value>
</property>
<property>
<name>dfs.namenode.avoid.read.stale.datanode</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.write.stale.datanode.ratio</name>
<value>1.0f</value>
</property>
<property>
<name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
<value>false</value>
</property>
<property>
<name>dfs.permissions</name>
<value>true</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:50075</value>
</property>
<property>
<name>dfs.synconclose</name>
<value>true</value>
</property>
<property>
<name>dfs.datanode.max.xcievers</name>
<value>4096</value>
</property>
<property>
<name>dfs.namenode.avoid.write.stale.datanode</name>
<value>true</value>
</property>
<property>
<name>dfs.https.address</name>
<value>node-hdfs01.tx1.21ct.com:50470</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.web.ugi</name>
<value>gopher,gopher</value>
</property>
<property>
<name>dfs.permissions.supergroup</name>
<value>hdfs</value>
</property>
<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.stale.datanode.interval</name>
<value>30000</value>
</property>
<property>
<name>dfs.https.port</name>
<value>50070</value>
</property>
<property>
<name>dfs.access.time.precision</name>
<value>0</value>
</property>
<property>
<name>dfs.balance.bandwidthPerSec</name>
<value>6250000</value>
</property>
<property>
<name>dfs.datanode.du.pct</name>
<value>0.85f</value>
</property>
<property>
<name>ipc.server.read.threadpool.size</name>
<value>5</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:8010</value>
</property>
<property>
<name>dfs.secondary.https.port</name>
<value>50490</value>
</property>
<property>
<name>dfs.block.local-path-access.user</name>
<value>hbase</value>
</property>
<property>
<name>dfs.secondary.http.address</name>
<value>node-hdfs03.tx1.21ct.com:50090</value>
</property>
<property>
<name>dfs.data.dir</name>
<value>/data/1/dfs/dn,/data/2/dfs/dn,/data/3/dfs/dn,/data/4/dfs/dn,/data/5/dfs/dn,/data/6/dfs/dn,/data/7/dfs/dn,/data/8/dfs/dn</value>
</property>
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
<property>
<name>dfs.name.dir</name>
<value>/hadoop/hdfs/namenode</value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/etc/hadoop/conf/dfs.exclude</value>
</property>
<property>
<name>dfs.datanode.socket.write.timeout</name>
<value>0</value>
</property>
<property>
<name>dfs.blockreport.initialDelay</name>
<value>120</value>
</property>
<property>
<name>dfs.http.address</name>
<value>node-hdfs01.tx1.21ct.com:50070</value>
</property>
<property>
<name>ipc.server.max.response.size</name>
<value>5242880</value>
</property>
<property>
<name>dfs.datanode.du.reserved</name>
<value>180000</value>
</property>
<property>
<name>dfs.hosts</name>
<value>/etc/hadoop/conf/dfs.include</value>
</property>
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>0</value>
</property>
</configuration>
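# Accumulo's write-ahead log depends on the durable-sync settings above
# (dfs.durable.sync, dfs.support.append, dfs.synconclose). A quick grep to
# confirm the deployed file matches; the path assumes this cluster's layout:
grep -A1 -E 'dfs\.(durable\.sync|support\.append|synconclose)' /etc/hadoop/conf/hdfs-site.xml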
# zoo.cfg (ZooKeeper configuration)
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# The number of milliseconds of each tick
tickTime=2000
# The number of ticks that the initial
# synchronization phase can take
initLimit=10
# The number of ticks that can pass between
# sending a request and getting an acknowledgement
syncLimit=5
# the directory where the snapshot is stored.
dataDir=/hadoop/zookeeper
# the port at which the clients will connect
clientPort=2181
server.1=node-hdfs01.tx1.21ct.com:2888:3888
server.2=node-hdfs02.tx1.21ct.com:2888:3888
server.3=node-hdfs03.tx1.21ct.com:2888:3888
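# Each server.N above must also have a matching myid file in dataDir;
# run once per host with the N that matches its server.N line:
echo 1 > /hadoop/zookeeper/myid   # on node-hdfs01
echo 2 > /hadoop/zookeeper/myid   # on node-hdfs02
echo 3 > /hadoop/zookeeper/myid   # on node-hdfs03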