Skip to content

Instantly share code, notes, and snippets.

@hectorj2f
Last active April 24, 2018 11:58
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save hectorj2f/6121aa9afac4f97747b6f62e429d66e0 to your computer and use it in GitHub Desktop.
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- hdfs-site.xml for an HA HDFS cluster on DC/OS (services resolved via
     *.hdfs.autoip.dcos.thisdcos.directory). -->
<property>
<name>hadoop.hdfs.configuration.version</name>
<value>1</value>
</property>
<!-- Logical nameservice ID used as the suffix of per-nameservice keys below -->
<property>
<name>dfs.nameservice.id</name>
<value>hdfs</value>
</property>
<property>
<name>dfs.nameservices</name>
<value>hdfs</value>
</property>
<!-- HA NameNode IDs for nameservice "hdfs".
     Fixed: the value previously listed journal-0-node,journal-1-node (JournalNode
     task names), but the per-NameNode rpc/http addresses in this file are declared
     for the IDs name-0-node and name-1-node
     (dfs.namenode.rpc-address.hdfs.name-0-node, ...). The IDs listed here must
     match those key suffixes, otherwise clients and ZKFC cannot resolve either
     NameNode of the HA pair. -->
<property>
<name>dfs.ha.namenodes.hdfs</name>
<value>name-0-node,name-1-node</value>
</property>
<!-- OS users granted HDFS administrator privileges (comma-separated) -->
<property>
<name>dfs.cluster.administrators</name>
<value>core,centos,azureuser</value>
</property>
<!-- namenode: core settings -->
<property>
<name>dfs.namenode.logging.level</name>
<value>info</value>
</property>
<!-- Quorum Journal Manager URI shared by the HA NameNodes for the edit log;
     three JournalNodes, journal id "hdfs" -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://journal-0-node.hdfs.autoip.dcos.thisdcos.directory:8485;journal-1-node.hdfs.autoip.dcos.thisdcos.directory:8485;journal-2-node.hdfs.autoip.dcos.thisdcos.directory:8485/hdfs</value>
</property>
<!-- Local fsimage storage; "sandboxpath" is presumably substituted by the
     DC/OS scheduler with the task sandbox path — TODO confirm -->
<property>
<name>dfs.namenode.name.dir</name>
<value>sandboxpath/name-data</value>
</property>
<property>
<name>dfs.namenode.safemode.threshold-pct</name>
<value>0.999f</value>
</property>
<!-- Re-check interval (ms) for computing stale/dead DataNodes -->
<property>
<name>dfs.namenode.heartbeat.recheck-interval</name>
<value>60000</value>
</property>
<property>
<name>dfs.namenode.handler.count</name>
<value>10</value>
</property>
<property>
<name>dfs.namenode.invalidate.work.pct.per.iteration</name>
<value>0.32f</value>
</property>
<property>
<name>dfs.namenode.replication.min</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.replication.work.multiplier.per.iteration</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.name.dir.restore</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.replication.considerLoad</name>
<value>true</value>
</property>
<!-- Filesystem limits -->
<property>
<name>dfs.namenode.fs-limits.max-component-length</name>
<value>255</value>
</property>
<property>
<name>dfs.namenode.fs-limits.max-directory-items</name>
<value>1048576</value>
</property>
<property>
<name>dfs.namenode.fs-limits.min-block-size</name>
<value>1048576</value>
</property>
<property>
<name>dfs.namenode.fs-limits.max-blocks-per-file</name>
<value>1048576</value>
</property>
<!-- Edits stored alongside the fsimage (same sandbox directory) -->
<property>
<name>dfs.namenode.edits.dir</name>
<value>sandboxpath/name-data</value>
</property>
<property>
<name>dfs.namenode.acls.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.lazypersist.file.scrub.interval.sec</name>
<value>300</value>
</property>
<!-- Safemode exit conditions -->
<property>
<name>dfs.namenode.safemode.min.datanodes</name>
<value>0</value>
</property>
<property>
<name>dfs.namenode.safemode.extension</name>
<value>30000</value>
</property>
<!-- NameNode local-disk resource checker -->
<property>
<name>dfs.namenode.resource.check.interval</name>
<value>5000</value>
</property>
<property>
<name>dfs.namenode.resource.du.reserved</name>
<value>104857600</value>
</property>
<property>
<name>dfs.namenode.resource.checked.volumes</name>
<value></value>
</property>
<property>
<name>dfs.namenode.resource.checked.volumes.minimum</name>
<value>1</value>
</property>
<property>
<name>dfs.namenode.max.objects</name>
<value>0</value>
</property>
<property>
<name>dfs.namenode.decommission.interval</name>
<value>30</value>
</property>
<property>
<name>dfs.namenode.decommission.blocks.per.interval</name>
<value>500000</value>
</property>
<property>
<name>dfs.namenode.replication.interval</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.accesstime.precision</name>
<value>3600000</value>
</property>
<property>
<name>dfs.namenode.plugins</name>
<value></value>
</property>
<!-- NOTE(review): "file://name-data/namesecondary" makes "name-data" the URI
     authority, not part of the path; a well-formed local URI would be
     file:///<abs-path>/name-data/namesecondary — confirm against the scheduler
     that renders this template -->
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>file://name-data/namesecondary</value>
</property>
<property>
<name>dfs.namenode.checkpoint.edits.dir</name>
<value>file://name-data/namesecondary</value>
</property>
<!-- Checkpointing cadence and retention -->
<property>
<name>dfs.namenode.checkpoint.period</name>
<value>3600</value>
</property>
<property>
<name>dfs.namenode.checkpoint.txns</name>
<value>1000000</value>
</property>
<property>
<name>dfs.namenode.checkpoint.check.period</name>
<value>60</value>
</property>
<property>
<name>dfs.namenode.checkpoint.max-retries</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.num.checkpoints.retained</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.num.extra.edits.retained</name>
<value>1000000</value>
</property>
<property>
<name>dfs.namenode.max.extra.edits.segments.retained</name>
<value>10000</value>
</property>
<!-- Delegation-token key rotation (1 day) and max token lifetime (7 days), ms -->
<property>
<name>dfs.namenode.delegation.key.update-interval</name>
<value>86400000</value>
</property>
<property>
<name>dfs.namenode.delegation.token.max-lifetime</name>
<value>604800000</value>
</property>
<property>
<name>dfs.namenode.support.allow.format</name>
<value>true</value>
</property>
<!-- Stale-DataNode avoidance (disabled for both reads and writes) -->
<property>
<name>dfs.namenode.avoid.read.stale.datanode</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.avoid.write.stale.datanode</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.stale.datanode.interval</name>
<value>30000</value>
</property>
<property>
<name>dfs.namenode.write.stale.datanode.ratio</name>
<value>0.5f</value>
</property>
<property>
<name>dfs.namenode.audit.loggers</name>
<value>default</value>
</property>
<property>
<name>dfs.namenode.edits.noeditlogchannelflush</name>
<value>false</value>
</property>
<!-- RPC retry cache (needed for idempotence across HA failover) -->
<property>
<name>dfs.namenode.enable.retrycache</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.retrycache.expirytime.millis</name>
<value>600000</value>
</property>
<property>
<name>dfs.namenode.retrycache.heap.percent</name>
<value>0.03f</value>
</property>
<!-- Centralized cache management -->
<property>
<name>dfs.namenode.path.based.cache.block.map.allocation.percent</name>
<value>0.25</value>
</property>
<property>
<name>dfs.namenode.list.cache.directives.num.responses</name>
<value>100</value>
</property>
<property>
<name>dfs.namenode.list.cache.pools.num.responses</name>
<value>100</value>
</property>
<property>
<name>dfs.namenode.path.based.cache.refresh.interval.ms</name>
<value>30000</value>
</property>
<property>
<name>dfs.namenode.path.based.cache.retry.interval.ms</name>
<value>30000</value>
</property>
<!-- Automatic edit-log rolling -->
<property>
<name>dfs.namenode.edit.log.autoroll.multiplier.threshold</name>
<value>2</value>
</property>
<property>
<name>dfs.namenode.edit.log.autoroll.check.interval.ms</name>
<value>300000</value>
</property>
<property>
<name>dfs.namenode.reject-unresolved-dn-topology-mapping</name>
<value>false</value>
</property>
<!-- Extended attributes -->
<property>
<name>dfs.namenode.xattrs.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.namenode.fs-limits.max-xattrs-per-inode</name>
<value>32</value>
</property>
<property>
<name>dfs.namenode.fs-limits.max-xattr-size</name>
<value>16384</value>
</property>
<property>
<name>dfs.namenode.startup.delay.block.deletion.sec</name>
<value>0</value>
</property>
<property>
<name>dfs.namenode.list.encryption.zones.num.responses</name>
<value>100</value>
</property>
<property>
<name>dfs.namenode.inotify.max.events.per.rpc</name>
<value>1000</value>
</property>
<property>
<name>dfs.namenode.legacy-oiv-image.dir</name>
<value></value>
</property>
<!-- name-0-node: rpc 9001 / http 9002, advertised via DC/OS DNS, bound on all
     interfaces -->
<property>
<name>dfs.namenode.rpc-address.hdfs.name-0-node</name>
<value>name-0-node.hdfs.autoip.dcos.thisdcos.directory:9001</value>
</property>
<property>
<name>dfs.namenode.rpc-bind-host.hdfs.name-0-node</name>
<value>0.0.0.0</value>
</property>
<property>
<name>dfs.namenode.http-address.hdfs.name-0-node</name>
<value>name-0-node.hdfs.autoip.dcos.thisdcos.directory:9002</value>
</property>
<property>
<name>dfs.namenode.http-bind-host.hdfs.name-0-node</name>
<value>0.0.0.0</value>
</property>
<!-- name-1-node: same ports as name-0-node -->
<property>
<name>dfs.namenode.rpc-address.hdfs.name-1-node</name>
<value>name-1-node.hdfs.autoip.dcos.thisdcos.directory:9001</value>
</property>
<property>
<name>dfs.namenode.rpc-bind-host.hdfs.name-1-node</name>
<value>0.0.0.0</value>
</property>
<property>
<name>dfs.namenode.http-address.hdfs.name-1-node</name>
<value>name-1-node.hdfs.autoip.dcos.thisdcos.directory:9002</value>
</property>
<property>
<name>dfs.namenode.http-bind-host.hdfs.name-1-node</name>
<value>0.0.0.0</value>
</property>
<!-- ZooKeeper Failover Controller port -->
<property>
<name>dfs.ha.zkfc.port</name>
<value>8019</value>
</property>
<!-- journalnode: rpc 8485 (matches the qjournal URI above), http 8480 -->
<property>
<name>dfs.journalnode.rpc-address</name>
<value>0.0.0.0:8485</value>
</property>
<property>
<name>dfs.journalnode.http-address</name>
<value>0.0.0.0:8480</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>sandboxpath/journal-data</value>
</property>
<!-- datanode: data 9003, http 9004, ipc 9005 -->
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:9003</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:9004</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:9005</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>sandboxpath/data-data</value>
</property>
<!-- Balancer bandwidth cap: 41943040 B/s (40 MiB/s) -->
<property>
<name>dfs.datanode.balance.bandwidthPerSec</name>
<value>41943040</value>
</property>
<property>
<name>dfs.datanode.handler.count</name>
<value>10</value>
</property>
<property>
<name>dfs.datanode.du.reserved</name>
<value>0</value>
</property>
<property>
<name>dfs.datanode.directoryscan.interval</name>
<value>21600</value>
</property>
<property>
<name>dfs.datanode.directoryscan.threads</name>
<value>1</value>
</property>
<!-- Removed duplicate dfs.datanode.balance.bandwidthPerSec entry (value 1048576,
     the stock hdfs-default.xml value). The property is already set to 41943040
     earlier in this section; Hadoop's Configuration keeps the LAST occurrence of
     a duplicated key, so this stray default silently overrode the intended
     40 MiB/s balancer bandwidth limit. -->
<property>
<name>dfs.datanode.plugins</name>
<value></value>
</property>
<property>
<name>dfs.datanode.failed.volumes.tolerated</name>
<value>0</value>
</property>
<property>
<name>dfs.datanode.max.transfer.threads</name>
<value>4096</value>
</property>
<property>
<name>dfs.datanode.readahead.bytes</name>
<value>4193404</value>
</property>
<!-- OS page-cache behaviour hints -->
<property>
<name>dfs.datanode.drop.cache.behind.reads</name>
<value>false</value>
</property>
<property>
<name>dfs.datanode.drop.cache.behind.writes</name>
<value>false</value>
</property>
<property>
<name>dfs.datanode.sync.behind.writes</name>
<value>false</value>
</property>
<property>
<name>dfs.datanode.use.datanode.hostname</name>
<value>false</value>
</property>
<!-- Paths for short-circuit-read shared file descriptors -->
<property>
<name>dfs.datanode.shared.file.descriptor.paths</name>
<value>/dev/shm,/tmp</value>
</property>
<property>
<name>dfs.datanode.hdfs-blocks-metadata.enabled</name>
<value>false</value>
</property>
<!-- Available-space volume choosing policy (threshold 1.073741824E10 B = 10 GiB) -->
<property>
<name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-threshold</name>
<value>1.073741824E10</value>
</property>
<property>
<name>dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction</name>
<value>0.75f</value>
</property>
<!-- In-memory caching (disabled: 0 bytes of locked memory) -->
<property>
<name>dfs.datanode.max.locked.memory</name>
<value>0</value>
</property>
<property>
<name>dfs.datanode.fsdatasetcache.max.threads.per.volume</name>
<value>4</value>
</property>
<property>
<name>dfs.datanode.slow.io.warning.threshold.ms</name>
<value>300</value>
</property>
<property>
<name>dfs.datanode.cache.revocation.timeout.ms</name>
<value>900000</value>
</property>
<property>
<name>dfs.datanode.cache.revocation.polling.ms</name>
<value>500</value>
</property>
<property>
<name>dfs.datanode.block.id.layout.upgrade.threads</name>
<value>12</value>
</property>
<!-- HA -->
<property>
<name>ha.zookeeper.quorum</name>
<value>master.mesos:2181</value>
</property>
<!-- No-op fencing: shell(/bin/true) always succeeds, so no real fencing is
     performed here (QJM provides its own writer fencing) -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>shell(/bin/true)</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<!-- fsimage compression with Snappy -->
<property>
<name>dfs.image.compress</name>
<value>true</value>
</property>
<property>
<name>dfs.image.compression.codec</name>
<value>org.apache.hadoop.io.compress.SnappyCodec</value>
</property>
<!-- Short-circuit local reads over the dn_socket domain socket -->
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>dn_socket</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.skip.checksum</name>
<value>false</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
<value>256</value>
</property>
<property>
<name>dfs.client.read.shortcircuit.streams.cache.expiry.ms</name>
<value>300000</value>
</property>
<!-- Client-side failover proxy for nameservice "hdfs" -->
<property>
<name>dfs.client.failover.proxy.provider.hdfs</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Permission checking disabled cluster-wide -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>supergroup</value>
</property>
<property>
<name>dfs.client.cached.conn.retry</name>
<value>3</value>
</property>
<property>
<name>dfs.https.server.keystore.resource</name>
<value>ssl-server.xml</value>
</property>
<property>
<name>dfs.client.https.keystore.resource</name>
<value>ssl-client.xml</value>
</property>
<property>
<name>dfs.default.chunk.view.size</name>
<value>32768</value>
</property>
<!-- Block access tokens (disabled; intervals/lifetimes in minutes) -->
<property>
<name>dfs.block.access.token.enable</name>
<value>false</value>
</property>
<property>
<name>dfs.block.access.key.update.interval</name>
<value>600</value>
</property>
<property>
<name>dfs.block.access.token.lifetime</name>
<value>600</value>
</property>
<!-- Replication factor 3, 128 MiB block size -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.replication.max</name>
<value>512</value>
</property>
<property>
<name>dfs.blocksize</name>
<value>134217728</value>
</property>
<property>
<name>dfs.client.block.write.retries</name>
<value>3</value>
</property>
<!-- Pipeline recovery: replace failed DataNodes during writes -->
<property>
<name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
<value>true</value>
</property>
<property>
<name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
<value>DEFAULT</value>
</property>
<property>
<name>dfs.client.block.write.replace-datanode-on-failure.best-effort</name>
<value>false</value>
</property>
<!-- Block reports every 6 h; heartbeats every 3 s -->
<property>
<name>dfs.blockreport.intervalMsec</name>
<value>21600000</value>
</property>
<property>
<name>dfs.blockreport.initialDelay</name>
<value>0</value>
</property>
<property>
<name>dfs.blockreport.split.threshold</name>
<value>1000000</value>
</property>
<property>
<name>dfs.heartbeat.interval</name>
<value>3</value>
</property>
<!-- No include/exclude host lists -->
<property>
<name>dfs.hosts</name>
<value></value>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value></value>
</property>
<property>
<name>dfs.namenode.datanode.registration.ip-hostname-check</name>
<value>false</value>
</property>
<property>
<name>dfs.stream-buffer-size</name>
<value>4096</value>
</property>
<property>
<name>dfs.bytes-per-checksum</name>
<value>512</value>
</property>
<property>
<name>dfs.client-write-packet-size</name>
<value>65536</value>
</property>
<!-- How long (ms) a client keeps a DataNode on its write-exclude list -->
<property>
<name>dfs.client.write.exclude.nodes.cache.expiry.interval.millis</name>
<value>600000</value>
</property>
<!-- Removed erroneous duplicate of dfs.namenode.checkpoint.dir whose value was
     "600000" — a copy-paste of the previous property's value, not a directory.
     The property is already defined with a real path earlier in this file, and
     Hadoop's Configuration keeps the LAST occurrence of a duplicated key, so
     this bogus entry would have replaced the checkpoint directory path. -->
<!-- fsimage transfer between NameNodes (0 = unthrottled bandwidth) -->
<property>
<name>dfs.image.transfer.timeout</name>
<value>60000</value>
</property>
<property>
<name>dfs.image.transfer.bandwidthPerSec</name>
<value>0</value>
</property>
<property>
<name>dfs.image.transfer.chunksize</name>
<value>65536</value>
</property>
<!-- Client failover retry/backoff -->
<property>
<name>dfs.client.failover.max.attempts</name>
<value>15</value>
</property>
<property>
<name>dfs.client.failover.sleep.base.millis</name>
<value>500</value>
</property>
<property>
<name>dfs.client.failover.sleep.max.millis</name>
<value>15000</value>
</property>
<property>
<name>dfs.client.failover.connection.retries</name>
<value>0</value>
</property>
<property>
<name>dfs.client.failover.connection.retries.on.timeouts</name>
<value>0</value>
</property>
<property>
<name>dfs.client.datanode-restart.timeout</name>
<value>30</value>
</property>
<!-- Standby NameNode log roll / edits tailing periods (seconds) -->
<property>
<name>dfs.ha.log-roll.period</name>
<value>120</value>
</property>
<property>
<name>dfs.ha.tail-edits.period</name>
<value>60</value>
</property>
<property>
<name>dfs.support.append</name>
<value>true</value>
</property>
<property>
<name>dfs.client.use.datanode.hostname</name>
<value>false</value>
</property>
<property>
<name>dfs.client.local.interfaces</name>
<value></value>
</property>
<property>
<name>dfs.short.circuit.shared.memory.watcher.interrupt.check.ms</name>
<value>60000</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<!-- FUSE mount tuning -->
<property>
<name>hadoop.fuse.connection.timeout</name>
<value>300</value>
</property>
<property>
<name>hadoop.fuse.timer.period</name>
<value>5</value>
</property>
<property>
<name>dfs.metrics.percentiles.intervals</name>
<value></value>
</property>
<!-- Wire encryption (disabled; empty values fall back to defaults) -->
<property>
<name>dfs.encrypt.data.transfer</name>
<value>false</value>
</property>
<property>
<name>dfs.encrypt.data.transfer.algorithm</name>
<value></value>
</property>
<property>
<name>dfs.encrypt.data.transfer.cipher.suites</name>
<value></value>
</property>
<property>
<name>dfs.encrypt.data.transfer.cipher.key.bitlength</name>
<value>128</value>
</property>
<property>
<name>dfs.trustedchannel.resolver.class</name>
<value></value>
</property>
<property>
<name>dfs.data.transfer.protection</name>
<value></value>
</property>
<property>
<name>dfs.data.transfer.saslproperties.resolver.class</name>
<value></value>
</property>
<property>
<name>dfs.client.file-block-storage-locations.num-threads</name>
<value>10</value>
</property>
<property>
<name>dfs.client.file-block-storage-locations.timeout.millis</name>
<value>1000</value>
</property>
<!-- Client-side cache hints (empty = use DataNode settings) -->
<property>
<name>dfs.client.cache.drop.behind.writes</name>
<value></value>
</property>
<property>
<name>dfs.client.cache.drop.behind.reads</name>
<value></value>
</property>
<property>
<name>dfs.client.cache.readahead</name>
<value></value>
</property>
<!-- Client mmap cache for zero-copy reads -->
<property>
<name>dfs.client.mmap.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.client.mmap.cache.size</name>
<value>256</value>
</property>
<property>
<name>dfs.client.mmap.cache.timeout.ms</name>
<value>3600000</value>
</property>
<property>
<name>dfs.client.mmap.retry.timeout.ms</name>
<value>300000</value>
</property>
<property>
<name>dfs.client.short.circuit.replica.stale.threshold.ms</name>
<value>1800000</value>
</property>
<property>
<name>dfs.cachereport.intervalMsec</name>
<value>10000</value>
</property>
<!-- WebHDFS username validation pattern -->
<property>
<name>dfs.webhdfs.user.provider.user.pattern</name>
<value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
</property>
<property>
<name>dfs.block.local-path-access.user</name>
<value></value>
</property>
<property>
<name>dfs.client.domain.socket.data.traffic</name>
<value>false</value>
</property>
<property>
<name>dfs.client.slow.io.warning.threshold.ms</name>
<value>30000</value>
</property>
<property>
<name>dfs.encryption.key.provider.uri</name>
<value></value>
</property>
<property>
<name>dfs.storage.policy.enabled</name>
<value>true</value>
</property>
</configuration>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment