@yishenggudou
Created November 12, 2012 07:25
hadoop log
2012-11-12 18:59:46,208 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at localhost.localdomain/127.0.0.1
************************************************************/
2012-11-12 19:00:08,092 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: STARTUP_MSG:
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = localhost.localdomain/127.0.0.1
STARTUP_MSG: args = []
STARTUP_MSG: version = 1.0.4
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1393290; compiled by 'hortonfo' on Wed Oct 3 05:10:00 UTC 2012
************************************************************/
2012-11-12 19:00:08,468 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
2012-11-12 19:00:08,627 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source MetricsSystem,sub=Stats registered.
2012-11-12 19:00:08,629 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 60 second(s).
2012-11-12 19:00:08,629 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system started
2012-11-12 19:00:08,922 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source ugi registered.
2012-11-12 19:00:08,938 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source jvm registered.
2012-11-12 19:00:08,941 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source NameNode registered.
2012-11-12 19:00:08,985 INFO org.apache.hadoop.hdfs.util.GSet: VM type = 32-bit
2012-11-12 19:00:08,988 INFO org.apache.hadoop.hdfs.util.GSet: 2% max memory = 2.27625 MB
2012-11-12 19:00:08,988 INFO org.apache.hadoop.hdfs.util.GSet: capacity = 2^19 = 524288 entries
2012-11-12 19:00:08,988 INFO org.apache.hadoop.hdfs.util.GSet: recommended=524288, actual=524288
2012-11-12 19:00:09,049 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: fsOwner=root
2012-11-12 19:00:09,049 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: supergroup=supergroup
2012-11-12 19:00:09,049 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isPermissionEnabled=false
2012-11-12 19:00:09,060 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.block.invalidate.limit=100
2012-11-12 19:00:09,060 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
2012-11-12 19:00:09,438 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Registered FSNamesystemStateMBean and NameNodeMXBean
2012-11-12 19:00:09,479 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Caching file names occuring more than 10 times
2012-11-12 19:00:09,496 ERROR org.apache.hadoop.hdfs.server.namenode.FSNamesystem: FSNamesystem initialization failed.
java.io.IOException: NameNode is not formatted.
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:330)
at org.apache.hadoop.hdfs.server.namenode.FSDirectory.loadFSImage(FSDirectory.java:100)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:388)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:362)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:276)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:496)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1279)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1288)
2012-11-12 19:00:09,499 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: java.io.IOException: NameNode is not formatted.
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:330)
at org.apache.hadoop.hdfs.server.namenode.FSDirectory.loadFSImage(FSDirectory.java:100)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:388)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:362)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:276)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:496)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1279)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1288)
2012-11-12 19:00:09,500 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at localhost.localdomain/127.0.0.1
************************************************************/
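The excerpt above ends with java.io.IOException: NameNode is not formatted, i.e. the metadata directory has never been initialized. On Hadoop 1.0.4 the usual first step is to format the NameNode once, as the user that owns the HDFS metadata directory, before starting the daemon. A minimal sketch (the hdfs account and the use of sudo are assumptions, and formatting wipes any existing metadata, so only do this on a fresh install):

# Sketch only: initialize the NameNode metadata directory, then start the daemon.
sudo -u hdfs hadoop namenode -format
sudo -u hdfs hadoop-daemon.sh start namenode

What follows appears to be the cluster's core-site.xml.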
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>local.realm</name>
<value>KERBEROS.EXAMPLE.COM</value>
</property>
<!-- file system properties -->
<property>
<name>fs.default.name</name>
<value>hdfs://master:8020</value>
<description>The name of the default file system. Either the
literal string "local" or a host:port for NDFS.
</description>
<final>true</final>
</property>
<property>
<name>fs.trash.interval</name>
<value>360</value>
<description>Number of minutes between trash checkpoints.
If zero, the trash feature is disabled.
</description>
</property>
<property>
<name>hadoop.security.auth_to_local</name>
<value>
RULE:[2:$1@$0]([jt]t@.*KERBEROS.EXAMPLE.COM)s/.*/mapred/
RULE:[2:$1@$0]([nd]n@.*KERBEROS.EXAMPLE.COM)s/.*/hdfs/
RULE:[2:$1@$0](mapred@.*KERBEROS.EXAMPLE.COM)s/.*/mapred/
RULE:[2:$1@$0](hdfs@.*KERBEROS.EXAMPLE.COM)s/.*/hdfs/
RULE:[2:$1@$0](mapredqa@.*KERBEROS.EXAMPLE.COM)s/.*/mapred/
RULE:[2:$1@$0](hdfsqa@.*KERBEROS.EXAMPLE.COM)s/.*/hdfs/
RULE:[2:$1@$0](hm@.*KERBEROS.EXAMPLE.COM)s/.*/hbase/
RULE:[2:$1@$0](rs@.*KERBEROS.EXAMPLE.COM)s/.*/hbase/
DEFAULT
</value>
<description></description>
</property>
<property>
<name>hadoop.security.authentication</name>
<value>simple</value>
<description>
Set the authentication for the cluster. Valid values are: simple or
kerberos.
</description>
</property>
<property>
<name>hadoop.security.authorization</name>
<value>false</value>
<description>
Enable authorization for different protocols.
</description>
</property>
<property>
<name>hadoop.security.groups.cache.secs</name>
<value>14400</value>
</property>
<property>
<name>hadoop.kerberos.kinit.command</name>
<value>/usr/kerberos/bin/kinit</value>
</property>
<property>
<name>hadoop.http.filter.initializers</name>
<value>org.apache.hadoop.http.lib.StaticUserWebFilter</value>
</property>
</configuration>
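Two things stand out in this configuration: hadoop.security.authentication is simple, so local.realm, the auth_to_local rules and hadoop.kerberos.kinit.command are currently inert and only take effect once authentication is switched to kerberos; and fs.default.name points at hdfs://master:8020 while the startup banners above show the NameNode running at localhost.localdomain. If the auth_to_local rules ever need checking, the hadoop script can run a class directly; the HadoopKerberosName test main() is present in security-enabled builds, but verify it exists in your 1.0.4 jars before relying on it:

# Hedged example: print the local account a Kerberos principal maps to
# under the auth_to_local rules configured above.
hadoop org.apache.hadoop.security.HadoopKerberosName nn/master@KERBEROS.EXAMPLE.COM
# With the rules above, the principal should map to the local user hdfs.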
2012-11-12 18:17:03,244 INFO org.apache.hadoop.net.NetworkTopology: Adding a new node: /default-rack/10.11.50.162:50010
2012-11-12 18:17:03,277 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameNode.blocksBeingWrittenReport: from 10.11.50.162:50010 0 blocks
2012-11-12 18:17:03,310 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameSystem.processReport: from 10.11.50.162:50010, blocks: 0, processing time: 3 msecs
2012-11-12 18:17:07,165 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:root cause:org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="":hdfs:supergroup:rwxr-xr-x
2012-11-12 18:17:07,171 INFO org.apache.hadoop.ipc.Server: IPC Server handler 8 on 8020, call mkdirs(/mapred/mapredsystem, rwx------) from 10.10.131.224:36645: error: org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="":hdfs:supergroup:rwxr-xr-x
org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="":hdfs:supergroup:rwxr-xr-x
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:199)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:180)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:128)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:5214)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkAncestorAccess(FSNamesystem.java:5188)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirsInternal(FSNamesystem.java:2060)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.mkdirs(FSNamesystem.java:2029)
at org.apache.hadoop.hdfs.server.namenode.NameNode.mkdirs(NameNode.java:817)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:563)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1388)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1384)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1121)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1382)
2012-11-12 18:17:27,577 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* NameSystem.registerDatanode: node registration from 10.11.50.162:50010 storage DS-1462011076-127.0.0.1-50010-1352715423223
2012-11-12 18:17:27,578 INFO org.apache.hadoop.net.NetworkTopology: Removing a node: /default-rack/10.11.50.162:50010
2012-11-12 18:17:27,578 INFO org.apache.hadoop.net.NetworkTopology: Adding a new node: /default-rack/10.11.50.162:50010
2012-11-12 18:17:27,581 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameNode.blocksBeingWrittenReport: from 10.11.50.162:50010 0 blocks
2012-11-12 18:17:27,589 INFO org.apache.hadoop.hdfs.StateChange: *BLOCK* NameSystem.processReport: from 10.11.50.162:50010, blocks: 0, processing time: 0 msecs
2012-11-12 18:17:30,011 ERROR org.apache.hadoop.security.UserGroupInformation: PriviledgedActionException as:root cause:org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="mapred":mapred:hadoop:rwxr-xr-x
2012-11-12 18:17:30,011 INFO org.apache.hadoop.ipc.Server: IPC Server handler 6 on 8020, call delete(/mapred/mapredsystem, true) from 10.10.131.224:36681: error: org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="mapred":mapred:hadoop:rwxr-xr-x
org.apache.hadoop.security.AccessControlException: Permission denied: user=root, access=WRITE, inode="mapred":mapred:hadoop:rwxr-xr-x
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:199)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.check(FSPermissionChecker.java:180)
at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkPermission(FSPermissionChecker.java:131)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkPermission(FSNamesystem.java:5214)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.deleteInternal(FSNamesystem.java:1996)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.delete(FSNamesystem.java:1974)
at org.apache.hadoop.hdfs.server.namenode.NameNode.delete(NameNode.java:792)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:601)
at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:563)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1388)
at org.apache.hadoop.ipc.Server$Handler$1.run(Server.java:1384)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1121)
at org.apache.hadoop.ipc.Server$Handler.run(Server.java:1382)
2012-11-12 18:18:37,697 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at localhost.localdomain/127.0.0.1
************************************************************/
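Both AccessControlExceptions in the excerpt above come from calls made as root while HDFS permission checking is enforced: the mkdir of /mapred/mapredsystem fails because the root inode is owned by hdfs:supergroup (rwxr-xr-x), and the later delete fails because /mapred is owned by mapred:hadoop. The usual remedies are to run the JobTracker as the mapred user, or to pre-create the system directory with the correct owner using the HDFS superuser. A hedged sketch, assuming hdfs is the HDFS superuser account and mapred runs the JobTracker:

# Sketch: create the MapReduce system directory as the HDFS superuser
# and hand it to the account that actually runs the JobTracker.
sudo -u hdfs hadoop fs -mkdir /mapred/mapredsystem
sudo -u hdfs hadoop fs -chown -R mapred:hadoop /mapred

What follows appears to be the cluster's hdfs-site.xml.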
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- file system properties -->
<property>
<name>dfs.name.dir</name>
<value>/data/hadoop/hdfs/namenode</value>
<description>Determines where on the local filesystem the DFS name node
should store the name table. If this is a comma-delimited list
of directories then the name table is replicated in all of the
directories, for redundancy. </description>
<final>true</final>
</property>
<property>
<name>dfs.data.dir</name>
<value>/data/hadoop/hdfs/data</value>
<description>Determines where on the local filesystem a DFS data node
should store its blocks. If this is a comma-delimited
list of directories, then data will be stored in all named
directories, typically on different devices.
Directories that do not exist are ignored.
</description>
<final>true</final>
</property>
<property>
<name>dfs.safemode.threshold.pct</name>
<value>1.0f</value>
<description>
Specifies the percentage of blocks that should satisfy
the minimal replication requirement defined by dfs.replication.min.
Values less than or equal to 0 mean not to start in safe mode.
Values greater than 1 will make safe mode permanent.
</description>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:50010</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:50075</value>
</property>
<property>
<name>dfs.http.address</name>
<value>master:50070</value>
<description>The address and the base port where the dfs namenode
web ui will listen on.
</description>
<final>true</final>
</property>
<!-- Permissions configuration -->
<property>
<name>dfs.umaskmode</name>
<value>077</value>
<description>
The octal umask used when creating files and directories.
</description>
</property>
<property>
<name>dfs.block.access.token.enable</name>
<value>false</value>
<description>
If "true", access tokens are used as capabilities for accessing datanodes.
</description>
</property>
<property>
<name>dfs.namenode.kerberos.principal</name>
<value>nn/_HOST@${local.realm}</value>
<description>
Kerberos principal name for the NameNode
</description>
</property>
<property>
<name>dfs.secondary.namenode.kerberos.principal</name>
<value>nn/_HOST@${local.realm}</value>
<description>
Kerberos principal name for the secondary NameNode.
</description>
</property>
<property>
<name>dfs.namenode.kerberos.https.principal</name>
<value>host/_HOST@${local.realm}</value>
<description>
The Kerberos principal for the host that the NameNode runs on.
</description>
</property>
<property>
<name>dfs.secondary.namenode.kerberos.https.principal</name>
<value>host/_HOST@${local.realm}</value>
<description>
The Kerberos principal for the host that the secondary NameNode runs on.
</description>
</property>
<property>
<name>dfs.secondary.https.port</name>
<value>50490</value>
<description>The https port where secondary-namenode binds</description>
</property>
<property>
<name>dfs.datanode.kerberos.principal</name>
<value>dn/_HOST@${local.realm}</value>
<description>
The Kerberos principal that the DataNode runs as. "_HOST" is replaced by
the real host name.
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.principal</name>
<value>HTTP/_HOST@${local.realm}</value>
<description>
The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
HTTP SPNEGO specification.
</description>
</property>
<property>
<name>dfs.web.authentication.kerberos.keytab</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
The Kerberos keytab file with the credentials for the
HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
</description>
</property>
<property>
<name>dfs.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
Combined keytab file containing the namenode service and host principals.
</description>
</property>
<property>
<name>dfs.secondary.namenode.keytab.file</name>
<value>/etc/security/keytabs/nn.service.keytab</value>
<description>
Combined keytab file containing the namenode service and host principals.
</description>
</property>
<property>
<name>dfs.datanode.keytab.file</name>
<value>/etc/security/keytabs/dn.service.keytab</value>
<description>
The filename of the keytab file for the DataNode.
</description>
</property>
<property>
<name>dfs.https.port</name>
<value>50470</value>
<description>The https port where namenode binds</description>
</property>
<property>
<name>dfs.https.address</name>
<value>master:50470</value>
<description>The https address where namenode binds</description>
</property>
<property>
<name>dfs.datanode.data.dir.perm</name>
<value>700</value>
<description>The permissions that should be there on dfs.data.dir
directories. The datanode will not come up if the permissions are
different on existing dfs.data.dir directories. If the directories
don't exist, they will be created with this permission.
</description>
</property>
<property>
<name>dfs.cluster.administrators</name>
<value>hdfs</value>
<description>ACL controlling who can view the default servlets in HDFS.</description>
</property>
<property>
<name>dfs.permissions.superusergroup</name>
<value>hadoop</value>
<description>The name of the group of super-users.</description>
</property>
<property>
<name>dfs.secondary.http.address</name>
<value>localhost.localdomain:50090</value>
<description>
The secondary namenode http server address and port.
If the port is 0 then the server will start on a free port.
</description>
</property>
<property>
<name>dfs.hosts</name>
<value>/etc/hadoop/dfs.include</value>
<description>Names a file that contains a list of hosts that are
permitted to connect to the namenode. The full pathname of the file
must be specified. If the value is empty, all hosts are
permitted.</description>
</property>
<property>
<name>dfs.hosts.exclude</name>
<value>/etc/hadoop/dfs.exclude</value>
<description>Names a file that contains a list of hosts that are
not permitted to connect to the namenode. The full pathname of the
file must be specified. If the value is empty, no hosts are
excluded.
</description>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>false</value>
<description>Enable or disable webhdfs. Defaults to false</description>
</property>
<property>
<name>dfs.support.append</name>
<value>true</value>
<description>Enable or disable append. Defaults to false</description>
</property>
</configuration>
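dfs.name.dir and dfs.data.dir in this configuration point at /data/hadoop/hdfs/namenode and /data/hadoop/hdfs/data, and dfs.datanode.data.dir.perm is 700. The startup excerpt that follows fails precisely because the namenode directory does not exist on disk, so the local paths have to be created and owned by the daemon user before the NameNode can be formatted or started. A sketch; the hdfs:hadoop ownership is an assumption, adjust to the account the daemons actually run as:

# Sketch: create the local storage directories named in the config above.
mkdir -p /data/hadoop/hdfs/namenode /data/hadoop/hdfs/data
chown -R hdfs:hadoop /data/hadoop/hdfs
chmod 700 /data/hadoop/hdfs/data    # matches dfs.datanode.data.dir.perm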
/************************************************************
STARTUP_MSG: Starting NameNode
STARTUP_MSG: host = localhost.localdomain/127.0.0.1
STARTUP_MSG: args = []
STARTUP_MSG: version = 1.0.4
STARTUP_MSG: build = https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1.0 -r 1393290; compiled by 'hortonfo' on Wed Oct 3 05:10:00 UTC 2012
************************************************************/
2012-11-12 14:59:15,344 INFO org.apache.hadoop.metrics2.impl.MetricsConfig: loaded properties from hadoop-metrics2.properties
2012-11-12 14:59:15,519 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source MetricsSystem,sub=Stats registered.
2012-11-12 14:59:15,521 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Scheduled snapshot period at 60 second(s).
2012-11-12 14:59:15,521 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: NameNode metrics system started
2012-11-12 14:59:15,807 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source ugi registered.
2012-11-12 14:59:15,825 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source jvm registered.
2012-11-12 14:59:15,827 INFO org.apache.hadoop.metrics2.impl.MetricsSourceAdapter: MBean for source NameNode registered.
2012-11-12 14:59:15,868 INFO org.apache.hadoop.hdfs.util.GSet: VM type = 32-bit
2012-11-12 14:59:15,871 INFO org.apache.hadoop.hdfs.util.GSet: 2% max memory = 2.27625 MB
2012-11-12 14:59:15,871 INFO org.apache.hadoop.hdfs.util.GSet: capacity = 2^19 = 524288 entries
2012-11-12 14:59:15,871 INFO org.apache.hadoop.hdfs.util.GSet: recommended=524288, actual=524288
2012-11-12 14:59:15,929 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: fsOwner=root
2012-11-12 14:59:15,929 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: supergroup=supergroup
2012-11-12 14:59:15,929 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isPermissionEnabled=true
2012-11-12 14:59:15,940 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: dfs.block.invalidate.limit=100
2012-11-12 14:59:15,940 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: isAccessTokenEnabled=false accessKeyUpdateInterval=0 min(s), accessTokenLifetime=0 min(s)
2012-11-12 14:59:16,333 INFO org.apache.hadoop.hdfs.server.namenode.FSNamesystem: Registered FSNamesystemStateMBean and NameNodeMXBean
2012-11-12 14:59:16,378 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: Caching file names occuring more than 10 times
2012-11-12 14:59:16,385 INFO org.apache.hadoop.hdfs.server.common.Storage: Storage directory /data/hadoop/hdfs/namenode does not exist.
2012-11-12 14:59:16,389 ERROR org.apache.hadoop.hdfs.server.namenode.FSNamesystem: FSNamesystem initialization failed.
org.apache.hadoop.hdfs.server.common.InconsistentFSStateException: Directory /data/hadoop/hdfs/namenode is in an inconsistent state: storage directory does not exist or is not accessible.
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:303)
at org.apache.hadoop.hdfs.server.namenode.FSDirectory.loadFSImage(FSDirectory.java:100)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:388)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:362)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:276)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:496)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1279)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1288)
2012-11-12 14:59:16,392 ERROR org.apache.hadoop.hdfs.server.namenode.NameNode: org.apache.hadoop.hdfs.server.common.InconsistentFSStateException: Directory /data/hadoop/hdfs/namenode is in an inconsistent state: storage directory does not exist or is not accessible.
at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:303)
at org.apache.hadoop.hdfs.server.namenode.FSDirectory.loadFSImage(FSDirectory.java:100)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.initialize(FSNamesystem.java:388)
at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.<init>(FSNamesystem.java:362)
at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:276)
at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:496)
at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1279)
at org.apache.hadoop.hdfs.server.namenode.NameNode.main(NameNode.java:1288)
2012-11-12 14:59:16,393 INFO org.apache.hadoop.hdfs.server.namenode.NameNode: SHUTDOWN_MSG:
/************************************************************
SHUTDOWN_MSG: Shutting down NameNode at localhost.localdomain/127.0.0.1
************************************************************/
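Unlike the first excerpt, this run never reached the "not formatted" check: FSImage.recoverTransitionRead bails out earlier with InconsistentFSStateException because the storage directory itself is missing. Once the directory from the previous sketch exists and hadoop namenode -format has been run, the usual Hadoop 1.x layout under dfs.name.dir has a current/ subdirectory with VERSION, fsimage and edits; a quick check (the layout described is the standard one, not something shown in these logs):

# Sketch: confirm the NameNode storage directory exists and has been formatted.
ls -l /data/hadoop/hdfs/namenode/current/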