- 批量处理
以回帖的形式进行记录。
package com.github.winse.btrace; | |
/* BTrace Script Template */ | |
import static com.sun.btrace.BTraceUtils.jstack; | |
import static com.sun.btrace.BTraceUtils.name; | |
import static com.sun.btrace.BTraceUtils.print; | |
import static com.sun.btrace.BTraceUtils.printArray; | |
import static com.sun.btrace.BTraceUtils.println; | |
import static com.sun.btrace.BTraceUtils.probeClass; | |
import static com.sun.btrace.BTraceUtils.probeMethod; |
[hadoop@hadoop-master1 ~]$ hdfs haadmin -transitionToActive --forcemanual nn1 | |
You have specified the forcemanual flag. This flag is dangerous, as it can induce a split-brain scenario that WILL CORRUPT your HDFS namespace, possibly irrecoverably. | |
It is recommended not to use this flag, but instead to shut down the cluster and disable automatic failover if you prefer to manually manage your HA state. | |
You may abort safely by answering 'n' or hitting ^C now. | |
Are you sure you want to continue? (Y or N) Y | |
14/06/18 10:43:18 WARN ha.HAAdmin: Proceeding with manual HA state management even though | |
automatic failover is enabled for NameNode at hadoop-master1/192.168.32.11:8020 |
[hadoop@localhost tmp]$ tar zcvf test.tar.gz . | |
./ | |
./c/ | |
./c/1/ | |
./c/1/2/ | |
./b/ | |
./b/1/ | |
./b/1/2/ | |
./a/ | |
./a/1/ |
winse@Lenovo-PC /cygdrive/e/local/opt | |
$ ssh hadoop@cluster ' cat | tar zxf - --exclude=sources --exclude=doc --exclude=*.cmd ' < ./hadoop-2.2.0.tar.gz | |
winse@Lenovo-PC /cygdrive/e/local/opt | |
$ ssh root@cluster ' cat | tar zxf - ' < ./jdk-7u60-linux-i586.gz | |
winse@Lenovo-PC ~ | |
$ ssh root@cluster | |
Last login: Sat Jun 21 20:54:19 2014 from 192.168.154.1 | |
[root@localhost ~]# cd /etc/yum.repos.d/ | |
[root@localhost yum.repos.d]# wget http://public-repo-1.hortonworks.com/ambari/centos6/1.x/updates/1.6.0/ambari.repo | |
--2014-06-21 21:36:44-- http://public-repo-1.hortonworks.com/ambari/centos6/1.x/updates/1.6.0/ambari.repo | |
Resolving public-repo-1.hortonworks.com... 54.230.234.103, 54.230.234.83, 54.230.234.203, ... | |
Connecting to public-repo-1.hortonworks.com|54.230.234.103|:80... connected. | |
HTTP request sent, awaiting response... 200 OK | |
Length: 1074 (1.0K) [binary/octet-stream] |
# root用户操作 | |
apache-maven-3.0.4 | |
jdk1.7.0_60 | |
apache-ant-1.9.2 | |
vim /etc/profile | |
source /etc/profile | |
## 节约点下载的时间,直接共享使用主机的maven数据 |
以回帖的形式进行记录。
# 下载hadoop2编译好的版本 | |
[hadoop@umcc97-44 spark-1.0.0-bin-hadoop2]$ hadoop fs -put README.md ./ | |
# 参考 http://spark.apache.org/docs/latest/quick-start.html | |
# http://spark.apache.org/examples.html | |
# https://github.com/apache/spark/tree/master/examples/src/main/java/org/apache/spark/examples | |
[hadoop@umcc97-44 spark-1.0.0-bin-hadoop2]$ bin/spark-shell | |
... |
[hadoop@umcc97-44 phoenix-4.0.0-incubating]$ bin/sqlline.py localhost | |
/* | |
hbase(main):020:0> describe 't1' | |
DESCRIPTION ENABLED | |
't1', {NAME => 'f1', DATA_BLOCK_ENCODING => 'NONE', BLOOMFILTER => 'ROW', REPLICATION_SCOPE => '0', VERSIONS => '1', COMPRESS true | |
ION => 'NONE', MIN_VERSIONS => '0', TTL => 'FOREVER', KEEP_DELETED_CELLS => 'false', BLOCKSIZE => '65536', IN_MEMORY => 'fals | |
e', BLOCKCACHE => 'true', METADATA => {'ENCODE_ON_DISK' => 'true'}} | |
1 row(s) in 0.0830 seconds |