- 批量处理
以回帖的形式进行记录。
# 下载hadoop2编译好的版本
[hadoop@umcc97-44 spark-1.0.0-bin-hadoop2]$ hadoop fs -put README.md ./
# 参考 http://spark.apache.org/docs/latest/quick-start.html
# http://spark.apache.org/examples.html
# https://github.com/apache/spark/tree/master/examples/src/main/java/org/apache/spark/examples
[hadoop@umcc97-44 spark-1.0.0-bin-hadoop2]$ bin/spark-shell
...
[hadoop@umcc97-44 phoenix-4.0.0-incubating]$ bin/sqlline.py localhost
/*
hbase(main):020:0> describe 't1'
DESCRIPTION ENABLED
't1', {NAME => 'f1', DATA_BLOCK_ENCODING => 'NONE', BLOOMFILTER => 'ROW', REPLICATION_SCOPE => '0', VERSIONS => '1', COMPRESS true
ION => 'NONE', MIN_VERSIONS => '0', TTL => 'FOREVER', KEEP_DELETED_CELLS => 'false', BLOCKSIZE => '65536', IN_MEMORY => 'fals
e', BLOCKCACHE => 'true', METADATA => {'ENCODE_ON_DISK' => 'true'}}
1 row(s) in 0.0830 seconds
@echo off
rem Open the file passed as the first argument in Notepad++ (portable install).
rem Substring syntax reference:
rem http://stackoverflow.com/questions/636381/what-is-the-best-way-to-do-a-substring-in-a-batch-file
set fileRelativePath=%1
rem Strip the first 17 characters and the last character of the argument
rem (assumes a fixed 17-char prefix and a trailing quote/delimiter in the
rem caller-supplied path -- TODO confirm against the actual caller), then
rem resolve the remainder relative to two directories above this script
rem (%~dp0 is this script's own directory, with a trailing backslash).
set filepath="%~dp0..\..\%fileRelativePath:~17,-1%"
start E:\local\usr\share\npp\notepad++.exe %filepath%
rem pause
diff --git a/src/main/java/com/cxy/redisclient/integration/I18nFile.java b/src/main/java/com/cxy/redisclient/integration/I18nFile.java
index 1df1322..302e279 100644
--- a/src/main/java/com/cxy/redisclient/integration/I18nFile.java
+++ b/src/main/java/com/cxy/redisclient/integration/I18nFile.java
@@ -49,6 +49,8 @@ public class I18nFile extends PropertyFile {
 	public static final String NAME = "NAME";
 	public static final String TYPE = "TYPE";
 	public static final String SIZE = "SIZE";
+	public static final String COMMENT = "COMMENT";
+
import static org.junit.Assert.assertEquals;
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
package com.github.winse.hadoop
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.Reducer
import org.apache.hadoop.io.Text
import org.apache.hadoop.io.IntWritable
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.mapreduce.Mapper
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat