-
-
Save xueruini/4fb006c6f4cdf1af32cc621945187583 to your computer and use it in GitHub Desktop.
search
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Endpoint of the Elasticsearch cluster used throughout this script.
HOST="http://elasticsearch.dev-id.internal.hyku.org:9200"

# drop index — makes the script idempotent when re-run
curl -XDELETE "$HOST/my_index"

# create index with two analyzers:
#   - my_ngram_analyzer: 1..14-char nGrams over letters/digits (index time)
#   - my_punctuation_analyzer: splits on the listed punctuation/space chars,
#     lowercases, and removes stopwords (search time)
curl -XPUT "$HOST/my_index" -H 'Content-Type: application/json' -d '{
  "settings":{
    "analysis":{
      "analyzer":{
        "my_ngram_analyzer":{
          "tokenizer":"my_ngram_tokenizer"
        },
        "my_punctuation_analyzer": {
          "type": "custom",
          "tokenizer": "punctuation",
          "filter": [
            "lowercase",
            "stop"
          ]
        }
      },
      "tokenizer":{
        "punctuation": {
          "type": "pattern",
          "pattern": "[ .,!?-]"
        },
        "my_ngram_tokenizer":{
          "type":"nGram",
          "min_gram": 1,
          "max_gram": 14,
          "token_chars":[
            "letter",
            "digit"
          ]
        }
      }
    }
  }
}'

# create table items — field "id" is indexed with the ngram analyzer but
# queried with the punctuation analyzer
curl -XPUT "$HOST/my_index/_mapping/items" -H 'Content-Type: application/json' -d '{
  "items":{
    "properties":{
      "id":{
        "analyzer":"my_ngram_analyzer",
        "search_analyzer":"my_punctuation_analyzer",
        "type":"text"
      }
    }
  }
}'

# create data — sample documents mixing ASCII, punctuation and CJK text
curl -XPUT "$HOST/my_index/items/1" -H 'Content-Type: application/json' -d '{"id": "86-1344675中国"}'
curl -XPUT "$HOST/my_index/items/2" -H 'Content-Type: application/json' -d '{"id": "Red86-1344675中国"}'
curl -XPUT "$HOST/my_index/items/3" -H 'Content-Type: application/json' -d '{"id": "Red Great 中"}'
curl -XPUT "$HOST/my_index/items/4" -H 'Content-Type: application/json' -d '{"id": "1133"}'

# query — runs the search body stored in a.json (uses httpie, not curl)
http POST "$HOST/my_index/items/_search" Content-type:application/json < a.json
Author
xueruini
commented
Nov 2, 2017
# Endpoint of the Elasticsearch cluster used throughout this script.
HOST="http://elasticsearch.dev-id.internal.hyku.org:9200"

# drop index — makes the script idempotent when re-run
curl -XDELETE "$HOST/my_index"

# create index with two analyzers:
#   - my_ngram_analyzer: 1..14-char nGrams over letters/digits (index time)
#   - my_punctuation_analyzer: splits on punctuation or horizontal
#     whitespace (\p{Punct}|\h), lowercases, removes stopwords (search time)
curl -XPUT "$HOST/my_index" -H 'Content-Type: application/json' -d '{
  "settings":{
    "analysis":{
      "analyzer":{
        "my_ngram_analyzer":{
          "tokenizer":"my_ngram_tokenizer"
        },
        "my_punctuation_analyzer": {
          "type": "custom",
          "tokenizer": "punctuation",
          "filter": [
            "lowercase",
            "stop"
          ]
        }
      },
      "tokenizer":{
        "punctuation": {
          "type": "pattern",
          "pattern": "\\p{Punct}|\\h"
        },
        "my_ngram_tokenizer":{
          "type":"nGram",
          "min_gram": 1,
          "max_gram": 14,
          "token_chars":[
            "letter",
            "digit"
          ]
        }
      }
    }
  }
}'

# create table items — field "id" is indexed with the ngram analyzer but
# queried with the punctuation analyzer
curl -XPUT "$HOST/my_index/_mapping/items" -H 'Content-Type: application/json' -d '{
  "items":{
    "properties":{
      "id":{
        "analyzer":"my_ngram_analyzer",
        "search_analyzer":"my_punctuation_analyzer",
        "type":"text"
      }
    }
  }
}'

# create data — sample documents mixing ASCII, punctuation and CJK text
curl -XPUT "$HOST/my_index/items/1" -H 'Content-Type: application/json' -d '{"id": "86-1344675中国"}'
curl -XPUT "$HOST/my_index/items/2" -H 'Content-Type: application/json' -d '{"id": "Red86-1344675中国"}'
curl -XPUT "$HOST/my_index/items/3" -H 'Content-Type: application/json' -d '{"id": "Red Great 中"}'
curl -XPUT "$HOST/my_index/items/4" -H 'Content-Type: application/json' -d '{"id": "1133"}'

# query — runs the search body stored in a.json (uses httpie, not curl)
http POST "$HOST/my_index/items/_search" Content-type:application/json < a.json
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.