./bin/elasticsearch-plugin install mapper-murmur3
PUT foo/_mapping/bar
{
"properties": {
diff --git a/gulpfile.js b/gulpfile.js | |
index e4c4981..5932c25 100644 | |
--- a/gulpfile.js | |
+++ b/gulpfile.js | |
@@ -5,7 +5,7 @@ const gulp = require('gulp'); | |
const g = require('gulp-load-plugins')(); | |
const path = require('path'); | |
const del = require('del'); | |
-const isparta = require('isparta'); | |
+// const isparta = require('isparta'); |
DELETE i | |
PUT i | |
{ | |
"mappings": { | |
"t": { | |
"properties": { | |
"logstash_stats": { | |
"type": "object", | |
"properties": { | |
"logstash": { |
Test if Monitoring Cluster version X is able to monitor Production Cluster version Y, where X > Y.
X. This will be the Monitoring Cluster
Y. This will be the Production Cluster
Y. This will be the Production Kibana
bin/elasticsearch -E cluster.name=esmon -E node.name=esmon_1 -E http.port=9400
Filebeat consumes Elasticsearch logs via its elasticsearch
module. Specifically, for each type of Elasticsearch log (server, gc, deprecation, etc.) there is a corresponding fileset under the Filebeat elasticsearch
module. This fileset is responsible for parsing the Elasticsearch log files into structured events that can then be shipped to Elasticsearch or other outputs.
So whenever the structure of Elasticsearch logs changes, the changes must be tested with the Filebeat elasticsearch
module to ensure two things:
If necessary, the ingest pipeline used by the fileset to do the parsing should be updated.
#!/bin/bash | |
CHILD_PIDFILE=$PWD/child.pid | |
# Clean up old child, if any | |
if [ -f $CHILD_PIDFILE ]; then | |
OLD_CHILD_PID=$(cat $CHILD_PIDFILE) | |
ps $OLD_CHILD_PID >/dev/null | |
if [ $? -eq 0 ]; then | |
kill -9 $OLD_CHILD_PID |
As of: 7.5.0
monitoring-*
indices..monitoring-*
indices.monitoring-*
indices.
#!/bin/bash | |
# Usage: | |
# ./find_jenkins_job.sh PR_NUMBER | |
# Example: | |
# ./find_jenkins_job.sh 15790 | |
set -e | |
NUM_JOBS_TO_SEARCH=100 |