Last active
August 29, 2015 14:02
-
-
Save untergeek/fdd8701ce49dadc917dd to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
$ python test.py --help
usage: test.py [-h] [-v] [--host HOST] [--url_prefix URL_PREFIX] [--port PORT]
               [--ssl] [-t TIMEOUT] [-n] [-D] [--loglevel LOG_LEVEL]
               [-l LOG_FILE]
               {allocation,bloom,close,delete,optimize,show_indices,snapshot}
               ...

Curator for Elasticsearch indices. See
http://github.com/elasticsearch/curator/wiki

optional arguments:
  -h, --help            show this help message and exit
  -v, --version         show program's version number and exit
  --host HOST           Elasticsearch host. Default: localhost
  --url_prefix URL_PREFIX
                        Elasticsearch http url prefix. Default: none
  --port PORT           Elasticsearch port. Default: 9200
  --ssl                 Connect to Elasticsearch through SSL. Default: false
  -t TIMEOUT, --timeout TIMEOUT
                        Connection timeout in seconds. Default: 30
  -n, --dry-run         If true, does not perform any changes to the
                        Elasticsearch indices.
  -D, --debug           Debug mode
  --loglevel LOG_LEVEL  Log level
  -l LOG_FILE, --logfile LOG_FILE
                        log file

Commands:
  Valid Commands

  {allocation,bloom,close,delete,optimize,show_indices,snapshot}
                        Type: curator COMMAND --help for command-specific
                        help.
    allocation          Apply required index routing allocation rule
    bloom               Disable bloom filter cache for indices
    close               Close indices
    delete              Delete indices
    optimize            Optimize indices
    show_indices        Show indices
    snapshot            Snapshot indices
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#!/usr/bin/env python | |
import argparse | |
import sys | |
__version__ = '1.1.0-dev' | |
DEFAULT_ARGS = { | |
'host': 'localhost', | |
'url_prefix': '', | |
'port': 9200, | |
'ssl': False, | |
'timeout': 30, | |
'prefix': 'logstash-', | |
'separator': '.', | |
'time_unit': 'days', | |
'max_num_segments': 2, | |
'dry_run': False, | |
'debug': False, | |
'log_level': 'INFO', | |
'wait_for_completion': True, | |
'ignore_unavailable': False, | |
'include_global_state': False, | |
'partial': False, | |
} | |
# Top-level parser: connection and logging options shared by every command.
help_desc = 'Curator for Elasticsearch indices. See http://github.com/elasticsearch/curator/wiki'
parser = argparse.ArgumentParser(description=help_desc)
# Common args
parser.add_argument('-v', '--version', action='version', version='%(prog)s '+__version__)
parser.add_argument('--host', help='Elasticsearch host. Default: localhost', default=DEFAULT_ARGS['host'])
parser.add_argument('--url_prefix', help='Elasticsearch http url prefix. Default: none', default=DEFAULT_ARGS['url_prefix'])
parser.add_argument('--port', help='Elasticsearch port. Default: 9200', default=DEFAULT_ARGS['port'], type=int)
parser.add_argument('--ssl', help='Connect to Elasticsearch through SSL. Default: false', action='store_true', default=DEFAULT_ARGS['ssl'])
parser.add_argument('-t', '--timeout', help='Connection timeout in seconds. Default: 30', default=DEFAULT_ARGS['timeout'], type=int)
parser.add_argument('-n', '--dry-run', action='store_true', help='If true, does not perform any changes to the Elasticsearch indices.', default=DEFAULT_ARGS['dry_run'])
parser.add_argument('-D', '--debug', dest='debug', action='store_true', help='Debug mode', default=DEFAULT_ARGS['debug'])
parser.add_argument('--loglevel', dest='log_level', action='store', help='Log level', default=DEFAULT_ARGS['log_level'], type=str)
parser.add_argument('-l', '--logfile', dest='log_file', help='log file', type=str)
# Command sub_parsers.
# dest='which' guarantees the attribute always exists on the parsed
# namespace: on Python 3 subcommands are optional by default, and when no
# command is given the per-subparser set_defaults(which=...) never runs,
# so reading arguments.which would otherwise raise AttributeError.  With
# a dest, the chosen command name (or None) is stored under 'which',
# which matches what set_defaults provides for each subparser.
subparsers = parser.add_subparsers(
    dest='which',
    title='Commands', description='Valid Commands',
    help='Type: curator COMMAND --help for command-specific help.')
# --- allocation: apply a routing allocation rule to aging indices ----------
parser_allocation = subparsers.add_parser(
    'allocation', help='Apply required index routing allocation rule')
parser_allocation.set_defaults(which='allocation')
parser_allocation.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_allocation.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
parser_allocation.add_argument(
    '-T', '--time-unit',
    dest='time_unit', action='store', type=str,
    default=DEFAULT_ARGS['time_unit'],
    help='Unit of time to reckon by: [days, hours] Default: days')
parser_allocation.add_argument(
    '--older-than', required=True, type=int,
    help='Apply rule to indices older than n TIME_UNITs')
parser_allocation.add_argument(
    '--rule', required=True, type=str,
    help='Routing allocation rule to apply. Ex. tag=ssd')
# --- bloom: turn off the bloom filter cache on aging indices ---------------
parser_bloom = subparsers.add_parser(
    'bloom', help='Disable bloom filter cache for indices')
parser_bloom.set_defaults(which='bloom')
parser_bloom.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_bloom.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
parser_bloom.add_argument(
    '-T', '--time-unit',
    dest='time_unit', action='store', type=str,
    default=DEFAULT_ARGS['time_unit'],
    help='Unit of time to reckon by: [days, hours] Default: days')
parser_bloom.add_argument(
    '--older-than', required=True, type=int,
    help='Disable bloom filter cache for indices older than n TIME_UNITs')
# --- close: close aging indices --------------------------------------------
parser_close = subparsers.add_parser('close', help='Close indices')
parser_close.set_defaults(which='close')
parser_close.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_close.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
parser_close.add_argument(
    '-T', '--time-unit',
    dest='time_unit', action='store', type=str,
    default=DEFAULT_ARGS['time_unit'],
    help='Unit of time to reckon by: [days, hours] Default: days')
parser_close.add_argument(
    '--older-than', required=True, type=int,
    help='Close indices older than n TIME_UNITs')
# --- delete: remove indices by age or by disk usage ------------------------
parser_delete = subparsers.add_parser('delete', help='Delete indices')
parser_delete.set_defaults(which='delete')
parser_delete.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_delete.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
parser_delete.add_argument(
    '-T', '--time-unit',
    dest='time_unit', action='store', type=str,
    default=DEFAULT_ARGS['time_unit'],
    help='Unit of time to reckon by: [days, hours] Default: days')
# Age-based and space-based deletion are alternatives; argparse enforces
# that at most one of the two flags is supplied.
delete_group = parser_delete.add_mutually_exclusive_group()
delete_group.add_argument(
    '--older-than', type=int,
    help='Delete indices older than n TIME_UNITs')
delete_group.add_argument(
    '--disk-space', type=float,
    help='Delete indices beyond DISK_SPACE gigabytes.')
# --- optimize: merge segments of aging indices -----------------------------
parser_optimize = subparsers.add_parser('optimize', help='Optimize indices')
parser_optimize.set_defaults(which='optimize')
parser_optimize.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_optimize.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
parser_optimize.add_argument(
    '-T', '--time-unit',
    dest='time_unit', action='store', type=str,
    default=DEFAULT_ARGS['time_unit'],
    help='Unit of time to reckon by: [days, hours] Default: days')
parser_optimize.add_argument(
    '--older-than', required=True, type=int,
    help='Optimize indices older than n TIME_UNITs')
parser_optimize.add_argument(
    '--max_num_segments', type=int,
    default=DEFAULT_ARGS['max_num_segments'],
    help='Optimize segment count to n segments per shard.')
# --- show_indices: list matching indices (no time-unit filtering) ----------
parser_show = subparsers.add_parser('show_indices', help='Show indices')
parser_show.set_defaults(which='show_indices')
parser_show.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_show.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
# --- snapshot: capture, list, or prune snapshots in a repository -----------
parser_snapshot = subparsers.add_parser('snapshot', help='Snapshot indices')
parser_snapshot.set_defaults(which='snapshot')
parser_snapshot.add_argument(
    '-p', '--prefix',
    default=DEFAULT_ARGS['prefix'],
    help='Prefix for the indices. Indices that do not have this prefix are skipped. Default: logstash-')
parser_snapshot.add_argument(
    '-s', '--separator',
    default=DEFAULT_ARGS['separator'],
    help='TIME_UNIT separator. Default: .')
parser_snapshot.add_argument(
    '-T', '--time-unit',
    dest='time_unit', action='store', type=str,
    default=DEFAULT_ARGS['time_unit'],
    help='Unit of time to reckon by: [days, hours] Default: days')
parser_snapshot.add_argument(
    '--repository', required=True, type=str, help='Repository name')
# The four snapshot modes are alternatives; argparse enforces at most one.
snapshot_group = parser_snapshot.add_mutually_exclusive_group()
snapshot_group.add_argument(
    '--older-than', type=int,
    help='Capture snapshots for indices older than n TIME_UNITs.')
snapshot_group.add_argument(
    '--most-recent', type=int,
    help='Capture snapshots for n most recent number of indices.')
snapshot_group.add_argument(
    '--delete-older-than', type=int,
    help='Delete snapshots older than n TIME_UNITs.')
snapshot_group.add_argument(
    '--show-snapshots', action='store_true',
    help='Show all snapshots in REPOSITORY.')
# store_false: passing the flag turns waiting off; the default is to wait.
parser_snapshot.add_argument(
    '--no_wait_for_completion', action='store_false',
    default=DEFAULT_ARGS['wait_for_completion'],
    help='Do not wait until complete to return. Waits by default.')
parser_snapshot.add_argument(
    '--ignore_unavailable', action='store_true',
    default=DEFAULT_ARGS['ignore_unavailable'],
    help='Ignore unavailable shards/indices. Default=False')
parser_snapshot.add_argument(
    '--include_global_state', action='store_true',
    default=DEFAULT_ARGS['include_global_state'],
    help='Store cluster global state with snapshot. Default=False')
parser_snapshot.add_argument(
    '--partial', action='store_true',
    default=DEFAULT_ARGS['partial'],
    help='Do not fail if primary shard is unavailable. Default=False')
arguments = parser.parse_args()

# On Python 3, subcommands are optional by default and set_defaults(which=...)
# only runs for the chosen subparser, so 'which' may be missing entirely when
# no command is given.  Fail with a usage message instead of an AttributeError.
command = getattr(arguments, 'which', None)
if command is None:
    parser.print_usage()
    sys.stderr.write('{0}: error: a command is required\n'.format(sys.argv[0]))
    sys.exit(2)

# Cross-flag validation that argparse's mutually-exclusive groups cannot
# express ("at least one of").  Compare against None so an explicit value
# of 0 is not mistaken for "flag not given".
if command == 'delete':
    if arguments.older_than is None and arguments.disk_space is None:
        print('{0} delete: error: expect one of --older-than or --disk-space'.format(sys.argv[0]))
        sys.exit(1)
if command == 'snapshot':
    if (arguments.older_than is None and arguments.most_recent is None
            and arguments.delete_older_than is None
            and not arguments.show_snapshots):
        print('{0} snapshot: error: expect one of --older-than, --most-recent, --delete-older-than, or --show-snapshots'.format(sys.argv[0]))
        sys.exit(1)

print("Arguments: {}".format(arguments.__dict__))
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment