Add ansible provisioning (#122)

* first ansible skeleton

* first commit of ansible installation of vulnwhisperer outside docker

* first ansible skeleton

* first commit of ansible installation of vulnwhisperer outside docker

* refactor the ansible role a bit

* update readme, add fail validation step to provision.yml and fix typo when calling a logging function
Andrea Lusuardi
2018-11-14 10:14:12 +01:00
committed by Quim Montal
parent a8671a7303
commit 3a09f60543
95 changed files with 4459 additions and 1 deletion


@@ -0,0 +1,83 @@
################################
# Elasticsearch
################################
# Elasticsearch home directory
ES_HOME={{es_home}}
# Elasticsearch Java path
#JAVA_HOME=
# Elasticsearch configuration directory
CONF_DIR={{conf_dir}}
ES_PATH_CONF={{conf_dir}}
# Elasticsearch data directory
DATA_DIR={{ data_dirs | array_to_str }}
# Elasticsearch logs directory
LOG_DIR={{log_dir}}
# Elasticsearch PID directory
PID_DIR={{pid_dir}}
ES_JVM_OPTIONS={{conf_dir}}/jvm.options
# Configure restart on package upgrade (true, every other setting will lead to not restarting)
#ES_RESTART_ON_UPGRADE=true
# Path to the GC log file
#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
################################
# Elasticsearch service
################################
# SysV init.d
#
# When executing the init script, this user will be used to run the elasticsearch service.
# The default value is 'elasticsearch' and is declared in the init.d file.
# Note that this setting is only used by the init script. If changed, make sure that
# the configured user can read and write into the data, work, plugins and log directories.
# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
ES_USER={{es_user}}
ES_GROUP={{es_group}}
# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
ES_STARTUP_SLEEP_TIME=5
################################
# System properties
################################
# Specifies the maximum file descriptor number that can be opened by this process
# When using Systemd, this setting is ignored and the LimitNOFILE defined in
# /usr/lib/systemd/system/elasticsearch.service takes precedence
{% if es_max_open_files is defined %}
#MAX_OPEN_FILES
MAX_OPEN_FILES={{es_max_open_files}}
{% endif %}
# The maximum number of bytes of memory that may be locked into RAM
# Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option
# in elasticsearch.yml
# When using Systemd, the LimitMEMLOCK property must be set
# in /usr/lib/systemd/system/elasticsearch.service
#MAX_LOCKED_MEMORY=
{% if m_lock_enabled %}
MAX_LOCKED_MEMORY=unlimited
{% endif %}
# Maximum number of VMA (Virtual Memory Areas) a process can own
# When using Systemd, this setting is ignored and the 'vm.max_map_count'
# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf
#MAX_MAP_COUNT=262144
{% if es_max_map_count is defined %}
MAX_MAP_COUNT={{es_max_map_count}}
{% endif %}
# Specifies the maximum number of threads that can be started.
# Elasticsearch requires a minimum of 2048.
{% if es_max_threads is defined %}
MAX_THREADS={{ es_max_threads }}
{% endif %}
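
For orientation, a hedged sketch of the role variables this default-file template consumes. The names come straight from the template; the values are illustrative assumptions, not the role's defaults, and data_dirs is a list that the role's custom array_to_str filter presumably flattens into a comma-separated string:

es_home: /usr/share/elasticsearch
conf_dir: /etc/elasticsearch/node1        # assumed per-instance config directory
data_dirs:
  - /var/lib/elasticsearch/node1
log_dir: /var/log/elasticsearch/node1
pid_dir: /var/run/elasticsearch
es_user: elasticsearch
es_group: elasticsearch
es_max_open_files: 65536
m_lock_enabled: false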


@@ -0,0 +1,11 @@
[elasticsearch-{{ es_repo_name }}]
name=Elasticsearch repository for {{ es_repo_name }} packages
baseurl=https://artifacts.elastic.co/packages/{{ es_repo_name }}/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
{% if es_proxy_host is defined and es_proxy_host != '' and es_proxy_port is defined %}
proxy=http://{{ es_proxy_host }}:{{es_proxy_port}}
{% endif %}
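
Illustrative values for the yum repository template above (assumptions, not role defaults); the proxy line is rendered only when both es_proxy_host and es_proxy_port are set:

es_repo_name: 6.x
es_proxy_host: proxy.example.com   # hypothetical proxy, omit to skip the proxy= line
es_proxy_port: 3128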


@@ -0,0 +1,75 @@
{% if es_config %}
{{ es_config | to_nice_yaml }}
{% endif %}
{% if es_config['cluster.name'] is not defined %}
cluster.name: elasticsearch
{% endif %}
{% if es_config['node.name'] is not defined %}
node.name: {{inventory_hostname}}-{{es_instance_name}}
{% endif %}
#################################### Paths ####################################
# Path to directory containing configuration (this file and logging.yml):
{% if (es_version | version_compare('6.0.0', '<')) %}
path.conf: {{ conf_dir }}
{% endif %}
path.data: {{ data_dirs | array_to_str }}
path.logs: {{ log_dir }}
{% if es_path_repo is defined %}
path.repo: {{ es_path_repo }}
{% endif %}
{% if es_action_auto_create_index == true %}
action.auto_create_index: true
{% elif not es_action_auto_create_index %}
action.auto_create_index: false
{% else %}
action.auto_create_index: {{ es_action_auto_create_index }}
{% endif %}
{% if es_enable_xpack %}
{% if not "security" in es_xpack_features %}
xpack.security.enabled: false
{% endif %}
{% if not "monitoring" in es_xpack_features %}
xpack.monitoring.enabled: false
{% endif %}
{% if not "alerting" in es_xpack_features %}
xpack.watcher.enabled: false
{% endif %}
{% if not "ml" in es_xpack_features %}
xpack.ml.enabled: false
{% endif %}
{% if not "graph" in es_xpack_features %}
xpack.graph.enabled: false
{% endif %}
{% endif %}
{% if es_mail_config is defined %}
xpack.notification.email:
account:
{{ es_mail_config['account'] }}:
profile: {{ es_mail_config['profile'] }}
email_defaults:
from: {{ es_mail_config['from'] }}
smtp:
auth: {{ es_mail_config['require_auth'] }}
host: {{ es_mail_config['host'] }}
port: {{ es_mail_config['port'] }}
{% if es_mail_config['require_auth'] == true %}
user: {{ es_mail_config['user'] }}
password: {{ es_mail_config['pass'] }}
{% endif %}
{% endif %}
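
A hedged example of the variables driving the elasticsearch.yml template above. The es_config dictionary is dumped verbatim through to_nice_yaml, the X-Pack feature toggles only apply when es_enable_xpack is true, and the mail block renders only if es_mail_config is defined; all values below are assumptions for illustration:

es_instance_name: node1
es_config:
  cluster.name: vulnwhisperer
  node.name: node1
  network.host: 0.0.0.0
  http.port: 9200
es_enable_xpack: false
es_xpack_features: []
# Optional notification settings, keys as read by the template:
# es_mail_config:
#   account: default
#   profile: standard
#   from: elastic@example.com
#   require_auth: false
#   host: smtp.example.com
#   port: 25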


@@ -0,0 +1,229 @@
#!/bin/bash
#
# /etc/init.d/elasticsearch -- startup script for Elasticsearch
#
### BEGIN INIT INFO
# Provides: elasticsearch
# Required-Start: $network $remote_fs $named
# Required-Stop: $network $remote_fs $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts elasticsearch
# Description: Starts elasticsearch using start-stop-daemon
### END INIT INFO
PATH=/bin:/usr/bin:/sbin:/usr/sbin
NAME={{es_instance_name}}_{{default_file | basename}}
{% if es_config['node.name'] is defined %}
DESC="Elasticsearch Server - {{es_config['node.name']}}"
{% else %}
DESC="Elasticsearch Server - {{es_instance_name}}"
{% endif %}
DEFAULT=/etc/default/$NAME
if [ `id -u` -ne 0 ]; then
echo "You need root privileges to run this script"
exit 1
fi
. /lib/lsb/init-functions
if [ -r /etc/default/rcS ]; then
. /etc/default/rcS
fi
# The following variables can be overwritten in $DEFAULT
# Run Elasticsearch as this user ID and group ID
ES_USER={{es_user}}
ES_GROUP={{es_group}}
# Directory where the Elasticsearch binary distribution resides
ES_HOME={{es_home}}
# Maximum number of open files
{% if es_max_open_files is defined %}
MAX_OPEN_FILES={{es_max_open_files}}
{% endif %}
# Maximum amount of locked memory
#MAX_LOCKED_MEMORY=
{% if m_lock_enabled %}
MAX_LOCKED_MEMORY=unlimited
{% endif %}
# Elasticsearch log directory
LOG_DIR={{log_dir}}
# Elasticsearch data directory
DATA_DIR={{ data_dirs | array_to_str }}
# Elasticsearch configuration directory
CONF_DIR={{conf_dir}}
ES_PATH_CONF={{ conf_dir }}
# Maximum number of VMA (Virtual Memory Areas) a process can own
{% if es_max_map_count is defined %}
MAX_MAP_COUNT={{es_max_map_count}}
{% endif %}
# Elasticsearch PID file directory
PID_DIR={{pid_dir}}
ES_JVM_OPTIONS="{{conf_dir}}/jvm.options"
# End of variables that can be overwritten in $DEFAULT
# overwrite settings from default file
if [ -f "$DEFAULT" ]; then
. "$DEFAULT"
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
if [ "$ES_USER" != "elasticsearch" ] || [ "$ES_GROUP" != "elasticsearch" ]; then
echo "WARNING: ES_USER and ES_GROUP are deprecated and will be removed in the next major version of Elasticsearch, got: [$ES_USER:$ES_GROUP]"
fi
# Define other required variables
PID_FILE="$PID_DIR/$NAME.pid"
DAEMON=$ES_HOME/bin/elasticsearch
{% if (es_version | version_compare('6.0.0', '<')) %}
DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR"
{% else %}
DAEMON_OPTS="-d -p $PID_FILE"
{% endif %}
export ES_JAVA_OPTS
export JAVA_HOME
export ES_INCLUDE
export ES_JVM_OPTIONS
export ES_PATH_CONF
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi
if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi
if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi
if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi
if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi
if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi
if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi
# Check DAEMON exists
if [ ! -x "$DAEMON" ]; then
echo "The elasticsearch startup script does not exists or it is not executable, tried: $DAEMON"
exit 1
fi
checkJava() {
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
}
case "$1" in
start)
checkJava
log_daemon_msg "Starting $DESC"
pid=`pidofproc -p $PID_FILE elasticsearch`
if [ -n "$pid" ] ; then
log_begin_msg "Already running."
log_end_msg 0
exit 0
fi
# Ensure that the PID_DIR exists (it is cleaned at OS startup time)
if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
fi
if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then
touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
fi
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
if [ -n "$MAX_LOCKED_MEMORY" ]; then
ulimit -l $MAX_LOCKED_MEMORY
fi
if [ -n "$MAX_THREADS" ]; then
ulimit -u $MAX_THREADS
fi
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
fi
# Start Daemon
start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
return=$?
if [ $return -eq 0 ]; then
i=0
timeout={{es_debian_startup_timeout}}
# Wait for the process to be properly started before exiting
until { kill -0 `cat "$PID_FILE"`; } >/dev/null 2>&1
do
sleep 1
i=$(($i + 1))
if [ $i -gt $timeout ]; then
log_end_msg 1
exit 1
fi
done
fi
log_end_msg $return
exit $return
;;
stop)
log_daemon_msg "Stopping $DESC"
if [ -f "$PID_FILE" ]; then
start-stop-daemon --stop --pidfile "$PID_FILE" \
--user "$ES_USER" \
--quiet \
--retry forever/TERM/20 > /dev/null
# capture the exit status once; re-reading $? after the first test would check the test itself
retval=$?
if [ $retval -eq 1 ]; then
log_progress_msg "$DESC is not running but pid file exists, cleaning up"
elif [ $retval -eq 3 ]; then
PID="`cat $PID_FILE`"
log_failure_msg "Failed to stop $DESC (pid $PID)"
exit 1
fi
rm -f "$PID_FILE"
else
log_progress_msg "(not running)"
fi
log_end_msg 0
;;
status)
status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $?
;;
restart|force-reload)
if [ -f "$PID_FILE" ]; then
$0 stop
fi
$0 start
;;
*)
log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}"
exit 1
;;
esac
exit 0
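
The start branch of the Debian init script above polls the PID file for up to es_debian_startup_timeout seconds before giving up; an assumed value for that role variable:

es_debian_startup_timeout: 10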


@@ -0,0 +1,217 @@
#!/bin/bash
#
# elasticsearch <summary>
#
# chkconfig: 2345 80 20
# description: Starts and stops a single elasticsearch instance on this system
#
### BEGIN INIT INFO
# Provides: Elasticsearch
# Required-Start: $network $named
# Required-Stop: $network $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: This service manages the elasticsearch daemon
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.
### END INIT INFO
#
# init.d / servicectl compatibility (openSUSE)
#
if [ -f /etc/rc.status ]; then
. /etc/rc.status
rc_reset
fi
#
# Source function library.
#
if [ -f /etc/rc.d/init.d/functions ]; then
. /etc/rc.d/init.d/functions
fi
# Sets the default values for elasticsearch variables used in this script
ES_USER="{{es_user}}"
ES_GROUP="{{es_group}}"
ES_HOME="{{es_home}}"
{% if es_max_open_files is defined %}
MAX_OPEN_FILES={{es_max_open_files}}
{% endif %}
# Maximum number of VMA (Virtual Memory Areas) a process can own
{% if es_max_map_count is defined %}
MAX_MAP_COUNT={{es_max_map_count}}
{% endif %}
LOG_DIR="{{log_dir}}"
DATA_DIR={{ data_dirs | array_to_str }}
CONF_DIR="{{conf_dir}}"
ES_PATH_CONF="{{ conf_dir }}"
PID_DIR="{{pid_dir}}"
# Source the default env file
ES_ENV_FILE="{{instance_default_file}}"
if [ -f "$ES_ENV_FILE" ]; then
. "$ES_ENV_FILE"
fi
if [ "$ES_USER" != "elasticsearch" ] || [ "$ES_GROUP" != "elasticsearch" ]; then
echo "WARNING: ES_USER and ES_GROUP are deprecated and will be removed in the next major version of Elasticsearch, got: [$ES_USER:$ES_GROUP]"
fi
# CONF_FILE setting was removed
if [ ! -z "$CONF_FILE" ]; then
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
exit 1
fi
exec="$ES_HOME/bin/elasticsearch"
prog="{{es_instance_name}}_{{default_file | basename}}"
pidfile="$PID_DIR/${prog}.pid"
export ES_JAVA_OPTS
export JAVA_HOME
export ES_INCLUDE
export ES_JVM_OPTIONS
export ES_STARTUP_SLEEP_TIME
export ES_PATH_CONF
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi
if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi
if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi
if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi
if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi
if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi
if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi
lockfile=/var/lock/subsys/$prog
# backwards compatibility for old config sysconfig files, pre 0.90.1
if [ -n $USER ] && [ -z $ES_USER ] ; then
ES_USER=$USER
fi
if [ ! -x "$exec" ]; then
echo "The elasticsearch startup script does not exists or it is not executable, tried: $exec"
exit 1
fi
checkJava() {
if [ -x "$JAVA_HOME/bin/java" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA=`which java`
fi
if [ ! -x "$JAVA" ]; then
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
exit 1
fi
}
start() {
checkJava
[ -x $exec ] || exit 5
if [ -n "$MAX_OPEN_FILES" ]; then
ulimit -n $MAX_OPEN_FILES
fi
if [ -n "$MAX_LOCKED_MEMORY" ]; then
ulimit -l $MAX_LOCKED_MEMORY
fi
if [ -n "$MAX_THREADS" ]; then
ulimit -u $MAX_THREADS
fi
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
fi
# Ensure that the PID_DIR exists (it is cleaned at OS startup time)
if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
fi
if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
fi
cd $ES_HOME
echo -n $"Starting $prog: "
# if not running, start it up here, usually something like "daemon $exec"
{% if (es_version | version_compare('6.0.0', '<')) %}
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR
{% else %}
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d
{% endif %}
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $prog: "
# stop it here, often "killproc $prog"
killproc -p $pidfile -d 86400 $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
# run checks to determine if the service is running or use generic status
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?


@@ -0,0 +1,118 @@
## JVM configuration
################################################################
## IMPORTANT: JVM heap size
################################################################
##
## You should always set the min and max JVM heap
## size to the same value. For example, to set
## the heap to 4 GB, set:
##
## -Xms4g
## -Xmx4g
##
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
## for more information
##
################################################################
# Xms represents the initial size of total heap space
# Xmx represents the maximum size of total heap space
{% if es_heap_size is defined %}
-Xms{{ es_heap_size }}
-Xmx{{ es_heap_size }}
{% else %}
-Xms2g
-Xmx2g
{% endif %}
################################################################
## Expert settings
################################################################
##
## All settings below this section are considered
## expert settings. Don't tamper with them unless
## you understand what you are doing
##
################################################################
## GC configuration
-XX:+UseConcMarkSweepGC
-XX:CMSInitiatingOccupancyFraction=75
-XX:+UseCMSInitiatingOccupancyOnly
## optimizations
# pre-touch memory pages used by the JVM during initialization
-XX:+AlwaysPreTouch
## basic
# force the server VM
-server
# set to headless, just in case
-Djava.awt.headless=true
# ensure UTF-8 encoding by default (e.g. filenames)
-Dfile.encoding=UTF-8
# use our provided JNA always versus the system one
-Djna.nosys=true
# use old-style file permissions on JDK9
-Djdk.io.permissionsUseCanonicalPath=true
# flags to configure Netty
-Dio.netty.noUnsafe=true
-Dio.netty.noKeySetOptimization=true
-Dio.netty.recycler.maxCapacityPerThread=0
# log4j 2
-Dlog4j.shutdownHookEnabled=false
-Dlog4j2.disable.jmx=true
-Dlog4j.skipJansi=true
## heap dumps
# generate a heap dump when an allocation from the Java heap fails
# heap dumps are created in the working directory of the JVM
-XX:+HeapDumpOnOutOfMemoryError
# specify an alternative path for heap dumps
# ensure the directory exists and has sufficient space
#-XX:HeapDumpPath=${heap.dump.path}
## GC logging
#-XX:+PrintGCDetails
#-XX:+PrintGCTimeStamps
#-XX:+PrintGCDateStamps
#-XX:+PrintClassHistogram
#-XX:+PrintTenuringDistribution
#-XX:+PrintGCApplicationStoppedTime
# log GC status to a file with time stamps
# ensure the directory exists
#-Xloggc:${loggc}
# By default, the GC log file will not rotate.
# By uncommenting the lines below, the GC log file
# will be rotated every 128MB at most 32 times.
#-XX:+UseGCLogFileRotation
#-XX:NumberOfGCLogFiles=32
#-XX:GCLogFileSize=128M
# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
# If documents were already indexed with unquoted fields in a previous version
# of Elasticsearch, some operations may throw errors.
#
# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
# only for migration purposes.
#-Delasticsearch.json.allow_unquoted_field_names=true
{% if es_jvm_custom_parameters !='' %}
{% for item in es_jvm_custom_parameters %}
{{ item }}
{% endfor %}
{% endif %}
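
Heap sizing in the jvm.options template above comes from es_heap_size (falling back to 2g), and any extra flags are appended from es_jvm_custom_parameters; a hedged example with assumed values:

es_heap_size: 1g
es_jvm_custom_parameters:
  - "-Djava.net.preferIPv4Stack=true"   # illustrative extra JVM flag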


@@ -0,0 +1,117 @@
status = error
# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
appender.rolling.type = RollingFile
appender.rolling.name = rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.rolling.fileName = ${sys:es.logs}.log
{% else %}
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
{% endif %}
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
{% else %}
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
{% endif %}
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
{% if (es_version | version_compare('6.0.0', '>')) %}
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.rolling.policies.size.size = 128MB
appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.fileIndex = nomax
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
{% endif %}
rootLogger.level = info
rootLogger.appenderRef.console.ref = console
rootLogger.appenderRef.rolling.ref = rolling
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
{% else %}
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
{% endif %}
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
{% else %}
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
{% endif %}
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4
logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
{% else %}
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
{% endif %}
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
{% else %}
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
{% endif %}
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true
logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
logger.index_search_slowlog_rolling.additivity = false
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
{% else %}
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
{% endif %}
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
{% if (es_version | version_compare('6.0.0', '<')) %}
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
{% else %}
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
{% endif %}
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
logger.index_indexing_slowlog.additivity = false
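
The version_compare branches in the log4j2 template above switch between the pre-6.0 ${sys:es.logs} properties and the 6.x base_path/cluster_name properties, so the rendered file depends on es_version; an assumed value:

es_version: 6.4.2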


@@ -0,0 +1 @@
{{ es_role_mapping | to_nice_yaml }}


@@ -0,0 +1 @@
{{ es_roles.file | to_nice_yaml }}


@@ -0,0 +1 @@
{{users_roles | join("\n") }}
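
The three one-line templates above render the X-Pack file-realm security files from role variables; a hedged sketch using the variable names from the templates (role names, DNs, and privileges are purely illustrative):

es_role_mapping:
  power_user:
    - "cn=admins,dc=example,dc=com"
es_roles:
  file:
    logstash_writer:
      cluster:
        - manage_index_templates
      indices:
        - names:
            - "logstash-*"
          privileges:
            - write
            - create_index
users_roles:
  - "logstash_writer:logstash_system"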


@@ -0,0 +1,76 @@
[Unit]
Description=Elasticsearch-{{es_instance_name}}
Documentation=http://www.elastic.co
Wants=network-online.target
After=network-online.target
[Service]
Environment=ES_HOME={{es_home}}
Environment=CONF_DIR={{conf_dir}}
Environment=ES_PATH_CONF={{conf_dir}}
Environment=DATA_DIR={{ data_dirs | array_to_str }}
Environment=LOG_DIR={{log_dir}}
Environment=PID_DIR={{pid_dir}}
EnvironmentFile=-{{instance_default_file}}
WorkingDirectory={{es_home}}
User={{es_user}}
Group={{es_group}}
{% if (es_version | version_compare('6.0.0', '<')) %}
ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec
{% endif %}
ExecStart={{es_home}}/bin/elasticsearch \
-p ${PID_DIR}/elasticsearch.pid \
{% if (es_version | version_compare('6.0.0', '<')) %}
-Edefault.path.logs=${LOG_DIR} \
-Edefault.path.data=${DATA_DIR} \
-Edefault.path.conf=${CONF_DIR} \
{% endif %}
--quiet
# StandardOutput is configured to redirect to journalctl since
# some error messages may be logged in standard output before
# elasticsearch logging system is initialized. Elasticsearch
# stores its logs in /var/log/elasticsearch and does not use
# journalctl by default. If you also want to enable journalctl
# logging, you can simply remove the "quiet" option from ExecStart.
StandardOutput=journal
StandardError=inherit
# Specifies the maximum file descriptor number that can be opened by this process
{% if es_max_open_files is defined %}
LimitNOFILE={{es_max_open_files}}
{% endif %}
# Specifies the maximum number of bytes of memory that may be locked into RAM
# Set to "infinity" if you use the 'bootstrap.memory_lock: true' option
# in elasticsearch.yml and 'MAX_LOCKED_MEMORY=unlimited' in {{instance_default_file}}
{% if m_lock_enabled %}
LimitMEMLOCK=infinity
{% endif %}
# Specifies the maximum number of threads that can be started. Elasticsearch requires a
# minimum of 2048.
LimitNPROC={{ es_max_threads }}
# Disable timeout logic and wait until process is stopped
TimeoutStopSec=0
# SIGTERM signal is used to stop the Java process
KillSignal=SIGTERM
# Send the signal only to the JVM rather than its control group
KillMode=process
# Java process is never killed
SendSIGKILL=no
# When a JVM receives a SIGTERM signal it exits with code 143
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
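
Per the comments in the unit and default files above, locking the heap in RAM requires pairing the systemd limit with the matching Elasticsearch bootstrap setting; a hedged example of the variables involved (values assumed):

m_lock_enabled: true
es_config:
  bootstrap.memory_lock: true
es_max_threads: 8192    # rendered into LimitNPROC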