Add ansible provisioning (#122)

* first ansible skeleton

* first commit of ansible installation of vulnwhisperer outside docker

* refactor the ansible role a bit

* update readme, add fail validation step to provision.yml and fix typo when calling a logging function
Andrea Lusuardi authored on 2018-11-14 10:14:12 +01:00, committed by Quim Montal
parent a8671a7303, commit 3a09f60543
95 changed files with 4459 additions and 1 deletions

@@ -0,0 +1,38 @@
---
# It is possible to set these as defaults with messy jinja templating one-liners, however:
# 1. That is really hard to read and debug
# 2. When running multiple plays with the same role the defaults are not re-evaluated. An example of this
# can be seen in the https://github.com/elastic/ansible-elasticsearch/blob/master/test/integration/xpack.yml
# integration test and in the Multi Node server documentation examples https://github.com/elastic/ansible-elasticsearch/blob/master/test/integration/xpack.yml
- name: Set the defaults here otherwise they can't be overridden in the same play if the role is called twice
set_fact:
es_open_xpack: true
es_install_xpack: false
es_users_path: "users"
es_xpack_conf_subdir: ""
es_repo_name: "{{ es_major_version }}"
es_xpack_users_command: "elasticsearch-users"
- name: Detect if es_version is before X-Pack was open and included
set_fact:
es_open_xpack: false
when: "es_version | version_compare('6.3.0', '<')"
- name: If this is an older version we need to install X-Pack as a plugin and use a different users command
set_fact:
es_install_xpack: true
es_xpack_users_command: "x-pack/users"
es_xpack_conf_subdir: "/x-pack"
when:
- not es_open_xpack
- es_enable_xpack
- name: Use the oss repo and package if xpack is not being used
set_fact:
es_repo_name: "{{ 'oss-' + es_major_version }}"
es_package_name: "elasticsearch-oss"
when:
- es_open_xpack
- not es_enable_xpack
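
To make the branching above concrete, here is a hypothetical group_vars snippet (variable names come from this role; the version value is only illustrative) that would take the OSS path:

es_major_version: "6.x"
es_version: "6.4.0"        # >= 6.3.0, so es_open_xpack stays true
es_enable_xpack: false     # X-Pack open but disabled, so the oss- repo and elasticsearch-oss package are selected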

@@ -0,0 +1,6 @@
---
- name: Debian - hold elasticsearch version
become: yes
command: "apt-mark hold {{ es_package_name }}"
register: hold_elasticsearch_result
changed_when: "hold_elasticsearch_result.stdout != '{{ es_package_name }} was already set on hold.'"
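
The role never removes this hold. If a manual upgrade is ever needed, a one-off task along these lines (a sketch only, not part of this role) would release it:

- name: Debian - unhold elasticsearch version
  become: yes
  command: "apt-mark unhold {{ es_package_name }}"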

@@ -0,0 +1,80 @@
---
- name: set fact force_install to no
set_fact: force_install=no
- name: set fact force_install to yes
set_fact: force_install=yes
when: es_allow_downgrades
- name: Debian - Install apt-transport-https to support https APT downloads
become: yes
apt: name=apt-transport-https state=present
when: es_use_repository
- name: Debian - Add Elasticsearch repository key
become: yes
apt_key: url="{{ es_apt_key }}" state=present
when: es_use_repository and es_apt_key
- name: Debian - Add elasticsearch repository
become: yes
apt_repository: repo={{ item.repo }} state={{ item.state}}
with_items:
- { repo: "{{ es_apt_url_old }}", state: "absent" }
- { repo: "{{ es_apt_url }}", state: "present" }
when: es_use_repository
- name: Gracefully stop and remove elasticsearch if we are switching to the oss version
when:
- es_package_name == 'elasticsearch-oss'
block:
- name: Check if the elasticsearch package is installed
shell: dpkg-query -W -f'${Status}' elasticsearch
register: elasticsearch_package
failed_when: False
changed_when: False
- name: stop elasticsearch
become: yes
service:
name: '{{ instance_init_script | basename }}'
state: stopped
when: elasticsearch_package.stdout == 'install ok installed'
- name: Debian - Remove elasticsearch package if we are installing the oss package
become: yes
apt:
name: 'elasticsearch'
state: absent
when: elasticsearch_package.stdout == 'install ok installed'
- name: Debian - Ensure elasticsearch is installed
become: yes
apt:
name: '{{ es_package_name }}{% if es_version is defined and es_version != "" %}={{ es_version }}{% endif %}'
state: present
force: '{{ force_install }}'
allow_unauthenticated: "{{ 'no' if es_apt_key else 'yes' }}"
cache_valid_time: 86400
when: es_use_repository
register: debian_elasticsearch_install_from_repo
notify: restart elasticsearch
environment:
ES_PATH_CONF: "/etc/elasticsearch"
- name: Debian - Include versionlock
include: elasticsearch-Debian-version-lock.yml
when: es_version_lock
- name: Debian - Download elasticsearch from url
get_url: url={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.deb{% endif %} dest=/tmp/elasticsearch-{{ es_version }}.deb validate_certs=no
when: not es_use_repository
- name: Debian - Ensure elasticsearch is installed from downloaded package
become: yes
apt: deb=/tmp/elasticsearch-{{ es_version }}.deb
when: not es_use_repository
register: elasticsearch_install_from_package
notify: restart elasticsearch
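
For hosts that cannot reach the APT repository, the tasks above also support a direct package download; a hedged example of the two variables they consult (the URL is a placeholder, not a real artifact):

es_use_repository: false
es_custom_package_url: "https://artifacts.example.com/elasticsearch-6.4.2.deb"   # placeholder URL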

@@ -0,0 +1,7 @@
---
- name: RedHat - install yum-version-lock
become: yes
yum: name=yum-plugin-versionlock state=present update_cache=yes
- name: RedHat - lock elasticsearch version
become: yes
shell: yum versionlock delete 0:elasticsearch* ; yum versionlock add {{ es_package_name }}{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %}

@@ -0,0 +1,51 @@
---
- name: set fact allow_downgrade to no
set_fact: allow_downgrade=no
- name: set fact allow_downgrade to yes
set_fact: allow_downgrade=yes
when: es_allow_downgrades
- name: Ensure libselinux-python on CentOS 6.x
become: yes
yum: name=libselinux-python state=present update_cache=yes
when: ( ansible_distribution == "CentOS" ) and ( ansible_distribution_major_version == "6" )
- name: RedHat - add Elasticsearch repo
become: yes
template: src=elasticsearch.repo dest=/etc/yum.repos.d/elasticsearch-{{ es_repo_name }}.repo
when: es_use_repository
- name: RedHat - include versionlock
include: elasticsearch-RedHat-version-lock.yml
when: es_version_lock
- name: RedHat - Remove non oss package if the old elasticsearch package is installed
become: yes
yum:
name: 'elasticsearch'
state: 'absent'
when: es_package_name == 'elasticsearch-oss'
- name: RedHat - Install Elasticsearch
become: yes
yum:
name: '{{ es_package_name }}{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %}'
state: present
update_cache: yes
allow_downgrade: '{{ allow_downgrade }}'
when: es_use_repository
register: redhat_elasticsearch_install_from_repo
notify: restart elasticsearch
until: redhat_elasticsearch_install_from_repo.rc == 0
retries: 5
delay: 10
environment:
ES_PATH_CONF: "/etc/elasticsearch"
- name: RedHat - Install Elasticsearch from url
become: yes
yum: name={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.noarch.rpm{% endif %} state=present
when: not es_use_repository
register: elasticsearch_install_from_package
notify: restart elasticsearch

@@ -0,0 +1,129 @@
---
# Configure Elasticsearch Node
#Create required directories
- name: Create Directories
become: yes
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
with_items:
- "{{pid_dir}}"
- "{{log_dir}}"
- "{{conf_dir}}"
- name: Create Data Directories
become: yes
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
with_items:
- "{{data_dirs}}"
#Copy the config template
- name: Copy Configuration File
become: yes
template: src=elasticsearch.yml.j2 dest={{conf_dir}}/elasticsearch.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
register: system_change
notify: restart elasticsearch
#Copy the instance specific default file
- name: Copy Default File for Instance
become: yes
template: src=elasticsearch.j2 dest={{instance_default_file}} mode=0644 force=yes
notify: restart elasticsearch
#Copy the instance specific init file
- name: Copy Debian Init File for Instance
become: yes
template: src=init/debian/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
when: ansible_os_family == 'Debian' and not use_system_d
notify: restart elasticsearch
#Copy the instance specific init file
- name: Copy Redhat Init File for Instance
become: yes
template: src=init/redhat/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
when: ansible_os_family == 'RedHat' and not use_system_d
notify: restart elasticsearch
#Copy the systemd specific file if systemd is installed
- name: Copy Systemd File for Instance
become: yes
template: src=systemd/elasticsearch.j2 dest={{instance_sysd_script}} mode=0644 force=yes
when: use_system_d
notify:
- reload systemd configuration
- restart elasticsearch
#Copy the logging.yml
- name: Copy log4j2.properties File for Instance
become: yes
template: src={{es_config_log4j2}} dest={{conf_dir}}/log4j2.properties owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
notify: restart elasticsearch
- name: Copy jvm.options File for Instance
become: yes
template: src=jvm.options.j2 dest={{conf_dir}}/jvm.options owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
notify: restart elasticsearch
#Clean up unwanted package scripts to avoid confusion
- name: Delete Default Init
become: yes
file: dest=/etc/init.d/elasticsearch state=absent
- name: Create empty default environment file
become: yes
changed_when: False
copy:
dest: /etc/default/elasticsearch
content: ''
when: ansible_os_family == 'Debian'
- name: Create empty default environment file
become: yes
changed_when: False
copy:
dest: /etc/sysconfig/elasticsearch
content: ''
when: ansible_os_family == 'RedHat'
- name: Symlink default systemd service to first instance of elasticsearch
when: use_system_d
block:
- name: Check if default systemd file exists
stat:
path: "{{ sysd_script }}"
register: sysd_stat_result
- name: Remove if it is a normal file
become: yes
file:
path: "{{ sysd_script }}"
state: absent
when: sysd_stat_result.stat.exists and not sysd_stat_result.stat.islnk
- name: Create a symbolic link to the default systemd location to the first instance running on this host
become: yes
file:
state: link
src: "{{ instance_sysd_script }}"
path: "{{ sysd_script }}"
when: sysd_stat_result.stat.exists and not sysd_stat_result.stat.islnk
notify:
- reload systemd configuration
- restart elasticsearch
- name: Delete Default Configuration File
become: yes
file: dest=/etc/elasticsearch/elasticsearch.yml state=absent
- name: Delete Default Logging File
become: yes
file: dest=/etc/elasticsearch/logging.yml state=absent
- name: Delete Default Log4j2 File
become: yes
file: dest=/etc/elasticsearch/log4j2.properties state=absent
- name: Delete Default JVM Options File
become: yes
file: dest=/etc/elasticsearch/jvm.options state=absent

@@ -0,0 +1,24 @@
---
#Add the elasticsearch user before installing from packages.
- name: Ensure optional elasticsearch group is created with the correct id.
become: yes
#Restart if these change
notify: restart elasticsearch
group:
state: present
name: "{{ es_group }}"
system: yes
gid: "{{ es_group_id }}"
- name: Ensure optional elasticsearch user is created with the correct id.
become: yes
#Restart if these change
notify: restart elasticsearch
user:
state: present
name: "{{ es_user }}"
comment: elasticsearch system user
system: yes
createhome: no
uid: "{{ es_user_id }}"
group: "{{ es_group }}"

@@ -0,0 +1,75 @@
# Check for mandatory parameters
- name: fail when es_instance_name is not defined
fail: msg="es_instance_name must be specified and cannot be blank"
when: es_instance_name is not defined or es_instance_name == ''
- name: fail when es_proxy_port is not defined or is blank
fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined"
when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '')
- name: debug message
debug: msg="WARNING - It is recommended you specify the parameter 'http.port'"
when: es_config['http.port'] is not defined
- name: debug message
debug: msg="WARNING - It is recommended you specify the parameter 'transport.tcp.port'"
when: es_config['transport.tcp.port'] is not defined
- name: debug message
debug: msg="WARNING - It is recommended you specify the parameter 'discovery.zen.ping.unicast.hosts'"
when: es_config['discovery.zen.ping.unicast.hosts'] is not defined
#If the user attempts to lock memory they must specify a heap size
- name: fail when heap size is not specified when using memory lock
fail: msg="If locking memory with bootstrap.memory_lock a heap size must be specified"
when: es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True and es_heap_size is not defined
#Check that when using security we have an es_api_basic_auth_username and es_api_basic_auth_password - otherwise any http calls won't work
- name: fail when api credentials are not declared when using security
fail: msg="Enabling security requires an es_api_basic_auth_username and es_api_basic_auth_password to be provided to allow cluster operations"
when: es_enable_xpack and ("security" in es_xpack_features) and es_api_basic_auth_username is not defined and es_api_basic_auth_password is not defined
- name: set fact file_reserved_users
set_fact: file_reserved_users={{ es_users.file.keys() | intersect (reserved_xpack_users) }}
when: es_users is defined and es_users.file is defined and (es_users.file.keys() | length > 0) and (es_users.file.keys() | intersect (reserved_xpack_users) | length > 0)
- name: fail when changing users through file realm
fail:
msg: "ERROR: INVALID CONFIG - YOU CANNOT CHANGE RESERVED USERS THROUGH THE FILE REALM. THE FOLLOWING CANNOT BE CHANGED: {{file_reserved_users}}. USE THE NATIVE REALM."
when: file_reserved_users | default([]) | length > 0
- name: set fact instance_default_file
set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
- name: set fact instance_init_script
set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
- name: set fact conf_dir
set_fact: conf_dir={{ es_conf_dir }}/{{es_instance_name}}
- name: set fact m_lock_enabled
set_fact: m_lock_enabled={{ es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True }}
#TODO - if transport.host is not local maybe error on bootstrap checks
#Use systemd for the following distributions:
#Ubuntu 15 and up
#Debian 8 and up
#Centos 7 and up
#Relies on elasticsearch distribution installing a serviced script to determine whether one should be copied.
- name: set fact use_system_d
set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version | version_compare('8', '>=')) or (ansible_distribution in ['RedHat','CentOS'] and ansible_distribution_version | version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version | version_compare('15', '>=')) }}
- name: set fact instance_sysd_script
set_fact: instance_sysd_script={{sysd_script | dirname }}/{{es_instance_name}}_{{sysd_script | basename}}
when: use_system_d
#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
- name: set fact instance_suffix
set_fact: instance_suffix={{inventory_hostname}}-{{ es_instance_name }}
- name: set fact pid_dir
set_fact: pid_dir={{ es_pid_dir }}/{{instance_suffix}}
- name: set fact log_dir
set_fact: log_dir={{ es_log_dir }}/{{instance_suffix}}
- name: set fact data_dirs
set_fact: data_dirs={{ es_data_dirs | append_to_list('/'+instance_suffix) }}
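
A minimal sketch of a play that satisfies the mandatory and recommended parameters checked above; the role reference, ports and hosts are placeholders and must be adapted to where this role is vendored:

- hosts: elasticsearch
  roles:
    - role: ansible-elasticsearch        # hypothetical role reference
      es_instance_name: "node1"
      es_config:
        http.port: 9200
        transport.tcp.port: 9300
        discovery.zen.ping.unicast.hosts: "localhost:9300"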

@@ -0,0 +1,86 @@
---
# es_plugins_reinstall will be set to true if elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed
# i.e. we have changed the ES version (or we have a clean installation of ES), or if no plugins are listed. Otherwise it is false and requires explicit setting.
- name: set fact es_plugins_reinstall to true
set_fact: es_plugins_reinstall=true
when: (((debian_elasticsearch_install_from_repo is defined and debian_elasticsearch_install_from_repo.changed) or (redhat_elasticsearch_install_from_repo is defined and redhat_elasticsearch_install_from_repo.changed)) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) or es_plugins is not defined or es_plugins is none
- name: set fact list_command
set_fact: list_command=""
#If we are reinstalling all plugins, e.g. due to a version change, we need to remove all plugins (inc. x-pack) before installing any. Otherwise we don't consider x-pack so the role stays idempotent.
- name: set fact list_command check for x-pack
set_fact: list_command="| grep -vE 'x-pack'"
when: not es_plugins_reinstall
- name: remove x-pack plugin directory when it isn't a plugin
file:
dest: "{{ es_home }}/plugins/x-pack"
state: "absent"
when: es_open_xpack
#List currently installed plugins. We have to list the directories as the list command fails if the ES version is different from the plugin version.
- name: Check installed elasticsearch plugins
become: yes
shell: "ls {{es_home}}/plugins {{list_command}}"
register: installed_plugins
changed_when: False
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#if es_plugins_reinstall is set to true we remove ALL plugins
- name: set fact plugins_to_remove to installed_plugins.stdout_lines
set_fact: plugins_to_remove="{{ installed_plugins.stdout_lines | default([]) }}"
when: es_plugins_reinstall
#if the plugins listed are different than those requested, we remove those installed but not listed in the config
- name: set fact plugins_to_remove to delete plugins installed but not listed in es_plugins
set_fact: plugins_to_remove="{{ installed_plugins.stdout_lines | difference(es_plugins | json_query('[*].plugin')) | default([]) }}"
when: not es_plugins_reinstall
#if es_plugins_reinstall is set to true we (re)install ALL plugins
- name: set fact plugins_to_install to es_plugins
set_fact: plugins_to_install="{{ es_plugins | json_query('[*].plugin') | default([]) }}"
when: es_plugins_reinstall
#if the plugins listed are different than those requested, we install those not installed but listed in the config
- name: set fact plugins_to_install to those in es_plugins but not installed
set_fact: plugins_to_install="{{ es_plugins | json_query('[*].plugin') | difference(installed_plugins.stdout_lines) | default([]) }}"
when: not es_plugins_reinstall
# This removes any currently installed plugins (to prevent errors when reinstalling)
- name: Remove elasticsearch plugins
become: yes
command: "{{es_home}}/bin/elasticsearch-plugin remove {{item}} --silent"
with_items: "{{ plugins_to_remove | default([]) }}"
notify: restart elasticsearch
register: plugin_removed
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
- name: Install elasticsearch plugins
become: yes
command: "{{es_home}}/bin/elasticsearch-plugin install {{ item.url | default(item.plugin) }} --batch --silent"
register: plugin_installed
changed_when: plugin_installed.rc == 0
with_items: "{{ es_plugins }}"
when: item.plugin in plugins_to_install
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
ES_JAVA_OPTS: "{% if item.proxy_host is defined and item.proxy_host != '' and item.proxy_port is defined and item.proxy_port != ''%} -Dhttp.proxyHost={{ item.proxy_host }} -Dhttp.proxyPort={{ item.proxy_port }} -Dhttps.proxyHost={{ item.proxy_host }} -Dhttps.proxyPort={{ item.proxy_port }} {% elif es_proxy_host is defined and es_proxy_host != '' %} -Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }} {% endif %}"
until: plugin_installed.rc == 0
retries: 5
delay: 5
#Set permissions on plugins directory
- name: Set Plugin Directory Permissions
become: yes
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
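
The tasks above expect es_plugins to be a list of mappings with a plugin key and optional url/proxy keys; a hedged example (the plugin names are standard Elasticsearch plugins, the proxy values are placeholders):

es_plugins:
  - plugin: ingest-attachment
  - plugin: analysis-icu
    proxy_host: proxy.example.com      # placeholder
    proxy_port: 8080                   # placeholder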

@@ -0,0 +1,26 @@
---
- name: set fact es_script_dir
set_fact: es_script_dir={{ es_conf_dir }}/{{es_instance_name}}
tags:
- always
- name: set fact es_script_dir when path.scripts
set_fact: es_script_dir={{es_config['path.scripts']}}
when: es_config['path.scripts'] is defined
tags:
- always
- name: Create script dir
become: yes
file: state=directory path={{ es_script_dir }} owner={{ es_user }} group={{ es_group }} recurse=yes
- name: Copy default scripts to elasticsearch
become: yes
copy: src=scripts dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }}
when: es_scripts_fileglob is not defined
- name: Copy scripts to elasticsearch
become: yes
copy: src={{ item }} dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }}
with_fileglob: "{{ es_scripts_fileglob | default('') }}"
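
Script installation is gated by es_scripts in main.yml and by the optional fileglob used above; illustrative values only (the path is a placeholder):

es_scripts: true
es_scripts_fileglob: "{{ playbook_dir }}/files/scripts/*"   # placeholder path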

@@ -0,0 +1,41 @@
---
- name: ensure templates dir is created
file:
path: /etc/elasticsearch/templates
state: directory
owner: "{{ es_user }}"
group: "{{ es_group }}"
- name: Copy templates to elasticsearch
copy: src={{ item }} dest=/etc/elasticsearch/templates owner={{ es_user }} group={{ es_group }}
register: load_templates
with_fileglob:
- "{{ es_templates_fileglob | default('') }}"
- name: Install templates without auth
uri:
url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}"
method: PUT
status_code: 200
body_format: json
body: "{{ lookup('file', item) }}"
when: load_templates.changed and es_start_service and (not es_enable_xpack or es_xpack_features is not defined or "security" not in es_xpack_features)
with_fileglob:
- "{{ es_templates_fileglob | default('') }}"
run_once: True
- name: Install templates with auth
uri:
url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}"
method: PUT
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
body_format: json
body: "{{ lookup('file', item) }}"
when: load_templates.changed and es_start_service and es_enable_xpack and es_xpack_features is defined and "security" in es_xpack_features
with_fileglob:
- "{{ es_templates_fileglob | default('') }}"
run_once: True
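
A hedged example of the variables that drive this file; the glob is a placeholder path relative to the playbook:

es_templates: true
es_templates_fileglob: "{{ playbook_dir }}/templates/*.json"   # placeholder path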

@@ -0,0 +1,13 @@
---
- name: Include optional user and group creation.
when: (es_user_id is defined) and (es_group_id is defined)
include: elasticsearch-optional-user.yml
- name: Include Debian specific Elasticsearch tasks
include: elasticsearch-Debian.yml
when: ansible_os_family == 'Debian'
- name: Include RedHat specific Elasticsearch tasks
include: elasticsearch-RedHat.yml
when: ansible_os_family == 'RedHat'

@@ -0,0 +1,52 @@
---
- name: set fact java_state to present
set_fact: java_state="present"
- name: set fact java_state to latest
set_fact: java_state="latest"
when: update_java == true
- name: RedHat - Ensure Java is installed
become: yes
yum: name={{ java }} state={{java_state}}
when: ansible_os_family == 'RedHat'
- name: Get the installed java path
shell: "update-alternatives --display java | grep '^/' | awk '{print $1}' | grep 1.8.0"
become: yes
register: java_full_path
failed_when: False
changed_when: False
when: ansible_os_family == 'RedHat'
- name: correct java version selected
alternatives:
name: java
path: "{{ java_full_path.stdout }}"
link: /usr/bin/java
when: ansible_os_family == 'RedHat' and java_full_path is defined
- name: Refresh java repo
become: yes
apt: update_cache=yes
changed_when: false
when: ansible_os_family == 'Debian'
- name: Debian - Ensure Java is installed
become: yes
apt: name={{ java }} state={{java_state}}
when: ansible_os_family == 'Debian'
- name: register open_jdk version
shell: java -version 2>&1 | grep OpenJDK
register: open_jdk
ignore_errors: yes
changed_when: false
#https://github.com/docker-library/openjdk/issues/19 - ensures tests pass due to java 8 broken certs
- name: refresh the java ca-certificates
become: yes
command: /var/lib/dpkg/info/ca-certificates-java.postinst configure
when: ansible_distribution == 'Ubuntu' and open_jdk.rc == 0
changed_when: false
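
The package installed above is controlled by the java and update_java variables; illustrative values for a Debian host (package names differ per distribution, e.g. a java-1.8.0-openjdk style name on RedHat):

java: "openjdk-8-jdk"    # assumed Debian package name
update_java: true        # switches the state above from present to latest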

@@ -0,0 +1,94 @@
---
- name: os-specific vars
include_vars: "{{ansible_os_family}}.yml"
tags:
- always
- name: set compatibility variables
include: compatibility-variables.yml
tags:
- always
- name: check-set-parameters
include: elasticsearch-parameters.yml
tags:
- always
- name: use snapshot release
include: snapshot-release.yml
when: es_use_snapshot_release
- name: include java.yml
include: java.yml
when: es_java_install
tags:
- java
- name: include elasticsearch.yml
include: elasticsearch.yml
tags:
- install
- name: include elasticsearch-config.yml
include: elasticsearch-config.yml
tags:
- config
- name: include elasticsearch-scripts.yml
include: elasticsearch-scripts.yml
when: es_scripts
tags:
- scripts
- name: include elasticsearch-plugins.yml
include: elasticsearch-plugins.yml
when: es_plugins is defined or es_plugins_reinstall
tags:
- plugins
#We always execute xpack as we may need to remove features
- name: include xpack/elasticsearch-xpack.yml
include: xpack/elasticsearch-xpack.yml
tags:
- xpack
- name: flush handlers
meta: flush_handlers
- name: Make sure elasticsearch is started
become: yes
service: name={{instance_init_script | basename}} state=started enabled=yes
when: es_start_service
- name: Wait for elasticsearch to startup
wait_for: host={{es_api_host}} port={{es_api_port}} delay=5 connect_timeout=1
when: es_restarted is defined and es_restarted.changed and es_start_service
- name: set fact manage_native_realm to false
set_fact: manage_native_realm=false
- name: set fact manage_native_realm to true
set_fact: manage_native_realm=true
when: es_start_service and (es_enable_xpack and "security" in es_xpack_features) and ((es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined))
# If playbook runs too fast, Native commands could fail as the Native Realm is not yet up
- name: Wait 15 seconds for the Native Realm to come up
pause: seconds=15
when: manage_native_realm
- name: activate-license
include: ./xpack/security/elasticsearch-xpack-activation.yml
when: es_start_service and es_enable_xpack and es_xpack_license is defined and es_xpack_license != ''
#perform security actions here now elasticsearch is started
- name: include xpack/security/elasticsearch-security-native.yml
include: ./xpack/security/elasticsearch-security-native.yml
when: manage_native_realm
#Templates done after restart - handled by flushing the handlers. e.g. suppose user removes security on a running node and doesn't specify es_api_basic_auth_username and es_api_basic_auth_password. The templates will subsequently not be removed if we don't wait for the node to restart.
#We also do this after the native realm to ensure any changes are applied there first and that the realm is definitely up.
- name: include elasticsearch-template.yml
include: elasticsearch-template.yml
when: es_templates
tags:
- templates
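
The security-related includes above only fire when X-Pack security is enabled and API credentials are supplied; a hedged sketch of the variables involved (values are placeholders, keep real passwords in a vault):

es_enable_xpack: true
es_xpack_features: ["security", "monitoring"]
es_api_basic_auth_username: elastic
es_api_basic_auth_password: changeme      # placeholder - use ansible-vault in practice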

@@ -0,0 +1,54 @@
# These tasks are to run ansible-elasticsearch using pre-release snapshot builds
# This should only be used for testing purposes and can be enabled by setting
# es_use_snapshot_release: true
- name: detect if we need the .deb or .rpm
set_fact:
package_type: "{{ 'deb' if (ansible_os_family == 'Debian') else 'rpm' }}"
- name: get the minor version
set_fact:
minor_version: "{{ es_version.split('.')[0:2] | join('.')}}"
- name: set the package_name
set_fact:
package_name: "{{ es_package_name + '-' + es_version + '-SNAPSHOT.' + package_type }}"
- name: generate the artifacts url
set_fact:
artifacts_url: "{{ 'https://artifacts-api.elastic.co/v1/search/' + minor_version + '/' + package_name }}"
- name: get latest snapshot build
uri:
url: "{{ artifacts_url }}"
return_content: true
register: snapshots
retries: 5
delay: 1
ignore_errors: true
until: "'status' in snapshots and snapshots.status == 200"
- name: use the custom package url instead of the repository
set_fact:
es_custom_package_url: "{{ snapshots.json['packages'][package_name]['url'] }}"
es_use_repository: false
- name: set snapshot urls for es_plugins when it is defined
when: es_plugins is defined
block:
- name: split up the snapshot url so we can create the plugin url
set_fact:
split_url: "{{ es_custom_package_url.split('/') }}"
- name: set base plugin url
set_fact:
plugin_url: "{{ split_url[0] + '//' + split_url[2:5]|join('/') + '/elasticsearch-plugins/'}}"
- name: create es_plugins with the snapshot url
set_fact:
es_plugins_temp: "{{ es_plugins_temp|default([]) + [{'plugin': item.plugin, 'url': plugin_url + item.plugin + '/' + item.plugin + '-' + es_version + '-SNAPSHOT.zip'}] }}"
with_items: "{{ es_plugins }}"
- name: override the original es_plugins with the snapshot version
set_fact:
es_plugins: "{{ es_plugins_temp }}"

@@ -0,0 +1,68 @@
---
#Test if feature is installed
- name: Test if x-pack is installed
shell: "{{es_home}}/bin/elasticsearch-plugin list | grep x-pack"
become: yes
register: x_pack_installed
changed_when: False
failed_when: "'ERROR' in x_pack_installed.stdout"
check_mode: no
ignore_errors: yes
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Remove X-Pack if installed and it has not been requested or the ES version has changed
- name: Remove x-pack plugin
become: yes
command: "{{es_home}}/bin/elasticsearch-plugin remove x-pack"
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: x_pack_installed.rc == 0 and (not es_enable_xpack or es_version_changed)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
#Install the plugin if it is not installed, or the ES version has changed (so it was removed above), and it has been requested
- name: Download x-pack from url
get_url: url={{ es_xpack_custom_url }} dest=/tmp/x-pack-{{ es_version }}.zip
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is defined)
- name: Install x-pack plugin from local
become: yes
command: >
{{es_home}}/bin/elasticsearch-plugin install --silent --batch file:///tmp/x-pack-{{ es_version }}.zip
register: xpack_state
changed_when: xpack_state.rc == 0
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is defined)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
- name: Delete x-pack zip file
file: dest=/tmp/x-pack-{{ es_version }}.zip state=absent
when: es_xpack_custom_url is defined
- name: Install x-pack plugin from elastic.co
become: yes
command: >
{{es_home}}/bin/elasticsearch-plugin install --silent --batch x-pack
register: xpack_state
failed_when: "'ERROR' in xpack_state.stdout"
changed_when: xpack_state.rc == 0
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is not defined)
notify: restart elasticsearch
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_INCLUDE: "{{ instance_default_file }}"
ES_JAVA_OPTS: "{% if es_proxy_host is defined and es_proxy_host != '' %}-Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }}{% endif %}"

@@ -0,0 +1,23 @@
---
- name: set fact es_version_changed
set_fact: es_version_changed={{ (debian_elasticsearch_install_from_repo is defined and debian_elasticsearch_install_from_repo.changed) or (redhat_elasticsearch_install_from_repo is defined and redhat_elasticsearch_install_from_repo.changed) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed) }}
- name: include elasticsearch-xpack-install.yml
include: elasticsearch-xpack-install.yml
when: es_install_xpack
#Security configuration
- name: include security/elasticsearch-security.yml
include: security/elasticsearch-security.yml
#Add any feature specific configuration here
- name: Set Plugin Directory Permissions
become: yes
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
#Make sure elasticsearch.keystore has correct Permissions
- name: Set elasticsearch.keystore Permissions
become: yes
file: state=file path={{ conf_dir }}/elasticsearch.keystore owner={{ es_user }} group={{ es_group }}
when: es_enable_xpack and "security" in es_xpack_features and (es_version | version_compare('6.0.0', '>'))

@@ -0,0 +1,89 @@
---
- name: set fact manage_file_users
set_fact: manage_file_users={{ es_users is defined and es_users.file is defined and es_users.file.keys() | length > 0 }}
- name: Create the users file if it doesn't exist
copy:
content: ""
dest: "{{ conf_dir }}{{ es_xpack_conf_subdir }}/users"
force: no # this ensures it only creates it if it does not exist
group: "{{ es_group }}"
owner: "{{ es_user }}"
mode: 0555
#List current users
- name: List Users
become: yes
shell: cat {{conf_dir}}{{es_xpack_conf_subdir}}/users | awk -F':' '{print $1}'
register: current_file_users
when: manage_file_users
changed_when: False
- name: set fact users_to_remove
set_fact: users_to_remove={{ current_file_users.stdout_lines | difference (es_users.file.keys()) }}
when: manage_file_users
#Remove users
- name: Remove Users
become: yes
command: >
{{es_home}}/bin/{{es_xpack_users_command}} userdel {{item}}
with_items: "{{users_to_remove | default([])}}"
when: manage_file_users
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
- name: set fact users_to_add
set_fact: users_to_add={{ es_users.file.keys() | difference (current_file_users.stdout_lines) }}
when: manage_file_users
#Add users
- name: Add Users
become: yes
command: >
{{es_home}}/bin/{{es_xpack_users_command}} useradd {{item}} -p {{es_users.file[item].password}}
with_items: "{{ users_to_add | default([]) }}"
when: manage_file_users
no_log: True
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
#Set passwords for all users declared - Required as the useradd will not change existing user passwords
- name: Set User Passwords
become: yes
command: >
{{es_home}}/bin/{{es_xpack_users_command}} passwd {{ item }} -p {{es_users.file[item].password}}
with_items: "{{ es_users.file.keys() | default([]) }}"
when: manage_file_users
#Currently no easy way to figure out if the password has changed or to know what it currently is so we can skip.
changed_when: False
no_log: True
environment:
CONF_DIR: "{{ conf_dir }}"
ES_PATH_CONF: "{{ conf_dir }}"
ES_HOME: "{{es_home}}"
- name: set fact users_roles
set_fact: users_roles={{es_users.file | extract_role_users () }}
when: manage_file_users
#Copy Roles files
- name: Copy roles.yml File for Instance
become: yes
template: src=security/roles.yml.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/roles.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
when: es_roles is defined and es_roles.file is defined
#Overwrite users_roles file
- name: Copy User Roles
become: yes
template: src=security/users_roles.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/users_roles mode=0644 force=yes
when: manage_file_users and users_roles | length > 0
#Set permissions on the security directory. E.g. if 2 nodes are installed on the same machine, the second node will not get the users file created at install time, causing the files to be created by the es_users call and end up with the wrong permissions.
- name: Set Security Directory Permissions Recursive
become: yes
file: state=directory path={{conf_dir}}{{es_xpack_conf_subdir}}/ owner={{ es_user }} group={{ es_group }} recurse=yes
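
The file realm tasks above read users and roles from es_users.file and es_roles.file; a hedged sketch of the expected shape (user, role and password values are placeholders):

es_users:
  file:
    es_admin:
      password: changeMe            # placeholder
      roles:
        - admin
es_roles:
  file:
    admin:
      cluster:
        - all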

@@ -0,0 +1,191 @@
---
- name: set fact change_api_password to false
set_fact: change_api_password=false
- name: set fact manage_native_users to false
set_fact: manage_native_users=false
- name: set fact manage_native_users to true
set_fact: manage_native_users=true
when: es_users is defined and es_users.native is defined and es_users.native.keys() | length > 0
- name: set fact manage_native_roles to false
set_fact: manage_native_roles=false
- name: set fact manage_native_roles to true
set_fact: manage_native_roles=true
when: es_roles is defined and es_roles.native is defined and es_roles.native.keys() | length > 0
#If the node has just had security installed it may be either stopped or started: 1. if stopped, we need to start it to load the native realms 2. if started, we need to restart it to load them
#List current users
- name: List Native Users
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user
method: GET
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
status_code: 200
register: user_list_response
when: manage_native_users
- name: set fact reserved_users equals user_list_response.json
set_fact: reserved_users={{ user_list_response.json | filter_reserved }}
when: manage_native_users
#Current users not inc. those reserved
- name: set fact current_users equals user_list_response.json.keys not including reserved
set_fact: current_users={{ user_list_response.json.keys() | difference (reserved_users) }}
when: manage_native_users
#We are changing the es_api_basic_auth_username password, so we need to do it first and update the param
- name: set fact native_users
set_fact: native_users={{ es_users.native }}
when: manage_native_users
- name: set fact change_api_password to true
set_fact: change_api_password=true
when: manage_native_users and es_api_basic_auth_username in native_users and native_users[es_api_basic_auth_username].password is defined
- name: Update API User Password
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{es_api_basic_auth_username}}/_password
method: POST
body_format: json
body: "{ \"password\":\"{{native_users[es_api_basic_auth_username].password}}\" }"
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: change_api_password
- name: set fact es_api_basic_auth_password
set_fact: es_api_basic_auth_password={{native_users[es_api_basic_auth_username].password}}
when: change_api_password
#Identify users that are present in ES but not declared and thus should be removed
- name: set fact users_to_remove
set_fact: users_to_remove={{ current_users | difference ( native_users.keys() ) }}
when: manage_native_users
#Delete all non required users NOT inc. reserved
- name: Delete Native Users
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}
method: DELETE
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_users
with_items: "{{ users_to_remove | default([]) }}"
- name: set fact users_to_ignore
set_fact: users_to_ignore={{ native_users.keys() | intersect (reserved_users) }}
when: manage_native_users
- name: debug message
debug:
msg: "WARNING: YOU CAN ONLY CHANGE THE PASSWORD FOR RESERVED USERS IN THE NATIVE REALM. ANY ROLE CHANGES WILL BE IGNORED: {{users_to_ignore}}"
when: manage_native_users and users_to_ignore | length > 0
#Update password on all reserved users
- name: Update Reserved User Passwords
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}/_password
method: POST
body_format: json
body: "{ \"password\":\"{{native_users[item].password}}\" }"
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: native_users[item].password is defined
no_log: True
with_items: "{{ users_to_ignore | default([]) }}"
- name: set fact users_to_modify
set_fact: users_to_modify={{ native_users.keys() | difference (reserved_users) }}
when: manage_native_users
#Overwrite all other users NOT inc. those reserved
- name: Update Non-Reserved Native User Details
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}
method: POST
body_format: json
body: "{{ native_users[item] | to_json }}"
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_users
no_log: True
with_items: "{{ users_to_modify | default([]) }}"
## ROLE CHANGES
#List current roles not inc. those reserved
- name: List Native Roles
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role
method: GET
body_format: json
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
status_code: 200
register: role_list_response
when: manage_native_roles
- name: set fact reserved roles
set_fact: reserved_roles={{ role_list_response.json | filter_reserved }}
when: manage_native_roles
- name: set fact current roles
set_fact: current_roles={{ role_list_response.json.keys() | difference (reserved_roles) }}
when: manage_native_roles
- name: set fact roles to ignore
set_fact: roles_to_ignore={{ es_roles.native.keys() | intersect (reserved_roles) | default([]) }}
when: manage_native_roles
- name: debug message
debug:
msg: "WARNING: YOU CANNOT CHANGE RESERVED ROLES. THE FOLLOWING WILL BE IGNORED: {{roles_to_ignore}}"
when: manage_native_roles and roles_to_ignore | length > 0
- name: set fact roles_to_remove
set_fact: roles_to_remove={{ current_roles | difference ( es_roles.native.keys() ) }}
when: manage_native_roles
#Delete all non required roles NOT inc. reserved
- name: Delete Native Roles
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role/{{item}}
method: DELETE
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_roles
with_items: "{{roles_to_remove | default([]) }}"
- name: set fact roles_to_modify
set_fact: roles_to_modify={{ es_roles.native.keys() | difference (reserved_roles) }}
when: manage_native_roles
#Update other roles - NOT inc. reserved roles
- name: Update Native Roles
uri:
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role/{{item}}
method: POST
body_format: json
body: "{{ es_roles.native[item] | to_json}}"
status_code: 200
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
force_basic_auth: yes
when: manage_native_roles
with_items: "{{ roles_to_modify | default([]) }}"

@@ -0,0 +1,74 @@
---
#Security specific configuration done here
#TODO: 1. Skip users with no password defined or error 2. Passwords | length > 6
#Ensure x-pack conf directory is created if necessary
- name: Ensure x-pack conf directory exists (file)
file: path={{ conf_dir }}{{ es_xpack_conf_subdir }} state=directory owner={{ es_user }} group={{ es_group }}
changed_when: False
when:
- es_enable_xpack and "security" in es_xpack_features
- (es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined) or (es_role_mapping is defined)
#-----------------------------Create Bootstrap User-----------------------------------
### START BLOCK elasticsearch keystore ###
- name: create the elasticsearch keystore
when: (es_enable_xpack and "security" in es_xpack_features) and (es_version | version_compare('6.0.0', '>'))
block:
- name: create the keystore if it doesn't exist yet
become: yes
command: >
{{es_home}}/bin/elasticsearch-keystore create
args:
creates: "{{ conf_dir }}/elasticsearch.keystore"
environment:
ES_PATH_CONF: "{{ conf_dir }}"
- name: Check if bootstrap password is set
become: yes
command: >
{{es_home}}/bin/elasticsearch-keystore list
register: list_keystore
changed_when: False
environment:
ES_PATH_CONF: "{{ conf_dir }}"
- name: Create Bootstrap password for elastic user
become: yes
shell: echo "{{es_api_basic_auth_password}}" | {{es_home}}/bin/elasticsearch-keystore add -x 'bootstrap.password'
when:
- es_api_basic_auth_username is defined and list_keystore is defined and es_api_basic_auth_username == 'elastic' and 'bootstrap.password' not in list_keystore.stdout_lines
environment:
ES_PATH_CONF: "{{ conf_dir }}"
no_log: true
### END BLOCK elasticsearch keystore ###
#-----------------------------FILE BASED REALM----------------------------------------
- include: elasticsearch-security-file.yml
when: (es_enable_xpack and "security" in es_xpack_features) and ((es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined))
#-----------------------------ROLE MAPPING ----------------------------------------
#Copy Roles files
- name: Copy role_mapping.yml File for Instance
become: yes
template: src=security/role_mapping.yml.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/role_mapping.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
when: es_role_mapping is defined
#-----------------------------AUTH FILE----------------------------------------
- name: Copy message auth key to elasticsearch
become: yes
copy: src={{ es_message_auth_file }} dest={{conf_dir}}{{es_xpack_conf_subdir}}/system_key owner={{ es_user }} group={{ es_group }} mode=0600 force=yes
when: es_message_auth_file is defined
#------------------------------------------------------------------------------------
#Ensure security conf directory is created
- name: Ensure security conf directory exists
become: yes
file: path={{ conf_dir }}/security state=directory owner={{ es_user }} group={{ es_group }}
changed_when: False
when: es_enable_xpack and "security" in es_xpack_features

@@ -0,0 +1,37 @@
---
- name: Activate ES license (without security authentication)
uri:
method: PUT
url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license?acknowledge=true"
body_format: json
body: "{{ es_xpack_license }}"
return_content: yes
register: license_activated
no_log: True
when: not "security" in es_xpack_features
failed_when: >
license_activated.status != 200 or
license_activated.json.license_status is not defined or
license_activated.json.license_status != 'valid'
- name: Activate ES license (with security authentication)
uri:
method: PUT
url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license?acknowledge=true"
user: "{{es_api_basic_auth_username}}"
password: "{{es_api_basic_auth_password}}"
body_format: json
force_basic_auth: yes
body: "{{ es_xpack_license }}"
return_content: yes
register: license_activated
no_log: True
when: "'security' in es_xpack_features"
failed_when: >
license_activated.status != 200 or
license_activated.json.license_status is not defined or
license_activated.json.license_status != 'valid'
- debug:
msg: "License: {{ license_activated }}"