removing ansible from vulnwhisperer, creating a new repo for ansible deployment
This commit is contained in:
@ -1,65 +0,0 @@
|
|||||||
# Ansible deployment
|
|
||||||
|
|
||||||
The code can also be deployed using [Ansible](https://www.ansible.com/) with a playbook and a role.
|
|
||||||
|
|
||||||
## Code organization
|
|
||||||
|
|
||||||
This ansible configuration is reasonably standard and lives under the `ansible` directory in the main
|
|
||||||
repository root. A brief explanation of each file follows.
|
|
||||||
|
|
||||||
#### ansible.cfg
|
|
||||||
|
|
||||||
Main ansible configuration file, contains some options like the remote username, where to find the roles directory and
|
|
||||||
the inventory file name. The only variables that should ever need to be customized are the `remote_user` and `host_key_checking`
|
|
||||||
to specify the user to use on the remote system (requires *sudo* access) and if the remote host SSH key must be validated
|
|
||||||
before proceeding.
|
|
||||||
|
|
||||||
#### hosts
|
|
||||||
|
|
||||||
The `hosts` file contains the list of hosts that we want to deploy Vulnwhisperer on. There is a default `[ec2]` tag under which
|
|
||||||
any number of hosts can be configured. Please refer to the official [inventory documentation](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html)
|
|
||||||
for further information.
|
|
||||||
|
|
||||||
#### provision.yml
|
|
||||||
|
|
||||||
The main playbook, it's usually used with the `ansible-playbook` command as follows:
|
|
||||||
|
|
||||||
```
|
|
||||||
ansible-playbook provision.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
The playbook will prompt for an install option that can either be `install` or `update`. Each does what the
|
|
||||||
name suggests. A path to the configuration file will be requested. This choice has been made to ensure that
|
|
||||||
there is no coupling between the provisioning process and the configuration one, giving the user full control
|
|
||||||
on what configuration file to use and where to have it reside on the host running the playbook.
|
|
||||||
|
|
||||||
The playbook will then perform some basic sanity checking (in the `pre_tasks` section) to make sure the inputted
|
|
||||||
variables are present and then call the roles that will actually perform the provisioning.
|
|
||||||
|
|
||||||
If you need to customize the `ELK` installation variables please refer to the role section below.
|
|
||||||
|
|
||||||
#### ssh.config
|
|
||||||
|
|
||||||
The SSH configuration that ansible will use to reach the remote host. The reason for this file is to
|
|
||||||
allow the user to customize the local SSH configuration, for example to specify the SSH key to use
|
|
||||||
to authenticate to the remote host (it can be a symbolic link) or a `ProxyCommand`.
|
|
||||||
|
|
||||||
## Roles
|
|
||||||
|
|
||||||
There are a number of roles in this ansible tree, they are detailed below.
|
|
||||||
|
|
||||||
### Elasticsearch
|
|
||||||
|
|
||||||
This roles comes from the upstream elastic ansible [repository](https://github.com/elastic/ansible-elasticsearch) and has been installed via
|
|
||||||
the `ansible-galaxy` tool. Please refer to the [official documentation](https://galaxy.ansible.com/) for more information.
|
|
||||||
The various configuration options for this role can be specified in the `provision.yml` playbook directly by editing the list of
|
|
||||||
variables or can be passed from the command line. Please refer to the upstream role documentation for more information.
|
|
||||||
|
|
||||||
*Note* vulnwhisperer at the time of writing only supports Elasticsearch version 5.x
|
|
||||||
|
|
||||||
### Vulnwhisperer
|
|
||||||
|
|
||||||
This is the main role that configures the vulnwhisperer software. It creates a number of directories in which it clones
|
|
||||||
the repository from mainline, creates a Python [virtualenv](https://virtualenv.pypa.io/en/latest/) virtualenv in which
|
|
||||||
it installs all the required dependencies and Vulnwhisperer itself. The role supports two tags, `install` and `update` that
|
|
||||||
do exactly what the name suggests.
|
|
@ -1,13 +0,0 @@
|
|||||||
[defaults]
|
|
||||||
inventory=hosts
|
|
||||||
remote_user=ubuntu
|
|
||||||
roles_path=./roles
|
|
||||||
host_key_checking = False
|
|
||||||
timeout=30
|
|
||||||
retry_files_enabled = False
|
|
||||||
|
|
||||||
[ssh_connection]
|
|
||||||
ssh_args = -o ControlPersist=15m -F ssh.config -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
|
|
||||||
scp_if_ssh = True
|
|
||||||
pipelining = True
|
|
||||||
control_path = ~/.ssh/mux-%%r@%%h:%%p
|
|
@ -1,2 +0,0 @@
|
|||||||
[ec2]
|
|
||||||
my.ec2.host.com
|
|
@ -1,40 +0,0 @@
|
|||||||
- hosts: ec2
|
|
||||||
become: true
|
|
||||||
vars:
|
|
||||||
vulnwhisperer:
|
|
||||||
prefix: "/opt"
|
|
||||||
location: "vulnwhisperer"
|
|
||||||
venv_location: "vulnwhisperer_venv"
|
|
||||||
repository: "https://github.com/HASecuritySolutions/VulnWhisperer.git"
|
|
||||||
configuration_file_name: "vulnwhisperer.ini"
|
|
||||||
vars_prompt:
|
|
||||||
- name: install_option
|
|
||||||
prompt: "Please input either install or update"
|
|
||||||
private: no
|
|
||||||
- name: configuration_file
|
|
||||||
prompt: "Full path to the VulnWhisperer configuration file"
|
|
||||||
private: no
|
|
||||||
pre_tasks:
|
|
||||||
- name: assert the correct input is provided
|
|
||||||
fail:
|
|
||||||
msg: "Please specify one of either 'install' or 'update'"
|
|
||||||
when: install_option != "install" and install_option != "update"
|
|
||||||
- name: check that the provided configuration file exists
|
|
||||||
become: false
|
|
||||||
local_action: stat path="{{ configuration_file }}"
|
|
||||||
ignore_errors: true
|
|
||||||
register: configuration_file_stat
|
|
||||||
- name: fail if the file is not available
|
|
||||||
fail:
|
|
||||||
msg: "Configuration file {{ configuration_file }} not found or inaccessible"
|
|
||||||
when: not configuration_file_stat.stat.exists
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
# set to true to enable closed source branch
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_instance_name: "vulnwhisperer"
|
|
||||||
es_major_version: "5.x"
|
|
||||||
es_version: "5.6.13"
|
|
||||||
update_java: true
|
|
||||||
- role: vulnwhisperer
|
|
||||||
tags: "{{ install_option }}"
|
|
@ -1,41 +0,0 @@
|
|||||||
<!
|
|
||||||
<!--
|
|
||||||
|
|
||||||
** Please read the guidelines below. **
|
|
||||||
|
|
||||||
Issues that do not follow these guidelines are likely to be closed.
|
|
||||||
|
|
||||||
1. GitHub is reserved for bug reports and feature requests. The best place to
|
|
||||||
ask a general question is at the Elastic [forums](https://discuss.elastic.co).
|
|
||||||
GitHub is not the place for general questions.
|
|
||||||
|
|
||||||
2. Is this bug report or feature request for a supported OS? If not, it
|
|
||||||
is likely to be closed. See https://www.elastic.co/support/matrix#show_os
|
|
||||||
|
|
||||||
3. Please fill out EITHER the feature request block or the bug report block
|
|
||||||
below, and delete the other block.
|
|
||||||
|
|
||||||
-->
|
|
||||||
|
|
||||||
<!-- Feature request -->
|
|
||||||
|
|
||||||
**Describe the feature**:
|
|
||||||
|
|
||||||
<!-- Bug report -->
|
|
||||||
|
|
||||||
**Elasticsearch version**
|
|
||||||
|
|
||||||
**Role version**: (If using master please specify github sha)
|
|
||||||
|
|
||||||
**JVM version** (`java -version`):
|
|
||||||
|
|
||||||
**OS version** (`uname -a` if on a Unix-like system):
|
|
||||||
|
|
||||||
**Description of the problem including expected versus actual behaviour**:
|
|
||||||
|
|
||||||
**Playbook**:
|
|
||||||
Please specify the full playbook used to reproduce this issue.
|
|
||||||
|
|
||||||
**Provide logs from Ansible**:
|
|
||||||
|
|
||||||
**ES Logs if relevant**:
|
|
10
ansible/roles/elastic.elasticsearch/.gitignore
vendored
10
ansible/roles/elastic.elasticsearch/.gitignore
vendored
@ -1,10 +0,0 @@
|
|||||||
.kitchen/
|
|
||||||
license*.json
|
|
||||||
*.pyc
|
|
||||||
.vendor
|
|
||||||
.bundle
|
|
||||||
Converging
|
|
||||||
TODO
|
|
||||||
.idea/
|
|
||||||
elasticsearch.iml
|
|
||||||
!/vars/RedHat.yml
|
|
@ -1,124 +0,0 @@
|
|||||||
---
|
|
||||||
driver:
|
|
||||||
name: docker
|
|
||||||
|
|
||||||
provisioner:
|
|
||||||
name: ansible_playbook
|
|
||||||
hosts: localhost
|
|
||||||
roles_path: ../
|
|
||||||
require_ansible_repo: false
|
|
||||||
require_ansible_omnibus: false
|
|
||||||
require_ansible_source: false
|
|
||||||
require_pip: true
|
|
||||||
ansible_version: 2.4.3.0
|
|
||||||
http_proxy: <%= ENV['HTTP_PROXY'] %>
|
|
||||||
https_proxy: <%= ENV['HTTPS_PROXY'] %>
|
|
||||||
no_proxy: localhost,127.0.0.1
|
|
||||||
ignore_extensions_from_root: [".git",".idea",".kitchen.yml"]
|
|
||||||
ignore_paths_from_root: [".git",".idea",".kitchen"]
|
|
||||||
<% if ENV['VERSION'] %>
|
|
||||||
attributes:
|
|
||||||
extra_vars:
|
|
||||||
es_major_version: "<%= ENV['VERSION'] %>"
|
|
||||||
<% if ENV['VERSION'] == '5.x' %>
|
|
||||||
es_version: '5.6.11'
|
|
||||||
<% end %>
|
|
||||||
<% end %>
|
|
||||||
|
|
||||||
transport:
|
|
||||||
max_ssh_sessions: 6
|
|
||||||
|
|
||||||
platforms:
|
|
||||||
- name: ubuntu-14.04
|
|
||||||
driver_config:
|
|
||||||
image: dliappis/ubuntu-devopsci:14.04
|
|
||||||
privileged: true
|
|
||||||
provision_command:
|
|
||||||
- apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible
|
|
||||||
- apt-get update && apt-get -y -q install python-apt python-pycurl python-pip python-openssl
|
|
||||||
- pip install jmespath pyOpenSSL ndg-httpsclient
|
|
||||||
- pip uninstall -y ansible
|
|
||||||
use_sudo: false
|
|
||||||
volume:
|
|
||||||
- <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
|
|
||||||
- /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
|
|
||||||
- name: ubuntu-16.04
|
|
||||||
driver_config:
|
|
||||||
image: dliappis/ubuntu-devopsci:16.04
|
|
||||||
privileged: true
|
|
||||||
provision_command:
|
|
||||||
- apt-get update && apt-get install -y software-properties-common && add-apt-repository -y ppa:ansible/ansible
|
|
||||||
- apt-get install -y -q net-tools
|
|
||||||
- apt-get update && apt-get -y -q install python-apt python-pycurl python-pip
|
|
||||||
- pip install jmespath
|
|
||||||
- pip uninstall -y ansible
|
|
||||||
use_sudo: false
|
|
||||||
volume:
|
|
||||||
- <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
|
|
||||||
- /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
|
|
||||||
run_command: "/sbin/init"
|
|
||||||
- name: debian-8
|
|
||||||
driver_config:
|
|
||||||
image: dliappis/debian-devopsci:8
|
|
||||||
privileged: true
|
|
||||||
provision_command:
|
|
||||||
- apt-get update && apt-get -y install python python-dev python-pip build-essential libyaml-dev python-yaml curl wget
|
|
||||||
- apt-get install -y -q net-tools
|
|
||||||
- sed -ri 's/^#?PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config
|
|
||||||
- sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
|
|
||||||
- sed -ri 's/^#?UsePAM .*/UsePAM no/' /etc/ssh/sshd_config
|
|
||||||
- pip install jmespath
|
|
||||||
- pip uninstall -y ansible
|
|
||||||
volume:
|
|
||||||
- <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
|
|
||||||
- /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
|
|
||||||
use_sudo: false
|
|
||||||
run_command: "/sbin/init"
|
|
||||||
- name: centos-7
|
|
||||||
driver_config:
|
|
||||||
image: dliappis/centos-devopsci:7
|
|
||||||
provision_command:
|
|
||||||
- sed -ri 's/^#?PermitRootLogin .*/PermitRootLogin yes/' /etc/ssh/sshd_config
|
|
||||||
- sed -ri 's/^#?PasswordAuthentication .*/PasswordAuthentication yes/' /etc/ssh/sshd_config
|
|
||||||
- sed -ri 's/^#?UsePAM .*/UsePAM no/' /etc/ssh/sshd_config
|
|
||||||
- rm /etc/yum.repos.d/epel*repo /etc/yum.repos.d/puppetlabs-pc1.repo
|
|
||||||
- yum -y install initscripts
|
|
||||||
- yum -y remove ansible
|
|
||||||
- yum clean all
|
|
||||||
- pip install jmespath
|
|
||||||
volume:
|
|
||||||
- <%=ENV['ES_XPACK_LICENSE_FILE']%>:/tmp/license.json
|
|
||||||
- /etc # This fixes certain java file actions that check the mount point. Without this adding users fails for some docker storage drivers
|
|
||||||
run_command: "/usr/sbin/init"
|
|
||||||
privileged: true
|
|
||||||
use_sudo: false
|
|
||||||
|
|
||||||
suites:
|
|
||||||
- name: oss
|
|
||||||
provisioner:
|
|
||||||
idempotency_test: true
|
|
||||||
playbook: test/integration/oss.yml
|
|
||||||
- name: oss-upgrade
|
|
||||||
provisioner:
|
|
||||||
playbook: test/integration/oss-upgrade.yml
|
|
||||||
idempotency_test: false
|
|
||||||
- name: oss-to-xpack-upgrade
|
|
||||||
provisioner:
|
|
||||||
playbook: test/integration/oss-to-xpack-upgrade.yml
|
|
||||||
idempotency_test: false
|
|
||||||
- name: xpack
|
|
||||||
provisioner:
|
|
||||||
playbook: test/integration/xpack.yml
|
|
||||||
idempotency_test: true
|
|
||||||
- name: xpack-upgrade
|
|
||||||
provisioner:
|
|
||||||
playbook: test/integration/xpack-upgrade.yml
|
|
||||||
idempotency_test: false
|
|
||||||
- name: multi
|
|
||||||
provisioner:
|
|
||||||
playbook: test/integration/multi.yml
|
|
||||||
idempotency_test: true
|
|
||||||
- name: issue-test
|
|
||||||
provisioner:
|
|
||||||
playbook: test/integration/issue-test.yml
|
|
||||||
idempotency_test: false
|
|
@ -1,98 +0,0 @@
|
|||||||
## 6.4.0 - 2018/08/24
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* 6.4.0 as default Elasticsearch version
|
|
||||||
|
|
||||||
### Fixes
|
|
||||||
|
|
||||||
* [#484](https://github.com/elastic/ansible-elasticsearch/pull/484) - @kimoto - Fix downgrading Elasticsearch on RedHat hosts
|
|
||||||
* [#476](https://github.com/elastic/ansible-elasticsearch/pull/476) - @Crazybus - Fix version locking for the elasticsearch-oss package
|
|
||||||
|
|
||||||
|
|
||||||
## 6.3.1 - 2018/07/05
|
|
||||||
|
|
||||||
### Features
|
|
||||||
|
|
||||||
* 6.3.1 as default Elasticsearch version
|
|
||||||
|
|
||||||
## 6.3.0.1 - 2018/06/28
|
|
||||||
|
|
||||||
### Fixes
|
|
||||||
|
|
||||||
* [#460](https://github.com/elastic/ansible-elasticsearch/pull/460) - @toadjaune - Make sure ansible doesn't fail if the default systemd service file doesn't exist
|
|
||||||
* [#461](https://github.com/elastic/ansible-elasticsearch/pull/461) - @bilsch - Add missing become root in tasks that require root access
|
|
||||||
|
|
||||||
|
|
||||||
## 6.3.0 - 2018/06/18
|
|
||||||
|
|
||||||
### Breaking changes
|
|
||||||
|
|
||||||
Elasticsearch 6.3 includes several big changes that are reflected in this role.
|
|
||||||
When upgrading from module versions prior to 6.3, there are a number of upgrade considerations to take into account:
|
|
||||||
|
|
||||||
* This role defaults to the upstream package repositories, which now include X-Pack bundled by default. To preserve previous behavior which does _not_ include X-Pack be sure to explicitly set `es_enable_xpack: false` which will install the `elasticsearch-oss` package.
|
|
||||||
* Great care has been taken in making sure that all upgrade paths work, however as always please take extra caution when upgrading and test in a non-production environment. New automated tests have been added to make sure that the following upgrade paths work:
|
|
||||||
* oss to oss
|
|
||||||
* oss to xpack
|
|
||||||
* xpack to xpack
|
|
||||||
* X-Pack configuration files which used to be in `${ES_PATH_CONF}/x-pack` are now in `${ES_PATH_CONF}/`. If you have any configuration files in this directory not managed by ansible you will need to move them manually.
|
|
||||||
|
|
||||||
#### Features
|
|
||||||
|
|
||||||
* Integration testing has been refactored in [#457](https://github.com/elastic/ansible-elasticsearch/pull/457). This removed a lot of duplicate tests and added new tests to make sure all upgrade paths work.
|
|
||||||
* It is now possible to test elasticsearch snapshot builds by setting `es_use_snapshot_release` to `true`
|
|
||||||
|
|
||||||
#### Fixes
|
|
||||||
|
|
||||||
* Installing `es_plugins` from custom urls is now idempotent. Previously the plugin name was being compared to the url which meant it would be reinstalled every time ansible was run because they didn't match
|
|
||||||
|
|
||||||
#### Pull requests
|
|
||||||
|
|
||||||
* [#452](https://github.com/elastic/ansible-elasticsearch/pull/452) - @Crazybus - Add initial 6.3 support
|
|
||||||
* [#454](https://github.com/elastic/ansible-elasticsearch/pull/454) - @Crazybus - Move jenkins matrix file into the repo so test suites are controlled via the pull request workflow
|
|
||||||
* [#455](https://github.com/elastic/ansible-elasticsearch/pull/455) - @Crazybus - Add automated test for upgrading from oss to oss
|
|
||||||
* [#457](https://github.com/elastic/ansible-elasticsearch/pull/457) - @Crazybus - Refactor integration tests to remove duplication and add extra suites to make sure all upgrade paths are covered
|
|
||||||
|
|
||||||
## 6.2.4.1 - 2018/06/14
|
|
||||||
|
|
||||||
Patch release requested by @average-joe in #453
|
|
||||||
|
|
||||||
#### Pull requests
|
|
||||||
|
|
||||||
* [#445](https://github.com/elastic/ansible-elasticsearch/pull/445) - @gekkeharry13 - Added configuration options for configuring x-pack notifications via email with some other nice fixes.
|
|
||||||
* [#450](https://github.com/elastic/ansible-elasticsearch/pull/450) - @Crazybus - improving some flakey tests which were randomly failing.
|
|
||||||
* [#447](https://github.com/elastic/ansible-elasticsearch/pull/447) - @chaintng - Fix to make sure sudo is used when running `update-alternatives` for java.
|
|
||||||
* [#423](https://github.com/elastic/ansible-elasticsearch/pull/423) - @eRadical - Fixing the until condition being used when installing rpms from a custom repository.
|
|
||||||
|
|
||||||
|
|
||||||
## 6.2.4 - 2018/04/24
|
|
||||||
|
|
||||||
* `6.2.4` and `5.6.9` as the default versions.
|
|
||||||
|
|
||||||
## 6.2.3 - 2018/04/21
|
|
||||||
|
|
||||||
* Thanks to @cl0udf0x for adding proper names to all tasks which were unnamed [#417](https://github.com/elastic/ansible-elasticsearch/pull/417)
|
|
||||||
* Thanks @cyrilleverrier for having a keen eye and spotting this typo. [#432](https://github.com/elastic/ansible-elasticsearch/pull/432)
|
|
||||||
|
|
||||||
## 6.2.2 - 2018/02/22
|
|
||||||
|
|
||||||
* `6.2.2` and `5.6.8` as the default versions
|
|
||||||
* Thanks to @pemontto for fixing up all of the ansible conditional logic https://github.com/elastic/ansible-elasticsearch/pull/429
|
|
||||||
* Thanks @cyrilleverrier for https://github.com/elastic/ansible-elasticsearch/pull/427 which makes sure x-pack settings are not in the config file when x-pack isn't enabled
|
|
||||||
|
|
||||||
## 6.1.3 - 2018/02/01
|
|
||||||
|
|
||||||
* `6.x` is now the default `es_major_version` with `6.1.3` as the default `es_version`
|
|
||||||
* Special thanks to @shribigb, @toddlers and @remil1000 for their efforts in getting `6.x` support working!
|
|
||||||
* `.kitchen.yml` has been updated to allow testing both `6.x` and `5.x` versions
|
|
||||||
* A new [Jenkins job](https://devops-ci.elastic.co/job/elastic+ansible-elasticsearch+pull-request/) has been added for pull requests to automatically test all combinations of `6.x` and `5.x` on ubuntu-1404, ubuntu-1604, debian-8 and centos-7 with the various test suites.
|
|
||||||
|
|
||||||
## 5.5.1 - 2017/08/20
|
|
||||||
|
|
||||||
* Fixes with respect to issues on restart.
|
|
||||||
* 5.5.1 update with supporting package scripts.
|
|
||||||
* Documentation clarification.
|
|
||||||
* Fixes for loading of templates
|
|
||||||
* Support for ML
|
|
||||||
* Ability to install x-pack from remote.
|
|
@ -1,6 +0,0 @@
|
|||||||
source 'https://rubygems.org'
|
|
||||||
|
|
||||||
gem 'test-kitchen', '1.20.0'
|
|
||||||
gem 'kitchen-docker', '2.6.0'
|
|
||||||
gem 'kitchen-ansible', '0.48.1'
|
|
||||||
gem 'net-ssh', '4.2.0'
|
|
@ -1,75 +0,0 @@
|
|||||||
GEM
|
|
||||||
remote: https://rubygems.org/
|
|
||||||
specs:
|
|
||||||
builder (3.2.3)
|
|
||||||
erubis (2.7.0)
|
|
||||||
ffi (1.9.18)
|
|
||||||
gssapi (1.2.0)
|
|
||||||
ffi (>= 1.0.1)
|
|
||||||
gyoku (1.3.1)
|
|
||||||
builder (>= 2.1.2)
|
|
||||||
httpclient (2.8.3)
|
|
||||||
kitchen-ansible (0.48.1)
|
|
||||||
net-ssh (>= 3)
|
|
||||||
test-kitchen (~> 1.4)
|
|
||||||
kitchen-docker (2.6.0)
|
|
||||||
test-kitchen (>= 1.0.0)
|
|
||||||
little-plugger (1.1.4)
|
|
||||||
logging (2.2.2)
|
|
||||||
little-plugger (~> 1.1)
|
|
||||||
multi_json (~> 1.10)
|
|
||||||
mixlib-install (3.9.0)
|
|
||||||
mixlib-shellout
|
|
||||||
mixlib-versioning
|
|
||||||
thor
|
|
||||||
mixlib-shellout (2.3.2)
|
|
||||||
mixlib-versioning (1.2.2)
|
|
||||||
multi_json (1.13.1)
|
|
||||||
net-scp (1.2.1)
|
|
||||||
net-ssh (>= 2.6.5)
|
|
||||||
net-ssh (4.2.0)
|
|
||||||
net-ssh-gateway (1.3.0)
|
|
||||||
net-ssh (>= 2.6.5)
|
|
||||||
nori (2.6.0)
|
|
||||||
rubyntlm (0.6.2)
|
|
||||||
rubyzip (1.2.1)
|
|
||||||
test-kitchen (1.20.0)
|
|
||||||
mixlib-install (~> 3.6)
|
|
||||||
mixlib-shellout (>= 1.2, < 3.0)
|
|
||||||
net-scp (~> 1.1)
|
|
||||||
net-ssh (>= 2.9, < 5.0)
|
|
||||||
net-ssh-gateway (~> 1.2)
|
|
||||||
thor (~> 0.19, < 0.19.2)
|
|
||||||
winrm (~> 2.0)
|
|
||||||
winrm-elevated (~> 1.0)
|
|
||||||
winrm-fs (~> 1.1.0)
|
|
||||||
thor (0.19.1)
|
|
||||||
winrm (2.2.3)
|
|
||||||
builder (>= 2.1.2)
|
|
||||||
erubis (~> 2.7)
|
|
||||||
gssapi (~> 1.2)
|
|
||||||
gyoku (~> 1.0)
|
|
||||||
httpclient (~> 2.2, >= 2.2.0.2)
|
|
||||||
logging (>= 1.6.1, < 3.0)
|
|
||||||
nori (~> 2.0)
|
|
||||||
rubyntlm (~> 0.6.0, >= 0.6.1)
|
|
||||||
winrm-elevated (1.1.0)
|
|
||||||
winrm (~> 2.0)
|
|
||||||
winrm-fs (~> 1.0)
|
|
||||||
winrm-fs (1.1.1)
|
|
||||||
erubis (~> 2.7)
|
|
||||||
logging (>= 1.6.1, < 3.0)
|
|
||||||
rubyzip (~> 1.1)
|
|
||||||
winrm (~> 2.0)
|
|
||||||
|
|
||||||
PLATFORMS
|
|
||||||
ruby
|
|
||||||
|
|
||||||
DEPENDENCIES
|
|
||||||
kitchen-ansible (= 0.48.1)
|
|
||||||
kitchen-docker (= 2.6.0)
|
|
||||||
net-ssh (= 4.2.0)
|
|
||||||
test-kitchen (= 1.20.0)
|
|
||||||
|
|
||||||
BUNDLED WITH
|
|
||||||
1.16.1
|
|
@ -1,13 +0,0 @@
|
|||||||
Copyright (c) 2012-2016 Elasticsearch <http://www.elastic.co>
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
@ -1,32 +0,0 @@
|
|||||||
default: build
|
|
||||||
|
|
||||||
SHELL:=/bin/bash -eux
|
|
||||||
VERSION := 6.x
|
|
||||||
PATTERN := xpack-ubuntu-1604
|
|
||||||
|
|
||||||
.PHONY: converge cerify test login destroy list
|
|
||||||
|
|
||||||
setup:
|
|
||||||
bundle install
|
|
||||||
docker ps
|
|
||||||
|
|
||||||
converge:
|
|
||||||
bundle exec kitchen converge $(PATTERN)
|
|
||||||
|
|
||||||
verify:
|
|
||||||
bundle exec kitchen verify $(PATTERN)
|
|
||||||
|
|
||||||
test:
|
|
||||||
bundle exec kitchen test $(PATTERN) --destroy=always
|
|
||||||
|
|
||||||
login:
|
|
||||||
bundle exec kitchen login $(PATTERN)
|
|
||||||
|
|
||||||
destroy:
|
|
||||||
bundle exec kitchen destroy $(PATTERN)
|
|
||||||
|
|
||||||
destroy-all:
|
|
||||||
bundle exec kitchen destroy
|
|
||||||
|
|
||||||
list:
|
|
||||||
bundle exec kitchen list
|
|
@ -1,478 +0,0 @@
|
|||||||
# ansible-elasticsearch
|
|
||||||
[](https://devops-ci.elastic.co/job/elastic+ansible-elasticsearch+master/)
|
|
||||||
[](https://galaxy.ansible.com/elastic/elasticsearch/)
|
|
||||||
|
|
||||||
**THIS ROLE IS FOR 6.x, 5.x. FOR 2.x SUPPORT PLEASE USE THE 2.x BRANCH.**
|
|
||||||
|
|
||||||
Ansible role for 6.x/5.x Elasticsearch. Currently this works on Debian and RedHat based linux systems. Tested platforms are:
|
|
||||||
|
|
||||||
* Ubuntu 14.04
|
|
||||||
* Ubuntu 16.04
|
|
||||||
* Debian 8
|
|
||||||
* CentOS 7
|
|
||||||
|
|
||||||
The latest Elasticsearch versions of 6.x and 5.x are actively tested. **Only Ansible versions > 2.4.3.0 are supported, as this is currently the only version tested.**
|
|
||||||
|
|
||||||
##### Dependency
|
|
||||||
This role uses the json_query filter which [requires jmespath](https://github.com/ansible/ansible/issues/24319) on the local machine.
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
Create your Ansible playbook with your own tasks, and include the role elasticsearch. You will have to have this repository accessible within the context of playbook.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
ansible-galaxy install elastic.elasticsearch
|
|
||||||
```
|
|
||||||
|
|
||||||
Then create your playbook yaml adding the role elasticsearch. By default, the user is only required to specify a unique es_instance_name per role application. This should be unique per node.
|
|
||||||
The application of the elasticsearch role results in the installation of a node on a host.
|
|
||||||
|
|
||||||
The simplest configuration therefore consists of:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: Simple Example
|
|
||||||
hosts: localhost
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
es_instance_name: "node1"
|
|
||||||
```
|
|
||||||
|
|
||||||
The above installs a single node 'node1' on the hosts 'localhost'.
|
|
||||||
|
|
||||||
This role also uses [Ansible tags](http://docs.ansible.com/ansible/playbooks_tags.html). Run your playbook with the `--list-tasks` flag for more information.
|
|
||||||
|
|
||||||
## Testing
|
|
||||||
|
|
||||||
This playbook uses [Kitchen](https://kitchen.ci/) for CI and local testing.
|
|
||||||
|
|
||||||
### Requirements
|
|
||||||
|
|
||||||
* Ruby
|
|
||||||
* Bundler
|
|
||||||
* Docker
|
|
||||||
* Make
|
|
||||||
|
|
||||||
### Running the tests
|
|
||||||
|
|
||||||
If you want to test X-Pack features with a license you will first need to export the `ES_XPACK_LICENSE_FILE` variable.
|
|
||||||
```sh
|
|
||||||
export ES_XPACK_LICENSE_FILE="$(pwd)/license.json"
|
|
||||||
```
|
|
||||||
|
|
||||||
To converge an Ubuntu 16.04 host running X-Pack
|
|
||||||
```sh
|
|
||||||
$ make converge
|
|
||||||
```
|
|
||||||
|
|
||||||
To run the tests
|
|
||||||
```sh
|
|
||||||
$ make verify
|
|
||||||
```
|
|
||||||
|
|
||||||
To list all of the different test suites
|
|
||||||
```sh
|
|
||||||
$ make list
|
|
||||||
```
|
|
||||||
|
|
||||||
The default test suite is Ubuntu 16.04 with X-Pack. If you want to test another suite you can override this with the `PATTERN` variable
|
|
||||||
```sh
|
|
||||||
$ make converge PATTERN=oss-centos-7
|
|
||||||
```
|
|
||||||
|
|
||||||
The `PATTERN` is a kitchen pattern which can match multiple suites. To run all tests for CentOS
|
|
||||||
```sh
|
|
||||||
$ make converge PATTERN=centos-7
|
|
||||||
```
|
|
||||||
|
|
||||||
The default version is 6.x If you want to test 5.x you can override it with the `VERSION` variable to test 5.x
|
|
||||||
```sh
|
|
||||||
$ make converge VERSION=5.x PATTERN=oss-centos-7
|
|
||||||
```
|
|
||||||
|
|
||||||
When you are finished testing you can clean up everything with
|
|
||||||
```sh
|
|
||||||
$ make destroy-all
|
|
||||||
```
|
|
||||||
|
|
||||||
### Basic Elasticsearch Configuration
|
|
||||||
|
|
||||||
All Elasticsearch configuration parameters are supported. This is achieved using a configuration map parameter 'es_config' which is serialized into the elasticsearch.yml file.
|
|
||||||
The use of a map ensures the Ansible playbook does not need to be updated to reflect new/deprecated/plugin configuration parameters.
|
|
||||||
|
|
||||||
In addition to the es_config map, several other parameters are supported for additional functions e.g. script installation. These can be found in the role's defaults/main.yml file.
|
|
||||||
|
|
||||||
The following illustrates applying configuration parameters to an Elasticsearch instance.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: Elasticsearch with custom configuration
|
|
||||||
hosts: localhost
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_data_dirs:
|
|
||||||
- "/opt/elasticsearch/data"
|
|
||||||
es_log_dir: "/opt/elasticsearch/logs"
|
|
||||||
es_config:
|
|
||||||
node.name: "node1"
|
|
||||||
cluster.name: "custom-cluster"
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9301"
|
|
||||||
http.port: 9201
|
|
||||||
transport.tcp.port: 9301
|
|
||||||
node.data: false
|
|
||||||
node.master: true
|
|
||||||
bootstrap.memory_lock: true
|
|
||||||
es_scripts: false
|
|
||||||
es_templates: false
|
|
||||||
es_version_lock: false
|
|
||||||
es_heap_size: 1g
|
|
||||||
es_api_port: 9201
|
|
||||||
```
|
|
||||||
|
|
||||||
Whilst the role installs Elasticsearch with the default configuration parameters, the following should be configured to ensure a cluster successfully forms:
|
|
||||||
|
|
||||||
* ```es_config['http.port']``` - the http port for the node
|
|
||||||
* ```es_config['transport.tcp.port']``` - the transport port for the node
|
|
||||||
* ```es_config['discovery.zen.ping.unicast.hosts']``` - the unicast discovery list, in the comma separated format ```"<host>:<port>,<host>:<port>"``` (typically the clusters dedicated masters)
|
|
||||||
* ```es_config['network.host']``` - sets both network.bind_host and network.publish_host to the same host value. The network.bind_host setting allows to control the host different network components will bind on.
|
|
||||||
|
|
||||||
The network.publish_host setting allows to control the host the node will publish itself within the cluster so other nodes will be able to connect to it.
|
|
||||||
|
|
||||||
See https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-network.html for further details on default binding behaviour and available options.
|
|
||||||
The role makes no attempt to enforce the setting of these and requires users to specify them appropriately. It is recommended that master nodes are listed and thus deployed first where possible.
|
|
||||||
|
|
||||||
A more complex example:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- name: Elasticsearch with custom configuration
|
|
||||||
hosts: localhost
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_data_dirs:
|
|
||||||
- "/opt/elasticsearch/data"
|
|
||||||
es_log_dir: "/opt/elasticsearch/logs"
|
|
||||||
es_config:
|
|
||||||
node.name: "node1"
|
|
||||||
cluster.name: "custom-cluster"
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9301"
|
|
||||||
http.port: 9201
|
|
||||||
transport.tcp.port: 9301
|
|
||||||
node.data: false
|
|
||||||
node.master: true
|
|
||||||
bootstrap.memory_lock: true
|
|
||||||
es_scripts: false
|
|
||||||
es_templates: false
|
|
||||||
es_version_lock: false
|
|
||||||
es_heap_size: 1g
|
|
||||||
es_start_service: false
|
|
||||||
es_plugins_reinstall: false
|
|
||||||
es_api_port: 9201
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
proxy_host: proxy.example.com
|
|
||||||
proxy_port: 8080
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Important Note
|
|
||||||
|
|
||||||
**The role uses es_api_host and es_api_port to communicate with the node for actions only achievable via http e.g. to install templates and to check the NODE IS ACTIVE. These default to "localhost" and 9200 respectively.
|
|
||||||
If the node is deployed to bind on either a different host or port, these must be changed.**
|
|
||||||
|
|
||||||
### Multi Node Server Installations
|
|
||||||
|
|
||||||
The application of the elasticsearch role results in the installation of a node on a host. Specifying the role multiple times for a host therefore results in the installation of multiple nodes for the host.
|
|
||||||
|
|
||||||
An example of a two server deployment is shown below. The first server holds the master and is thus declared first. Whilst not mandatory, this is recommended in any multi node cluster configuration. The second server hosts two data nodes.
|
|
||||||
|
|
||||||
**Note the structure of the below playbook for the data nodes. Whilst a more succinct structures are possible which allow the same role to be applied to a host multiple times, we have found the below structure to be the most reliable with respect to var behaviour. This is the tested approach.**
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
- hosts: master_nodes
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_config:
|
|
||||||
cluster.name: "test-cluster"
|
|
||||||
discovery.zen.ping.unicast.hosts: "elastic02:9300"
|
|
||||||
http.port: 9200
|
|
||||||
transport.tcp.port: 9300
|
|
||||||
node.data: false
|
|
||||||
node.master: true
|
|
||||||
bootstrap.memory_lock: false
|
|
||||||
es_scripts: false
|
|
||||||
es_templates: false
|
|
||||||
es_version_lock: false
|
|
||||||
ansible_user: ansible
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
|
|
||||||
- hosts: data_nodes
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_data_dirs:
|
|
||||||
- "/opt/elasticsearch"
|
|
||||||
es_config:
|
|
||||||
discovery.zen.ping.unicast.hosts: "elastic02:9300"
|
|
||||||
http.port: 9200
|
|
||||||
transport.tcp.port: 9300
|
|
||||||
node.data: true
|
|
||||||
node.master: false
|
|
||||||
bootstrap.memory_lock: false
|
|
||||||
cluster.name: "test-cluster"
|
|
||||||
es_scripts: false
|
|
||||||
es_templates: false
|
|
||||||
es_version_lock: false
|
|
||||||
ansible_user: ansible
|
|
||||||
es_api_port: 9200
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
|
|
||||||
- hosts: data_nodes
|
|
||||||
roles:
|
|
||||||
- role: elastic.elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node2"
|
|
||||||
es_api_port: 9201
|
|
||||||
es_config:
|
|
||||||
discovery.zen.ping.unicast.hosts: "elastic02:9300"
|
|
||||||
http.port: 9201
|
|
||||||
transport.tcp.port: 9301
|
|
||||||
node.data: true
|
|
||||||
node.master: false
|
|
||||||
bootstrap.memory_lock: false
|
|
||||||
cluster.name: "test-cluster"
|
|
||||||
es_scripts: false
|
|
||||||
es_templates: false
|
|
||||||
es_version_lock: false
|
|
||||||
es_api_port: 9201
|
|
||||||
ansible_user: ansible
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
```
|
|
||||||
|
|
||||||
Parameters can additionally be assigned to hosts using the inventory file if desired.
|
|
||||||
|
|
||||||
Make sure your hosts are defined in your ```inventory``` file with the appropriate ```ansible_ssh_host```, ```ansible_ssh_user``` and ```ansible_ssh_private_key_file``` values.
|
|
||||||
|
|
||||||
Then run it:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
ansible-playbook -i hosts ./your-playbook.yml
|
|
||||||
```
|
|
||||||
|
|
||||||
### Installing X-Pack Features
|
|
||||||
|
|
||||||
X-Pack features, such as Security, are supported. This feature is currently experimental.
|
|
||||||
|
|
||||||
The parameter `es_xpack_features` by default enables all features i.e. it defaults to ["alerting","monitoring","graph","security","ml"]
|
|
||||||
|
|
||||||
The following additional parameters allow X-Pack to be configured:
|
|
||||||
|
|
||||||
* ```es_message_auth_file``` System Key field to allow message authentication. This file should be placed in the 'files' directory.
|
|
||||||
* ```es_xpack_custom_url``` Url from which X-Pack can be downloaded. This can be used for installations in isolated environments where the elastic.co repo is not accessible. e.g. ```es_xpack_custom_url: "https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-5.5.1.zip"```
|
|
||||||
* ```es_role_mapping``` Role mappings file declared as yml as described [here](https://www.elastic.co/guide/en/x-pack/current/mapping-roles.html)
|
|
||||||
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
es_role_mapping:
|
|
||||||
power_user:
|
|
||||||
- "cn=admins,dc=example,dc=com"
|
|
||||||
user:
|
|
||||||
- "cn=users,dc=example,dc=com"
|
|
||||||
- "cn=admins,dc=example,dc=com"
|
|
||||||
```
|
|
||||||
|
|
||||||
* ```es_users``` - Users can be declared here as yml. Two sub keys 'native' and 'file' determine the realm under which realm the user is created. Beneath each of these keys users should be declared as yml entries. e.g.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
es_users:
|
|
||||||
native:
|
|
||||||
kibana4_server:
|
|
||||||
password: changeMe
|
|
||||||
roles:
|
|
||||||
- kibana4_server
|
|
||||||
file:
|
|
||||||
es_admin:
|
|
||||||
password: changeMe
|
|
||||||
roles:
|
|
||||||
- admin
|
|
||||||
testUser:
|
|
||||||
password: changeMeAlso!
|
|
||||||
roles:
|
|
||||||
- power_user
|
|
||||||
- user
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
* ```es_roles``` - Elasticsearch roles can be declared here as yml. Two sub keys 'native' and 'file' determine how the role is created i.e. either through a file or http(native) call. Beneath each key list the roles with appropriate permissions, using the file-based format described [here](https://www.elastic.co/guide/en/x-pack/current/file-realm.html) e.g.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
es_roles:
|
|
||||||
file:
|
|
||||||
admin:
|
|
||||||
cluster:
|
|
||||||
- all
|
|
||||||
indices:
|
|
||||||
- names: '*'
|
|
||||||
privileges:
|
|
||||||
- all
|
|
||||||
power_user:
|
|
||||||
cluster:
|
|
||||||
- monitor
|
|
||||||
indices:
|
|
||||||
- names: '*'
|
|
||||||
privileges:
|
|
||||||
- all
|
|
||||||
user:
|
|
||||||
indices:
|
|
||||||
- names: '*'
|
|
||||||
privileges:
|
|
||||||
- read
|
|
||||||
kibana4_server:
|
|
||||||
cluster:
|
|
||||||
- monitor
|
|
||||||
indices:
|
|
||||||
- names: '.kibana'
|
|
||||||
privileges:
|
|
||||||
- all
|
|
||||||
native:
|
|
||||||
logstash:
|
|
||||||
cluster:
|
|
||||||
- manage_index_templates
|
|
||||||
indices:
|
|
||||||
- names: 'logstash-*'
|
|
||||||
privileges:
|
|
||||||
- write
|
|
||||||
- delete
|
|
||||||
- create_index
|
|
||||||
```
|
|
||||||
|
|
||||||
* ```es_xpack_license``` - X-Pack license. The license is a json blob. Set the variable directly (possibly protected by Ansible vault) or from a file in the Ansible project on the control machine via a lookup:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
es_xpack_license: "{{ lookup('file', playbook_dir + '/files/' + es_cluster_name + '/license.json') }}"
|
|
||||||
```
|
|
||||||
|
|
||||||
X-Pack configuration parameters can be added to the elasticsearch.yml file using the normal `es_config` parameter.
|
|
||||||
|
|
||||||
For a full example see [here](https://github.com/elastic/ansible-elasticsearch/blob/master/test/integration/xpack-upgrade.yml)
|
|
||||||
|
|
||||||
#### Important Note for Native Realm Configuration
|
|
||||||
|
|
||||||
In order for native users and roles to be configured, the role calls the Elasticsearch API. Given security is installed this requires definition of two parameters:
|
|
||||||
|
|
||||||
* ```es_api_basic_auth_username``` - admin username
|
|
||||||
* ```es_api_basic_auth_password``` - admin password
|
|
||||||
|
|
||||||
These can either be set to a user declared in the file based realm, with admin permissions, or the default "elastic" superuser (default password is changeme).
|
|
||||||
|
|
||||||
|
|
||||||
### Additional Configuration
|
|
||||||
|
|
||||||
In addition to es_config, the following parameters allow the customization of the Java and Elasticsearch versions as well as the role behaviour. Options include:
|
|
||||||
|
|
||||||
* ```es_enable_xpack``` Default `true`. Setting this to `false` will install the oss release of elasticsearch
|
|
||||||
* ```es_major_version``` Should be consistent with es_version. For versions >= 5.0 and < 6.0 this must be "5.x". For versions >= 6.0 this must be "6.x".
|
|
||||||
* ```es_version``` (e.g. "6.3.0").
|
|
||||||
* ```es_api_host``` The host name used for actions requiring HTTP e.g. installing templates. Defaults to "localhost".
|
|
||||||
* ```es_api_port``` The port used for actions requiring HTTP e.g. installing templates. Defaults to 9200. **CHANGE IF THE HTTP PORT IS NOT 9200**
|
|
||||||
* ```es_api_basic_auth_username``` The Elasticsearch username for making admin changing actions. Used if Security is enabled. Ensure this user is admin.
|
|
||||||
* ```es_api_basic_auth_password``` The password associated with the user declared in `es_api_basic_auth_username`
|
|
||||||
* ```es_start_service``` (true (default) or false)
|
|
||||||
* ```es_plugins_reinstall``` (true or false (default) )
|
|
||||||
* ```es_plugins``` an array of plugin definitions e.g.:
|
|
||||||
```yaml
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
```
|
|
||||||
* ```es_path_repo``` Sets the whitelist for allowing local back-up repositories
|
|
||||||
* ```es_action_auto_create_index ``` Sets the value for auto index creation, use the syntax below for specifying indexes (else true/false):
|
|
||||||
es_action_auto_create_index: '[".watches", ".triggered_watches", ".watcher-history-*"]'
|
|
||||||
* ```es_allow_downgrades``` For development purposes only. (true or false (default) )
|
|
||||||
* ```es_java_install``` If set to false, Java will not be installed. (true (default) or false)
|
|
||||||
* ```update_java``` Updates Java to the latest version. (true or false (default))
|
|
||||||
* ```es_max_map_count``` maximum number of VMA (Virtual Memory Areas) a process can own. Defaults to 262144.
|
|
||||||
* ```es_max_open_files``` the maximum file descriptor number that can be opened by this process. Defaults to 65536.
|
|
||||||
* ```es_max_threads``` the maximum number of threads the process can start. Defaults to 2048 (the minimum required by elasticsearch).
|
|
||||||
* ```es_debian_startup_timeout``` how long Debian-family SysV init scripts wait for the service to start, in seconds. Defaults to 10 seconds.
|
|
||||||
|
|
||||||
Earlier examples illustrate the installation of plugins using `es_plugins`. For officially supported plugins no version or source delimiter is required. The plugin script will determine the appropriate plugin version based on the target Elasticsearch version. For community based plugins include the full url. This approach should NOT be used for the X-Pack plugin. See X-Pack below for details here.
|
|
||||||
|
|
||||||
If installing Monitoring or Alerting, ensure the license plugin is also specified. Security configuration currently has limited support, but more support is planned for later versions.
|
|
||||||
|
|
||||||
To configure X-pack to send mail, the following configuration can be added to the role. When require_auth is true, you will also need to provide the user and password. If not these can be removed:
|
|
||||||
```yaml
|
|
||||||
es_mail_config:
|
|
||||||
account: <functional name>
|
|
||||||
profile: standard
|
|
||||||
from: <from address>
|
|
||||||
require_auth: <true or false>
|
|
||||||
host: <mail domain>
|
|
||||||
port: <port number>
|
|
||||||
user: <e-mail address> --optional
|
|
||||||
pass: <password> --optional
|
|
||||||
```
|
|
||||||
|
|
||||||
* ```es_user``` - defaults to elasticsearch.
|
|
||||||
* ```es_group``` - defaults to elasticsearch.
|
|
||||||
* ```es_user_id``` - default is undefined.
|
|
||||||
* ```es_group_id``` - default is undefined.
|
|
||||||
|
|
||||||
Both ```es_user_id``` and ```es_group_id``` must be set for the user and group ids to be set.
|
|
||||||
|
|
||||||
By default, each node on a host will be installed to use unique pid, plugin, work, data and log directories. These directories are created, using the instance and host name, beneath default locations
|
|
||||||
controlled by the following parameters:
|
|
||||||
|
|
||||||
* ```es_pid_dir``` - defaults to "/var/run/elasticsearch".
|
|
||||||
* ```es_data_dirs``` - defaults to "/var/lib/elasticsearch". This can be a list or comma separated string e.g. ["/opt/elasticsearch/data-1","/opt/elasticsearch/data-2"] or "/opt/elasticsearch/data-1,/opt/elasticsearch/data-2"
|
|
||||||
* ```es_log_dir``` - defaults to "/var/log/elasticsearch".
|
|
||||||
* ```es_restart_on_change``` - defaults to true. If false, changes will not result in Elasticsearch being restarted.
|
|
||||||
* ```es_plugins_reinstall``` - defaults to false. If true, all currently installed plugins will be removed from a node. Listed plugins will then be re-installed.
|
|
||||||
|
|
||||||
This role ships with sample scripts and templates located in the [files/scripts/](files/scripts) and [files/templates/](files/templates) directories, respectively. These variables are used with the Ansible [with_fileglob](http://docs.ansible.com/ansible/playbooks_loops.html#id4) loop. When setting the globs, be sure to use an absolute path.
|
|
||||||
* ```es_scripts_fileglob``` - defaults to `<role>/files/scripts/`.
|
|
||||||
* ```es_templates_fileglob``` - defaults to `<role>/files/templates/`.
|
|
||||||
|
|
||||||
### Proxy
|
|
||||||
|
|
||||||
To define proxy globally, set the following variables:
|
|
||||||
|
|
||||||
* ```es_proxy_host``` - global proxy host
|
|
||||||
* ```es_proxy_port``` - global proxy port
|
|
||||||
|
|
||||||
To define proxy only for a particular plugin during its installation:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
proxy_host: proxy.example.com
|
|
||||||
proxy_port: 8080
|
|
||||||
```
|
|
||||||
|
|
||||||
> For plugins installation, proxy_host and proxy_port are used first if they are defined and fallback to the global proxy settings if not. The same values are currently used for both the http and https proxy settings.
|
|
||||||
|
|
||||||
## Notes
|
|
||||||
|
|
||||||
* The role assumes the user/group exists on the server. The elasticsearch packages create the default elasticsearch user. If this needs to be changed, ensure the user exists.
|
|
||||||
* The playbook relies on the inventory_name of each host to ensure its directories are unique
|
|
||||||
* Changing an instance_name for a role application will result in the installation of a new component. The previous component will remain.
|
|
||||||
* KitchenCI has been used for testing. This is used to confirm images reach the correct state after a play is first applied. We currently test the latest version of 6.x and 5.x on all supported platforms.
|
|
||||||
* The role aims to be idempotent. Running the role multiple times, with no changes, should result in no state change on the server. If the configuration is changed, these will be applied and Elasticsearch restarted where required.
|
|
||||||
* Systemd is used for Ubuntu versions >= 15, Debian >=8, Centos >=7. All other versions use init for service scripts.
|
|
||||||
* In order to run x-pack tests a license file with security enabled is required. A trial license is appropriate. Set the environment variable `ES_XPACK_LICENSE_FILE` to the full path of the license file prior to running tests.
|
|
||||||
|
|
||||||
## IMPORTANT NOTES RE PLUGIN MANAGEMENT
|
|
||||||
|
|
||||||
* If the ES version is changed, all plugins will be removed. Those listed in the playbook will be re-installed. This behaviour is required in ES 6.x.
|
|
||||||
* If no plugins are listed in the playbook for a node, all currently installed plugins will be removed.
|
|
||||||
* The role supports automatic detection of differences between installed and listed plugins - installing those listed but not installed, and removing those installed but not listed. Should users wish to re-install plugins they should set es_plugins_reinstall to true. This will cause all currently installed plugins to be removed and those listed to be installed.
|
|
||||||
|
|
||||||
## Questions on Usage
|
|
||||||
|
|
||||||
We welcome questions on how to use the role. However, in order to keep the GitHub issues list focused on "issues" we ask the community to raise questions at https://discuss.elastic.co/c/elasticsearch. This is monitored by the maintainers.
|
|
@ -1 +0,0 @@
|
|||||||
[defaults]
|
|
@ -1,47 +0,0 @@
|
|||||||
---
|
|
||||||
es_major_version: "6.x"
|
|
||||||
es_version: "6.4.0"
|
|
||||||
es_use_snapshot_release: false
|
|
||||||
es_enable_xpack: true
|
|
||||||
es_package_name: "elasticsearch"
|
|
||||||
es_version_lock: false
|
|
||||||
es_use_repository: true
|
|
||||||
es_templates_fileglob: "files/templates/*.json"
|
|
||||||
es_apt_key: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
|
|
||||||
es_apt_url: "deb https://artifacts.elastic.co/packages/{{ es_repo_name }}/apt stable main"
|
|
||||||
es_apt_url_old: "deb http://packages.elastic.co/elasticsearch/{{ es_repo_name }}/debian stable main"
|
|
||||||
es_start_service: true
|
|
||||||
es_java_install: true
|
|
||||||
update_java: false
|
|
||||||
es_restart_on_change: true
|
|
||||||
es_plugins_reinstall: false
|
|
||||||
es_scripts: false
|
|
||||||
es_templates: false
|
|
||||||
es_user: elasticsearch
|
|
||||||
es_group: elasticsearch
|
|
||||||
es_config: {}
|
|
||||||
es_config_log4j2: log4j2.properties.j2
|
|
||||||
#Need to provide default directories
|
|
||||||
es_pid_dir: "/var/run/elasticsearch"
|
|
||||||
es_data_dirs: "/var/lib/elasticsearch"
|
|
||||||
es_log_dir: "/var/log/elasticsearch"
|
|
||||||
es_action_auto_create_index: true
|
|
||||||
es_max_open_files: 65536
|
|
||||||
es_max_threads: "{{ 2048 if ( es_version | version_compare('6.0.0', '<')) else 8192 }}"
|
|
||||||
es_max_map_count: 262144
|
|
||||||
es_allow_downgrades: false
|
|
||||||
es_xpack_features: ["alerting","monitoring","graph","ml","security"]
|
|
||||||
#These are used for internal operations performed by ansible.
|
|
||||||
#They do not affect the current configuration
|
|
||||||
es_api_host: "localhost"
|
|
||||||
es_api_port: 9200
|
|
||||||
es_debian_startup_timeout: 10
|
|
||||||
|
|
||||||
# Since ansible 2.2 the following variables need to be defined
|
|
||||||
# to allow the role to be conditionally played with a when condition.
|
|
||||||
pid_dir: ''
|
|
||||||
log_dir: ''
|
|
||||||
conf_dir: ''
|
|
||||||
data_dirs: ''
|
|
||||||
# JVM custom parameters
|
|
||||||
es_jvm_custom_parameters: ''
|
|
@ -1,76 +0,0 @@
|
|||||||
#CUSTOM LOG4J FILE
|
|
||||||
|
|
||||||
status = error
|
|
||||||
|
|
||||||
# log action execution errors for easier debugging
|
|
||||||
logger.action.name = org.elasticsearch.action
|
|
||||||
logger.action.level = info
|
|
||||||
|
|
||||||
appender.console.type = Console
|
|
||||||
appender.console.name = console
|
|
||||||
appender.console.layout.type = PatternLayout
|
|
||||||
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
|
|
||||||
|
|
||||||
appender.rolling.type = RollingFile
|
|
||||||
appender.rolling.name = rolling
|
|
||||||
appender.rolling.fileName = ${sys:es.logs}.log
|
|
||||||
appender.rolling.layout.type = PatternLayout
|
|
||||||
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
|
|
||||||
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
|
|
||||||
appender.rolling.policies.type = Policies
|
|
||||||
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
|
|
||||||
appender.rolling.policies.time.interval = 1
|
|
||||||
appender.rolling.policies.time.modulate = true
|
|
||||||
|
|
||||||
rootLogger.level = debug
|
|
||||||
rootLogger.appenderRef.console.ref = console
|
|
||||||
rootLogger.appenderRef.rolling.ref = rolling
|
|
||||||
|
|
||||||
appender.deprecation_rolling.type = RollingFile
|
|
||||||
appender.deprecation_rolling.name = deprecation_rolling
|
|
||||||
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
|
|
||||||
appender.deprecation_rolling.layout.type = PatternLayout
|
|
||||||
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
|
|
||||||
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
|
|
||||||
appender.deprecation_rolling.policies.type = Policies
|
|
||||||
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
|
|
||||||
appender.deprecation_rolling.policies.size.size = 10mb
|
|
||||||
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
|
|
||||||
appender.deprecation_rolling.strategy.max = 4
|
|
||||||
|
|
||||||
logger.deprecation.name = org.elasticsearch.deprecation
|
|
||||||
logger.deprecation.level = debug
|
|
||||||
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
|
|
||||||
logger.deprecation.additivity = false
|
|
||||||
|
|
||||||
appender.index_search_slowlog_rolling.type = RollingFile
|
|
||||||
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
|
|
||||||
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
|
|
||||||
appender.index_search_slowlog_rolling.layout.type = PatternLayout
|
|
||||||
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
|
|
||||||
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
|
|
||||||
appender.index_search_slowlog_rolling.policies.type = Policies
|
|
||||||
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
|
||||||
appender.index_search_slowlog_rolling.policies.time.interval = 1
|
|
||||||
appender.index_search_slowlog_rolling.policies.time.modulate = true
|
|
||||||
|
|
||||||
logger.index_search_slowlog_rolling.name = index.search.slowlog
|
|
||||||
logger.index_search_slowlog_rolling.level = debug
|
|
||||||
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
|
|
||||||
logger.index_search_slowlog_rolling.additivity = false
|
|
||||||
|
|
||||||
appender.index_indexing_slowlog_rolling.type = RollingFile
|
|
||||||
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
|
|
||||||
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
|
|
||||||
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
|
|
||||||
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
|
|
||||||
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.type = Policies
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
|
|
||||||
|
|
||||||
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
|
|
||||||
logger.index_indexing_slowlog.level = debug
|
|
||||||
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
|
|
||||||
logger.index_indexing_slowlog.additivity = false
|
|
@ -1 +0,0 @@
|
|||||||
log(_score * 2) + my_modifier
|
|
Binary file not shown.
@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"template" : "te*",
|
|
||||||
"settings" : {
|
|
||||||
"number_of_shards" : 1
|
|
||||||
},
|
|
||||||
"mappings" : {
|
|
||||||
"type1" : {
|
|
||||||
"_source" : { "enabled" : false }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,57 +0,0 @@
|
|||||||
__author__ = 'dale mcdiarmid'
|
|
||||||
|
|
||||||
import re
|
|
||||||
import os.path
|
|
||||||
from six import string_types
|
|
||||||
|
|
||||||
def modify_list(values=[], pattern='', replacement='', ignorecase=False):
    """Apply ``re.sub(pattern, replacement)`` to every item in *values*.

    :param values: iterable of strings to transform
    :param pattern: regular expression searched for in each item
    :param replacement: text substituted for every match
    :param ignorecase: when true, compile the pattern case-insensitively
    :returns: a new list with the substitution applied to each element
    """
    compiled = re.compile(pattern, flags=re.I if ignorecase else 0)
    return [compiled.sub(replacement, item) for item in values]
|
|
||||||
|
|
||||||
def append_to_list(values=[], suffix=''):
    """Append *suffix* to every element of *values*.

    :param values: a list of strings, or a single comma-separated string
        which is first split on ','
    :param suffix: text appended to each element
    :returns: list of strings with the suffix applied
    """
    # Plain `str` replaces the original six.string_types check: on
    # Python 3 `six.string_types` is just `(str,)`, so behaviour is the
    # same and the third-party `six` dependency is no longer needed.
    if isinstance(values, str):
        values = values.split(',')
    return [str(value + suffix) for value in values]
|
|
||||||
|
|
||||||
def array_to_str(values=[], separator=','):
    """Concatenate the strings in *values*, placing *separator* between them."""
    joined = separator.join(values)
    return joined
|
|
||||||
|
|
||||||
def extract_role_users(users={}, exclude_users=[]):
    """Flatten a user->details mapping into ``"role:user"`` strings.

    :param users: mapping of user name to a details dict which may carry a
        "roles" list
    :param exclude_users: user names to skip entirely
    :returns: list of "role:user" strings, one per (role, user) pair
    """
    role_users = []
    # .items() works on both Python 2 and 3; the original .iteritems()
    # raises AttributeError under Python 3.
    for user, details in users.items():
        if user not in exclude_users and "roles" in details:
            for role in details["roles"]:
                role_users.append(role + ":" + user)
    return role_users
|
|
||||||
|
|
||||||
def filename(filename=''):
    """Return the base name of *filename* with directory and final extension stripped."""
    base = os.path.basename(filename)
    stem, _ext = os.path.splitext(base)
    return stem
|
|
||||||
|
|
||||||
def remove_reserved(user_roles={}):
    """Return the names of entries NOT flagged reserved.

    An entry is reserved only when its details carry
    ``metadata._reserved`` with a truthy value; everything else is kept.
    """
    unreserved = []
    for name, details in user_roles.items():
        is_reserved = (
            "metadata" in details
            and "_reserved" in details["metadata"]
            and details["metadata"]["_reserved"]
        )
        if not is_reserved:
            unreserved.append(name)
    return unreserved
|
|
||||||
|
|
||||||
def filter_reserved(users_role={}):
    """Return the names of entries flagged reserved.

    An entry counts as reserved when its details carry
    ``metadata._reserved`` with a truthy value.
    """
    return [
        name
        for name, details in users_role.items()
        if "metadata" in details
        and "_reserved" in details["metadata"]
        and details["metadata"]["_reserved"]
    ]
|
|
||||||
|
|
||||||
class FilterModule(object):
    """Expose this module's helper functions as Ansible Jinja2 filters."""

    def filters(self):
        """Return the mapping of template filter name -> implementation."""
        return {
            'modify_list': modify_list,
            'append_to_list': append_to_list,
            'filter_reserved': filter_reserved,
            'array_to_str': array_to_str,
            'extract_role_users': extract_role_users,
            'remove_reserved': remove_reserved,
            'filename': filename,
        }
|
|
@ -1,14 +0,0 @@
|
|||||||
|
|
||||||
- name: reload systemd configuration
|
|
||||||
become: yes
|
|
||||||
command: systemctl daemon-reload
|
|
||||||
|
|
||||||
# Restart service and ensure it is enabled
|
|
||||||
|
|
||||||
- name: restart elasticsearch
|
|
||||||
become: yes
|
|
||||||
service: name={{instance_init_script | basename}} state=restarted enabled=yes
|
|
||||||
when:
|
|
||||||
- es_restart_on_change
|
|
||||||
- es_start_service
|
|
||||||
register: es_restarted
|
|
@ -1 +0,0 @@
|
|||||||
{install_date: 'Thu Nov 8 16:52:39 2018', version: 6.4.0}
|
|
@ -1,25 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
allow_duplicates: yes
|
|
||||||
|
|
||||||
galaxy_info:
|
|
||||||
author: Robin Clarke, Jakob Reiter, Dale McDiarmid
|
|
||||||
description: Elasticsearch for Linux
|
|
||||||
company: "Elastic.co"
|
|
||||||
license: "license (Apache)"
|
|
||||||
min_ansible_version: 2.3.2
|
|
||||||
platforms:
|
|
||||||
- name: EL
|
|
||||||
versions:
|
|
||||||
- 6
|
|
||||||
- 7
|
|
||||||
- name: Debian
|
|
||||||
versions:
|
|
||||||
- all
|
|
||||||
- name: Ubuntu
|
|
||||||
versions:
|
|
||||||
- all
|
|
||||||
categories:
|
|
||||||
- system
|
|
||||||
|
|
||||||
dependencies: []
|
|
@ -1,38 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
# It is possible to set these are defaults with messy jinja templating one liners however:
|
|
||||||
# 1. That is really hard to read and debug
|
|
||||||
# 2. When running multiple plays with the same role the defaults are not re-evaluated. An example of this
|
|
||||||
# can be seen in our the https://github.com/elastic/ansible-elasticsearch/blob/master/test/integration/xpack.yml
|
|
||||||
# integration test and in the Multi Node server documentation examples https://github.com/elastic/ansible-elasticsearch/blob/master/test/integration/xpack.yml
|
|
||||||
|
|
||||||
- name: Set the defaults here otherwise they can't be overriden in the same play if the role is called twice
|
|
||||||
set_fact:
|
|
||||||
es_open_xpack: true
|
|
||||||
es_install_xpack: false
|
|
||||||
es_users_path: "users"
|
|
||||||
es_xpack_conf_subdir: ""
|
|
||||||
es_repo_name: "{{ es_major_version }}"
|
|
||||||
es_xpack_users_command: "elasticsearch-users"
|
|
||||||
|
|
||||||
- name: Detect if es_version is before X-Pack was open and included
|
|
||||||
set_fact:
|
|
||||||
es_open_xpack: false
|
|
||||||
when: "es_version | version_compare('6.3.0', '<')"
|
|
||||||
|
|
||||||
- name: If this is an older version we need to install X-Pack as a plugin and use a differet users command
|
|
||||||
set_fact:
|
|
||||||
es_install_xpack: true
|
|
||||||
es_xpack_users_command: "x-pack/users"
|
|
||||||
es_xpack_conf_subdir: "/x-pack"
|
|
||||||
when:
|
|
||||||
- not es_open_xpack
|
|
||||||
- es_enable_xpack
|
|
||||||
|
|
||||||
- name: Use the oss repo and package if xpack is not being used
|
|
||||||
set_fact:
|
|
||||||
es_repo_name: "{{ 'oss-' + es_major_version }}"
|
|
||||||
es_package_name: "elasticsearch-oss"
|
|
||||||
when:
|
|
||||||
- es_open_xpack
|
|
||||||
- not es_enable_xpack
|
|
@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Debian - hold elasticsearch version
|
|
||||||
become: yes
|
|
||||||
command: "apt-mark hold {{ es_package_name }}"
|
|
||||||
register: hold_elasticsearch_result
|
|
||||||
changed_when: "hold_elasticsearch_result.stdout != '{{ es_package_name }} was already set on hold.'"
|
|
@ -1,80 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: set fact force_install to no
|
|
||||||
set_fact: force_install=no
|
|
||||||
|
|
||||||
- name: set fact force_install to yes
|
|
||||||
set_fact: force_install=yes
|
|
||||||
when: es_allow_downgrades
|
|
||||||
|
|
||||||
- name: Debian - Install apt-transport-https to support https APT downloads
|
|
||||||
become: yes
|
|
||||||
apt: name=apt-transport-https state=present
|
|
||||||
when: es_use_repository
|
|
||||||
|
|
||||||
- name: Debian - Add Elasticsearch repository key
|
|
||||||
become: yes
|
|
||||||
apt_key: url="{{ es_apt_key }}" state=present
|
|
||||||
when: es_use_repository and es_apt_key
|
|
||||||
|
|
||||||
- name: Debian - Add elasticsearch repository
|
|
||||||
become: yes
|
|
||||||
apt_repository: repo={{ item.repo }} state={{ item.state}}
|
|
||||||
with_items:
|
|
||||||
- { repo: "{{ es_apt_url_old }}", state: "absent" }
|
|
||||||
- { repo: "{{ es_apt_url }}", state: "present" }
|
|
||||||
when: es_use_repository
|
|
||||||
|
|
||||||
|
|
||||||
- name: Gracefully stop and remove elasticsearch if we are switching to the oss version
|
|
||||||
when:
|
|
||||||
- es_package_name == 'elasticsearch-oss'
|
|
||||||
block:
|
|
||||||
- name: Check if the elasticsearch package is installed
|
|
||||||
shell: dpkg-query -W -f'${Status}' elasticsearch
|
|
||||||
register: elasticsearch_package
|
|
||||||
failed_when: False
|
|
||||||
changed_when: False
|
|
||||||
|
|
||||||
- name: stop elasticsearch
|
|
||||||
become: yes
|
|
||||||
service:
|
|
||||||
name: '{{ instance_init_script | basename }}'
|
|
||||||
state: stopped
|
|
||||||
when: elasticsearch_package.stdout == 'install ok installed'
|
|
||||||
|
|
||||||
- name: Debian - Remove elasticsearch package if we are installing the oss package
|
|
||||||
become: yes
|
|
||||||
apt:
|
|
||||||
name: 'elasticsearch'
|
|
||||||
state: absent
|
|
||||||
when: elasticsearch_package.stdout == 'install ok installed'
|
|
||||||
|
|
||||||
- name: Debian - Ensure elasticsearch is installed
|
|
||||||
become: yes
|
|
||||||
apt:
|
|
||||||
name: '{{ es_package_name }}{% if es_version is defined and es_version != "" %}={{ es_version }}{% endif %}'
|
|
||||||
state: present
|
|
||||||
force: '{{ force_install }}'
|
|
||||||
allow_unauthenticated: "{{ 'no' if es_apt_key else 'yes' }}"
|
|
||||||
cache_valid_time: 86400
|
|
||||||
when: es_use_repository
|
|
||||||
register: debian_elasticsearch_install_from_repo
|
|
||||||
notify: restart elasticsearch
|
|
||||||
environment:
|
|
||||||
ES_PATH_CONF: "/etc/elasticsearch"
|
|
||||||
|
|
||||||
- name: Debian - Include versionlock
|
|
||||||
include: elasticsearch-Debian-version-lock.yml
|
|
||||||
when: es_version_lock
|
|
||||||
|
|
||||||
- name: Debian - Download elasticsearch from url
|
|
||||||
get_url: url={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.deb{% endif %} dest=/tmp/elasticsearch-{{ es_version }}.deb validate_certs=no
|
|
||||||
when: not es_use_repository
|
|
||||||
|
|
||||||
- name: Debian - Ensure elasticsearch is installed from downloaded package
|
|
||||||
become: yes
|
|
||||||
apt: deb=/tmp/elasticsearch-{{ es_version }}.deb
|
|
||||||
when: not es_use_repository
|
|
||||||
register: elasticsearch_install_from_package
|
|
||||||
notify: restart elasticsearch
|
|
@ -1,7 +0,0 @@
|
|||||||
---
|
|
||||||
- name: RedHat - install yum-version-lock
|
|
||||||
become: yes
|
|
||||||
yum: name=yum-plugin-versionlock state=present update_cache=yes
|
|
||||||
- name: RedHat - lock elasticsearch version
|
|
||||||
become: yes
|
|
||||||
shell: yum versionlock delete 0:elasticsearch* ; yum versionlock add {{ es_package_name }}{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %}
|
|
@ -1,51 +0,0 @@
|
|||||||
---
|
|
||||||
- name: set fact allow_downgrade to no
|
|
||||||
set_fact: allow_downgrade=no
|
|
||||||
|
|
||||||
- name: set fact allow_downgrade to yes
|
|
||||||
set_fact: allow_downgrade=yes
|
|
||||||
when: es_allow_downgrades
|
|
||||||
|
|
||||||
- name: Ensure libselinux-python on CentOS 6.x
|
|
||||||
become: yes
|
|
||||||
yum: name=libselinux-python state=present update_cache=yes
|
|
||||||
when: ( ansible_distribution == "CentOS" ) and ( ansible_distribution_major_version == "6" )
|
|
||||||
|
|
||||||
- name: RedHat - add Elasticsearch repo
|
|
||||||
become: yes
|
|
||||||
template: src=elasticsearch.repo dest=/etc/yum.repos.d/elasticsearch-{{ es_repo_name }}.repo
|
|
||||||
when: es_use_repository
|
|
||||||
|
|
||||||
- name: RedHat - include versionlock
|
|
||||||
include: elasticsearch-RedHat-version-lock.yml
|
|
||||||
when: es_version_lock
|
|
||||||
|
|
||||||
- name: RedHat - Remove non oss package if the old elasticsearch package is installed
|
|
||||||
become: yes
|
|
||||||
yum:
|
|
||||||
name: 'elasticsearch'
|
|
||||||
state: 'absent'
|
|
||||||
when: es_package_name == 'elasticsearch-oss'
|
|
||||||
|
|
||||||
- name: RedHat - Install Elasticsearch
|
|
||||||
become: yes
|
|
||||||
yum:
|
|
||||||
name: '{{ es_package_name }}{% if es_version is defined and es_version != "" %}-{{ es_version }}{% endif %}'
|
|
||||||
state: present
|
|
||||||
update_cache: yes
|
|
||||||
allow_downgrade: '{{ allow_downgrade }}'
|
|
||||||
when: es_use_repository
|
|
||||||
register: redhat_elasticsearch_install_from_repo
|
|
||||||
notify: restart elasticsearch
|
|
||||||
until: redhat_elasticsearch_install_from_repo.rc == 0
|
|
||||||
retries: 5
|
|
||||||
delay: 10
|
|
||||||
environment:
|
|
||||||
ES_PATH_CONF: "/etc/elasticsearch"
|
|
||||||
|
|
||||||
- name: RedHat - Install Elasticsearch from url
|
|
||||||
become: yes
|
|
||||||
yum: name={% if es_custom_package_url is defined %}{{ es_custom_package_url }}{% else %}{{ es_package_url }}-{{ es_version }}.noarch.rpm{% endif %} state=present
|
|
||||||
when: not es_use_repository
|
|
||||||
register: elasticsearch_install_from_package
|
|
||||||
notify: restart elasticsearch
|
|
@ -1,129 +0,0 @@
|
|||||||
---
|
|
||||||
# Configure Elasticsearch Node
|
|
||||||
|
|
||||||
#Create required directories
|
|
||||||
- name: Create Directories
|
|
||||||
become: yes
|
|
||||||
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
|
|
||||||
with_items:
|
|
||||||
- "{{pid_dir}}"
|
|
||||||
- "{{log_dir}}"
|
|
||||||
- "{{conf_dir}}"
|
|
||||||
|
|
||||||
- name: Create Data Directories
|
|
||||||
become: yes
|
|
||||||
file: path={{ item }} state=directory owner={{ es_user }} group={{ es_group }}
|
|
||||||
with_items:
|
|
||||||
- "{{data_dirs}}"
|
|
||||||
|
|
||||||
|
|
||||||
#Copy the config template
|
|
||||||
- name: Copy Configuration File
|
|
||||||
become: yes
|
|
||||||
template: src=elasticsearch.yml.j2 dest={{conf_dir}}/elasticsearch.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
|
|
||||||
register: system_change
|
|
||||||
notify: restart elasticsearch
|
|
||||||
|
|
||||||
#Copy the instance specific default file
|
|
||||||
- name: Copy Default File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=elasticsearch.j2 dest={{instance_default_file}} mode=0644 force=yes
|
|
||||||
notify: restart elasticsearch
|
|
||||||
|
|
||||||
#Copy the instance specific init file
|
|
||||||
- name: Copy Debian Init File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=init/debian/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
|
|
||||||
when: ansible_os_family == 'Debian' and not use_system_d
|
|
||||||
notify: restart elasticsearch
|
|
||||||
|
|
||||||
#Copy the instance specific init file
|
|
||||||
- name: Copy Redhat Init File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=init/redhat/elasticsearch.j2 dest={{instance_init_script}} mode=0755 force=yes
|
|
||||||
when: ansible_os_family == 'RedHat' and not use_system_d
|
|
||||||
notify: restart elasticsearch
|
|
||||||
|
|
||||||
#Copy the systemd specific file if systemd is installed
|
|
||||||
- name: Copy Systemd File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=systemd/elasticsearch.j2 dest={{instance_sysd_script}} mode=0644 force=yes
|
|
||||||
when: use_system_d
|
|
||||||
notify:
|
|
||||||
- reload systemd configuration
|
|
||||||
- restart elasticsearch
|
|
||||||
|
|
||||||
#Copy the logging.yml
|
|
||||||
- name: Copy log4j2.properties File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src={{es_config_log4j2}} dest={{conf_dir}}/log4j2.properties owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
|
|
||||||
notify: restart elasticsearch
|
|
||||||
|
|
||||||
- name: Copy jvm.options File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=jvm.options.j2 dest={{conf_dir}}/jvm.options owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
|
|
||||||
notify: restart elasticsearch
|
|
||||||
|
|
||||||
#Clean up un-wanted package scripts to avoid confusion
|
|
||||||
|
|
||||||
- name: Delete Default Init
|
|
||||||
become: yes
|
|
||||||
file: dest=/etc/init.d/elasticsearch state=absent
|
|
||||||
|
|
||||||
- name: Create empty default environment file
|
|
||||||
become: yes
|
|
||||||
changed_when: False
|
|
||||||
copy:
|
|
||||||
dest: /etc/default/elasticsearch
|
|
||||||
content: ''
|
|
||||||
when: ansible_os_family == 'Debian'
|
|
||||||
|
|
||||||
- name: Create empty default environment file
|
|
||||||
become: yes
|
|
||||||
changed_when: False
|
|
||||||
copy:
|
|
||||||
dest: /etc/sysconfig/elasticsearch
|
|
||||||
content: ''
|
|
||||||
when: ansible_os_family == 'RedHat'
|
|
||||||
|
|
||||||
- name: Symlink default systemd service to first instance of elasticsearch
|
|
||||||
when: use_system_d
|
|
||||||
block:
|
|
||||||
- name: Check if default systemd file exists
|
|
||||||
stat:
|
|
||||||
path: "{{ sysd_script }}"
|
|
||||||
register: sysd_stat_result
|
|
||||||
|
|
||||||
- name: Remove if it is a normal file
|
|
||||||
become: yes
|
|
||||||
file:
|
|
||||||
path: "{{ sysd_script }}"
|
|
||||||
state: absent
|
|
||||||
when: sysd_stat_result.stat.exists and not sysd_stat_result.stat.islnk
|
|
||||||
|
|
||||||
- name: Create a symbolic link to the default systemd location to the first instance running on this host
|
|
||||||
become: yes
|
|
||||||
file:
|
|
||||||
state: link
|
|
||||||
src: "{{ instance_sysd_script }}"
|
|
||||||
path: "{{ sysd_script }}"
|
|
||||||
when: sysd_stat_result.stat.exists and not sysd_stat_result.stat.islnk
|
|
||||||
notify:
|
|
||||||
- reload systemd configuration
|
|
||||||
- restart elasticsearch
|
|
||||||
|
|
||||||
- name: Delete Default Configuration File
|
|
||||||
become: yes
|
|
||||||
file: dest=/etc/elasticsearch/elasticsearch.yml state=absent
|
|
||||||
|
|
||||||
- name: Delete Default Logging File
|
|
||||||
become: yes
|
|
||||||
file: dest=/etc/elasticsearch/logging.yml state=absent
|
|
||||||
|
|
||||||
- name: Delete Default Logging File
|
|
||||||
become: yes
|
|
||||||
file: dest=/etc/elasticsearch/log4j2.properties state=absent
|
|
||||||
|
|
||||||
- name: Delete Default JVM Options File
|
|
||||||
become: yes
|
|
||||||
file: dest=/etc/elasticsearch/jvm.options state=absent
|
|
@ -1,24 +0,0 @@
|
|||||||
---
|
|
||||||
#Add the elasticsearch user before installing from packages.
|
|
||||||
- name: Ensure optional elasticsearch group is created with the correct id.
|
|
||||||
become: yes
|
|
||||||
#Restart if these change
|
|
||||||
notify: restart elasticsearch
|
|
||||||
group:
|
|
||||||
state: present
|
|
||||||
name: "{{ es_group }}"
|
|
||||||
system: yes
|
|
||||||
gid: "{{ es_group_id }}"
|
|
||||||
|
|
||||||
- name: Ensure optional elasticsearch user is created with the correct id.
|
|
||||||
become: yes
|
|
||||||
#Restart if these change
|
|
||||||
notify: restart elasticsearch
|
|
||||||
user:
|
|
||||||
state: present
|
|
||||||
name: "{{ es_user }}"
|
|
||||||
comment: elasticsearch system user
|
|
||||||
system: yes
|
|
||||||
createhome: no
|
|
||||||
uid: "{{ es_user_id }}"
|
|
||||||
group: "{{ es_group }}"
|
|
@ -1,75 +0,0 @@
|
|||||||
# Check for mandatory parameters
|
|
||||||
|
|
||||||
- name: fail when es_instance is not defined
|
|
||||||
fail: msg="es_instance_name must be specified and cannot be blank"
|
|
||||||
when: es_instance_name is not defined or es_instance_name == ''
|
|
||||||
|
|
||||||
- name: fail when es_proxy_port is not defined or is blank
|
|
||||||
fail: msg="es_proxy_port must be specified and cannot be blank when es_proxy_host is defined"
|
|
||||||
when: (es_proxy_port is not defined or es_proxy_port == '') and (es_proxy_host is defined and es_proxy_host != '')
|
|
||||||
|
|
||||||
- name: debug message
|
|
||||||
debug: msg="WARNING - It is recommended you specify the parameter 'http.port'"
|
|
||||||
when: es_config['http.port'] is not defined
|
|
||||||
|
|
||||||
- name: debug message
|
|
||||||
debug: msg="WARNING - It is recommended you specify the parameter 'transport.tcp.port'"
|
|
||||||
when: es_config['transport.tcp.port'] is not defined
|
|
||||||
|
|
||||||
- name: debug message
|
|
||||||
debug: msg="WARNING - It is recommended you specify the parameter 'discovery.zen.ping.unicast.hosts'"
|
|
||||||
when: es_config['discovery.zen.ping.unicast.hosts'] is not defined
|
|
||||||
|
|
||||||
#If the user attempts to lock memory they must specify a heap size
|
|
||||||
- name: fail when heap size is not specified when using memory lock
|
|
||||||
fail: msg="If locking memory with bootstrap.memory_lock a heap size must be specified"
|
|
||||||
when: es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True and es_heap_size is not defined
|
|
||||||
|
|
||||||
#Check if working with security we have an es_api_basic_auth_username and es_api_basic_auth_username - otherwise any http calls wont work
|
|
||||||
- name: fail when api credentials are not declared when using security
|
|
||||||
fail: msg="Enabling security requires an es_api_basic_auth_username and es_api_basic_auth_password to be provided to allow cluster operations"
|
|
||||||
when: es_enable_xpack and ("security" in es_xpack_features) and es_api_basic_auth_username is not defined and es_api_basic_auth_password is not defined
|
|
||||||
|
|
||||||
- name: set fact file_reserved_users
|
|
||||||
set_fact: file_reserved_users={{ es_users.file.keys() | intersect (reserved_xpack_users) }}
|
|
||||||
when: es_users is defined and es_users.file is defined and (es_users.file.keys() | length > 0) and (es_users.file.keys() | intersect (reserved_xpack_users) | length > 0)
|
|
||||||
|
|
||||||
- name: fail when changing users through file realm
|
|
||||||
fail:
|
|
||||||
msg: "ERROR: INVALID CONFIG - YOU CANNOT CHANGE RESERVED USERS THROUGH THE FILE REALM. THE FOLLOWING CANNOT BE CHANGED: {{file_reserved_users}}. USE THE NATIVE REALM."
|
|
||||||
when: file_reserved_users | default([]) | length > 0
|
|
||||||
|
|
||||||
- name: set fact instance_default_file
|
|
||||||
set_fact: instance_default_file={{default_file | dirname}}/{{es_instance_name}}_{{default_file | basename}}
|
|
||||||
- name: set fact instance_init_script
|
|
||||||
set_fact: instance_init_script={{init_script | dirname }}/{{es_instance_name}}_{{init_script | basename}}
|
|
||||||
- name: set fact conf_dir
|
|
||||||
set_fact: conf_dir={{ es_conf_dir }}/{{es_instance_name}}
|
|
||||||
- name: set fact m_lock_enabled
|
|
||||||
set_fact: m_lock_enabled={{ es_config['bootstrap.memory_lock'] is defined and es_config['bootstrap.memory_lock'] == True }}
|
|
||||||
|
|
||||||
#TODO - if transport.host is not local maybe error on boostrap checks
|
|
||||||
|
|
||||||
|
|
||||||
#Use systemd for the following distributions:
|
|
||||||
#Ubuntu 15 and up
|
|
||||||
#Debian 8 and up
|
|
||||||
#Centos 7 and up
|
|
||||||
#Relies on elasticsearch distribution installing a serviced script to determine whether one should be copied.
|
|
||||||
|
|
||||||
- name: set fact use_system_d
|
|
||||||
set_fact: use_system_d={{(ansible_distribution == 'Debian' and ansible_distribution_version | version_compare('8', '>=')) or (ansible_distribution in ['RedHat','CentOS'] and ansible_distribution_version | version_compare('7', '>=')) or (ansible_distribution == 'Ubuntu' and ansible_distribution_version | version_compare('15', '>=')) }}
|
|
||||||
|
|
||||||
- name: set fact instance_sysd_script
|
|
||||||
set_fact: instance_sysd_script={{sysd_script | dirname }}/{{es_instance_name}}_{{sysd_script | basename}}
|
|
||||||
when: use_system_d
|
|
||||||
#For directories we also use the {{inventory_hostname}}-{{ es_instance_name }} - this helps if we have a shared SAN.
|
|
||||||
|
|
||||||
- name: set fact instance_suffix
|
|
||||||
set_fact: instance_suffix={{inventory_hostname}}-{{ es_instance_name }}
|
|
||||||
- name: set fact pid_dir
|
|
||||||
set_fact: pid_dir={{ es_pid_dir }}/{{instance_suffix}}
|
|
||||||
- name: set fact log_dir
|
|
||||||
set_fact: log_dir={{ es_log_dir }}/{{instance_suffix}}
|
|
||||||
- name: set fact log_dir
|
|
||||||
set_fact: data_dirs={{ es_data_dirs | append_to_list('/'+instance_suffix) }}
|
|
@ -1,86 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
# es_plugins_reinstall will be set to true if elasticsearch_install_from_repo.changed or elasticsearch_install_from_package.changed
|
|
||||||
# i.e. we have changed ES version(or we have clean installation of ES), or if no plugins listed. Otherwise it is false and requires explicitly setting.
|
|
||||||
- name: set fact es_plugins_reinstall to true
|
|
||||||
set_fact: es_plugins_reinstall=true
|
|
||||||
when: (((debian_elasticsearch_install_from_repo is defined and debian_elasticsearch_install_from_repo.changed) or (redhat_elasticsearch_install_from_repo is defined and redhat_elasticsearch_install_from_repo.changed)) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) or es_plugins is not defined or es_plugins is none
|
|
||||||
|
|
||||||
- name: set fact list_command
|
|
||||||
set_fact: list_command=""
|
|
||||||
#If we are reinstalling all plugins, e.g. to a version change, we need to remove all plugins (inc. x-pack) to install any plugins. Otherwise we don't consider x-pack so the role stays idempotent.
|
|
||||||
- name: set fact list_command check for x-pack
|
|
||||||
set_fact: list_command="| grep -vE 'x-pack'"
|
|
||||||
when: not es_plugins_reinstall
|
|
||||||
|
|
||||||
- name: remove x-pack plugin directory when it isn't a plugin
|
|
||||||
file:
|
|
||||||
dest: "{{ es_home }}/plugins/x-pack"
|
|
||||||
state: "absent"
|
|
||||||
when: es_open_xpack
|
|
||||||
|
|
||||||
#List currently installed plugins. We have to list the directories as the list commmand fails if the ES version is different than the plugin version.
|
|
||||||
- name: Check installed elasticsearch plugins
|
|
||||||
become: yes
|
|
||||||
shell: "ls {{es_home}}/plugins {{list_command}}"
|
|
||||||
register: installed_plugins
|
|
||||||
changed_when: False
|
|
||||||
ignore_errors: yes
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
|
|
||||||
#if es_plugins_reinstall is set to true we remove ALL plugins
|
|
||||||
- name: set fact plugins_to_remove to install_plugins.stdout_lines
|
|
||||||
set_fact: plugins_to_remove="{{ installed_plugins.stdout_lines | default([]) }}"
|
|
||||||
when: es_plugins_reinstall
|
|
||||||
|
|
||||||
#if the plugins listed are different than those requested, we remove those installed but not listed in the config
|
|
||||||
- name: set fact plugins_to_remove to delete plugins installed but not listed in es_plugins
|
|
||||||
set_fact: plugins_to_remove="{{ installed_plugins.stdout_lines | difference(es_plugins | json_query('[*].plugin')) | default([]) }}"
|
|
||||||
when: not es_plugins_reinstall
|
|
||||||
|
|
||||||
#if es_plugins_reinstall is set to true we (re)install ALL plugins
|
|
||||||
- name: set fact plugins_to_install to es_plugins
|
|
||||||
set_fact: plugins_to_install="{{ es_plugins | json_query('[*].plugin') | default([]) }}"
|
|
||||||
when: es_plugins_reinstall
|
|
||||||
|
|
||||||
#if the plugins listed are different than those requested, we install those not installed but listed in the config
|
|
||||||
- name: set fact to plugins_to_install to those in es_config but not installed
|
|
||||||
set_fact: plugins_to_install="{{ es_plugins | json_query('[*].plugin') | difference(installed_plugins.stdout_lines) | default([]) }}"
|
|
||||||
when: not es_plugins_reinstall
|
|
||||||
|
|
||||||
# This removes any currently installed plugins (to prevent errors when reinstalling)
|
|
||||||
- name: Remove elasticsearch plugins
|
|
||||||
become: yes
|
|
||||||
command: "{{es_home}}/bin/elasticsearch-plugin remove {{item}} --silent"
|
|
||||||
with_items: "{{ plugins_to_remove | default([]) }}"
|
|
||||||
notify: restart elasticsearch
|
|
||||||
register: plugin_removed
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
|
|
||||||
- name: Install elasticsearch plugins
|
|
||||||
become: yes
|
|
||||||
command: "{{es_home}}/bin/elasticsearch-plugin install {{ item.url | default(item.plugin) }} --batch --silent"
|
|
||||||
register: plugin_installed
|
|
||||||
changed_when: plugin_installed.rc == 0
|
|
||||||
with_items: "{{ es_plugins }}"
|
|
||||||
when: item.plugin in plugins_to_install
|
|
||||||
notify: restart elasticsearch
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
ES_JAVA_OPTS: "{% if item.proxy_host is defined and item.proxy_host != '' and item.proxy_port is defined and item.proxy_port != ''%} -Dhttp.proxyHost={{ item.proxy_host }} -Dhttp.proxyPort={{ item.proxy_port }} -Dhttps.proxyHost={{ item.proxy_host }} -Dhttps.proxyPort={{ item.proxy_port }} {% elif es_proxy_host is defined and es_proxy_host != '' %} -Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }} {% endif %}"
|
|
||||||
until: plugin_installed.rc == 0
|
|
||||||
retries: 5
|
|
||||||
delay: 5
|
|
||||||
|
|
||||||
#Set permissions on plugins directory
|
|
||||||
- name: Set Plugin Directory Permissions
|
|
||||||
become: yes
|
|
||||||
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
|
|
@ -1,26 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: set fact es_script_dir
|
|
||||||
set_fact: es_script_dir={{ es_conf_dir }}/{{es_instance_name}}
|
|
||||||
tags:
|
|
||||||
- always
|
|
||||||
|
|
||||||
- name: set fact es_script_dir when path.scripts
|
|
||||||
set_fact: es_script_dir={{es_config['path.scripts']}}
|
|
||||||
when: es_config['path.scripts'] is defined
|
|
||||||
tags:
|
|
||||||
- always
|
|
||||||
|
|
||||||
- name: Create script dir
|
|
||||||
become: yes
|
|
||||||
file: state=directory path={{ es_script_dir }} owner={{ es_user }} group={{ es_group }} recurse=yes
|
|
||||||
|
|
||||||
- name: Copy default scripts to elasticsearch
|
|
||||||
become: yes
|
|
||||||
copy: src=scripts dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }}
|
|
||||||
when: es_scripts_fileglob is not defined
|
|
||||||
|
|
||||||
- name: Copy scripts to elasticsearch
|
|
||||||
become: yes
|
|
||||||
copy: src={{ item }} dest={{ es_script_dir }} owner={{ es_user }} group={{ es_group }}
|
|
||||||
with_fileglob: "{{ es_scripts_fileglob | default('') }}"
|
|
@ -1,41 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: ensure templates dir is created
|
|
||||||
file:
|
|
||||||
path: /etc/elasticsearch/templates
|
|
||||||
state: directory
|
|
||||||
owner: "{{ es_user }}"
|
|
||||||
group: "{{ es_group }}"
|
|
||||||
|
|
||||||
- name: Copy templates to elasticsearch
|
|
||||||
copy: src={{ item }} dest=/etc/elasticsearch/templates owner={{ es_user }} group={{ es_group }}
|
|
||||||
register: load_templates
|
|
||||||
with_fileglob:
|
|
||||||
- "{{ es_templates_fileglob | default('') }}"
|
|
||||||
|
|
||||||
- name: Install templates without auth
|
|
||||||
uri:
|
|
||||||
url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}"
|
|
||||||
method: PUT
|
|
||||||
status_code: 200
|
|
||||||
body_format: json
|
|
||||||
body: "{{ lookup('file', item) }}"
|
|
||||||
when: load_templates.changed and es_start_service and not es_enable_xpack or not es_xpack_features is defined or "security" not in es_xpack_features
|
|
||||||
with_fileglob:
|
|
||||||
- "{{ es_templates_fileglob | default('') }}"
|
|
||||||
run_once: True
|
|
||||||
|
|
||||||
- name: Install templates with auth
|
|
||||||
uri:
|
|
||||||
url: "http://{{es_api_host}}:{{es_api_port}}/_template/{{item | filename}}"
|
|
||||||
method: PUT
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
body_format: json
|
|
||||||
body: "{{ lookup('file', item) }}"
|
|
||||||
when: load_templates.changed and es_start_service and es_enable_xpack and es_xpack_features is defined and "security" in es_xpack_features
|
|
||||||
with_fileglob:
|
|
||||||
- "{{ es_templates_fileglob | default('') }}"
|
|
||||||
run_once: True
|
|
@ -1,13 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Include optional user and group creation.
|
|
||||||
when: (es_user_id is defined) and (es_group_id is defined)
|
|
||||||
include: elasticsearch-optional-user.yml
|
|
||||||
|
|
||||||
- name: Include specific Elasticsearch
|
|
||||||
include: elasticsearch-Debian.yml
|
|
||||||
when: ansible_os_family == 'Debian'
|
|
||||||
|
|
||||||
- name: Include specific Elasticsearch
|
|
||||||
include: elasticsearch-RedHat.yml
|
|
||||||
when: ansible_os_family == 'RedHat'
|
|
@ -1,52 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: set fact java_state to present
|
|
||||||
set_fact: java_state="present"
|
|
||||||
|
|
||||||
- name: set fact java_state to latest
|
|
||||||
set_fact: java_state="latest"
|
|
||||||
when: update_java == true
|
|
||||||
|
|
||||||
- name: RedHat - Ensure Java is installed
|
|
||||||
become: yes
|
|
||||||
yum: name={{ java }} state={{java_state}}
|
|
||||||
when: ansible_os_family == 'RedHat'
|
|
||||||
|
|
||||||
- name: Get the installed java path
|
|
||||||
shell: "update-alternatives --display java | grep '^/' | awk '{print $1}' | grep 1.8.0"
|
|
||||||
become: yes
|
|
||||||
register: java_full_path
|
|
||||||
failed_when: False
|
|
||||||
changed_when: False
|
|
||||||
when: ansible_os_family == 'RedHat'
|
|
||||||
|
|
||||||
- name: correct java version selected
|
|
||||||
alternatives:
|
|
||||||
name: java
|
|
||||||
path: "{{ java_full_path.stdout }}"
|
|
||||||
link: /usr/bin/java
|
|
||||||
when: ansible_os_family == 'RedHat' and java_full_path is defined
|
|
||||||
|
|
||||||
- name: Refresh java repo
|
|
||||||
become: yes
|
|
||||||
apt: update_cache=yes
|
|
||||||
changed_when: false
|
|
||||||
when: ansible_os_family == 'Debian'
|
|
||||||
|
|
||||||
- name: Debian - Ensure Java is installed
|
|
||||||
become: yes
|
|
||||||
apt: name={{ java }} state={{java_state}}
|
|
||||||
when: ansible_os_family == 'Debian'
|
|
||||||
|
|
||||||
- name: register open_jdk version
|
|
||||||
shell: java -version 2>&1 | grep OpenJDK
|
|
||||||
register: open_jdk
|
|
||||||
ignore_errors: yes
|
|
||||||
changed_when: false
|
|
||||||
|
|
||||||
#https://github.com/docker-library/openjdk/issues/19 - ensures tests pass due to java 8 broken certs
|
|
||||||
- name: refresh the java ca-certificates
|
|
||||||
become: yes
|
|
||||||
command: /var/lib/dpkg/info/ca-certificates-java.postinst configure
|
|
||||||
when: ansible_distribution == 'Ubuntu' and open_jdk.rc == 0
|
|
||||||
changed_when: false
|
|
@ -1,94 +0,0 @@
|
|||||||
---
|
|
||||||
- name: os-specific vars
|
|
||||||
include_vars: "{{ansible_os_family}}.yml"
|
|
||||||
tags:
|
|
||||||
- always
|
|
||||||
|
|
||||||
- name: set compatibility variables
|
|
||||||
include: compatibility-variables.yml
|
|
||||||
tags:
|
|
||||||
- always
|
|
||||||
|
|
||||||
- name: check-set-parameters
|
|
||||||
include: elasticsearch-parameters.yml
|
|
||||||
tags:
|
|
||||||
- always
|
|
||||||
|
|
||||||
- name: use snapshot release
|
|
||||||
include: snapshot-release.yml
|
|
||||||
when: es_use_snapshot_release
|
|
||||||
|
|
||||||
- name: include java.yml
|
|
||||||
include: java.yml
|
|
||||||
when: es_java_install
|
|
||||||
tags:
|
|
||||||
- java
|
|
||||||
|
|
||||||
- name: include elasticsearch.yml
|
|
||||||
include: elasticsearch.yml
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
|
|
||||||
- name: include elasticsearch-config.yml
|
|
||||||
include: elasticsearch-config.yml
|
|
||||||
tags:
|
|
||||||
- config
|
|
||||||
|
|
||||||
- name: include elasticsearch-scripts.yml
|
|
||||||
include: elasticsearch-scripts.yml
|
|
||||||
when: es_scripts
|
|
||||||
tags:
|
|
||||||
- scripts
|
|
||||||
|
|
||||||
- name: include elasticsearch-plugins.yml
|
|
||||||
include: elasticsearch-plugins.yml
|
|
||||||
when: es_plugins is defined or es_plugins_reinstall
|
|
||||||
tags:
|
|
||||||
- plugins
|
|
||||||
|
|
||||||
#We always execute xpack as we may need to remove features
|
|
||||||
- name: include xpack/elasticsearch-xpack.yml
|
|
||||||
include: xpack/elasticsearch-xpack.yml
|
|
||||||
tags:
|
|
||||||
- xpack
|
|
||||||
|
|
||||||
- name: flush handlers
|
|
||||||
meta: flush_handlers
|
|
||||||
|
|
||||||
- name: Make sure elasticsearch is started
|
|
||||||
become: yes
|
|
||||||
service: name={{instance_init_script | basename}} state=started enabled=yes
|
|
||||||
when: es_start_service
|
|
||||||
|
|
||||||
- name: Wait for elasticsearch to startup
|
|
||||||
wait_for: host={{es_api_host}} port={{es_api_port}} delay=5 connect_timeout=1
|
|
||||||
when: es_restarted is defined and es_restarted.changed and es_start_service
|
|
||||||
|
|
||||||
- name: set fact manage_native_realm to false
|
|
||||||
set_fact: manage_native_realm=false
|
|
||||||
|
|
||||||
- name: set fact manage_native_realm to true
|
|
||||||
set_fact: manage_native_realm=true
|
|
||||||
when: es_start_service and (es_enable_xpack and "security" in es_xpack_features) and ((es_users is defined and es_users.native is defined) or (es_roles is defined and es_roles.native is defined))
|
|
||||||
|
|
||||||
# If playbook runs too fast, Native commands could fail as the Native Realm is not yet up
|
|
||||||
- name: Wait 15 seconds for the Native Relm to come up
|
|
||||||
pause: seconds=15
|
|
||||||
when: manage_native_realm
|
|
||||||
|
|
||||||
- name: activate-license
|
|
||||||
include: ./xpack/security/elasticsearch-xpack-activation.yml
|
|
||||||
when: es_start_service and es_enable_xpack and es_xpack_license is defined and es_xpack_license != ''
|
|
||||||
|
|
||||||
#perform security actions here now elasticsearch is started
|
|
||||||
- name: include xpack/security/elasticsearch-security-native.yml
|
|
||||||
include: ./xpack/security/elasticsearch-security-native.yml
|
|
||||||
when: manage_native_realm
|
|
||||||
|
|
||||||
#Templates done after restart - handled by flushing the handlers. e.g. suppose user removes security on a running node and doesn't specify es_api_basic_auth_username and es_api_basic_auth_password. The templates will subsequently not be removed if we don't wait for the node to restart.
|
|
||||||
#We also do after the native realm to ensure any changes are applied here first and its denf up.
|
|
||||||
- name: include elasticsearch-template.yml
|
|
||||||
include: elasticsearch-template.yml
|
|
||||||
when: es_templates
|
|
||||||
tags:
|
|
||||||
- templates
|
|
@ -1,54 +0,0 @@
|
|||||||
# These tasks are to run ansible-elasticsearch using pre-release snapshot builds
|
|
||||||
# This should only be used for testing purposes and can be enabled by setting
|
|
||||||
# es_use_snapshot_release: true
|
|
||||||
|
|
||||||
- name: detect if we need the .deb or .rpm
|
|
||||||
set_fact:
|
|
||||||
package_type: "{{ 'deb' if (ansible_os_family == 'Debian') else 'rpm' }}"
|
|
||||||
|
|
||||||
- name: get the minor version
|
|
||||||
set_fact:
|
|
||||||
minor_version: "{{ es_version.split('.')[0:2] | join('.')}}"
|
|
||||||
|
|
||||||
- name: set the package_name
|
|
||||||
set_fact:
|
|
||||||
package_name: "{{ es_package_name + '-' + es_version + '-SNAPSHOT.' + package_type }}"
|
|
||||||
|
|
||||||
- name: generate the artifacts url
|
|
||||||
set_fact:
|
|
||||||
artifacts_url: "{{ 'https://artifacts-api.elastic.co/v1/search/' + minor_version + '/' + package_name }}"
|
|
||||||
|
|
||||||
- name: get latest snapshot build
|
|
||||||
uri:
|
|
||||||
url: "{{ artifacts_url }}"
|
|
||||||
return_contents: true
|
|
||||||
register: snapshots
|
|
||||||
retries: 5
|
|
||||||
delay: 1
|
|
||||||
ignore_errors: true
|
|
||||||
until: "'status' in snapshots and snapshots.status == 200"
|
|
||||||
|
|
||||||
- name: use the custom package url instead of the repository
|
|
||||||
set_fact:
|
|
||||||
es_custom_package_url: "{{ snapshots.json['packages'][package_name]['url'] }}"
|
|
||||||
es_use_repository: false
|
|
||||||
|
|
||||||
- name: set snapshot urls for es_plugins when it is defined
|
|
||||||
when: es_plugins is defined
|
|
||||||
block:
|
|
||||||
- name: split up the snapshot url so we can create the plugin url
|
|
||||||
set_fact:
|
|
||||||
split_url: "{{ es_custom_package_url.split('/') }}"
|
|
||||||
|
|
||||||
- name: set base plugin url
|
|
||||||
set_fact:
|
|
||||||
plugin_url: "{{ split_url[0] + '//' + split_url[2:5]|join('/') + '/elasticsearch-plugins/'}}"
|
|
||||||
|
|
||||||
- name: create es_plugins with the snapshot url
|
|
||||||
set_fact:
|
|
||||||
es_plugins_temp: "{{ es_plugins_temp|default([]) + [{'plugin': item.plugin, 'url': plugin_url + item.plugin + '/' + item.plugin + '-' + es_version + '-SNAPSHOT.zip'}] }}"
|
|
||||||
with_items: "{{ es_plugins }}"
|
|
||||||
|
|
||||||
- name: override the original es_plugins with the snapshot version
|
|
||||||
set_fact:
|
|
||||||
es_plugins: "{{ es_plugins_temp }}"
|
|
@ -1,68 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
#Test if feature is installed
|
|
||||||
- name: Test if x-pack is installed
|
|
||||||
shell: "{{es_home}}/bin/elasticsearch-plugin list | grep x-pack"
|
|
||||||
become: yes
|
|
||||||
register: x_pack_installed
|
|
||||||
changed_when: False
|
|
||||||
failed_when: "'ERROR' in x_pack_installed.stdout"
|
|
||||||
check_mode: no
|
|
||||||
ignore_errors: yes
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
|
|
||||||
|
|
||||||
#Remove X-Pack if installed and its not been requested or the ES version has changed
|
|
||||||
- name: Remove x-pack plugin
|
|
||||||
become: yes
|
|
||||||
command: "{{es_home}}/bin/elasticsearch-plugin remove x-pack"
|
|
||||||
register: xpack_state
|
|
||||||
failed_when: "'ERROR' in xpack_state.stdout"
|
|
||||||
changed_when: xpack_state.rc == 0
|
|
||||||
when: x_pack_installed.rc == 0 and (not es_enable_xpack or es_version_changed)
|
|
||||||
notify: restart elasticsearch
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
|
|
||||||
|
|
||||||
#Install plugin if not installed, or the es version has changed (so removed above), and its been requested
|
|
||||||
- name: Download x-pack from url
|
|
||||||
get_url: url={{ es_xpack_custom_url }} dest=/tmp/x-pack-{{ es_version }}.zip
|
|
||||||
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is defined)
|
|
||||||
|
|
||||||
- name: Install x-pack plugin from local
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/elasticsearch-plugin install --silent --batch file:///tmp/x-pack-{{ es_version }}.zip
|
|
||||||
register: xpack_state
|
|
||||||
changed_when: xpack_state.rc == 0
|
|
||||||
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is defined)
|
|
||||||
notify: restart elasticsearch
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
|
|
||||||
- name: Delete x-pack zip file
|
|
||||||
file: dest=/tmp/x-pack-{{ es_version }}.zip state=absent
|
|
||||||
when: es_xpack_custom_url is defined
|
|
||||||
|
|
||||||
- name: Install x-pack plugin from elastic.co
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/elasticsearch-plugin install --silent --batch x-pack
|
|
||||||
register: xpack_state
|
|
||||||
failed_when: "'ERROR' in xpack_state.stdout"
|
|
||||||
changed_when: xpack_state.rc == 0
|
|
||||||
when: (x_pack_installed.rc == 1 or es_version_changed) and (es_enable_xpack and es_xpack_custom_url is not defined)
|
|
||||||
notify: restart elasticsearch
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_INCLUDE: "{{ instance_default_file }}"
|
|
||||||
ES_JAVA_OPTS: "{% if es_proxy_host is defined and es_proxy_host != '' %}-Dhttp.proxyHost={{ es_proxy_host }} -Dhttp.proxyPort={{ es_proxy_port }} -Dhttps.proxyHost={{ es_proxy_host }} -Dhttps.proxyPort={{ es_proxy_port }}{% endif %}"
|
|
@ -1,23 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: set fact es_version_changed
|
|
||||||
set_fact: es_version_changed={{ ((elasticsearch_install_from_package is defined and (debian_elasticsearch_install_from_repo.changed or redhat_elasticsearch_install_from_repo.changed)) or (elasticsearch_install_from_package is defined and elasticsearch_install_from_package.changed)) }}
|
|
||||||
|
|
||||||
- name: include elasticsearch-xpack-install.yml
|
|
||||||
include: elasticsearch-xpack-install.yml
|
|
||||||
when: es_install_xpack
|
|
||||||
|
|
||||||
#Security configuration
|
|
||||||
- name: include security/elasticsearch-security.yml
|
|
||||||
include: security/elasticsearch-security.yml
|
|
||||||
|
|
||||||
#Add any feature specific configuration here
|
|
||||||
- name: Set Plugin Directory Permissions
|
|
||||||
become: yes
|
|
||||||
file: state=directory path={{ es_home }}/plugins owner={{ es_user }} group={{ es_group }} recurse=yes
|
|
||||||
|
|
||||||
#Make sure elasticsearch.keystore has correct Permissions
|
|
||||||
- name: Set elasticsearch.keystore Permissions
|
|
||||||
become: yes
|
|
||||||
file: state=file path={{ conf_dir }}/elasticsearch.keystore owner={{ es_user }} group={{ es_group }}
|
|
||||||
when: es_enable_xpack and "security" in es_xpack_features and (es_version | version_compare('6.0.0', '>'))
|
|
@ -1,89 +0,0 @@
|
|||||||
---
|
|
||||||
- name: set fact manage_file_users
|
|
||||||
set_fact: manage_file_users=es_users is defined and es_users.file is defined and es_users.file.keys() | length > 0
|
|
||||||
|
|
||||||
- name: Create the users file if it doesn't exist
|
|
||||||
copy:
|
|
||||||
content: ""
|
|
||||||
dest: "{{ conf_dir }}{{ es_xpack_conf_subdir }}/users"
|
|
||||||
force: no # this ensures it only creates it if it does not exist
|
|
||||||
group: "{{ es_group }}"
|
|
||||||
owner: "{{ es_user }}"
|
|
||||||
mode: 0555
|
|
||||||
|
|
||||||
#List current users
|
|
||||||
- name: List Users
|
|
||||||
become: yes
|
|
||||||
shell: cat {{conf_dir}}{{es_xpack_conf_subdir}}/users | awk -F':' '{print $1}'
|
|
||||||
register: current_file_users
|
|
||||||
when: manage_file_users
|
|
||||||
changed_when: False
|
|
||||||
|
|
||||||
- name: set fact users_to_remove
|
|
||||||
set_fact: users_to_remove={{ current_file_users.stdout_lines | difference (es_users.file.keys()) }}
|
|
||||||
when: manage_file_users
|
|
||||||
|
|
||||||
#Remove users
|
|
||||||
- name: Remove Users
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/{{es_xpack_users_command}} userdel {{item}}
|
|
||||||
with_items: "{{users_to_remove | default([])}}"
|
|
||||||
when: manage_file_users
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_HOME: "{{es_home}}"
|
|
||||||
|
|
||||||
- name: set fact users_to_add
|
|
||||||
set_fact: users_to_add={{ es_users.file.keys() | difference (current_file_users.stdout_lines) }}
|
|
||||||
when: manage_file_users
|
|
||||||
|
|
||||||
#Add users
|
|
||||||
- name: Add Users
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/{{es_xpack_users_command}} useradd {{item}} -p {{es_users.file[item].password}}
|
|
||||||
with_items: "{{ users_to_add | default([]) }}"
|
|
||||||
when: manage_file_users
|
|
||||||
no_log: True
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_HOME: "{{es_home}}"
|
|
||||||
|
|
||||||
#Set passwords for all users declared - Required as the useradd will not change existing user passwords
|
|
||||||
- name: Set User Passwords
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/{{es_xpack_users_command}} passwd {{ item }} -p {{es_users.file[item].password}}
|
|
||||||
with_items: "{{ es_users.file.keys() | default([]) }}"
|
|
||||||
when: manage_file_users
|
|
||||||
#Currently no easy way to figure out if the password has changed or to know what it currently is so we can skip.
|
|
||||||
changed_when: False
|
|
||||||
no_log: True
|
|
||||||
environment:
|
|
||||||
CONF_DIR: "{{ conf_dir }}"
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
ES_HOME: "{{es_home}}"
|
|
||||||
|
|
||||||
- name: set fact users_roles
|
|
||||||
set_fact: users_roles={{es_users.file | extract_role_users () }}
|
|
||||||
when: manage_file_users
|
|
||||||
|
|
||||||
#Copy Roles files
|
|
||||||
- name: Copy roles.yml File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=security/roles.yml.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/roles.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
|
|
||||||
when: es_roles is defined and es_roles.file is defined
|
|
||||||
|
|
||||||
#Overwrite users_roles file
|
|
||||||
- name: Copy User Roles
|
|
||||||
become: yes
|
|
||||||
template: src=security/users_roles.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/users_roles mode=0644 force=yes
|
|
||||||
when: manage_file_users and users_roles | length > 0
|
|
||||||
|
|
||||||
#Set permission on security directory. E.g. if 2 nodes are installed on the same machine, the second node will not get the users file created at install, causing the files being created at es_users call and then having the wrong Permissions.
|
|
||||||
- name: Set Security Directory Permissions Recursive
|
|
||||||
become: yes
|
|
||||||
file: state=directory path={{conf_dir}}{{es_xpack_conf_subdir}}/ owner={{ es_user }} group={{ es_group }} recurse=yes
|
|
@ -1,191 +0,0 @@
|
|||||||
---
|
|
||||||
- name: set fact change_api_password to false
|
|
||||||
set_fact: change_api_password=false
|
|
||||||
|
|
||||||
- name: set fact manage_native_users to false
|
|
||||||
set_fact: manage_native_users=false
|
|
||||||
|
|
||||||
- name: set fact manage_native_users to true
|
|
||||||
set_fact: manage_native_users=true
|
|
||||||
when: es_users is defined and es_users.native is defined and es_users.native.keys() | length > 0
|
|
||||||
|
|
||||||
- name: set fact manage_native_role to false
|
|
||||||
set_fact: manage_native_roles=false
|
|
||||||
|
|
||||||
- name: set fact manange_native_roles to true
|
|
||||||
set_fact: manage_native_roles=true
|
|
||||||
when: es_roles is defined and es_roles.native is defined and es_roles.native.keys() | length > 0
|
|
||||||
|
|
||||||
#If the node has just has security installed it maybe either stopped or started 1. if stopped, we need to start to load native realms 2. if started, we need to restart to load
|
|
||||||
|
|
||||||
#List current users
|
|
||||||
- name: List Native Users
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user
|
|
||||||
method: GET
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
status_code: 200
|
|
||||||
register: user_list_response
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
- name: set fact reserved_users equals user_list_response.json
|
|
||||||
set_fact: reserved_users={{ user_list_response.json | filter_reserved }}
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
#Current users not inc. those reserved
|
|
||||||
- name: set fact current_users equals user_list_response.json.keys not including reserved
|
|
||||||
set_fact: current_users={{ user_list_response.json.keys() | difference (reserved_users) }}
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
#We are changing the es_api_basic_auth_username password, so we need to do it first and update the param
|
|
||||||
- name: set fact native_users
|
|
||||||
set_fact: native_users={{ es_users.native }}
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
- name: set fact change_api_password to true
|
|
||||||
set_fact: change_api_password=true
|
|
||||||
when: manage_native_users and es_api_basic_auth_username in native_users and native_users[es_api_basic_auth_username].password is defined
|
|
||||||
|
|
||||||
- name: Update API User Password
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{es_api_basic_auth_username}}/_password
|
|
||||||
method: POST
|
|
||||||
body_format: json
|
|
||||||
body: "{ \"password\":\"{{native_users[es_api_basic_auth_username].password}}\" }"
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
when: change_api_password
|
|
||||||
|
|
||||||
- name: set fact es_api_basic_auth_password
|
|
||||||
set_fact: es_api_basic_auth_password={{native_users[es_api_basic_auth_username].password}}
|
|
||||||
when: change_api_password
|
|
||||||
|
|
||||||
#Identify users that are present in ES but not declared and thus should be removed
|
|
||||||
- name: set fact users_to_remove
|
|
||||||
set_fact: users_to_remove={{ current_users | difference ( native_users.keys() ) }}
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
#Delete all non required users NOT inc. reserved
|
|
||||||
- name: Delete Native Users
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}
|
|
||||||
method: DELETE
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
when: manage_native_users
|
|
||||||
with_items: "{{ users_to_remove | default([]) }}"
|
|
||||||
|
|
||||||
- name: set fact users_to_ignore
|
|
||||||
set_fact: users_to_ignore={{ native_users.keys() | intersect (reserved_users) }}
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
- name: debug message
|
|
||||||
debug:
|
|
||||||
msg: "WARNING: YOU CAN ONLY CHANGE THE PASSWORD FOR RESERVED USERS IN THE NATIVE REALM. ANY ROLE CHANGES WILL BE IGNORED: {{users_to_ignore}}"
|
|
||||||
when: manage_native_users and users_to_ignore | length > 0
|
|
||||||
|
|
||||||
#Update password on all reserved users
|
|
||||||
- name: Update Reserved User Passwords
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}/_password
|
|
||||||
method: POST
|
|
||||||
body_format: json
|
|
||||||
body: "{ \"password\":\"{{native_users[item].password}}\" }"
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
when: native_users[item].password is defined
|
|
||||||
no_log: True
|
|
||||||
with_items: "{{ users_to_ignore | default([]) }}"
|
|
||||||
|
|
||||||
- name: set fact users_to_modify
|
|
||||||
set_fact: users_to_modify={{ native_users.keys() | difference (reserved_users) }}
|
|
||||||
when: manage_native_users
|
|
||||||
|
|
||||||
#Overwrite all other users NOT inc. those reserved
|
|
||||||
- name: Update Non-Reserved Native User Details
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/user/{{item}}
|
|
||||||
method: POST
|
|
||||||
body_format: json
|
|
||||||
body: "{{ native_users[item] | to_json }}"
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
when: manage_native_users
|
|
||||||
no_log: True
|
|
||||||
with_items: "{{ users_to_modify | default([]) }}"
|
|
||||||
|
|
||||||
## ROLE CHANGES
|
|
||||||
|
|
||||||
#List current roles not. inc those reserved
|
|
||||||
- name: List Native Roles
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role
|
|
||||||
method: GET
|
|
||||||
body_format: json
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
status_code: 200
|
|
||||||
register: role_list_response
|
|
||||||
when: manage_native_roles
|
|
||||||
|
|
||||||
- name: set fact reserved roles
|
|
||||||
set_fact: reserved_roles={{ role_list_response.json | filter_reserved }}
|
|
||||||
when: manage_native_roles
|
|
||||||
|
|
||||||
- name: set fact current roles
|
|
||||||
set_fact: current_roles={{ role_list_response.json.keys() | difference (reserved_roles) }}
|
|
||||||
when: manage_native_roles
|
|
||||||
|
|
||||||
- name: set fact roles to ignore
|
|
||||||
set_fact: roles_to_ignore={{ es_roles.native.keys() | intersect (reserved_roles) | default([]) }}
|
|
||||||
when: manage_native_roles
|
|
||||||
|
|
||||||
- name: debug message
|
|
||||||
debug:
|
|
||||||
msg: "WARNING: YOU CANNOT CHANGE RESERVED ROLES. THE FOLLOWING WILL BE IGNORED: {{roles_to_ignore}}"
|
|
||||||
when: manage_native_roles and roles_to_ignore | length > 0
|
|
||||||
|
|
||||||
- name: set fact roles_to_remove
|
|
||||||
set_fact: roles_to_remove={{ current_roles | difference ( es_roles.native.keys() ) }}
|
|
||||||
when: manage_native_roles
|
|
||||||
|
|
||||||
#Delete all non required roles NOT inc. reserved
|
|
||||||
- name: Delete Native Roles
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role/{{item}}
|
|
||||||
method: DELETE
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
when: manage_native_roles
|
|
||||||
with_items: "{{roles_to_remove | default([]) }}"
|
|
||||||
|
|
||||||
- name: set fact roles_to_modify
|
|
||||||
set_fact: roles_to_modify={{ es_roles.native.keys() | difference (reserved_roles) }}
|
|
||||||
when: manage_native_roles
|
|
||||||
|
|
||||||
#Update other roles - NOT inc. reserved roles
|
|
||||||
- name: Update Native Roles
|
|
||||||
uri:
|
|
||||||
url: http://{{es_api_host}}:{{es_api_port}}/_xpack/security/role/{{item}}
|
|
||||||
method: POST
|
|
||||||
body_format: json
|
|
||||||
body: "{{ es_roles.native[item] | to_json}}"
|
|
||||||
status_code: 200
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
force_basic_auth: yes
|
|
||||||
when: manage_native_roles
|
|
||||||
with_items: "{{ roles_to_modify | default([]) }}"
|
|
@ -1,74 +0,0 @@
|
|||||||
---
|
|
||||||
#Security specific configuration done here
|
|
||||||
|
|
||||||
#TODO: 1. Skip users with no password defined or error 2. Passwords | length > 6
|
|
||||||
|
|
||||||
#Ensure x-pack conf directory is created if necessary
|
|
||||||
- name: Ensure x-pack conf directory exists (file)
|
|
||||||
file: path={{ conf_dir }}{{ es_xpack_conf_subdir }} state=directory owner={{ es_user }} group={{ es_group }}
|
|
||||||
changed_when: False
|
|
||||||
when:
|
|
||||||
- es_enable_xpack and "security" in es_xpack_features
|
|
||||||
- (es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined) or (es_role_mapping is defined)
|
|
||||||
|
|
||||||
#-----------------------------Create Bootstrap User-----------------------------------
|
|
||||||
### START BLOCK elasticsearch keystore ###
|
|
||||||
- name: create the elasticsearch keystore
|
|
||||||
when: (es_enable_xpack and "security" in es_xpack_features) and (es_version | version_compare('6.0.0', '>'))
|
|
||||||
block:
|
|
||||||
- name: create the keystore if it doesn't exist yet
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/elasticsearch-keystore create
|
|
||||||
args:
|
|
||||||
creates: "{{ conf_dir }}/elasticsearch.keystore"
|
|
||||||
environment:
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
|
|
||||||
- name: Check if bootstrap password is set
|
|
||||||
become: yes
|
|
||||||
command: >
|
|
||||||
{{es_home}}/bin/elasticsearch-keystore list
|
|
||||||
register: list_keystore
|
|
||||||
changed_when: False
|
|
||||||
environment:
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
|
|
||||||
- name: Create Bootstrap password for elastic user
|
|
||||||
become: yes
|
|
||||||
shell: echo "{{es_api_basic_auth_password}}" | {{es_home}}/bin/elasticsearch-keystore add -x 'bootstrap.password'
|
|
||||||
when:
|
|
||||||
- es_api_basic_auth_username is defined and list_keystore is defined and es_api_basic_auth_username == 'elastic' and 'bootstrap.password' not in list_keystore.stdout_lines
|
|
||||||
environment:
|
|
||||||
ES_PATH_CONF: "{{ conf_dir }}"
|
|
||||||
no_log: true
|
|
||||||
### END BLOCK elasticsearch keystore ###
|
|
||||||
|
|
||||||
#-----------------------------FILE BASED REALM----------------------------------------
|
|
||||||
|
|
||||||
- include: elasticsearch-security-file.yml
|
|
||||||
when: (es_enable_xpack and "security" in es_xpack_features) and ((es_users is defined and es_users.file is defined) or (es_roles is defined and es_roles.file is defined))
|
|
||||||
|
|
||||||
#-----------------------------ROLE MAPPING ----------------------------------------
|
|
||||||
|
|
||||||
#Copy Roles files
|
|
||||||
- name: Copy role_mapping.yml File for Instance
|
|
||||||
become: yes
|
|
||||||
template: src=security/role_mapping.yml.j2 dest={{conf_dir}}{{es_xpack_conf_subdir}}/role_mapping.yml owner={{ es_user }} group={{ es_group }} mode=0644 force=yes
|
|
||||||
when: es_role_mapping is defined
|
|
||||||
|
|
||||||
#-----------------------------AUTH FILE----------------------------------------
|
|
||||||
|
|
||||||
- name: Copy message auth key to elasticsearch
|
|
||||||
become: yes
|
|
||||||
copy: src={{ es_message_auth_file }} dest={{conf_dir}}{{es_xpack_conf_subdir}}/system_key owner={{ es_user }} group={{ es_group }} mode=0600 force=yes
|
|
||||||
when: es_message_auth_file is defined
|
|
||||||
|
|
||||||
#------------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
#Ensure security conf directory is created
|
|
||||||
- name: Ensure security conf directory exists
|
|
||||||
become: yes
|
|
||||||
file: path={{ conf_dir }}/security state=directory owner={{ es_user }} group={{ es_group }}
|
|
||||||
changed_when: False
|
|
||||||
when: es_enable_xpack and "security" in es_xpack_features
|
|
@ -1,37 +0,0 @@
|
|||||||
---
|
|
||||||
|
|
||||||
- name: Activate ES license (without security authentication)
|
|
||||||
uri:
|
|
||||||
method: PUT
|
|
||||||
url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license?acknowledge=true"
|
|
||||||
body_format: json
|
|
||||||
body: "{{ es_xpack_license }}"
|
|
||||||
return_content: yes
|
|
||||||
register: license_activated
|
|
||||||
no_log: True
|
|
||||||
when: not "security" in es_xpack_features
|
|
||||||
failed_when: >
|
|
||||||
license_activated.status != 200 or
|
|
||||||
license_activated.json.license_status is not defined or
|
|
||||||
license_activated.json.license_status != 'valid'
|
|
||||||
|
|
||||||
- name: Activate ES license (with security authentication)
|
|
||||||
uri:
|
|
||||||
method: PUT
|
|
||||||
url: "http://{{es_api_host}}:{{es_api_port}}/_xpack/license?acknowledge=true"
|
|
||||||
user: "{{es_api_basic_auth_username}}"
|
|
||||||
password: "{{es_api_basic_auth_password}}"
|
|
||||||
body_format: json
|
|
||||||
force_basic_auth: yes
|
|
||||||
body: "{{ es_xpack_license }}"
|
|
||||||
return_content: yes
|
|
||||||
register: license_activated
|
|
||||||
no_log: True
|
|
||||||
when: "'security' in es_xpack_features"
|
|
||||||
failed_when: >
|
|
||||||
license_activated.status != 200 or
|
|
||||||
license_activated.json.license_status is not defined or
|
|
||||||
license_activated.json.license_status != 'valid'
|
|
||||||
|
|
||||||
- debug:
|
|
||||||
msg: "License: {{ license_activated }}"
|
|
@ -1,83 +0,0 @@
|
|||||||
################################
|
|
||||||
# Elasticsearch
|
|
||||||
################################
|
|
||||||
|
|
||||||
# Elasticsearch home directory
|
|
||||||
ES_HOME={{es_home}}
|
|
||||||
|
|
||||||
# Elasticsearch Java path
|
|
||||||
#JAVA_HOME=
|
|
||||||
|
|
||||||
# Elasticsearch configuration directory
|
|
||||||
CONF_DIR={{conf_dir}}
|
|
||||||
ES_PATH_CONF={{conf_dir}}
|
|
||||||
|
|
||||||
# Elasticsearch data directory
|
|
||||||
DATA_DIR={{ data_dirs | array_to_str }}
|
|
||||||
|
|
||||||
# Elasticsearch logs directory
|
|
||||||
LOG_DIR={{log_dir}}
|
|
||||||
|
|
||||||
# Elasticsearch PID directory
|
|
||||||
PID_DIR={{pid_dir}}
|
|
||||||
|
|
||||||
ES_JVM_OPTIONS={{conf_dir}}/jvm.options
|
|
||||||
|
|
||||||
# Configure restart on package upgrade (true, every other setting will lead to not restarting)
|
|
||||||
#ES_RESTART_ON_UPGRADE=true
|
|
||||||
|
|
||||||
# Path to the GC log file
|
|
||||||
#ES_GC_LOG_FILE=/var/log/elasticsearch/gc.log
|
|
||||||
|
|
||||||
################################
|
|
||||||
# Elasticsearch service
|
|
||||||
################################
|
|
||||||
|
|
||||||
# SysV init.d
|
|
||||||
#
|
|
||||||
# When executing the init script, this user will be used to run the elasticsearch service.
|
|
||||||
# The default value is 'elasticsearch' and is declared in the init.d file.
|
|
||||||
# Note that this setting is only used by the init script. If changed, make sure that
|
|
||||||
# the configured user can read and write into the data, work, plugins and log directories.
|
|
||||||
# For systemd service, the user is usually configured in file /usr/lib/systemd/system/elasticsearch.service
|
|
||||||
ES_USER={{es_user}}
|
|
||||||
ES_GROUP={{es_group}}
|
|
||||||
|
|
||||||
# The number of seconds to wait before checking if Elasticsearch started successfully as a daemon process
|
|
||||||
ES_STARTUP_SLEEP_TIME=5
|
|
||||||
|
|
||||||
################################
|
|
||||||
# System properties
|
|
||||||
################################
|
|
||||||
|
|
||||||
# Specifies the maximum file descriptor number that can be opened by this process
|
|
||||||
# When using Systemd, this setting is ignored and the LimitNOFILE defined in
|
|
||||||
# /usr/lib/systemd/system/elasticsearch.service takes precedence
|
|
||||||
{% if es_max_open_files is defined %}
|
|
||||||
#MAX_OPEN_FILES
|
|
||||||
MAX_OPEN_FILES={{es_max_open_files}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# The maximum number of bytes of memory that may be locked into RAM
|
|
||||||
# Set to "unlimited" if you use the 'bootstrap.memory_lock: true' option
|
|
||||||
# in elasticsearch.yml
|
|
||||||
# When using Systemd, the LimitMEMLOCK property must be set
|
|
||||||
# in /usr/lib/systemd/system/elasticsearch.service
|
|
||||||
#MAX_LOCKED_MEMORY=
|
|
||||||
{% if m_lock_enabled %}
|
|
||||||
MAX_LOCKED_MEMORY=unlimited
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Maximum number of VMA (Virtual Memory Areas) a process can own
|
|
||||||
# When using Systemd, this setting is ignored and the 'vm.max_map_count'
|
|
||||||
# property is set at boot time in /usr/lib/sysctl.d/elasticsearch.conf
|
|
||||||
#MAX_MAP_COUNT=262144
|
|
||||||
{% if es_max_map_count is defined %}
|
|
||||||
MAX_MAP_COUNT={{es_max_map_count}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Specifies the maximum number of threads that can be started.
|
|
||||||
# Elasticsearch requires a minimum of 2048.
|
|
||||||
{% if es_max_threads is defined %}
|
|
||||||
MAX_THREADS={{ es_max_threads }}
|
|
||||||
{% endif %}
|
|
@ -1,11 +0,0 @@
|
|||||||
[elasticsearch-{{ es_repo_name }}]
|
|
||||||
name=Elasticsearch repository for {{ es_repo_name }} packages
|
|
||||||
baseurl=https://artifacts.elastic.co/packages/{{ es_repo_name }}/yum
|
|
||||||
gpgcheck=1
|
|
||||||
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
|
|
||||||
enabled=1
|
|
||||||
autorefresh=1
|
|
||||||
type=rpm-md
|
|
||||||
{% if es_proxy_host is defined and es_proxy_host != '' and es_proxy_port is defined %}
|
|
||||||
proxy=http://{{ es_proxy_host }}:{{es_proxy_port}}
|
|
||||||
{% endif %}
|
|
@ -1,75 +0,0 @@
|
|||||||
|
|
||||||
{% if es_config %}
|
|
||||||
{{ es_config | to_nice_yaml }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if es_config['cluster.name'] is not defined %}
|
|
||||||
cluster.name: elasticsearch
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if es_config['node.name'] is not defined %}
|
|
||||||
node.name: {{inventory_hostname}}-{{es_instance_name}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
#################################### Paths ####################################
|
|
||||||
|
|
||||||
# Path to directory containing configuration (this file and logging.yml):
|
|
||||||
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
path.conf: {{ conf_dir }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
path.data: {{ data_dirs | array_to_str }}
|
|
||||||
|
|
||||||
path.logs: {{ log_dir }}
|
|
||||||
|
|
||||||
{% if es_path_repo is defined %}
|
|
||||||
path.repo: {{ es_path_repo }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if es_action_auto_create_index == true %}
|
|
||||||
action.auto_create_index: true
|
|
||||||
{% elif not es_action_auto_create_index %}
|
|
||||||
action.auto_create_index: false
|
|
||||||
{% else %}
|
|
||||||
action.auto_create_index: {{ es_action_auto_create_index }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if es_enable_xpack %}
|
|
||||||
{% if not "security" in es_xpack_features %}
|
|
||||||
xpack.security.enabled: false
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if not "monitoring" in es_xpack_features %}
|
|
||||||
xpack.monitoring.enabled: false
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if not "alerting" in es_xpack_features %}
|
|
||||||
xpack.watcher.enabled: false
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if not "ml" in es_xpack_features %}
|
|
||||||
xpack.ml.enabled: false
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if not "graph" in es_xpack_features %}
|
|
||||||
xpack.graph.enabled: false
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if es_mail_config is defined %}
|
|
||||||
xpack.notification.email:
|
|
||||||
account:
|
|
||||||
{{ es_mail_config['account'] }}:
|
|
||||||
profile: {{ es_mail_config['profile'] }}
|
|
||||||
email_defaults:
|
|
||||||
from: {{ es_mail_config['from'] }}
|
|
||||||
smtp:
|
|
||||||
auth: {{ es_mail_config['require_auth'] }}
|
|
||||||
host: {{ es_mail_config['host'] }}
|
|
||||||
port: {{ es_mail_config['port'] }}
|
|
||||||
{% if es_mail_config['require_auth'] == true %}
|
|
||||||
user: {{ es_mail_config['user'] }}
|
|
||||||
password: {{ es_mail_config['pass'] }}
|
|
||||||
{% endif %}
|
|
||||||
{% endif %}
|
|
@ -1,229 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# /etc/init.d/elasticsearch -- startup script for Elasticsearch
|
|
||||||
#
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: elasticsearch
|
|
||||||
# Required-Start: $network $remote_fs $named
|
|
||||||
# Required-Stop: $network $remote_fs $named
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: Starts elasticsearch
|
|
||||||
# Description: Starts elasticsearch using start-stop-daemon
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
PATH=/bin:/usr/bin:/sbin:/usr/sbin
|
|
||||||
NAME={{es_instance_name}}_{{default_file | basename}}
|
|
||||||
{% if es_config['node.name'] is defined %}
|
|
||||||
DESC="Elasticsearch Server - {{es_config['node.name']}}"
|
|
||||||
{% else %}
|
|
||||||
DESC="Elasticsearch Server - {{es_instance_name}}"
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
DEFAULT=/etc/default/$NAME
|
|
||||||
|
|
||||||
if [ `id -u` -ne 0 ]; then
|
|
||||||
echo "You need root privileges to run this script"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
. /lib/lsb/init-functions
|
|
||||||
if [ -r /etc/default/rcS ]; then
|
|
||||||
. /etc/default/rcS
|
|
||||||
fi
|
|
||||||
|
|
||||||
# The following variables can be overwritten in $DEFAULT
|
|
||||||
|
|
||||||
# Run Elasticsearch as this user ID and group ID
|
|
||||||
ES_USER={{es_user}}
|
|
||||||
ES_GROUP={{es_group}}
|
|
||||||
|
|
||||||
# Directory where the Elasticsearch binary distribution resides
|
|
||||||
ES_HOME={{es_home}}
|
|
||||||
|
|
||||||
# Maximum number of open files
|
|
||||||
{% if es_max_open_files is defined %}
|
|
||||||
MAX_OPEN_FILES={{es_max_open_files}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Maximum amount of locked memory
|
|
||||||
#MAX_LOCKED_MEMORY=
|
|
||||||
{% if m_lock_enabled %}
|
|
||||||
MAX_LOCKED_MEMORY=unlimited
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Elasticsearch log directory
|
|
||||||
LOG_DIR={{log_dir}}
|
|
||||||
|
|
||||||
# Elasticsearch data directory
|
|
||||||
DATA_DIR={{ data_dirs | array_to_str }}
|
|
||||||
|
|
||||||
# Elasticsearch configuration directory
|
|
||||||
CONF_DIR={{conf_dir}}
|
|
||||||
ES_PATH_CONF={{ conf_dir }}
|
|
||||||
|
|
||||||
# Maximum number of VMA (Virtual Memory Areas) a process can own
|
|
||||||
{% if es_max_map_count is defined %}
|
|
||||||
MAX_MAP_COUNT={{es_max_map_count}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Elasticsearch PID file directory
|
|
||||||
PID_DIR={{pid_dir}}
|
|
||||||
|
|
||||||
ES_JVM_OPTIONS="{{conf_dir}}/jvm.options"
|
|
||||||
|
|
||||||
# End of variables that can be overwritten in $DEFAULT
|
|
||||||
|
|
||||||
# overwrite settings from default file
|
|
||||||
if [ -f "$DEFAULT" ]; then
|
|
||||||
. "$DEFAULT"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# CONF_FILE setting was removed
|
|
||||||
if [ ! -z "$CONF_FILE" ]; then
|
|
||||||
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$ES_USER" != "elasticsearch" ] || [ "$ES_GROUP" != "elasticsearch" ]; then
|
|
||||||
echo "WARNING: ES_USER and ES_GROUP are deprecated and will be removed in the next major version of Elasticsearch, got: [$ES_USER:$ES_GROUP]"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Define other required variables
|
|
||||||
PID_FILE="$PID_DIR/$NAME.pid"
|
|
||||||
DAEMON=$ES_HOME/bin/elasticsearch
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
DAEMON_OPTS="-d -p $PID_FILE -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR"
|
|
||||||
{% else %}
|
|
||||||
DAEMON_OPTS="-d -p $PID_FILE"
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
export ES_JAVA_OPTS
|
|
||||||
export JAVA_HOME
|
|
||||||
export ES_INCLUDE
|
|
||||||
export ES_JVM_OPTIONS
|
|
||||||
export ES_PATH_CONF
|
|
||||||
|
|
||||||
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
|
|
||||||
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
|
|
||||||
if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi
|
|
||||||
if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi
|
|
||||||
if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi
|
|
||||||
if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi
|
|
||||||
if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi
|
|
||||||
if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi
|
|
||||||
if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi
|
|
||||||
|
|
||||||
# Check DAEMON exists
|
|
||||||
if [ ! -x "$DAEMON" ]; then
|
|
||||||
echo "The elasticsearch startup script does not exists or it is not executable, tried: $DAEMON"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
checkJava() {
|
|
||||||
if [ -x "$JAVA_HOME/bin/java" ]; then
|
|
||||||
JAVA="$JAVA_HOME/bin/java"
|
|
||||||
else
|
|
||||||
JAVA=`which java`
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -x "$JAVA" ]; then
|
|
||||||
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
checkJava
|
|
||||||
|
|
||||||
log_daemon_msg "Starting $DESC"
|
|
||||||
|
|
||||||
pid=`pidofproc -p $PID_FILE elasticsearch`
|
|
||||||
if [ -n "$pid" ] ; then
|
|
||||||
log_begin_msg "Already running."
|
|
||||||
log_end_msg 0
|
|
||||||
exit 0
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Ensure that the PID_DIR exists (it is cleaned at OS startup time)
|
|
||||||
if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
|
|
||||||
mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
|
|
||||||
fi
|
|
||||||
if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then
|
|
||||||
touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$MAX_OPEN_FILES" ]; then
|
|
||||||
ulimit -n $MAX_OPEN_FILES
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$MAX_LOCKED_MEMORY" ]; then
|
|
||||||
ulimit -l $MAX_LOCKED_MEMORY
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$MAX_THREADS" ]; then
|
|
||||||
ulimit -u $MAX_THREADS
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
|
|
||||||
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Start Daemon
|
|
||||||
start-stop-daemon -d $ES_HOME --start --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
|
|
||||||
return=$?
|
|
||||||
if [ $return -eq 0 ]; then
|
|
||||||
i=0
|
|
||||||
timeout={{es_debian_startup_timeout}}
|
|
||||||
# Wait for the process to be properly started before exiting
|
|
||||||
until { kill -0 `cat "$PID_FILE"`; } >/dev/null 2>&1
|
|
||||||
do
|
|
||||||
sleep 1
|
|
||||||
i=$(($i + 1))
|
|
||||||
if [ $i -gt $timeout ]; then
|
|
||||||
log_end_msg 1
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
log_end_msg $return
|
|
||||||
exit $return
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
log_daemon_msg "Stopping $DESC"
|
|
||||||
|
|
||||||
if [ -f "$PID_FILE" ]; then
|
|
||||||
start-stop-daemon --stop --pidfile "$PID_FILE" \
|
|
||||||
--user "$ES_USER" \
|
|
||||||
--quiet \
|
|
||||||
--retry forever/TERM/20 > /dev/null
|
|
||||||
if [ $? -eq 1 ]; then
|
|
||||||
log_progress_msg "$DESC is not running but pid file exists, cleaning up"
|
|
||||||
elif [ $? -eq 3 ]; then
|
|
||||||
PID="`cat $PID_FILE`"
|
|
||||||
log_failure_msg "Failed to stop $DESC (pid $PID)"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
rm -f "$PID_FILE"
|
|
||||||
else
|
|
||||||
log_progress_msg "(not running)"
|
|
||||||
fi
|
|
||||||
log_end_msg 0
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $?
|
|
||||||
;;
|
|
||||||
restart|force-reload)
|
|
||||||
if [ -f "$PID_FILE" ]; then
|
|
||||||
$0 stop
|
|
||||||
fi
|
|
||||||
$0 start
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
exit 0
|
|
@ -1,217 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#
|
|
||||||
# elasticsearch <summary>
|
|
||||||
#
|
|
||||||
# chkconfig: 2345 80 20
|
|
||||||
# description: Starts and stops a single elasticsearch instance on this system
|
|
||||||
#
|
|
||||||
|
|
||||||
### BEGIN INIT INFO
|
|
||||||
# Provides: Elasticsearch
|
|
||||||
# Required-Start: $network $named
|
|
||||||
# Required-Stop: $network $named
|
|
||||||
# Default-Start: 2 3 4 5
|
|
||||||
# Default-Stop: 0 1 6
|
|
||||||
# Short-Description: This service manages the elasticsearch daemon
|
|
||||||
# Description: Elasticsearch is a very scalable, schema-free and high-performance search solution supporting multi-tenancy and near realtime search.
|
|
||||||
### END INIT INFO
|
|
||||||
|
|
||||||
#
|
|
||||||
# init.d / servicectl compatibility (openSUSE)
|
|
||||||
#
|
|
||||||
if [ -f /etc/rc.status ]; then
|
|
||||||
. /etc/rc.status
|
|
||||||
rc_reset
|
|
||||||
fi
|
|
||||||
|
|
||||||
#
|
|
||||||
# Source function library.
|
|
||||||
#
|
|
||||||
if [ -f /etc/rc.d/init.d/functions ]; then
|
|
||||||
. /etc/rc.d/init.d/functions
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Sets the default values for elasticsearch variables used in this script
|
|
||||||
ES_USER="{{es_user}}"
|
|
||||||
ES_GROUP="{{es_group}}"
|
|
||||||
ES_HOME="{{es_home}}"
|
|
||||||
{% if es_max_open_files is defined %}
|
|
||||||
MAX_OPEN_FILES={{es_max_open_files}}
|
|
||||||
{% endif %}
|
|
||||||
# Maximum number of VMA (Virtual Memory Areas) a process can own
|
|
||||||
{% if es_max_map_count is defined %}
|
|
||||||
MAX_MAP_COUNT={{es_max_map_count}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
LOG_DIR="{{log_dir}}"
|
|
||||||
DATA_DIR={{ data_dirs | array_to_str }}
|
|
||||||
CONF_DIR="{{conf_dir}}"
|
|
||||||
ES_PATH_CONF="{{ conf_dir }}"
|
|
||||||
|
|
||||||
PID_DIR="{{pid_dir}}"
|
|
||||||
|
|
||||||
# Source the default env file
|
|
||||||
ES_ENV_FILE="{{instance_default_file}}"
|
|
||||||
if [ -f "$ES_ENV_FILE" ]; then
|
|
||||||
. "$ES_ENV_FILE"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$ES_USER" != "elasticsearch" ] || [ "$ES_GROUP" != "elasticsearch" ]; then
|
|
||||||
echo "WARNING: ES_USER and ES_GROUP are deprecated and will be removed in the next major version of Elasticsearch, got: [$ES_USER:$ES_GROUP]"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# CONF_FILE setting was removed
|
|
||||||
if [ ! -z "$CONF_FILE" ]; then
|
|
||||||
echo "CONF_FILE setting is no longer supported. elasticsearch.yml must be placed in the config directory and cannot be renamed."
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
exec="$ES_HOME/bin/elasticsearch"
|
|
||||||
prog="{{es_instance_name}}_{{default_file | basename}}"
|
|
||||||
pidfile="$PID_DIR/${prog}.pid"
|
|
||||||
|
|
||||||
export ES_JAVA_OPTS
|
|
||||||
export JAVA_HOME
|
|
||||||
export ES_INCLUDE
|
|
||||||
export ES_JVM_OPTIONS
|
|
||||||
export ES_STARTUP_SLEEP_TIME
|
|
||||||
export ES_PATH_CONF
|
|
||||||
|
|
||||||
# export unsupported variables so bin/elasticsearch can reject them and inform the user these are unsupported
|
|
||||||
if test -n "$ES_MIN_MEM"; then export ES_MIN_MEM; fi
|
|
||||||
if test -n "$ES_MAX_MEM"; then export ES_MAX_MEM; fi
|
|
||||||
if test -n "$ES_HEAP_SIZE"; then export ES_HEAP_SIZE; fi
|
|
||||||
if test -n "$ES_HEAP_NEWSIZE"; then export ES_HEAP_NEWSIZE; fi
|
|
||||||
if test -n "$ES_DIRECT_SIZE"; then export ES_DIRECT_SIZE; fi
|
|
||||||
if test -n "$ES_USE_IPV4"; then export ES_USE_IPV4; fi
|
|
||||||
if test -n "$ES_GC_OPTS"; then export ES_GC_OPTS; fi
|
|
||||||
if test -n "$ES_GC_LOG_FILE"; then export ES_GC_LOG_FILE; fi
|
|
||||||
|
|
||||||
lockfile=/var/lock/subsys/$prog
|
|
||||||
|
|
||||||
# backwards compatibility for old config sysconfig files, pre 0.90.1
|
|
||||||
if [ -n $USER ] && [ -z $ES_USER ] ; then
|
|
||||||
ES_USER=$USER
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -x "$exec" ]; then
|
|
||||||
echo "The elasticsearch startup script does not exists or it is not executable, tried: $exec"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
checkJava() {
|
|
||||||
if [ -x "$JAVA_HOME/bin/java" ]; then
|
|
||||||
JAVA="$JAVA_HOME/bin/java"
|
|
||||||
else
|
|
||||||
JAVA=`which java`
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ ! -x "$JAVA" ]; then
|
|
||||||
echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
start() {
|
|
||||||
checkJava
|
|
||||||
[ -x $exec ] || exit 5
|
|
||||||
|
|
||||||
if [ -n "$MAX_OPEN_FILES" ]; then
|
|
||||||
ulimit -n $MAX_OPEN_FILES
|
|
||||||
fi
|
|
||||||
if [ -n "$MAX_LOCKED_MEMORY" ]; then
|
|
||||||
ulimit -l $MAX_LOCKED_MEMORY
|
|
||||||
fi
|
|
||||||
if [ -n "$MAX_THREADS" ]; then
|
|
||||||
ulimit -u $MAX_THREADS
|
|
||||||
fi
|
|
||||||
if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
|
|
||||||
sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Ensure that the PID_DIR exists (it is cleaned at OS startup time)
|
|
||||||
if [ -n "$PID_DIR" ] && [ ! -e "$PID_DIR" ]; then
|
|
||||||
mkdir -p "$PID_DIR" && chown "$ES_USER":"$ES_GROUP" "$PID_DIR"
|
|
||||||
fi
|
|
||||||
if [ -n "$pidfile" ] && [ ! -e "$pidfile" ]; then
|
|
||||||
touch "$pidfile" && chown "$ES_USER":"$ES_GROUP" "$pidfile"
|
|
||||||
fi
|
|
||||||
|
|
||||||
cd $ES_HOME
|
|
||||||
echo -n $"Starting $prog: "
|
|
||||||
# if not running, start it up here, usually something like "daemon $exec"
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d -Edefault.path.logs=$LOG_DIR -Edefault.path.data=$DATA_DIR -Edefault.path.conf=$CONF_DIR
|
|
||||||
{% else %}
|
|
||||||
daemon --user $ES_USER --pidfile $pidfile $exec -p $pidfile -d
|
|
||||||
{% endif %}
|
|
||||||
retval=$?
|
|
||||||
echo
|
|
||||||
[ $retval -eq 0 ] && touch $lockfile
|
|
||||||
return $retval
|
|
||||||
}
|
|
||||||
|
|
||||||
stop() {
|
|
||||||
echo -n $"Stopping $prog: "
|
|
||||||
# stop it here, often "killproc $prog"
|
|
||||||
killproc -p $pidfile -d 86400 $prog
|
|
||||||
retval=$?
|
|
||||||
echo
|
|
||||||
[ $retval -eq 0 ] && rm -f $lockfile
|
|
||||||
return $retval
|
|
||||||
}
|
|
||||||
|
|
||||||
restart() {
|
|
||||||
stop
|
|
||||||
start
|
|
||||||
}
|
|
||||||
|
|
||||||
reload() {
|
|
||||||
restart
|
|
||||||
}
|
|
||||||
|
|
||||||
force_reload() {
|
|
||||||
restart
|
|
||||||
}
|
|
||||||
|
|
||||||
rh_status() {
|
|
||||||
# run checks to determine if the service is running or use generic status
|
|
||||||
status -p $pidfile $prog
|
|
||||||
}
|
|
||||||
|
|
||||||
rh_status_q() {
|
|
||||||
rh_status >/dev/null 2>&1
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
case "$1" in
|
|
||||||
start)
|
|
||||||
rh_status_q && exit 0
|
|
||||||
$1
|
|
||||||
;;
|
|
||||||
stop)
|
|
||||||
rh_status_q || exit 0
|
|
||||||
$1
|
|
||||||
;;
|
|
||||||
restart)
|
|
||||||
$1
|
|
||||||
;;
|
|
||||||
reload)
|
|
||||||
rh_status_q || exit 7
|
|
||||||
$1
|
|
||||||
;;
|
|
||||||
force-reload)
|
|
||||||
force_reload
|
|
||||||
;;
|
|
||||||
status)
|
|
||||||
rh_status
|
|
||||||
;;
|
|
||||||
condrestart|try-restart)
|
|
||||||
rh_status_q || exit 0
|
|
||||||
restart
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
|
|
||||||
exit 2
|
|
||||||
esac
|
|
||||||
exit $?
|
|
@ -1,118 +0,0 @@
|
|||||||
## JVM configuration
|
|
||||||
|
|
||||||
################################################################
|
|
||||||
## IMPORTANT: JVM heap size
|
|
||||||
################################################################
|
|
||||||
##
|
|
||||||
## You should always set the min and max JVM heap
|
|
||||||
## size to the same value. For example, to set
|
|
||||||
## the heap to 4 GB, set:
|
|
||||||
##
|
|
||||||
## -Xms4g
|
|
||||||
## -Xmx4g
|
|
||||||
##
|
|
||||||
## See https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
|
|
||||||
## for more information
|
|
||||||
##
|
|
||||||
################################################################
|
|
||||||
|
|
||||||
# Xms represents the initial size of total heap space
|
|
||||||
# Xmx represents the maximum size of total heap space
|
|
||||||
{% if es_heap_size is defined %}
|
|
||||||
-Xms{{ es_heap_size }}
|
|
||||||
-Xmx{{ es_heap_size }}
|
|
||||||
{% else %}
|
|
||||||
-Xms2g
|
|
||||||
-Xmx2g
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
################################################################
|
|
||||||
## Expert settings
|
|
||||||
################################################################
|
|
||||||
##
|
|
||||||
## All settings below this section are considered
|
|
||||||
## expert settings. Don't tamper with them unless
|
|
||||||
## you understand what you are doing
|
|
||||||
##
|
|
||||||
################################################################
|
|
||||||
|
|
||||||
## GC configuration
|
|
||||||
-XX:+UseConcMarkSweepGC
|
|
||||||
-XX:CMSInitiatingOccupancyFraction=75
|
|
||||||
-XX:+UseCMSInitiatingOccupancyOnly
|
|
||||||
|
|
||||||
## optimizations
|
|
||||||
|
|
||||||
# pre-touch memory pages used by the JVM during initialization
|
|
||||||
-XX:+AlwaysPreTouch
|
|
||||||
|
|
||||||
## basic
|
|
||||||
|
|
||||||
# force the server VM
|
|
||||||
-server
|
|
||||||
|
|
||||||
# set to headless, just in case
|
|
||||||
-Djava.awt.headless=true
|
|
||||||
|
|
||||||
# ensure UTF-8 encoding by default (e.g. filenames)
|
|
||||||
-Dfile.encoding=UTF-8
|
|
||||||
|
|
||||||
# use our provided JNA always versus the system one
|
|
||||||
-Djna.nosys=true
|
|
||||||
|
|
||||||
# use old-style file permissions on JDK9
|
|
||||||
-Djdk.io.permissionsUseCanonicalPath=true
|
|
||||||
|
|
||||||
# flags to configure Netty
|
|
||||||
-Dio.netty.noUnsafe=true
|
|
||||||
-Dio.netty.noKeySetOptimization=true
|
|
||||||
-Dio.netty.recycler.maxCapacityPerThread=0
|
|
||||||
|
|
||||||
# log4j 2
|
|
||||||
-Dlog4j.shutdownHookEnabled=false
|
|
||||||
-Dlog4j2.disable.jmx=true
|
|
||||||
-Dlog4j.skipJansi=true
|
|
||||||
|
|
||||||
## heap dumps
|
|
||||||
|
|
||||||
# generate a heap dump when an allocation from the Java heap fails
|
|
||||||
# heap dumps are created in the working directory of the JVM
|
|
||||||
-XX:+HeapDumpOnOutOfMemoryError
|
|
||||||
|
|
||||||
# specify an alternative path for heap dumps
|
|
||||||
# ensure the directory exists and has sufficient space
|
|
||||||
#-XX:HeapDumpPath=${heap.dump.path}
|
|
||||||
|
|
||||||
## GC logging
|
|
||||||
|
|
||||||
#-XX:+PrintGCDetails
|
|
||||||
#-XX:+PrintGCTimeStamps
|
|
||||||
#-XX:+PrintGCDateStamps
|
|
||||||
#-XX:+PrintClassHistogram
|
|
||||||
#-XX:+PrintTenuringDistribution
|
|
||||||
#-XX:+PrintGCApplicationStoppedTime
|
|
||||||
|
|
||||||
# log GC status to a file with time stamps
|
|
||||||
# ensure the directory exists
|
|
||||||
#-Xloggc:${loggc}
|
|
||||||
|
|
||||||
|
|
||||||
# By default, the GC log file will not rotate.
|
|
||||||
# By uncommenting the lines below, the GC log file
|
|
||||||
# will be rotated every 128MB at most 32 times.
|
|
||||||
#-XX:+UseGCLogFileRotation
|
|
||||||
#-XX:NumberOfGCLogFiles=32
|
|
||||||
#-XX:GCLogFileSize=128M
|
|
||||||
|
|
||||||
# Elasticsearch 5.0.0 will throw an exception on unquoted field names in JSON.
|
|
||||||
# If documents were already indexed with unquoted fields in a previous version
|
|
||||||
# of Elasticsearch, some operations may throw errors.
|
|
||||||
#
|
|
||||||
# WARNING: This option will be removed in Elasticsearch 6.0.0 and is provided
|
|
||||||
# only for migration purposes.
|
|
||||||
#-Delasticsearch.json.allow_unquoted_field_names=true
|
|
||||||
{% if es_jvm_custom_parameters !='' %}
|
|
||||||
{% for item in es_jvm_custom_parameters %}
|
|
||||||
{{ item }}
|
|
||||||
{% endfor %}
|
|
||||||
{% endif %}
|
|
@ -1,117 +0,0 @@
|
|||||||
status = error
|
|
||||||
|
|
||||||
# log action execution errors for easier debugging
|
|
||||||
logger.action.name = org.elasticsearch.action
|
|
||||||
logger.action.level = debug
|
|
||||||
|
|
||||||
appender.console.type = Console
|
|
||||||
appender.console.name = console
|
|
||||||
appender.console.layout.type = PatternLayout
|
|
||||||
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
|
|
||||||
|
|
||||||
appender.rolling.type = RollingFile
|
|
||||||
appender.rolling.name = rolling
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.rolling.fileName = ${sys:es.logs}.log
|
|
||||||
{% else %}
|
|
||||||
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
|
|
||||||
{% endif %}
|
|
||||||
appender.rolling.layout.type = PatternLayout
|
|
||||||
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
|
|
||||||
{% else %}
|
|
||||||
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
|
|
||||||
{% endif %}
|
|
||||||
appender.rolling.policies.type = Policies
|
|
||||||
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
|
|
||||||
appender.rolling.policies.time.interval = 1
|
|
||||||
appender.rolling.policies.time.modulate = true
|
|
||||||
{% if (es_version | version_compare('6.0.0', '>')) %}
|
|
||||||
appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
|
|
||||||
appender.rolling.policies.size.size = 128MB
|
|
||||||
appender.rolling.strategy.type = DefaultRolloverStrategy
|
|
||||||
appender.rolling.strategy.fileIndex = nomax
|
|
||||||
appender.rolling.strategy.action.type = Delete
|
|
||||||
appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
|
|
||||||
appender.rolling.strategy.action.condition.type = IfFileName
|
|
||||||
appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
|
|
||||||
appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
|
|
||||||
appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
|
|
||||||
{% endif %}
|
|
||||||
rootLogger.level = info
|
|
||||||
rootLogger.appenderRef.console.ref = console
|
|
||||||
rootLogger.appenderRef.rolling.ref = rolling
|
|
||||||
|
|
||||||
appender.deprecation_rolling.type = RollingFile
|
|
||||||
appender.deprecation_rolling.name = deprecation_rolling
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
|
|
||||||
{% else %}
|
|
||||||
appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
|
|
||||||
{% endif %}
|
|
||||||
appender.deprecation_rolling.layout.type = PatternLayout
|
|
||||||
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
|
|
||||||
{% else %}
|
|
||||||
appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
|
|
||||||
{% endif %}
|
|
||||||
appender.deprecation_rolling.policies.type = Policies
|
|
||||||
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
|
|
||||||
appender.deprecation_rolling.policies.size.size = 1GB
|
|
||||||
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
|
|
||||||
appender.deprecation_rolling.strategy.max = 4
|
|
||||||
|
|
||||||
logger.deprecation.name = org.elasticsearch.deprecation
|
|
||||||
logger.deprecation.level = warn
|
|
||||||
logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
|
|
||||||
logger.deprecation.additivity = false
|
|
||||||
|
|
||||||
appender.index_search_slowlog_rolling.type = RollingFile
|
|
||||||
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
|
|
||||||
{% else %}
|
|
||||||
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
|
|
||||||
{% endif %}
|
|
||||||
appender.index_search_slowlog_rolling.layout.type = PatternLayout
|
|
||||||
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
|
|
||||||
{% else %}
|
|
||||||
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
|
|
||||||
{% endif %}
|
|
||||||
appender.index_search_slowlog_rolling.policies.type = Policies
|
|
||||||
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
|
||||||
appender.index_search_slowlog_rolling.policies.time.interval = 1
|
|
||||||
appender.index_search_slowlog_rolling.policies.time.modulate = true
|
|
||||||
|
|
||||||
logger.index_search_slowlog_rolling.name = index.search.slowlog
|
|
||||||
logger.index_search_slowlog_rolling.level = trace
|
|
||||||
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
|
|
||||||
logger.index_search_slowlog_rolling.additivity = false
|
|
||||||
|
|
||||||
appender.index_indexing_slowlog_rolling.type = RollingFile
|
|
||||||
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
|
|
||||||
{% else %}
|
|
||||||
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
|
|
||||||
{% endif %}
|
|
||||||
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
|
|
||||||
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
|
|
||||||
{% else %}
|
|
||||||
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
|
|
||||||
{% endif %}
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.type = Policies
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
|
|
||||||
appender.index_indexing_slowlog_rolling.policies.time.modulate = true
|
|
||||||
|
|
||||||
logger.index_indexing_slowlog.name = index.indexing.slowlog.index
|
|
||||||
logger.index_indexing_slowlog.level = trace
|
|
||||||
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
|
|
||||||
logger.index_indexing_slowlog.additivity = false
|
|
@ -1 +0,0 @@
|
|||||||
{{ es_role_mapping | to_nice_yaml }}
|
|
@ -1 +0,0 @@
|
|||||||
{{ es_roles.file | to_nice_yaml }}
|
|
@ -1 +0,0 @@
|
|||||||
{{users_roles | join("\n") }}
|
|
@ -1,76 +0,0 @@
|
|||||||
[Unit]
|
|
||||||
Description=Elasticsearch-{{es_instance_name}}
|
|
||||||
Documentation=http://www.elastic.co
|
|
||||||
Wants=network-online.target
|
|
||||||
After=network-online.target
|
|
||||||
|
|
||||||
[Service]
|
|
||||||
Environment=ES_HOME={{es_home}}
|
|
||||||
Environment=CONF_DIR={{conf_dir}}
|
|
||||||
Environment=ES_PATH_CONF={{conf_dir}}
|
|
||||||
Environment=DATA_DIR={{ data_dirs | array_to_str }}
|
|
||||||
Environment=LOG_DIR={{log_dir}}
|
|
||||||
Environment=PID_DIR={{pid_dir}}
|
|
||||||
EnvironmentFile=-{{instance_default_file}}
|
|
||||||
|
|
||||||
WorkingDirectory={{es_home}}
|
|
||||||
|
|
||||||
User={{es_user}}
|
|
||||||
Group={{es_group}}
|
|
||||||
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
ExecStartPre=/usr/share/elasticsearch/bin/elasticsearch-systemd-pre-exec
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
ExecStart={{es_home}}/bin/elasticsearch \
|
|
||||||
-p ${PID_DIR}/elasticsearch.pid \
|
|
||||||
{% if (es_version | version_compare('6.0.0', '<')) %}
|
|
||||||
-Edefault.path.logs=${LOG_DIR} \
|
|
||||||
-Edefault.path.data=${DATA_DIR} \
|
|
||||||
-Edefault.path.conf=${CONF_DIR} \
|
|
||||||
{% endif %}
|
|
||||||
--quiet
|
|
||||||
|
|
||||||
|
|
||||||
# StandardOutput is configured to redirect to journalctl since
|
|
||||||
# some error messages may be logged in standard output before
|
|
||||||
# elasticsearch logging system is initialized. Elasticsearch
|
|
||||||
# stores its logs in /var/log/elasticsearch and does not use
|
|
||||||
# journalctl by default. If you also want to enable journalctl
|
|
||||||
# logging, you can simply remove the "quiet" option from ExecStart.
|
|
||||||
StandardOutput=journal
|
|
||||||
StandardError=inherit
|
|
||||||
|
|
||||||
# Specifies the maximum file descriptor number that can be opened by this process
|
|
||||||
{% if es_max_open_files is defined %}
|
|
||||||
LimitNOFILE={{es_max_open_files}}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Specifies the maximum number of bytes of memory that may be locked into RAM
|
|
||||||
# Set to "infinity" if you use the 'bootstrap.memory_lock: true' option
|
|
||||||
# in elasticsearch.yml and 'MAX_LOCKED_MEMORY=unlimited' in {{instance_default_file}}
|
|
||||||
{% if m_lock_enabled %}
|
|
||||||
LimitMEMLOCK=infinity
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
# Specifies the maximum number of threads that can be started. Elasticsearch requires a
|
|
||||||
# minimum of 2048.
|
|
||||||
LimitNPROC={{ es_max_threads }}
|
|
||||||
|
|
||||||
# Disable timeout logic and wait until process is stopped
|
|
||||||
TimeoutStopSec=0
|
|
||||||
|
|
||||||
# SIGTERM signal is used to stop the Java process
|
|
||||||
KillSignal=SIGTERM
|
|
||||||
|
|
||||||
# Send the signal only to the JVM rather than its control group
|
|
||||||
KillMode=process
|
|
||||||
|
|
||||||
# Java process is never killed
|
|
||||||
SendSIGKILL=no
|
|
||||||
|
|
||||||
# When a JVM receives a SIGTERM signal it exits with code 143
|
|
||||||
SuccessExitStatus=143
|
|
||||||
|
|
||||||
[Install]
|
|
||||||
WantedBy=multi-user.target
|
|
@ -1,6 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Dump all variables to a file
|
|
||||||
changed_when: False
|
|
||||||
copy:
|
|
||||||
content: '{{ vars | to_nice_json }} '
|
|
||||||
dest: '/tmp/vars.json'
|
|
@ -1,3 +0,0 @@
|
|||||||
source 'https://rubygems.org'
|
|
||||||
|
|
||||||
gem 'rspec-retry'
|
|
@ -1,10 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
shared_examples 'issue_test::init' do |vars|
|
|
||||||
|
|
||||||
#Add custom tests here for the issue-test.yml test
|
|
||||||
|
|
||||||
end
|
|
||||||
|
|
@ -1,139 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
shared_examples 'multi::init' do |vars|
|
|
||||||
|
|
||||||
describe service('master_elasticsearch') do
|
|
||||||
it { should be_running }
|
|
||||||
end
|
|
||||||
#test configuration parameters have been set - test all appropriately set in config file
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
|
|
||||||
it { should be_file }
|
|
||||||
it { should contain 'http.port: 9201' }
|
|
||||||
it { should contain 'transport.tcp.port: 9301' }
|
|
||||||
it { should contain 'node.data: true' }
|
|
||||||
it { should contain 'node.master: false' }
|
|
||||||
it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should_not contain 'bootstrap.memory_lock: true' }
|
|
||||||
if vars['es_major_version'] == '6.x'
|
|
||||||
it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
|
|
||||||
else
|
|
||||||
it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
|
|
||||||
end
|
|
||||||
it { should contain "path.data: /opt/elasticsearch/data-1/localhost-#{vars['es_instance_name']},/opt/elasticsearch/data-2/localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
||||||
#test configuration parameters have been set for master - test all appropriately set in config file
|
|
||||||
describe file('/etc/elasticsearch/master/elasticsearch.yml') do
|
|
||||||
it { should be_file }
|
|
||||||
it { should contain 'http.port: 9200' }
|
|
||||||
it { should contain 'transport.tcp.port: 9300' }
|
|
||||||
it { should contain 'node.data: false' }
|
|
||||||
it { should contain 'node.master: true' }
|
|
||||||
it { should contain 'node.name: localhost-master' }
|
|
||||||
it { should contain 'bootstrap.memory_lock: true' }
|
|
||||||
if vars['es_major_version'] == '6.x'
|
|
||||||
it { should_not contain 'path.conf: /etc/elasticsearch/master' }
|
|
||||||
else
|
|
||||||
it { should contain 'path.conf: /etc/elasticsearch/master' }
|
|
||||||
end
|
|
||||||
it { should contain 'path.data: /opt/elasticsearch/master/localhost-master' }
|
|
||||||
it { should contain 'path.logs: /var/log/elasticsearch/localhost-master' }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe 'Master listening' do
|
|
||||||
it 'listening in port 9200' do
|
|
||||||
expect(port 9200).to be_listening
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
#test we started on the correct port was used for master
|
|
||||||
describe 'master started' do
|
|
||||||
it 'master node should be running', :retry => 3, :retry_wait => 10 do
|
|
||||||
expect(curl_json('http://localhost:9200')['name']).to eq('localhost-master')
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
#test we started on the correct port was used for node 1
|
|
||||||
describe "#{vars['es_instance_name']} started" do
|
|
||||||
it 'node should be running', :retry => 3, :retry_wait => 10 do
|
|
||||||
expect(curl_json('http://localhost:9201')['name']).to eq("localhost-#{vars['es_instance_name']}")
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
#Confirm scripts are on both nodes
|
|
||||||
describe file('/etc/elasticsearch/master/scripts') do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file('/etc/elasticsearch/master/scripts/calculate-score.groovy') do
|
|
||||||
it { should be_file }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
|
|
||||||
#Confirm that the data directory has only been set for the first node
|
|
||||||
describe file('/opt/elasticsearch/master/localhost-master') do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file("/opt/elasticsearch/data-1/localhost-#{vars['es_instance_name']}") do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
||||||
describe file("/opt/elasticsearch/data-2/localhost-#{vars['es_instance_name']}") do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
|
|
||||||
#test to make sure mlock was applied
|
|
||||||
describe command('curl -s "localhost:9200/_nodes/localhost-master/process?pretty=true" | grep mlockall') do
|
|
||||||
its(:stdout) { should match /true/ }
|
|
||||||
its(:exit_status) { should eq 0 }
|
|
||||||
end
|
|
||||||
|
|
||||||
#test to make sure mlock was not applied
|
|
||||||
describe command("curl -s 'localhost:9201/_nodes/localhost-#{vars['es_instance_name']}/process?pretty=true' | grep mlockall") do
|
|
||||||
its(:stdout) { should match /false/ }
|
|
||||||
its(:exit_status) { should eq 0 }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe 'version check on master' do
|
|
||||||
it 'should be reported as version '+vars['es_version'] do
|
|
||||||
command = command('curl -s localhost:9200 | grep number')
|
|
||||||
expect(command.stdout).to match(vars['es_version'])
|
|
||||||
expect(command.exit_status).to eq(0)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
describe 'version check on data' do
|
|
||||||
it 'should be reported as version '+vars['es_version'] do
|
|
||||||
command = command('curl -s localhost:9201 | grep number')
|
|
||||||
expect(command.stdout).to match(vars['es_version'])
|
|
||||||
expect(command.exit_status).to eq(0)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
for plugin in vars['es_plugins']
|
|
||||||
plugin = plugin['plugin']
|
|
||||||
|
|
||||||
describe command('curl -s localhost:9200/_nodes/plugins?pretty=true | grep '+plugin) do
|
|
||||||
its(:exit_status) { should eq 0 }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe command('curl -s localhost:9201/_nodes/plugins?pretty=true | grep '+plugin) do
|
|
||||||
its(:exit_status) { should eq 0 }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file('/usr/share/elasticsearch/plugins/'+plugin) do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
@ -1,13 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
|
|
||||||
shared_examples 'oss::init' do |vars|
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/log4j2.properties") do
|
|
||||||
it { should be_file }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
it { should_not contain 'CUSTOM LOG4J FILE' }
|
|
||||||
end
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/jvm.options") do
|
|
||||||
it { should be_file }
|
|
||||||
it { should be_owned_by vars['es_user'] }
|
|
||||||
end
|
|
||||||
end
|
|
@ -1,4 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
|
|
||||||
shared_examples 'oss_to_xpack_upgrade::init' do |vars|
|
|
||||||
end
|
|
@ -1,4 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
|
|
||||||
shared_examples 'oss_upgrade::init' do |vars|
|
|
||||||
end
|
|
@ -1,170 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
families = {
|
|
||||||
'Debian' => {
|
|
||||||
'shell' => '/bin/false',
|
|
||||||
'password' => '*',
|
|
||||||
'defaults_path' => '/etc/default/elasticsearch'
|
|
||||||
},
|
|
||||||
'RedHat' => {
|
|
||||||
'shell' => '/sbin/nologin',
|
|
||||||
'password' => '!!',
|
|
||||||
'defaults_path' => '/etc/sysconfig/elasticsearch'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
family = families[vars['ansible_os_family']]
|
|
||||||
|
|
||||||
es_api_url = "http://localhost:#{vars['es_api_port']}"
|
|
||||||
username = vars['es_api_basic_auth_username']
|
|
||||||
password = vars['es_api_basic_auth_password']
|
|
||||||
|
|
||||||
shared_examples 'shared::init' do |vars|
|
|
||||||
describe 'version check' do
|
|
||||||
it 'should be reported as version '+vars['es_version'] do
|
|
||||||
expect(curl_json(es_api_url, username=username, password=password)['version']['number']).to eq(vars['es_version'])
|
|
||||||
end
|
|
||||||
end
|
|
||||||
describe 'xpack checks' do
|
|
||||||
if vars['es_enable_xpack']
|
|
||||||
it 'should be be running the xpack version' do
|
|
||||||
expect(curl_json("#{es_api_url}/_xpack", username=username, password=password)['tagline']).to eq('You know, for X')
|
|
||||||
end
|
|
||||||
it 'xpack should be activated' do
|
|
||||||
expect(curl_json("#{es_api_url}/_license", username=username, password=password)['license']['status']).to eq('active')
|
|
||||||
end
|
|
||||||
features = curl_json("#{es_api_url}/_xpack", username=username, password=password)
|
|
||||||
curl_json("#{es_api_url}/_xpack", username=username, password=password)['features'].each do |feature,values|
|
|
||||||
enabled = vars['es_xpack_features'].include? feature
|
|
||||||
status = if enabled then 'enabled' else 'disabled' end
|
|
||||||
it "the xpack feature '#{feature}' to be #{status}" do
|
|
||||||
expect(values['enabled'] = enabled)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
# X-Pack is no longer installed as a plugin in elasticsearch
|
|
||||||
if vars['es_major_version'] == '5.x'
|
|
||||||
describe file('/usr/share/elasticsearch/plugins/x-pack') do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by vars['es_user'] }
|
|
||||||
end
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/x-pack") do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by vars['es_user'] }
|
|
||||||
end
|
|
||||||
describe 'x-pack-core plugin' do
|
|
||||||
it 'should be installed with the correct version' do
|
|
||||||
plugins = curl_json("#{es_api_url}/_nodes/plugins", username=username, password=password)
|
|
||||||
node, data = plugins['nodes'].first
|
|
||||||
version = 'plugin not found'
|
|
||||||
name = 'x-pack'
|
|
||||||
|
|
||||||
data['plugins'].each do |plugin|
|
|
||||||
if plugin['name'] == name
|
|
||||||
version = plugin['version']
|
|
||||||
end
|
|
||||||
end
|
|
||||||
expect(version).to eql(vars['es_version'])
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
describe user(vars['es_user']) do
|
|
||||||
it { should exist }
|
|
||||||
it { should belong_to_group vars['es_group'] }
|
|
||||||
it { should have_uid vars['es_user_id'] } if vars.key?('es_user_id')
|
|
||||||
|
|
||||||
it { should have_login_shell family['shell'] }
|
|
||||||
|
|
||||||
its(:encrypted_password) { should eq(family['password']) }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe package(vars['es_package_name']) do
|
|
||||||
it { should be_installed }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe service("#{vars['es_instance_name']}_elasticsearch") do
|
|
||||||
it { should be_running }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe port(vars['es_api_port']) do
|
|
||||||
it { should be_listening.with('tcp') }
|
|
||||||
end
|
|
||||||
|
|
||||||
if vars['es_templates']
|
|
||||||
describe file('/etc/elasticsearch/templates') do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by vars['es_user'] }
|
|
||||||
end
|
|
||||||
describe file('/etc/elasticsearch/templates/basic.json') do
|
|
||||||
it { should be_file }
|
|
||||||
it { should be_owned_by vars['es_user'] }
|
|
||||||
end
|
|
||||||
#This is possibly subject to format changes in the response across versions so may fail in the future
|
|
||||||
describe 'Template Contents Correct' do
|
|
||||||
it 'should be reported as being installed', :retry => 3, :retry_wait => 10 do
|
|
||||||
template = curl_json("#{es_api_url}/_template/basic", username=username, password=password)
|
|
||||||
expect(template.key?('basic'))
|
|
||||||
expect(template['basic']['settings']['index']['number_of_shards']).to eq("1")
|
|
||||||
expect(template['basic']['mappings']['type1']['_source']['enabled']).to eq(false)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
if vars['es_scripts']
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/scripts") do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/scripts/calculate-score.groovy") do
|
|
||||||
it { should be_file }
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
end
|
|
||||||
end
|
|
||||||
describe file('/etc/init.d/elasticsearch') do
|
|
||||||
it { should_not exist }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file(family['defaults_path']) do
|
|
||||||
its(:content) { should match '' }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file('/etc/elasticsearch/elasticsearch.yml') do
|
|
||||||
it { should_not exist }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file('/etc/elasticsearch/logging.yml') do
|
|
||||||
it { should_not exist }
|
|
||||||
end
|
|
||||||
|
|
||||||
if vars.key?('es_plugins')
|
|
||||||
vars['es_plugins'].each do |plugin|
|
|
||||||
name = plugin['plugin']
|
|
||||||
describe file('/usr/share/elasticsearch/plugins/'+name) do
|
|
||||||
it { should be_directory }
|
|
||||||
it { should be_owned_by vars['es_user'] }
|
|
||||||
end
|
|
||||||
it 'should be installed and the right version' do
|
|
||||||
plugins = curl_json("#{es_api_url}/_nodes/plugins", username=username, password=password)
|
|
||||||
version = nil
|
|
||||||
_node, data = plugins['nodes'].first
|
|
||||||
data['plugins'].each do |p|
|
|
||||||
version = p['version'] if p['name'] == name
|
|
||||||
end
|
|
||||||
expect(version).to eql(vars['es_version'])
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
|
|
||||||
it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain 'cluster.name: elasticsearch' }
|
|
||||||
if vars['es_major_version'] == '6.x'
|
|
||||||
it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
|
|
||||||
else
|
|
||||||
it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
|
|
||||||
end
|
|
||||||
its(:content) { should match "path.data: #{vars['data_dirs'].join(',')}" }
|
|
||||||
its(:content) { should match "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
|
|
||||||
end
|
|
||||||
end
|
|
@ -1,26 +0,0 @@
|
|||||||
require 'serverspec'
|
|
||||||
require 'net/http'
|
|
||||||
require 'json'
|
|
||||||
|
|
||||||
set :backend, :exec
|
|
||||||
|
|
||||||
require 'rspec/retry'
|
|
||||||
|
|
||||||
RSpec.configure do |config|
|
|
||||||
# show retry status in spec process
|
|
||||||
config.verbose_retry = true
|
|
||||||
# show exception that triggers a retry if verbose_retry is set to true
|
|
||||||
config.display_try_failure_messages = true
|
|
||||||
end
|
|
||||||
|
|
||||||
def curl_json(uri, username=nil, password=nil)
|
|
||||||
uri = URI(uri)
|
|
||||||
req = Net::HTTP::Get.new(uri)
|
|
||||||
if username && password
|
|
||||||
req.basic_auth username, password
|
|
||||||
end
|
|
||||||
res = Net::HTTP.start(uri.hostname, uri.port) {|http|
|
|
||||||
http.request(req)
|
|
||||||
}
|
|
||||||
return JSON.parse(res.body)
|
|
||||||
end
|
|
@ -1,17 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
|
|
||||||
shared_examples 'xpack::init' do |vars|
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
|
|
||||||
it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain 'cluster.name: elasticsearch' }
|
|
||||||
if vars['es_major_version'] == '6.x'
|
|
||||||
it { should_not contain 'path.conf: /etc/elasticsearch/security_node' }
|
|
||||||
else
|
|
||||||
it { should contain 'path.conf: /etc/elasticsearch/security_node' }
|
|
||||||
end
|
|
||||||
it { should contain "path.data: /var/lib/elasticsearch/localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain 'xpack.security.enabled: false' }
|
|
||||||
it { should contain 'xpack.watcher.enabled: false' }
|
|
||||||
end
|
|
||||||
end
|
|
@ -1,103 +0,0 @@
|
|||||||
require 'spec_helper'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
shared_examples 'xpack_upgrade::init' do |vars|
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
|
|
||||||
it { should contain "node.name: localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain 'cluster.name: elasticsearch' }
|
|
||||||
if vars['es_major_version'] == '6.x'
|
|
||||||
it { should_not contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
|
|
||||||
else
|
|
||||||
it { should contain "path.conf: /etc/elasticsearch/#{vars['es_instance_name']}" }
|
|
||||||
end
|
|
||||||
it { should contain "path.data: /var/lib/elasticsearch/localhost-#{vars['es_instance_name']}" }
|
|
||||||
it { should contain "path.logs: /var/log/elasticsearch/localhost-#{vars['es_instance_name']}" }
|
|
||||||
end
|
|
||||||
|
|
||||||
#Test users file, users_roles and roles.yml
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}#{vars['es_xpack_conf_subdir']}/users_roles") do
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
it { should contain 'admin:es_admin' }
|
|
||||||
it { should contain 'power_user:testUser' }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}#{vars['es_xpack_conf_subdir']}/users") do
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
it { should contain 'testUser:' }
|
|
||||||
it { should contain 'es_admin:' }
|
|
||||||
end
|
|
||||||
|
|
||||||
describe 'security roles' do
|
|
||||||
it 'should list the security roles' do
|
|
||||||
roles = curl_json('http://localhost:9200/_xpack/security/role', username='es_admin', password='changeMeAgain')
|
|
||||||
expect(roles.key?('superuser'))
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}/elasticsearch.yml") do
|
|
||||||
it { should contain 'security.authc.realms.file1.order: 0' }
|
|
||||||
it { should contain 'security.authc.realms.file1.type: file' }
|
|
||||||
it { should contain 'security.authc.realms.native1.order: 1' }
|
|
||||||
it { should contain 'security.authc.realms.native1.type: native' }
|
|
||||||
end
|
|
||||||
|
|
||||||
#Test contents of role_mapping.yml
|
|
||||||
describe file("/etc/elasticsearch/#{vars['es_instance_name']}#{vars['es_xpack_conf_subdir']}/role_mapping.yml") do
|
|
||||||
it { should be_owned_by 'elasticsearch' }
|
|
||||||
it { should contain 'power_user:' }
|
|
||||||
it { should contain '- cn=admins,dc=example,dc=com' }
|
|
||||||
it { should contain 'user:' }
|
|
||||||
it { should contain '- cn=admins,dc=example,dc=com' }
|
|
||||||
end
|
|
||||||
|
|
||||||
#check accounts are correct i.e. we can auth and they have the correct roles
|
|
||||||
describe 'kibana4_server access check' do
|
|
||||||
it 'should be reported as version '+vars['es_version'] do
|
|
||||||
command = command('curl -s localhost:9200/ -u kibana4_server:changeMe | grep number')
|
|
||||||
expect(command.stdout).to match(vars['es_version'])
|
|
||||||
expect(command.exit_status).to eq(0)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
describe 'security users' do
|
|
||||||
result = curl_json('http://localhost:9200/_xpack/security/user', username='elastic', password='elasticChanged')
|
|
||||||
it 'should have the elastic user' do
|
|
||||||
expect(result['elastic']['username']).to eq('elastic')
|
|
||||||
expect(result['elastic']['roles']).to eq(['superuser'])
|
|
||||||
expect(result['elastic']['enabled']).to eq(true)
|
|
||||||
end
|
|
||||||
it 'should have the kibana user' do
|
|
||||||
expect(result['kibana']['username']).to eq('kibana')
|
|
||||||
expect(result['kibana']['roles']).to eq(['kibana_system'])
|
|
||||||
expect(result['kibana']['enabled']).to eq(true)
|
|
||||||
end
|
|
||||||
it 'should have the kibana_server user' do
|
|
||||||
expect(result['kibana4_server']['username']).to eq('kibana4_server')
|
|
||||||
expect(result['kibana4_server']['roles']).to eq(['kibana4_server'])
|
|
||||||
expect(result['kibana4_server']['enabled']).to eq(true)
|
|
||||||
end
|
|
||||||
it 'should have the logstash user' do
|
|
||||||
expect(result['logstash_system']['username']).to eq('logstash_system')
|
|
||||||
expect(result['logstash_system']['roles']).to eq(['logstash_system'])
|
|
||||||
expect(result['logstash_system']['enabled']).to eq(true)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
describe 'logstash_system access check' do
|
|
||||||
it 'should be reported as version '+vars['es_version'] do
|
|
||||||
command = command('curl -s localhost:9200/ -u logstash_system:aNewLogstashPassword | grep number')
|
|
||||||
expect(command.stdout).to match(vars['es_version'])
|
|
||||||
expect(command.exit_status).to eq(0)
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
if vars['es_major_version'] == '5.x' # kibana default password has been removed in 6.x
|
|
||||||
describe 'kibana access check' do
|
|
||||||
it 'should be reported as version '+vars['es_version'] do
|
|
||||||
result = curl_json('http://localhost:9200/', username='kibana', password='changeme')
|
|
||||||
expect(result['version']['number']).to eq(vars['es_version'])
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
@ -1,50 +0,0 @@
|
|||||||
#This file is for users to test issues and reproduce them using the test framework.
|
|
||||||
#Modify the playbook below and test with kitchen i.e. `kitchen test issue-test`
|
|
||||||
#To add custom tests modify the serverspec file ./helpers/serverspec/issue_test_spec.rb
|
|
||||||
#Idempot test is enabled for this test
|
|
||||||
|
|
||||||
- name: Simple Example
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "security_node"
|
|
||||||
es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
|
|
||||||
es_config:
|
|
||||||
xpack.security.enabled: True
|
|
||||||
xpack.security.authc.realms.file1.type: "file"
|
|
||||||
xpack.security.authc.realms.file1.order: 1
|
|
||||||
xpack.security.authc.realms.native1.type: "native"
|
|
||||||
xpack.security.authc.realms.native1.order: 0
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_enable_xpack: true
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
es_xpack_features:
|
|
||||||
- security
|
|
||||||
- alerting
|
|
||||||
es_api_basic_auth_username: elastic
|
|
||||||
es_api_basic_auth_password: changeme
|
|
||||||
es_users:
|
|
||||||
file:
|
|
||||||
test_user:
|
|
||||||
password: changeme
|
|
||||||
roles:
|
|
||||||
- kibana_system
|
|
||||||
native:
|
|
||||||
kibana:
|
|
||||||
password: changeme
|
|
||||||
roles:
|
|
||||||
- kibana_system
|
|
||||||
elastic:
|
|
||||||
password: aNewPassWord
|
|
||||||
es_roles:
|
|
||||||
native:
|
|
||||||
logstash:
|
|
||||||
cluster:
|
|
||||||
- manage_index_templates
|
|
||||||
logstash_system:
|
|
||||||
cluster:
|
|
||||||
- manage_index_templates
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,8 +0,0 @@
|
|||||||
require 'issue_test_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'Issue Test' do
|
|
||||||
include_examples 'issue_test::init', vars
|
|
||||||
end
|
|
||||||
|
|
@ -1,51 +0,0 @@
|
|||||||
---
|
|
||||||
# Test ability to deploy multiple instances to a machine
|
|
||||||
- name: Elasticsearch Multi test - master on 9200
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "master"
|
|
||||||
es_data_dirs:
|
|
||||||
- "/opt/elasticsearch/master"
|
|
||||||
es_config:
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9300"
|
|
||||||
http.port: 9200
|
|
||||||
transport.tcp.port: 9300
|
|
||||||
node.data: false
|
|
||||||
node.master: true
|
|
||||||
bootstrap.memory_lock: true
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_scripts: true
|
|
||||||
es_templates: true
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_api_port: 9200
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
|
|
||||||
- name: Elasticsearch Multi test - data on 9201
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_scripts: true
|
|
||||||
es_templates: true
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_api_port: 9201
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_data_dirs:
|
|
||||||
- "/opt/elasticsearch/data-1"
|
|
||||||
- "/opt/elasticsearch/data-2"
|
|
||||||
es_config:
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9300"
|
|
||||||
http.port: 9201
|
|
||||||
transport.tcp.port: 9301
|
|
||||||
node.data: true
|
|
||||||
node.master: false
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,9 +0,0 @@
|
|||||||
require 'multi_spec'
|
|
||||||
require 'shared_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'Multi Tests' do
|
|
||||||
include_examples 'shared::init', vars
|
|
||||||
include_examples 'multi::init', vars
|
|
||||||
end
|
|
@ -1,27 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Standard test for single node setup. Tests idempotence.
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_version: "{{ '6.2.4' if es_major_version == '6.x' else '5.6.9' }}" # This is set to an older version than the current default to force an upgrade
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_heap_size: "1g"
|
|
||||||
|
|
||||||
- name: Standard test for single node setup. Tests idempotence.
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_enable_xpack: true
|
|
||||||
es_api_basic_auth_username: elastic
|
|
||||||
es_api_basic_auth_password: changeme
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_xpack_features:
|
|
||||||
- security
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,9 +0,0 @@
|
|||||||
require 'oss_to_xpack_upgrade_spec'
|
|
||||||
require 'shared_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'oss to xpack upgrade Tests' do
|
|
||||||
include_examples 'shared::init', vars
|
|
||||||
include_examples 'oss_to_xpack_upgrade::init', vars
|
|
||||||
end
|
|
@ -1,23 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Standard test for single node setup. Tests idempotence.
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_version: "{{ '6.2.4' if es_major_version == '6.x' else '5.6.9' }}" # This is set to an older version than the current default to force an upgrade
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_heap_size: "1g"
|
|
||||||
|
|
||||||
- name: Standard test for single node setup. Tests idempotence.
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_heap_size: "1g"
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,9 +0,0 @@
|
|||||||
require 'oss_upgrade_spec'
|
|
||||||
require 'shared_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'oss upgrade Tests' do
|
|
||||||
include_examples 'oss_upgrade::init', vars
|
|
||||||
include_examples 'shared::init', vars
|
|
||||||
end
|
|
@ -1,15 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Standard test for single node setup. Tests idempotence.
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_enable_xpack: false
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
|
|
||||||
#Do not add tests here. This test is run twice and confirms idempotency.
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,12 +0,0 @@
|
|||||||
require 'oss_spec'
|
|
||||||
require 'shared_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'OSS Tests' do
|
|
||||||
include_examples 'oss::init', vars
|
|
||||||
include_examples 'shared::init', vars
|
|
||||||
end
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -1,166 +0,0 @@
|
|||||||
---
|
|
||||||
- name: Elasticsearch Xpack tests initial
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_api_port: 9200
|
|
||||||
es_config:
|
|
||||||
http.port: 9200
|
|
||||||
transport.tcp.port: 9300
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9300"
|
|
||||||
xpack.security.authc.realms.file1.type: "file"
|
|
||||||
xpack.security.authc.realms.file1.order: 0
|
|
||||||
xpack.security.authc.realms.native1.type: "native"
|
|
||||||
xpack.security.authc.realms.native1.order: 1
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_templates: true
|
|
||||||
es_version: "{{ '6.2.4' if es_major_version == '6.x' else '5.6.9' }}" # This is set to an older version than the current default to force an upgrade
|
|
||||||
es_enable_xpack: true
|
|
||||||
es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-geoip
|
|
||||||
es_xpack_features:
|
|
||||||
- security
|
|
||||||
- alerting
|
|
||||||
es_api_basic_auth_username: elastic
|
|
||||||
es_api_basic_auth_password: changeme
|
|
||||||
es_message_auth_file: system_key
|
|
||||||
es_role_mapping:
|
|
||||||
power_user:
|
|
||||||
- "cn=admins,dc=example,dc=com"
|
|
||||||
user:
|
|
||||||
- "cn=users,dc=example,dc=com"
|
|
||||||
- "cn=admins,dc=example,dc=com"
|
|
||||||
es_users:
|
|
||||||
native:
|
|
||||||
kibana4_server:
|
|
||||||
password: changeMe
|
|
||||||
roles:
|
|
||||||
- kibana4_server
|
|
||||||
logstash_system:
|
|
||||||
#this should be successfully modified
|
|
||||||
password: aNewLogstashPassword
|
|
||||||
#this will be ignored
|
|
||||||
roles:
|
|
||||||
- kibana4_server
|
|
||||||
elastic:
|
|
||||||
password: elasticChanged
|
|
||||||
file:
|
|
||||||
es_admin:
|
|
||||||
password: changeMe
|
|
||||||
roles:
|
|
||||||
- admin
|
|
||||||
testUser:
|
|
||||||
password: changeMeAlso!
|
|
||||||
roles:
|
|
||||||
- power_user
|
|
||||||
- user
|
|
||||||
es_roles:
|
|
||||||
file:
|
|
||||||
admin:
|
|
||||||
cluster:
|
|
||||||
- all
|
|
||||||
indices:
|
|
||||||
- names: '*'
|
|
||||||
privileges:
|
|
||||||
- all
|
|
||||||
power_user:
|
|
||||||
cluster:
|
|
||||||
- monitor
|
|
||||||
indices:
|
|
||||||
- names: '*'
|
|
||||||
privileges:
|
|
||||||
- all
|
|
||||||
user:
|
|
||||||
indices:
|
|
||||||
- names: '*'
|
|
||||||
privileges:
|
|
||||||
- read
|
|
||||||
kibana4_server:
|
|
||||||
cluster:
|
|
||||||
- monitor
|
|
||||||
indices:
|
|
||||||
- names: '.kibana'
|
|
||||||
privileges:
|
|
||||||
- all
|
|
||||||
native:
|
|
||||||
logstash:
|
|
||||||
cluster:
|
|
||||||
- manage_index_templates
|
|
||||||
indices:
|
|
||||||
- names: 'logstash-*'
|
|
||||||
privileges:
|
|
||||||
- write
|
|
||||||
- delete
|
|
||||||
- create_index
|
|
||||||
#this will be ignored - its reserved
|
|
||||||
logstash_system:
|
|
||||||
cluster:
|
|
||||||
- manage_index_templates
|
|
||||||
indices:
|
|
||||||
- names: 'logstash-*'
|
|
||||||
privileges:
|
|
||||||
- write
|
|
||||||
- delete
|
|
||||||
- create_index
|
|
||||||
|
|
||||||
#modifies the installation. Changes es_admin password and upgrades ES. Tests confirm the correct version is installed.
|
|
||||||
- name: Elasticsearch Xpack modify
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_api_port: 9200
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_config:
|
|
||||||
http.port: 9200
|
|
||||||
transport.tcp.port: 9300
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9300"
|
|
||||||
xpack.security.enabled: True
|
|
||||||
xpack.security.authc.realms.file1.type: "file"
|
|
||||||
xpack.security.authc.realms.file1.order: 0
|
|
||||||
xpack.security.authc.realms.native1.type: "native"
|
|
||||||
xpack.security.authc.realms.native1.order: 1
|
|
||||||
es_heap_size: "1g"
|
|
||||||
es_templates: true
|
|
||||||
es_enable_xpack: true
|
|
||||||
es_xpack_license: "{{ lookup('file', '/tmp/license.json') }}"
|
|
||||||
es_plugins:
|
|
||||||
- plugin: ingest-attachment
|
|
||||||
es_xpack_features:
|
|
||||||
- security
|
|
||||||
- alerting
|
|
||||||
es_api_basic_auth_username: elastic
|
|
||||||
es_api_basic_auth_password: elasticChanged
|
|
||||||
es_role_mapping:
|
|
||||||
power_user:
|
|
||||||
- "cn=admins,dc=example,dc=com"
|
|
||||||
user:
|
|
||||||
- "cn=users,dc=example,dc=com"
|
|
||||||
- "cn=admins,dc=example,dc=com"
|
|
||||||
es_users:
|
|
||||||
native:
|
|
||||||
kibana4_server:
|
|
||||||
password: changeMe
|
|
||||||
roles:
|
|
||||||
- kibana4_server
|
|
||||||
logstash_system:
|
|
||||||
#this will be ignored
|
|
||||||
roles:
|
|
||||||
- kibana4_server
|
|
||||||
file:
|
|
||||||
es_admin:
|
|
||||||
password: changeMeAgain
|
|
||||||
roles:
|
|
||||||
- admin
|
|
||||||
testUser:
|
|
||||||
password: changeMeAlso!
|
|
||||||
roles:
|
|
||||||
- power_user
|
|
||||||
- user
|
|
@ -1,9 +0,0 @@
|
|||||||
require 'xpack_upgrade_spec'
|
|
||||||
require 'shared_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'Xpack upgrade Tests' do
|
|
||||||
include_examples 'shared::init', vars
|
|
||||||
include_examples 'xpack_upgrade::init', vars
|
|
||||||
end
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,22 +0,0 @@
|
|||||||
#Tests x-pack is idempotent and works when security is not enabled
|
|
||||||
---
|
|
||||||
- name: Elasticsearch Xpack tests - no security and manual download
|
|
||||||
hosts: localhost
|
|
||||||
post_tasks:
|
|
||||||
- include: elasticsearch/test/integration/debug.yml
|
|
||||||
roles:
|
|
||||||
- elasticsearch
|
|
||||||
vars:
|
|
||||||
es_api_port: 9200
|
|
||||||
es_instance_name: "node1"
|
|
||||||
es_config:
|
|
||||||
http.port: 9200
|
|
||||||
transport.tcp.port: 9300
|
|
||||||
discovery.zen.ping.unicast.hosts: "localhost:9300"
|
|
||||||
es_xpack_custom_url: "https://artifacts.elastic.co/downloads/packs/x-pack/x-pack-{{ es_version }}.zip"
|
|
||||||
es_heap_size: 2g
|
|
||||||
es_enable_xpack: true
|
|
||||||
es_xpack_features:
|
|
||||||
- monitoring
|
|
||||||
- graph
|
|
||||||
- ml
|
|
@ -1,9 +0,0 @@
|
|||||||
require 'xpack_spec'
|
|
||||||
require 'shared_spec'
|
|
||||||
require 'json'
|
|
||||||
vars = JSON.parse(File.read('/tmp/vars.json'))
|
|
||||||
|
|
||||||
describe 'Xpack upgrade Tests' do
|
|
||||||
include_examples 'shared::init', vars
|
|
||||||
include_examples 'xpack::init', vars
|
|
||||||
end
|
|
@ -1,2 +0,0 @@
|
|||||||
---
|
|
||||||
- host: test-kitchen
|
|
@ -1,15 +0,0 @@
|
|||||||
VERSION:
|
|
||||||
- 6.x
|
|
||||||
- 5.x
|
|
||||||
OS:
|
|
||||||
- ubuntu-1404
|
|
||||||
- ubuntu-1604
|
|
||||||
- debian-8
|
|
||||||
- centos-7
|
|
||||||
TEST_TYPE:
|
|
||||||
- oss
|
|
||||||
- oss-upgrade
|
|
||||||
- oss-to-xpack-upgrade
|
|
||||||
- xpack
|
|
||||||
- xpack-upgrade
|
|
||||||
- multi
|
|
@ -1,4 +0,0 @@
|
|||||||
---
|
|
||||||
java: "{% if es_java is defined %}{{es_java}}{% else %}openjdk-8-jre-headless{% endif %}"
|
|
||||||
default_file: "/etc/default/elasticsearch"
|
|
||||||
es_home: "/usr/share/elasticsearch"
|
|
@ -1,4 +0,0 @@
|
|||||||
---
|
|
||||||
java: "{{ es_java | default('java-1.8.0-openjdk.x86_64') }}"
|
|
||||||
default_file: "/etc/sysconfig/elasticsearch"
|
|
||||||
es_home: "/usr/share/elasticsearch"
|
|
@ -1,8 +0,0 @@
|
|||||||
---
|
|
||||||
es_package_url: "https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch"
|
|
||||||
es_conf_dir: "/etc/elasticsearch"
|
|
||||||
sysd_script: "/usr/lib/systemd/system/elasticsearch.service"
|
|
||||||
init_script: "/etc/init.d/elasticsearch"
|
|
||||||
#add supported features here
|
|
||||||
supported_xpack_features: ["alerting","monitoring","graph","security"]
|
|
||||||
reserved_xpack_users: ["elastic","kibana","logstash_system"]
|
|
@ -1,67 +0,0 @@
|
|||||||
---
|
|
||||||
- name: install required packages
|
|
||||||
package:
|
|
||||||
name: "{{ item }}"
|
|
||||||
state: present
|
|
||||||
with_items: "{{ base_packages }}"
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
|
|
||||||
- name: install python packages
|
|
||||||
package:
|
|
||||||
name: "{{ item }}"
|
|
||||||
state: present
|
|
||||||
with_items: "{{ python_packages }}"
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
|
|
||||||
- name: create required directories
|
|
||||||
file:
|
|
||||||
state: directory
|
|
||||||
path: "{{ item }}"
|
|
||||||
with_items:
|
|
||||||
- "{{ vulnwhisperer.prefix }}"
|
|
||||||
- "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.location }}"
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
|
|
||||||
- name: deploy application
|
|
||||||
git:
|
|
||||||
accept_hostkey: yes
|
|
||||||
clone: yes
|
|
||||||
dest: "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.location }}"
|
|
||||||
force: yes
|
|
||||||
repo: "{{ vulnwhisperer.repository }}"
|
|
||||||
register: repository
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
- update
|
|
||||||
|
|
||||||
- name: create virtualenv
|
|
||||||
pip:
|
|
||||||
virtualenv: "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.venv_location }}"
|
|
||||||
requirements: "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.location }}/requirements.txt"
|
|
||||||
virtualenv_python: /usr/bin/python2.7
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
- update
|
|
||||||
|
|
||||||
- name: install vulnwhisperer in virtualenv
|
|
||||||
command: "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.venv_location }}/bin/python setup.py install"
|
|
||||||
args:
|
|
||||||
chdir: "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.location }}"
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
- update
|
|
||||||
|
|
||||||
- name: load configuration file from provided path
|
|
||||||
set_fact:
|
|
||||||
configuration_file_contents: "{{lookup('file', configuration_file )}}"
|
|
||||||
|
|
||||||
- name: install vulnwhisperer configuration file
|
|
||||||
copy:
|
|
||||||
dest: "{{ vulnwhisperer.prefix }}/{{ vulnwhisperer.location }}/configs/{{ vulnwhisperer.configuration_file_name }}"
|
|
||||||
content: "{{ configuration_file_contents }}"
|
|
||||||
tags:
|
|
||||||
- install
|
|
||||||
- update
|
|
@ -1,12 +0,0 @@
|
|||||||
base_packages:
|
|
||||||
- zlib1g-dev
|
|
||||||
- libxml2-dev
|
|
||||||
- libxslt1-dev
|
|
||||||
- libffi-dev
|
|
||||||
- libssl-dev
|
|
||||||
|
|
||||||
python_packages:
|
|
||||||
- python2.7
|
|
||||||
- python2.7-dev
|
|
||||||
- python-virtualenv
|
|
||||||
- virtualenvwrapper
|
|
@ -1,3 +0,0 @@
|
|||||||
Host *
|
|
||||||
IdentityFile ~/.ssh/id_rsa_vulnwhisperer
|
|
||||||
IdentitiesOnly yes
|
|
Reference in New Issue
Block a user