diff --git a/README.md b/README.md
index c3bd36b..af30b31 100644
--- a/README.md
+++ b/README.md
@@ -182,6 +182,44 @@ If no section is specified (e.g. -s nessus), vulnwhisperer will check on the con

Next you'll need to import the visualizations into Kibana and setup your logstash config. A more thorough README is underway with setup instructions.
+
+Docker-compose
+-----
+The docker-compose file has been tested and running on a Ubuntu 18.04 environment, with docker-ce v.18.06. The structure's purpose is to store locally the data from the scanners, letting vulnwhisperer update the records and Logstash feed them to ElasticSearch, so it requires a local storage folder.
+
+- It will run out of the box if you create on the root directory of VulnWhisperer a folder named "data", which needs permissions for other users to read/write/execute in order to sync:
+```shell
+ mkdir data && chmod -R 777 data #data/database/report_tracker.db needs 777 to use with local vulnwhisperer
+```
+otherwise the users running inside the docker containers will not be able to work with it properly. If you don't apply chmod recursively, syncing the data will still work, but only the root user on localhost will have access to the created data (and running a local vulnwhisperer against the same data will break).
+- You will need to rebuild the vulnwhisperer Dockerfile before launching the docker-compose, because the image as currently published doesn't pull the latest version of the VulnWhisperer code from GitHub, due to the way Docker layer caching works. To do this, the best way is to:
+```shell
+
+wget https://raw.githubusercontent.com/qmontal/docker_vulnwhisperer/master/Dockerfile
+docker build --no-cache -t hasecuritysolutions/docker_vulnwhisperer -f Dockerfile . --network=host
+
+```
+This will create the image hasecuritysolutions/docker_vulnwhisperer:latest from scratch with the latest updates. Will soon fix that with the next VulnWhisperer version.
+- The vulnwhisperer container inside of docker-compose is using network_mode=host instead of the bridge mode by default; this is due to issues encountered when the container is trying to pull data from your scanners from a different VLAN than the one you currently are. The host network mode uses the DNS and interface from the host itself, fixing those issues, but it breaks the network isolation from the container (this is due to docker creating bridge interfaces to route the traffic, blocking both container's and host's network). If you change this to bridge, you might need to add your DNS to the config in order to resolve internal hostnames.
+- ElasticSearch requires having the value vm.max_map_count with a minimum of 262144; otherwise, it will probably break at launch. Please check https://elk-docker.readthedocs.io/#prerequisites to solve that.
+- If you want to change the "data" folder for storing the results, remember to change it from both the docker-compose.yml file and the logstash files that are in the root "docker/" folder.
+- Hostnames do NOT allow _ (underscores) in them; if you change the hostname configuration from the docker-compose file and add underscores, the logstash config files will fail.
+- If you are having issues with the connection between hosts, to troubleshoot them you can spawn a shell in said host doing the following:
+```shell
+docker ps #check the images from the containers
+docker exec -i -t 665b4a1e17b6 /bin/bash #where 665b4a1e17b6 is the container image you want to troubleshoot
+```
+You can also make sure that all ELK components are working by running "curl -i host:9200" (elasticsearch), "curl -i host:5601" (kibana) and "curl -i host:9600" (logstash). WARNING! It is possible that logstash does not expose its port to the external network, but it does to its internal docker network "esnet".
+- If Kibana is not showing the results, check that you are searching on the whole ES range, as by default it shows logs for the last 15 minutes (you can choose up to last 5 years)
+- X-Pack has been disabled by default due to the noise, plus being a trial version. You can enable it modifying the docker-compose.yml and docker/logstash.conf files. Logstash.conf contains the default credentials for the X-Pack enabled ES.
+
+To launch docker-compose, do:
+```shell
+docker-compose -f docker-compose.yml up
+```
+
+Known issue: Qualys Vuln Management error -> QualysGuard Username: [ERROR] Could not connect to Qualys - EOF when reading a line (it works when running vulnwhisperer without docker).
+
Running Nightly
---------------
If you're running linux, be sure to setup a cronjob to remove old files that get stored in the database. Be sure to change .csv if you're using json.
diff --git a/configs/frameworks_example.ini b/configs/frameworks_example.ini
index 4a74098..fd8e317 100755
--- a/configs/frameworks_example.ini
+++ b/configs/frameworks_example.ini
@@ -4,8 +4,8 @@ hostname=localhost
port=8834
username=nessus_username
password=nessus_password
-write_path=/opt/vulnwhisperer/nessus/
-db_path=/opt/vulnwhisperer/database
+write_path=/opt/VulnWhisperer/data/nessus/
+db_path=/opt/VulnWhisperer/data/database
trash=false
verbose=true
@@ -15,8 +15,8 @@ hostname=cloud.tenable.com
port=443
username=tenable.io_username
password=tenable.io_password
-write_path=/opt/vulnwhisperer/tenable/
-db_path=/opt/vulnwhisperer/database
+write_path=/opt/VulnWhisperer/data/tenable/
+db_path=/opt/VulnWhisperer/data/database
trash=false
verbose=true
@@ -26,8 +26,8 @@ enabled = true
hostname = qualysapi.qg2.apps.qualys.com
username = exampleuser
password = examplepass
-write_path=/opt/vulnwhisperer/qualys/
-db_path=/opt/vulnwhisperer/database
+write_path=/opt/VulnWhisperer/data/qualys/
+db_path=/opt/VulnWhisperer/data/database
verbose=true
# Set the maximum number of retries each connection should attempt.
@@ -42,8 +42,8 @@ enabled = true
hostname = qualysapi.qg2.apps.qualys.com
username = exampleuser
password = examplepass
-write_path=/opt/vulnwhisperer/qualys/
-db_path=/opt/vulnwhisperer/database
+write_path=/opt/VulnWhisperer/data/qualys/
+db_path=/opt/VulnWhisperer/data/database
verbose=true
# Set the maximum number of retries each connection should attempt.
@@ -52,14 +52,26 @@ max_retries = 10
# Template ID will need to be retrieved for each document. Please follow the reference guide above for instructions on how to get your template ID.
template_id = 126024
+[detectify]
+#Reference https://developer.detectify.com/
+enabled = false
+hostname = api.detectify.com
+#username variable used as apiKey
+username = exampleuser
+#password variable used as secretKey
+password = examplepass
+write_path =/opt/VulnWhisperer/data/detectify/
+db_path = /opt/VulnWhisperer/data/database
+verbose = true
+
[openvas]
enabled = false
hostname = localhost
port = 4000
username = exampleuser
password = examplepass
-write_path=/opt/vulnwhisperer/openvas/
-db_path=/opt/vulnwhisperer/database
+write_path=/opt/VulnWhisperer/data/openvas/
+db_path=/opt/VulnWhisperer/data/database
verbose=true
#[proxy]
diff --git a/deps/qualysapi/qualysapi/util.py b/deps/qualysapi/qualysapi/util.py
index 5f0521e..8786097 100644
--- a/deps/qualysapi/qualysapi/util.py
+++ b/deps/qualysapi/qualysapi/util.py
@@ -19,7 +19,7 @@ def connect(config_file=qcs.default_filename, remember_me=False, remember_me_alw
file.
"""
# Retrieve login credentials.
- conf = qcconf.QualysConnectConfig(filename=config_file, remember_me=remember_me,
+ conf = qcconf.QualysConnectConfig(filename=config_file, remember_me=remember_me,
remember_me_always=remember_me_always)
connect = qcconn.QGConnector(conf.get_auth(),
conf.get_hostname(),
diff --git a/docker-compose.yml b/docker-compose.yml
index 8527102..6a21ea5 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,8 +1,8 @@
version: '2'
services:
- vulnwhisp_es1:
+ vulnwhisp-es1:
image: docker.elastic.co/elasticsearch/elasticsearch:5.6.2
- container_name: vulnwhisp_es1
+ container_name: vulnwhisp-es1
environment:
- cluster.name=vulnwhisperer
- bootstrap.memory_lock=true
@@ -11,27 +11,55 @@ services:
memlock:
soft: -1
hard: -1
- mem_limit: 1g
+ mem_limit: 8g
volumes:
- esdata1:/usr/share/elasticsearch/data
ports:
- - 19200:9200
+ - 9200:9200
+ environment:
+ - xpack.security.enabled=false
+ restart: always
networks:
- - esnet
- vulnwhisp_ks1:
+ esnet:
+ aliases:
+ - vulnwhisp-es1.local
+ vulnwhisp-ks1:
image: docker.elastic.co/kibana/kibana:5.6.2
environment:
- SERVER_NAME: vulnwhisp_ks1
- ELASTICSEARCH_URL: http://vulnwhisp_es1:9200
+ SERVER_NAME: vulnwhisp-ks1
+ ELASTICSEARCH_URL: http://vulnwhisp-es1:9200
ports:
- - 15601:5601
+ - 5601:5601
+ depends_on:
+ - vulnwhisp-es1
networks:
- - esnet
- vulnwhisp_ls1:
+ esnet:
+ aliases:
+ - vulnwhisp-ks1.local
+ vulnwhisp-ls1:
image: docker.elastic.co/logstash/logstash:5.6.2
+ container_name: vulnwhisp-ls1
+ volumes:
+ - ./docker/1000_nessus_process_file.conf:/etc/logstash/conf.d/1000_nessus_process_file.conf
+ - ./docker/2000_qualys_web_scans.conf:/etc/logstash/conf.d/2000_qualys_web_scans.conf
+ - ./docker/3000_openvas.conf:/etc/logstash/conf.d/3000_openvas.conf
+ - ./docker/logstash.yml:/etc/logstash/logstash.yml
+ - ./data/:/opt/VulnWhisperer/data
+ environment:
+ - xpack.monitoring.enabled=false
+ depends_on:
+ - vulnwhisp-es1
networks:
- - esnet
-
+ esnet:
+ aliases:
+ - vulnwhisp-ls1.local
+ vulnwhisp-vulnwhisperer:
+ image: hasecuritysolutions/docker_vulnwhisperer:latest
+ container_name: vulnwhisp-vulnwhisperer
+ volumes:
+ - ./data/:/opt/VulnWhisperer/data
+ - ./configs/frameworks_example.ini:/opt/VulnWhisperer/frameworks_example.ini
+ network_mode: host
volumes:
esdata1:
driver: local
diff --git a/docker/1000_nessus_process_file.conf b/docker/1000_nessus_process_file.conf
new file mode 100644
index 0000000..f28a530
--- /dev/null
+++ b/docker/1000_nessus_process_file.conf
@@ -0,0 +1,220 @@
+# Author: Austin Taylor and Justin Henderson
+# Email: email@austintaylor.io
+# Last Update: 12/20/2017
+# Version 0.3
+# Description: Take in nessus reports from vulnWhisperer and pumps into logstash
+
+
+input {
+ file {
+ path => "/opt/VulnWhisperer/data/nessus/**/*"
+ start_position => "beginning"
+ tags => "nessus"
+ type => "nessus"
+ }
+ file {
+ path => "/opt/VulnWhisperer/data/tenable/*.csv"
+ start_position => "beginning"
+ tags => "tenable"
+ type => "tenable"
+ }
+}
+
+filter {
+ if "nessus" in [tags] or "tenable" in [tags] {
+ # Drop the header column
+ if [message] =~ "^Plugin ID" { drop {} }
+
+ csv {
+ # columns => ["plugin_id", "cve", "cvss", "risk", "asset", "protocol", "port", "plugin_name", "synopsis", "description", "solution", "see_also", "plugin_output"]
+      columns => ["plugin_id", "cve", "cvss", "risk", "asset", "protocol", "port", "plugin_name", "synopsis", "description", "solution", "see_also", "plugin_output", "asset_uuid", "vulnerability_state", "ip", "fqdn", "netbios", "operating_system", "mac_address", "plugin_family", "cvss_base", "cvss_temporal", "cvss_temporal_vector", "cvss_vector", "cvss3_base", "cvss3_temporal", "cvss3_temporal_vector", "cvss3_vector", "system_type", "host_start", "host_end"]
+ separator => ","
+ source => "message"
+ }
+
+ ruby {
+ code => "if event.get('description')
+ event.set('description', event.get('description').gsub(92.chr + 'n', 10.chr).gsub(92.chr + 'r', 13.chr))
+ end
+ if event.get('synopsis')
+ event.set('synopsis', event.get('synopsis').gsub(92.chr + 'n', 10.chr).gsub(92.chr + 'r', 13.chr))
+ end
+ if event.get('solution')
+ event.set('solution', event.get('solution').gsub(92.chr + 'n', 10.chr).gsub(92.chr + 'r', 13.chr))
+ end
+ if event.get('see_also')
+ event.set('see_also', event.get('see_also').gsub(92.chr + 'n', 10.chr).gsub(92.chr + 'r', 13.chr))
+ end
+ if event.get('plugin_output')
+ event.set('plugin_output', event.get('plugin_output').gsub(92.chr + 'n', 10.chr).gsub(92.chr + 'r', 13.chr))
+ end"
+ }
+
+ #If using filebeats as your source, you will need to replace the "path" field to "source"
+ grok {
+      match => { "path" => "(?<asset>[a-zA-Z0-9_.\-]+)_%{INT:scan_id}_%{INT:history_id}_%{INT:last_updated}.csv$" }
+ tag_on_failure => []
+ }
+
+ date {
+ match => [ "last_updated", "UNIX" ]
+ target => "@timestamp"
+ remove_field => ["last_updated"]
+ }
+
+ if [risk] == "None" {
+ mutate { add_field => { "risk_number" => 0 }}
+ }
+ if [risk] == "Low" {
+ mutate { add_field => { "risk_number" => 1 }}
+ }
+ if [risk] == "Medium" {
+ mutate { add_field => { "risk_number" => 2 }}
+ }
+ if [risk] == "High" {
+ mutate { add_field => { "risk_number" => 3 }}
+ }
+ if [risk] == "Critical" {
+ mutate { add_field => { "risk_number" => 4 }}
+ }
+
+ if ![cve] or [cve] == "nan" {
+ mutate { remove_field => [ "cve" ] }
+ }
+ if ![cvss] or [cvss] == "nan" {
+ mutate { remove_field => [ "cvss" ] }
+ }
+ if ![cvss_base] or [cvss_base] == "nan" {
+ mutate { remove_field => [ "cvss_base" ] }
+ }
+ if ![cvss_temporal] or [cvss_temporal] == "nan" {
+ mutate { remove_field => [ "cvss_temporal" ] }
+ }
+ if ![cvss_temporal_vector] or [cvss_temporal_vector] == "nan" {
+ mutate { remove_field => [ "cvss_temporal_vector" ] }
+ }
+ if ![cvss_vector] or [cvss_vector] == "nan" {
+ mutate { remove_field => [ "cvss_vector" ] }
+ }
+ if ![cvss3_base] or [cvss3_base] == "nan" {
+ mutate { remove_field => [ "cvss3_base" ] }
+ }
+ if ![cvss3_temporal] or [cvss3_temporal] == "nan" {
+ mutate { remove_field => [ "cvss3_temporal" ] }
+ }
+ if ![cvss3_temporal_vector] or [cvss3_temporal_vector] == "nan" {
+ mutate { remove_field => [ "cvss3_temporal_vector" ] }
+ }
+ if ![description] or [description] == "nan" {
+ mutate { remove_field => [ "description" ] }
+ }
+ if ![mac_address] or [mac_address] == "nan" {
+ mutate { remove_field => [ "mac_address" ] }
+ }
+ if ![netbios] or [netbios] == "nan" {
+ mutate { remove_field => [ "netbios" ] }
+ }
+ if ![operating_system] or [operating_system] == "nan" {
+ mutate { remove_field => [ "operating_system" ] }
+ }
+ if ![plugin_output] or [plugin_output] == "nan" {
+ mutate { remove_field => [ "plugin_output" ] }
+ }
+ if ![see_also] or [see_also] == "nan" {
+ mutate { remove_field => [ "see_also" ] }
+ }
+ if ![synopsis] or [synopsis] == "nan" {
+ mutate { remove_field => [ "synopsis" ] }
+ }
+ if ![system_type] or [system_type] == "nan" {
+ mutate { remove_field => [ "system_type" ] }
+ }
+
+ mutate {
+ remove_field => [ "message" ]
+ add_field => { "risk_score" => "%{cvss}" }
+ }
+ mutate {
+ convert => { "risk_score" => "float" }
+ }
+ if [risk_score] == 0 {
+ mutate {
+ add_field => { "risk_score_name" => "info" }
+ }
+ }
+ if [risk_score] > 0 and [risk_score] < 3 {
+ mutate {
+ add_field => { "risk_score_name" => "low" }
+ }
+ }
+ if [risk_score] >= 3 and [risk_score] < 6 {
+ mutate {
+ add_field => { "risk_score_name" => "medium" }
+ }
+ }
+ if [risk_score] >=6 and [risk_score] < 9 {
+ mutate {
+ add_field => { "risk_score_name" => "high" }
+ }
+ }
+ if [risk_score] >= 9 {
+ mutate {
+ add_field => { "risk_score_name" => "critical" }
+ }
+ }
+
+ # Compensating controls - adjust risk_score
+ # Adobe and Java are not allowed to run in browser unless whitelisted
+ # Therefore, lower score by dividing by 3 (score is subjective to risk)
+
+ #Modify and uncomment when ready to use
+ #if [risk_score] != 0 {
+ # if [plugin_name] =~ "Adobe" and [risk_score] > 6 or [plugin_name] =~ "Java" and [risk_score] > 6 {
+ # ruby {
+ # code => "event.set('risk_score', event.get('risk_score') / 3)"
+ # }
+ # mutate {
+ # add_field => { "compensating_control" => "Adobe and Flash removed from browsers unless whitelisted site." }
+ # }
+ # }
+ #}
+
+ # Add tags for reporting based on assets or criticality
+
+ if [asset] == "dc01" or [asset] == "dc02" or [asset] == "pki01" or [asset] == "192.168.0.54" or [asset] =~ "^192\.168\.0\." or [asset] =~ "^42.42.42." {
+ mutate {
+ add_tag => [ "critical_asset" ]
+ }
+ }
+ #if [asset] =~ "^192\.168\.[45][0-9][0-9]\.1$" or [asset] =~ "^192.168\.[50]\.[0-9]{1,2}\.1$"{
+ # mutate {
+ # add_tag => [ "has_hipaa_data" ]
+ # }
+ #}
+ #if [asset] =~ "^192\.168\.[45][0-9][0-9]\." {
+ # mutate {
+ # add_tag => [ "hipaa_asset" ]
+ # }
+ #}
+ if [asset] =~ "^hr" {
+ mutate {
+ add_tag => [ "pci_asset" ]
+ }
+ }
+ #if [asset] =~ "^10\.0\.50\." {
+ # mutate {
+ # add_tag => [ "web_servers" ]
+ # }
+ #}
+ }
+}
+
+output {
+ if "nessus" in [tags] or "tenable" in [tags] or [type] in [ "nessus", "tenable" ] {
+ # stdout { codec => rubydebug }
+ elasticsearch {
+ hosts => [ "vulnwhisp-es1.local:9200" ]
+ index => "logstash-vulnwhisperer-%{+YYYY.MM}"
+ }
+ }
+}
diff --git a/docker/2000_qualys_web_scans.conf b/docker/2000_qualys_web_scans.conf
new file mode 100644
index 0000000..ebeb541
--- /dev/null
+++ b/docker/2000_qualys_web_scans.conf
@@ -0,0 +1,153 @@
+# Author: Austin Taylor and Justin Henderson
+# Email: austin@hasecuritysolutions.com
+# Last Update: 12/30/2017
+# Version 0.3
+# Description: Take in qualys web scan reports from vulnWhisperer and pumps into logstash
+
+input {
+ file {
+ path => "/opt/VulnWhisperer/data/qualys/scans/**/*.json"
+ type => json
+ codec => json
+ start_position => "beginning"
+ tags => [ "qualys" ]
+ }
+}
+
+filter {
+ if "qualys" in [tags] {
+ grok {
+      match => { "path" => [ "(?<tags>qualys_vuln)_scan_%{DATA}_%{INT:last_updated}.json$", "(?<tags>qualys_web)_%{INT:app_id}_%{INT:last_updated}.json$" ] }
+ tag_on_failure => []
+ }
+
+ mutate {
+ replace => [ "message", "%{message}" ]
+ #gsub => [
+ # "message", "\|\|\|", " ",
+ # "message", "\t\t", " ",
+ # "message", " ", " ",
+ # "message", " ", " ",
+ # "message", " ", " ",
+ # "message", "nan", " ",
+ # "message",'\n',''
+ #]
+ }
+
+ if "qualys_web" in [tags] {
+ mutate {
+ add_field => { "asset" => "%{web_application_name}" }
+ add_field => { "risk_score" => "%{cvss}" }
+ }
+ } else if "qualys_vuln" in [tags] {
+ mutate {
+ add_field => { "asset" => "%{ip}" }
+ add_field => { "risk_score" => "%{cvss}" }
+ }
+ }
+
+ if [risk] == "1" {
+ mutate { add_field => { "risk_number" => 0 }}
+ mutate { replace => { "risk" => "info" }}
+ }
+ if [risk] == "2" {
+ mutate { add_field => { "risk_number" => 1 }}
+ mutate { replace => { "risk" => "low" }}
+ }
+ if [risk] == "3" {
+ mutate { add_field => { "risk_number" => 2 }}
+ mutate { replace => { "risk" => "medium" }}
+ }
+ if [risk] == "4" {
+ mutate { add_field => { "risk_number" => 3 }}
+ mutate { replace => { "risk" => "high" }}
+ }
+ if [risk] == "5" {
+ mutate { add_field => { "risk_number" => 4 }}
+ mutate { replace => { "risk" => "critical" }}
+ }
+
+ mutate {
+ remove_field => "message"
+ }
+
+ if [first_time_detected] {
+ date {
+ match => [ "first_time_detected", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "first_time_detected"
+ }
+ }
+ if [first_time_tested] {
+ date {
+ match => [ "first_time_tested", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "first_time_tested"
+ }
+ }
+ if [last_time_detected] {
+ date {
+ match => [ "last_time_detected", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "last_time_detected"
+ }
+ }
+ if [last_time_tested] {
+ date {
+ match => [ "last_time_tested", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "last_time_tested"
+ }
+ }
+ date {
+ match => [ "last_updated", "UNIX" ]
+ target => "@timestamp"
+ remove_field => "last_updated"
+ }
+ mutate {
+ convert => { "plugin_id" => "integer"}
+ convert => { "id" => "integer"}
+ convert => { "risk_number" => "integer"}
+ convert => { "risk_score" => "float"}
+ convert => { "total_times_detected" => "integer"}
+ convert => { "cvss_temporal" => "float"}
+ convert => { "cvss" => "float"}
+ }
+ if [risk_score] == 0 {
+ mutate {
+ add_field => { "risk_score_name" => "info" }
+ }
+ }
+ if [risk_score] > 0 and [risk_score] < 3 {
+ mutate {
+ add_field => { "risk_score_name" => "low" }
+ }
+ }
+ if [risk_score] >= 3 and [risk_score] < 6 {
+ mutate {
+ add_field => { "risk_score_name" => "medium" }
+ }
+ }
+ if [risk_score] >=6 and [risk_score] < 9 {
+ mutate {
+ add_field => { "risk_score_name" => "high" }
+ }
+ }
+ if [risk_score] >= 9 {
+ mutate {
+ add_field => { "risk_score_name" => "critical" }
+ }
+ }
+
+ if [asset] =~ "\.yourdomain\.(com|net)$" {
+ mutate {
+ add_tag => [ "critical_asset" ]
+ }
+ }
+ }
+}
+output {
+ if "qualys" in [tags] {
+ stdout { codec => rubydebug }
+ elasticsearch {
+      hosts => [ "vulnwhisp-es1.local:9200" ]
+ index => "logstash-vulnwhisperer-%{+YYYY.MM}"
+ }
+ }
+}
diff --git a/docker/3000_openvas.conf b/docker/3000_openvas.conf
new file mode 100644
index 0000000..b3ee1d1
--- /dev/null
+++ b/docker/3000_openvas.conf
@@ -0,0 +1,146 @@
+# Author: Austin Taylor and Justin Henderson
+# Email: austin@hasecuritysolutions.com
+# Last Update: 03/04/2018
+# Version 0.3
+# Description: Take in qualys web scan reports from vulnWhisperer and pumps into logstash
+
+input {
+ file {
+ path => "/opt/VulnWhisperer/data/openvas/*.json"
+ type => json
+ codec => json
+ start_position => "beginning"
+ tags => [ "openvas_scan", "openvas" ]
+ }
+}
+
+filter {
+ if "openvas_scan" in [tags] {
+ mutate {
+ replace => [ "message", "%{message}" ]
+ gsub => [
+ "message", "\|\|\|", " ",
+ "message", "\t\t", " ",
+ "message", " ", " ",
+ "message", " ", " ",
+ "message", " ", " ",
+ "message", "nan", " ",
+ "message",'\n',''
+ ]
+ }
+
+
+ grok {
+ match => { "path" => "openvas_scan_%{DATA:scan_id}_%{INT:last_updated}.json$" }
+ tag_on_failure => []
+ }
+
+ mutate {
+ add_field => { "risk_score" => "%{cvss}" }
+ }
+
+ if [risk] == "1" {
+ mutate { add_field => { "risk_number" => 0 }}
+ mutate { replace => { "risk" => "info" }}
+ }
+ if [risk] == "2" {
+ mutate { add_field => { "risk_number" => 1 }}
+ mutate { replace => { "risk" => "low" }}
+ }
+ if [risk] == "3" {
+ mutate { add_field => { "risk_number" => 2 }}
+ mutate { replace => { "risk" => "medium" }}
+ }
+ if [risk] == "4" {
+ mutate { add_field => { "risk_number" => 3 }}
+ mutate { replace => { "risk" => "high" }}
+ }
+ if [risk] == "5" {
+ mutate { add_field => { "risk_number" => 4 }}
+ mutate { replace => { "risk" => "critical" }}
+ }
+
+ mutate {
+ remove_field => "message"
+ }
+
+ if [first_time_detected] {
+ date {
+ match => [ "first_time_detected", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "first_time_detected"
+ }
+ }
+ if [first_time_tested] {
+ date {
+ match => [ "first_time_tested", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "first_time_tested"
+ }
+ }
+ if [last_time_detected] {
+ date {
+ match => [ "last_time_detected", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "last_time_detected"
+ }
+ }
+ if [last_time_tested] {
+ date {
+ match => [ "last_time_tested", "dd MMM yyyy HH:mma 'GMT'ZZ", "dd MMM yyyy HH:mma 'GMT'" ]
+ target => "last_time_tested"
+ }
+ }
+ date {
+ match => [ "last_updated", "UNIX" ]
+ target => "@timestamp"
+ remove_field => "last_updated"
+ }
+ mutate {
+ convert => { "plugin_id" => "integer"}
+ convert => { "id" => "integer"}
+ convert => { "risk_number" => "integer"}
+ convert => { "risk_score" => "float"}
+ convert => { "total_times_detected" => "integer"}
+ convert => { "cvss_temporal" => "float"}
+ convert => { "cvss" => "float"}
+ }
+ if [risk_score] == 0 {
+ mutate {
+ add_field => { "risk_score_name" => "info" }
+ }
+ }
+ if [risk_score] > 0 and [risk_score] < 3 {
+ mutate {
+ add_field => { "risk_score_name" => "low" }
+ }
+ }
+ if [risk_score] >= 3 and [risk_score] < 6 {
+ mutate {
+ add_field => { "risk_score_name" => "medium" }
+ }
+ }
+ if [risk_score] >=6 and [risk_score] < 9 {
+ mutate {
+ add_field => { "risk_score_name" => "high" }
+ }
+ }
+ if [risk_score] >= 9 {
+ mutate {
+ add_field => { "risk_score_name" => "critical" }
+ }
+ }
+ # Add your critical assets by subnet or by hostname. Comment this field out if you don't want to tag any, but the asset panel will break.
+ if [asset] =~ "^10\.0\.100\." {
+ mutate {
+ add_tag => [ "critical_asset" ]
+ }
+ }
+ }
+}
+output {
+ if "openvas" in [tags] {
+ stdout { codec => rubydebug }
+ elasticsearch {
+      hosts => [ "vulnwhisp-es1.local:9200" ]
+ index => "logstash-vulnwhisperer-%{+YYYY.MM}"
+ }
+ }
+}
diff --git a/docker/logstash.yml b/docker/logstash.yml
new file mode 100644
index 0000000..1326391
--- /dev/null
+++ b/docker/logstash.yml
@@ -0,0 +1,6 @@
+path.data: /var/lib/logstash
+path.config: /etc/logstash/conf.d
+#path.logs: /var/log/logstash
+xpack.monitoring.elasticsearch.url: [ "vulnwhisp-es1.local:9200" ]
+xpack.monitoring.elasticsearch.username: "elastic"
+xpack.monitoring.elasticsearch.password: "changeme"
diff --git a/vulnwhisp/frameworks/nessus.py b/vulnwhisp/frameworks/nessus.py
index 1e9a14f..09f4e3d 100755
--- a/vulnwhisp/frameworks/nessus.py
+++ b/vulnwhisp/frameworks/nessus.py
@@ -7,6 +7,10 @@ from datetime import datetime
import json
import sys
import time
+
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
class NessusAPI(object):
diff --git a/vulnwhisp/vulnwhisp.py b/vulnwhisp/vulnwhisp.py
index 7ea881a..c6c742c 100755
--- a/vulnwhisp/vulnwhisp.py
+++ b/vulnwhisp/vulnwhisp.py
@@ -292,7 +292,7 @@ class vulnWhispererNessus(vulnWhispererBase):
if not scan_list:
self.vprint('{info} No new scans to process. Exiting...'.format(info=bcolors.INFO))
- exit(0)
+ return 0
# Create scan subfolders
@@ -612,7 +612,7 @@ class vulnWhispererQualys(vulnWhispererBase):
else:
self.vprint('{info} No new scans to process. Exiting...'.format(info=bcolors.INFO))
self.conn.close()
- exit(0)
+ return 0
class vulnWhispererOpenVAS(vulnWhispererBase):
@@ -748,7 +748,7 @@ class vulnWhispererOpenVAS(vulnWhispererBase):
else:
self.vprint('{info} No new scans to process. Exiting...'.format(info=bcolors.INFO))
self.conn.close()
- exit(0)
+ return 0
class vulnWhispererQualysVuln(vulnWhispererBase):
@@ -872,7 +872,7 @@ class vulnWhispererQualysVuln(vulnWhispererBase):
else:
self.vprint('{info} No new scans to process. Exiting...'.format(info=bcolors.INFO))
self.conn.close()
- exit(0)
+ return 0
class vulnWhisperer(object):