diff --git a/bin/vuln_whisperer b/bin/vuln_whisperer index b934063..604f60a 100644 --- a/bin/vuln_whisperer +++ b/bin/vuln_whisperer @@ -93,7 +93,7 @@ def main(): scanname=args.scanname) exit_code += vw.whisper_vulnerabilities() except Exception as e: - logger.error("VulnWhisperer was unable to perform the processing on '{}'".format(args.source)) + logger.error("VulnWhisperer was unable to perform the processing on '{}'".format(section)) else: logger.info('Running vulnwhisperer for section {}'.format(args.section)) vw = vulnWhisperer(config=args.config, diff --git a/setup.py b/setup.py index db493cd..35f6df7 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,6 @@ #!/usr/bin/env python +from __future__ import absolute_import from setuptools import setup, find_packages setup( diff --git a/vulnwhisp/base/config.py b/vulnwhisp/base/config.py index 630b21b..aa412e6 100644 --- a/vulnwhisp/base/config.py +++ b/vulnwhisp/base/config.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import sys import logging @@ -5,7 +6,7 @@ import logging if sys.version_info > (3, 0): import configparser as cp else: - import ConfigParser as cp + import six.moves.configparser as cp class vwConfig(object): diff --git a/vulnwhisp/frameworks/nessus.py b/vulnwhisp/frameworks/nessus.py index 91f0d46..707fd27 100755 --- a/vulnwhisp/frameworks/nessus.py +++ b/vulnwhisp/frameworks/nessus.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import import json import logging import sys diff --git a/vulnwhisp/frameworks/openvas.py b/vulnwhisp/frameworks/openvas.py index a5f8b70..e03dc51 100644 --- a/vulnwhisp/frameworks/openvas.py +++ b/vulnwhisp/frameworks/openvas.py @@ -1,5 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +from __future__ import absolute_import __author__ = 'Austin Taylor' import datetime as dt diff --git a/vulnwhisp/frameworks/qualys_vuln.py b/vulnwhisp/frameworks/qualys_vuln.py index 69cddfa..713121b 100644 --- a/vulnwhisp/frameworks/qualys_vuln.py +++ b/vulnwhisp/frameworks/qualys_vuln.py @@ -1,5 +1,6 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +from __future__ import absolute_import __author__ = 'Nathan Young' import logging diff --git a/vulnwhisp/frameworks/qualys_web.py b/vulnwhisp/frameworks/qualys_web.py index 7d6b122..158c886 100644 --- a/vulnwhisp/frameworks/qualys_web.py +++ b/vulnwhisp/frameworks/qualys_web.py @@ -1,5 +1,8 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +from __future__ import absolute_import +from six.moves import range +from functools import reduce __author__ = 'Austin Taylor' from lxml import objectify diff --git a/vulnwhisp/reporting/jira_api.py b/vulnwhisp/reporting/jira_api.py index d48b34a..17574e9 100644 --- a/vulnwhisp/reporting/jira_api.py +++ b/vulnwhisp/reporting/jira_api.py @@ -1,15 +1,18 @@ +from __future__ import absolute_import import json import os -from datetime import datetime, date, timedelta +from datetime import datetime, date from jira import JIRA -import requests import logging from bottle import template import re +from six.moves import range + class JiraAPI(object): - def __init__(self, hostname=None, username=None, password=None, path="", debug=False, clean_obsolete=True, max_time_window=12, decommission_time_window=3): + def __init__(self, hostname=None, username=None, password=None, path="", debug=False, clean_obsolete=True, + max_time_window=12, decommission_time_window=3): self.logger = logging.getLogger('JiraAPI') if debug: self.logger.setLevel(logging.DEBUG) @@ -29,26 +32,31 @@ class JiraAPI(object): self.template_path = 
'vulnwhisp/reporting/resources/ticket.tpl' self.max_ips_ticket = 30 self.attachment_filename = "vulnerable_assets.txt" - self.max_time_tracking = max_time_window #in months + self.max_time_tracking = max_time_window # in months if path: self.download_tickets(path) else: self.logger.warn("No local path specified, skipping Jira ticket download.") - self.max_decommission_time = decommission_time_window #in months + self.max_decommission_time = decommission_time_window # in months # [HIGIENE] close tickets older than 12 months as obsolete (max_time_window defined) if clean_obsolete: self.close_obsolete_tickets() # deletes the tag "server_decommission" from those tickets closed <=3 months ago self.decommission_cleanup() - - self.jira_still_vulnerable_comment = '''This ticket has been reopened due to the vulnerability not having been fixed (if multiple assets are affected, all need to be fixed; if the server is down, lastest known vulnerability might be the one reported). - - In the case of the team accepting the risk and wanting to close the ticket, please add the label "*risk_accepted*" to the ticket before closing it. - - If server has been decommissioned, please add the label "*server_decommission*" to the ticket before closing it. - - If when checking the vulnerability it looks like a false positive, _+please elaborate in a comment+_ and add the label "*false_positive*" before closing it; we will review it and report it to the vendor. + + self.jira_still_vulnerable_comment = '''This ticket has been reopened due to the vulnerability not having been \ + fixed (if multiple assets are affected, all need to be fixed; if the server is down, the latest known \ + vulnerability might be the one reported). + - In the case of the team accepting the risk and wanting to close the ticket, please add the label \ + "*risk_accepted*" to the ticket before closing it. + - If the server has been decommissioned, please add the label "*server_decommission*" to the ticket before closing \ + it. + - If when checking the vulnerability it looks like a false positive, _+please elaborate in a comment+_ and add \ + the label "*false_positive*" before closing it; we will review it and report it to the vendor. 
If you have further doubts, please contact the Security Team.''' - def create_ticket(self, title, desc, project="IS", components=[], tags=[], attachment_contents = []): + def create_ticket(self, title, desc, project="IS", components=[], tags=[], attachment_contents=[]): labels = ['vulnerability_management'] for tag in tags: labels.append(str(tag)) @@ -62,53 +70,56 @@ class JiraAPI(object): for c in project_obj.components: if component == c.name: self.logger.debug("resolved component name {} to id {}".format(c.name, c.id)) - components_ticket.append({ "id": c.id }) - exists=True + components_ticket.append({"id": c.id}) + exists = True if not exists: self.logger.error("Error creating Ticket: component {} not found".format(component)) return 0 - + new_issue = self.jira.create_issue(project=project, summary=title, description=desc, issuetype={'name': 'Bug'}, labels=labels, components=components_ticket) - + self.logger.info("Ticket {} created successfully".format(new_issue)) - + if attachment_contents: self.add_content_as_attachment(new_issue, attachment_contents) - + return new_issue - - #Basic JIRA Metrics + + # Basic JIRA Metrics def metrics_open_tickets(self, project=None): - jql = "labels= vulnerability_management and resolution = Unresolved" + jql = "labels= vulnerability_management and resolution = Unresolved" if project: jql += " and (project='{}')".format(project) - self.logger.debug('Executing: {}'.format(jql)) + self.logger.debug('Executing: {}'.format(jql)) return len(self.jira.search_issues(jql, maxResults=0)) def metrics_closed_tickets(self, project=None): - jql = "labels= vulnerability_management and NOT resolution = Unresolved AND created >=startOfMonth(-{})".format(self.max_time_tracking) + jql = "labels= vulnerability_management and NOT resolution = Unresolved AND created >=startOfMonth(-{})".format( + self.max_time_tracking) if project: jql += " and (project='{}')".format(project) return len(self.jira.search_issues(jql, maxResults=0)) def sync(self, vulnerabilities, project, components=[]): - #JIRA structure of each vulnerability: [source, scan_name, title, diagnosis, consequence, solution, ips, risk, references] + # JIRA structure of each vulnerability: [source, scan_name, title, diagnosis, consequence, solution, + # ips, risk, references] self.logger.info("JIRA Sync started") for vuln in vulnerabilities: # JIRA doesn't allow labels with spaces, so making sure that the scan_name doesn't have spaces # if it has, they will be replaced by "_" - if " " in vuln['scan_name']: + if " " in vuln['scan_name']: vuln['scan_name'] = "_".join(vuln['scan_name'].split(" ")) - - # we exclude from the vulnerabilities to report those assets that already exist with *risk_accepted*/*server_decommission* + + # we exclude from the vulnerabilities to report those assets that already exist + # with *risk_accepted*/*server_decommission* vuln = self.exclude_accepted_assets(vuln) - + # make sure after exclusion of risk_accepted assets there are still assets if vuln['ips']: exists = False @@ -131,56 +142,65 @@ class JiraAPI(object): # create local text file with assets, attach it to ticket if len(vuln['ips']) > self.max_ips_ticket: attachment_contents = vuln['ips'] - vuln['ips'] = ["Affected hosts ({assets}) exceed Jira's allowed character limit, added as an attachment.".format(assets = len(attachment_contents))] + vuln['ips'] = [ + "Affected hosts ({assets}) exceed Jira's allowed character limit, added as an attachment.".format( + assets=len(attachment_contents))] try: tpl = 
template(self.template_path, vuln) except Exception as e: self.logger.error('Exception templating: {}'.format(str(e))) return 0 - self.create_ticket(title=vuln['title'], desc=tpl, project=project, components=components, tags=[vuln['source'], vuln['scan_name'], 'vulnerability', vuln['risk']], attachment_contents = attachment_contents) + self.create_ticket(title=vuln['title'], desc=tpl, project=project, components=components, + tags=[vuln['source'], vuln['scan_name'], 'vulnerability', vuln['risk']], + attachment_contents=attachment_contents) else: self.logger.info("Ignoring vulnerability as all assets are already reported in a risk_accepted ticket") - + self.close_fixed_tickets(vulnerabilities) # we reinitialize so the next sync redoes the query with their specific variables self.all_tickets = [] self.excluded_tickets = [] return True - + def exclude_accepted_assets(self, vuln): # we want to check JIRA tickets with risk_accepted/server_decommission or false_positive labels sharing the same source # will exclude tickets older than 12 months, old tickets will get closed for higiene and recreated if still vulnerable - labels = [vuln['source'], vuln['scan_name'], 'vulnerability_management', 'vulnerability'] - + labels = [vuln['source'], vuln['scan_name'], 'vulnerability_management', 'vulnerability'] + if not self.excluded_tickets: - jql = "{} AND labels in (risk_accepted,server_decommission, false_positive) AND NOT labels=advisory AND created >=startOfMonth(-{})".format(" AND ".join(["labels={}".format(label) for label in labels]), self.max_time_tracking) + jql = "{} AND labels in (risk_accepted,server_decommission, false_positive) AND NOT labels=advisory AND created >=startOfMonth(-{})".format( + " AND ".join(["labels={}".format(label) for label in labels]), self.max_time_tracking) self.excluded_tickets = self.jira.search_issues(jql, maxResults=0) title = vuln['title'] - #WARNING: function IGNORES DUPLICATES, after finding a "duplicate" will just return it exists - #it wont iterate over the rest of tickets looking for other possible duplicates/similar issues + # WARNING: function IGNORES DUPLICATES, after finding a "duplicate" it will just return that it exists + # it won't iterate over the rest of tickets looking for other possible duplicates/similar issues self.logger.info("Comparing vulnerability to risk_accepted tickets") assets_to_exclude = [] tickets_excluded_assets = [] for index in range(len(self.excluded_tickets)): - checking_ticketid, checking_title, checking_assets = self.ticket_get_unique_fields(self.excluded_tickets[index]) + checking_ticketid, checking_title, checking_assets = self.ticket_get_unique_fields( + self.excluded_tickets[index]) if title.encode('ascii') == checking_title.encode('ascii'): if checking_assets: - #checking_assets is a list, we add to our full list for later delete all assets - assets_to_exclude+=checking_assets + # checking_assets is a list; we add it to our full list so we can later delete all those assets + assets_to_exclude += checking_assets tickets_excluded_assets.append(checking_ticketid) - + if assets_to_exclude: assets_to_remove = [] - self.logger.warn("Vulnerable Assets seen on an already existing risk_accepted Jira ticket: {}".format(', '.join(tickets_excluded_assets))) + self.logger.warn("Vulnerable Assets seen on an already existing risk_accepted Jira ticket: {}".format( + ', '.join(tickets_excluded_assets))) self.logger.debug("Original assets: {}".format(vuln['ips'])) - #assets in vulnerability have the structure "ip - hostname - port", so we need to match by partial + 
# assets in vulnerability have the structure "ip - hostname - port", so we need to match by partial for exclusion in assets_to_exclude: # for efficiency, we walk the backwards the array of ips from the scanners, as we will be popping out the matches # and we don't want it to affect the rest of the processing (otherwise, it would miss the asset right after the removed one) for index in range(len(vuln['ips']))[::-1]: if exclusion == vuln['ips'][index].split(" - ")[0]: - self.logger.debug("Deleting asset {} from vulnerability {}, seen in risk_accepted.".format(vuln['ips'][index], title)) + self.logger.debug( + "Deleting asset {} from vulnerability {}, seen in risk_accepted.".format(vuln['ips'][index], + title)) vuln['ips'].pop(index) self.logger.debug("Modified assets: {}".format(vuln['ips'])) @@ -192,35 +212,37 @@ class JiraAPI(object): Returns [exists (bool), is equal (bool), ticketid (str), assets (array)] ''' # we need to return if the vulnerability has already been reported and the ID of the ticket for further processing - #function returns array [duplicated(bool), update(bool), ticketid, ticket_assets] + # function returns array [duplicated(bool), update(bool), ticketid, ticket_assets] title = vuln['title'] - labels = [vuln['source'], vuln['scan_name'], 'vulnerability_management', 'vulnerability'] - #list(set()) to remove duplicates + labels = [vuln['source'], vuln['scan_name'], 'vulnerability_management', 'vulnerability'] + # list(set()) to remove duplicates assets = list(set(re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", ",".join(vuln['ips'])))) - + if not self.all_tickets: self.logger.info("Retrieving all JIRA tickets with the following tags {}".format(labels)) # we want to check all JIRA tickets, to include tickets moved to other queues # will exclude tickets older than 12 months, old tickets will get closed for higiene and recreated if still vulnerable - jql = "{} AND NOT labels=advisory AND created >=startOfMonth(-{})".format(" AND ".join(["labels={}".format(label) for label in labels]), self.max_time_tracking) - + jql = "{} AND NOT labels=advisory AND created >=startOfMonth(-{})".format( + " AND ".join(["labels={}".format(label) for label in labels]), self.max_time_tracking) + + self.all_tickets = self.jira.search_issues(jql, maxResults=0) - - #WARNING: function IGNORES DUPLICATES, after finding a "duplicate" will just return it exists - #it wont iterate over the rest of tickets looking for other possible duplicates/similar issues + + # WARNING: function IGNORES DUPLICATES, after finding a "duplicate" it will just return that it exists + # it won't iterate over the rest of tickets looking for other possible duplicates/similar issues self.logger.info("Comparing Vulnerabilities to created tickets") for index in range(len(self.all_tickets)): checking_ticketid, checking_title, checking_assets = self.ticket_get_unique_fields(self.all_tickets[index]) # added "not risk_accepted", as if it is risk_accepted, we will create a new ticket excluding the accepted assets - if title.encode('ascii') == checking_title.encode('ascii') and not self.is_risk_accepted(self.jira.issue(checking_ticketid)): + if title.encode('ascii') == checking_title.encode('ascii') and not self.is_risk_accepted( + self.jira.issue(checking_ticketid)): difference = list(set(assets).symmetric_difference(checking_assets)) - #to check intersection - set(assets) & set(checking_assets) - if difference: + # to check intersection - set(assets) & set(checking_assets) + if difference: self.logger.info("Asset mismatch, ticket to 
update. Ticket ID: {}".format(checking_ticketid)) - return False, True, checking_ticketid, checking_assets #this will automatically validate + return False, True, checking_ticketid, checking_assets # this will automatically validate else: self.logger.info("Confirmed duplicated. TickedID: {}".format(checking_ticketid)) - return True, False, checking_ticketid, [] #this will automatically validate + return True, False, checking_ticketid, [] # this will automatically validate return False, False, "", [] def ticket_get_unique_fields(self, ticket): @@ -229,19 +251,22 @@ class JiraAPI(object): assets = self.get_assets_from_description(ticket) if not assets: - #check if attachment, if so, get assets from attachment + # check if attachment, if so, get assets from attachment assets = self.get_assets_from_attachment(ticket) - + return ticketid, title, assets - def get_assets_from_description(self, ticket, _raw = False): + def get_assets_from_description(self, ticket, _raw=False): # Get the assets as a string "host - protocol/port - hostname" separated by "\n" # structure the text to have the same structure as the assets from the attachment affected_assets = "" try: - affected_assets = ticket.raw.get('fields', {}).get('description').encode("ascii").split("{panel:title=Affected Assets}")[1].split("{panel}")[0].replace('\n','').replace(' * ','\n').replace('\n', '', 1) + affected_assets = \ + ticket.raw.get('fields', {}).get('description').encode("ascii").split("{panel:title=Affected Assets}")[ + 1].split("{panel}")[0].replace('\n', '').replace(' * ', '\n').replace('\n', '', 1) except Exception as e: - self.logger.error("Unable to process the Ticket's 'Affected Assets'. Ticket ID: {}. Reason: {}".format(ticket, e)) + self.logger.error( + "Unable to process the Ticket's 'Affected Assets'. Ticket ID: {}. Reason: {}".format(ticket, e)) if affected_assets: if _raw: @@ -257,14 +282,14 @@ class JiraAPI(object): self.logger.error("Ticket IPs regex failed. Ticket ID: {}. Reason: {}".format(ticket, e)) return False - def get_assets_from_attachment(self, ticket, _raw = False): + def get_assets_from_attachment(self, ticket, _raw=False): # Get the assets as a string "host - protocol/port - hostname" separated by "\n" affected_assets = [] try: fields = self.jira.issue(ticket.key).raw.get('fields', {}) attachments = fields.get('attachment', {}) affected_assets = "" - #we will make sure we get the latest version of the file + # we will make sure we get the latest version of the file latest = '' attachment_id = '' if attachments: @@ -272,15 +297,16 @@ class JiraAPI(object): if item.get('filename') == self.attachment_filename: if not latest: latest = item.get('created') - attachment_id = item.get('id') + attachment_id = item.get('id') else: if latest < item.get('created'): - latest = item.get('created') - attachment_id = item.get('id') + latest = item.get('created') + attachment_id = item.get('id') affected_assets = self.jira.attachment(attachment_id).get() except Exception as e: - self.logger.error("Failed to get assets from ticket attachment. Ticket ID: {}. Reason: {}".format(ticket, e)) + self.logger.error( + "Failed to get assets from ticket attachment. Ticket ID: {}. 
Reason: {}".format(ticket, e)) if affected_assets: if _raw: @@ -326,15 +352,15 @@ class JiraAPI(object): def add_content_as_attachment(self, issue, contents): try: - #Create the file locally with the data + # Create the file locally with the data attachment_file = open(self.attachment_filename, "w") attachment_file.write("\n".join(contents)) attachment_file.close() - #Push the created file to the ticket + # Push the created file to the ticket attachment_file = open(self.attachment_filename, "rb") self.jira.add_attachment(issue, attachment_file, self.attachment_filename) attachment_file.close() - #remove the temp file + # remove the temp file os.remove(self.attachment_filename) self.logger.info("Added attachment successfully.") except: @@ -344,21 +370,23 @@ class JiraAPI(object): return True def get_ticket_reported_assets(self, ticket): - #[METRICS] return a list with all the affected assets for that vulnerability (including already resolved ones) - return list(set(re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b",str(self.jira.issue(ticket).raw)))) + # [METRICS] return a list with all the affected assets for that vulnerability (including already resolved ones) + return list(set(re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", str(self.jira.issue(ticket).raw)))) def get_resolution_time(self, ticket): - #get time a ticket took to be resolved + # get time a ticket took to be resolved ticket_obj = self.jira.issue(ticket) if self.is_ticket_resolved(ticket_obj): ticket_data = ticket_obj.raw.get('fields') - #dates follow format '2018-11-06T10:36:13.849+0100' - created = [int(x) for x in ticket_data['created'].split('.')[0].replace('T', '-').replace(':','-').split('-')] - resolved =[int(x) for x in ticket_data['resolutiondate'].split('.')[0].replace('T', '-').replace(':','-').split('-')] - - start = datetime(created[0],created[1],created[2],created[3],created[4],created[5]) - end = datetime(resolved[0],resolved[1],resolved[2],resolved[3],resolved[4],resolved[5]) - return (end-start).days + # dates follow format '2018-11-06T10:36:13.849+0100' + created = [int(x) for x in + ticket_data['created'].split('.')[0].replace('T', '-').replace(':', '-').split('-')] + resolved = [int(x) for x in + ticket_data['resolutiondate'].split('.')[0].replace('T', '-').replace(':', '-').split('-')] + + start = datetime(created[0], created[1], created[2], created[3], created[4], created[5]) + end = datetime(resolved[0], resolved[1], resolved[2], resolved[3], resolved[4], resolved[5]) + return (end - start).days else: self.logger.error("Ticket {ticket} is not resolved, can't calculate resolution time".format(ticket=ticket)) @@ -367,28 +395,28 @@ class JiraAPI(object): def ticket_update_assets(self, vuln, ticketid, ticket_assets): # correct description will always be in the vulnerability to report, only needed to update description to new one self.logger.info("Ticket {} exists, UPDATE requested".format(ticketid)) - - #for now, if a vulnerability has been accepted ('accepted_risk'), ticket is completely ignored and not updated (no new assets) - #TODO when vulnerability accepted, create a new ticket with only the non-accepted vulnerable assets - #this would require go through the downloaded tickets, check duplicates/accepted ones, and if so, - #check on their assets to exclude them from the new ticket + # for now, if a vulnerability has been accepted ('accepted_risk'), ticket is completely ignored and not updated (no new assets) + + # TODO when vulnerability accepted, create a new ticket with only the non-accepted 
vulnerable assets + # this would require going through the downloaded tickets, check duplicates/accepted ones, and if so, + # check on their assets to exclude them from the new ticket risk_accepted = False ticket_obj = self.jira.issue(ticketid) if self.is_ticket_resolved(ticket_obj): if self.is_risk_accepted(ticket_obj): return 0 self.reopen_ticket(ticketid=ticketid, comment=self.jira_still_vulnerable_comment) - - #First will do the comparison of assets + + # First will do the comparison of assets ticket_obj.update() assets = list(set(re.findall(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", ",".join(vuln['ips'])))) difference = list(set(assets).symmetric_difference(ticket_assets)) - + comment = '' added = '' removed = '' - #put a comment with the assets that have been added/removed + # put a comment with the assets that have been added/removed for asset in difference: if asset in assets: if not added: @@ -396,66 +424,71 @@ class JiraAPI(object): added += '* {}\n'.format(asset) elif asset in ticket_assets: if not removed: - removed= '\nThe following assets *have been resolved*:\n' + removed = '\nThe following assets *have been resolved*:\n' removed += '* {}\n'.format(asset) comment = added + removed - - #then will check if assets are too many that need to be added as an attachment + + # then will check if assets are too many that need to be added as an attachment attachment_contents = [] if len(vuln['ips']) > self.max_ips_ticket: attachment_contents = vuln['ips'] - vuln['ips'] = ["Affected hosts ({assets}) exceed Jira's allowed character limit, added as an attachment.".format(assets = len(attachment_contents))] - - #fill the ticket description template + vuln['ips'] = [ + "Affected hosts ({assets}) exceed Jira's allowed character limit, added as an attachment.".format( + assets=len(attachment_contents))] + + # fill the ticket description template try: tpl = template(self.template_path, vuln) except Exception as e: self.logger.error('Exception updating assets: {}'.format(str(e))) return 0 - #proceed checking if it requires adding as an attachment + # proceed checking if it requires adding as an attachment try: - #update attachment with hosts and delete the old versions + # update attachment with hosts and delete the old versions if attachment_contents: self.clean_old_attachments(ticket_obj) self.add_content_as_attachment(ticket_obj, attachment_contents) - - ticket_obj.update(description=tpl, comment=comment, fields={"labels":ticket_obj.fields.labels}) + + ticket_obj.update(description=tpl, comment=comment, fields={"labels": ticket_obj.fields.labels}) self.logger.info("Ticket {} updated successfully".format(ticketid)) self.add_label(ticketid, 'updated') except Exception as e: - self.logger.error("Error while trying up update ticket {ticketid}.\nReason: {e}".format(ticketid = ticketid, e=e)) + self.logger.error( + "Error while trying to update ticket {ticketid}.\nReason: {e}".format(ticketid=ticketid, e=e)) return 0 def add_label(self, ticketid, label): ticket_obj = self.jira.issue(ticketid) - + if label not in [x.encode('utf8') for x in ticket_obj.fields.labels]: ticket_obj.fields.labels.append(label) - + try: - ticket_obj.update(fields={"labels":ticket_obj.fields.labels}) + ticket_obj.update(fields={"labels": ticket_obj.fields.labels}) self.logger.info("Added label {label} to ticket {ticket}".format(label=label, ticket=ticketid)) - except: - self.logger.error("Error while trying to add label {label} to ticket {ticket}".format(label=label, ticket=ticketid)) - + except Exception as e: + 
self.logger.error( + "Error while trying to add label {label} to ticket {ticket}".format(label=label, ticket=ticketid)) + return 0 def remove_label(self, ticketid, label): ticket_obj = self.jira.issue(ticketid) - + if label in [x.encode('utf8') for x in ticket_obj.fields.labels]: ticket_obj.fields.labels.remove(label) - + try: - ticket_obj.update(fields={"labels":ticket_obj.fields.labels}) + ticket_obj.update(fields={"labels": ticket_obj.fields.labels}) self.logger.info("Removed label {label} from ticket {ticket}".format(label=label, ticket=ticketid)) - except: - self.logger.error("Error while trying to remove label {label} to ticket {ticket}".format(label=label, ticket=ticketid)) + except Exception as e: + self.logger.error("Error while trying to remove label {label} from ticket {ticket}".format(label=label, + ticket=ticketid)) else: self.logger.error("Error: label {label} not in ticket {ticket}".format(label=label, ticket=ticketid)) - + return 0 def close_fixed_tickets(self, vulnerabilities): @@ -475,10 +508,9 @@ class JiraAPI(object): self.logger.info("Ticket {} is still vulnerable".format(ticket)) continue self.logger.info("Ticket {} is no longer vulnerable".format(ticket)) - self.close_ticket(ticket, self.JIRA_RESOLUTION_FIXED, comment) + self.close_ticket(ticket, self.JIRA_RESOLUTION_FIXED, comment) return 0 - def is_ticket_reopenable(self, ticket_obj): transitions = self.jira.transitions(ticket_obj) for transition in transitions: @@ -497,7 +529,7 @@ class JiraAPI(object): return False def is_ticket_resolved(self, ticket_obj): - #Checks if a ticket is resolved or not + # Checks if a ticket is resolved or not if ticket_obj is not None: if ticket_obj.raw['fields'].get('resolution') is not None: if ticket_obj.raw['fields'].get('resolution').get('name') != 'Unresolved': @@ -507,7 +539,6 @@ class JiraAPI(object): self.logger.debug("Checked ticket {} is already open".format(ticket_obj)) return False - def is_risk_accepted(self, ticket_obj): if ticket_obj is not None: if ticket_obj.raw['fields'].get('labels') is not None: @@ -528,12 +559,13 @@ class JiraAPI(object): self.logger.debug("Ticket {} exists, REOPEN requested".format(ticketid)) # this will reopen a ticket by ticketid ticket_obj = self.jira.issue(ticketid) - + if self.is_ticket_resolved(ticket_obj): if (not self.is_risk_accepted(ticket_obj) or ignore_labels): try: if self.is_ticket_reopenable(ticket_obj): - error = self.jira.transition_issue(issue=ticketid, transition=self.JIRA_REOPEN_ISSUE, comment = comment) + error = self.jira.transition_issue(issue=ticketid, transition=self.JIRA_REOPEN_ISSUE, + comment=comment) self.logger.info("Ticket {} reopened successfully".format(ticketid)) if not ignore_labels: self.add_label(ticketid, 'reopened') @@ -551,30 +583,32 @@ class JiraAPI(object): if not self.is_ticket_resolved(ticket_obj): try: if self.is_ticket_closeable(ticket_obj): - #need to add the label before closing the ticket + # need to add the label before closing the ticket self.add_label(ticketid, 'closed') - error = self.jira.transition_issue(issue=ticketid, transition=self.JIRA_CLOSE_ISSUE, comment = comment, resolution = {"name": resolution }) + error = self.jira.transition_issue(issue=ticketid, transition=self.JIRA_CLOSE_ISSUE, + comment=comment, resolution={"name": resolution}) self.logger.info("Ticket {} closed successfully".format(ticketid)) return 1 except Exception as e: # continue with ticket data so that a new ticket is created in place of the "lost" one self.logger.error("error closing ticket {}: {}".format(ticketid, e)) 
return 0 - + return 0 def close_obsolete_tickets(self): # Close tickets older than 12 months, vulnerabilities not solved will get created a new ticket self.logger.info("Closing obsolete tickets older than {} months".format(self.max_time_tracking)) - jql = "labels=vulnerability_management AND NOT labels=advisory AND created 2: - self.logger.info('Processing {}/{} for scan: {}'.format(scan_count, len(scan_list), scan_name.encode('utf8'))) - columns_to_cleanse = ['CVSS','CVE','Description','Synopsis','Solution','See Also','Plugin Output', 'MAC Address'] - - for col in columns_to_cleanse: - if col in clean_csv: - clean_csv[col] = clean_csv[col].astype(str).apply(self.cleanser) - - clean_csv.to_csv(relative_path_name, index=False) - record_meta = ( - scan_name, - scan_id, - norm_time, - file_name, - time.time(), - clean_csv.shape[0], - self.CONFIG_SECTION, - uuid, - 1, - 0, - ) - self.record_insert(record_meta) - self.logger.info('{filename} records written to {path} '.format(filename=clean_csv.shape[0], - path=file_name.encode('utf8'))) - else: - record_meta = ( - scan_name, - scan_id, - norm_time, - file_name, - time.time(), - clean_csv.shape[0], - self.CONFIG_SECTION, - uuid, - 1, - 0, - ) - self.record_insert(record_meta) - self.logger.warn('{} has no host available... Updating database and skipping!'.format(file_name)) - self.conn.close() - self.logger.info('Scan aggregation complete! Connection to database closed.') - else: - self.logger.error('Failed to use scanner at {host}:{port}'.format(host=self.hostname, port=self.nessus_port)) - self.exit_code += 1 - return self.exit_code - - -class vulnWhispererQualys(vulnWhispererBase): - - CONFIG_SECTION = 'qualys_web' - COLUMN_MAPPING = {'Access Path': 'access_path', - 'Ajax Request': 'ajax_request', - 'Ajax Request ID': 'ajax_request_id', - 'Authentication': 'authentication', - 'CVSS Base': 'cvss', - 'CVSS Temporal': 'cvss_temporal', - 'CWE': 'cwe', - 'Category': 'category', - 'Content': 'content', - 'DescriptionSeverity': 'severity_description', - 'DescriptionCatSev': 'category_description', - 'Detection ID': 'detection_id', - 'Evidence #1': 'evidence_1', - 'First Time Detected': 'first_time_detected', - 'Form Entry Point': 'form_entry_point', - 'Function': 'function', - 'Groups': 'groups', - 'ID': 'id', - 'Ignore Comments': 'ignore_comments', - 'Ignore Date': 'ignore_date', - 'Ignore Reason': 'ignore_reason', - 'Ignore User': 'ignore_user', - 'Ignored': 'ignored', - 'Impact': 'impact', - 'Last Time Detected': 'last_time_detected', - 'Last Time Tested': 'last_time_tested', - 'Level': 'level', - 'OWASP': 'owasp', - 'Operating System': 'operating_system', - 'Owner': 'owner', - 'Param': 'param', - 'Payload #1': 'payload_1', - 'QID': 'plugin_id', - 'Request Headers #1': 'request_headers_1', - 'Request Method #1': 'request_method_1', - 'Request URL #1': 'request_url_1', - 'Response #1': 'response_1', - 'Scope': 'scope', - 'Severity': 'risk', - 'Severity Level': 'security_level', - 'Solution': 'solution', - 'Times Detected': 'times_detected', - 'Title': 'plugin_name', - 'URL': 'url', - 'Url': 'uri', - 'Vulnerability Category': 'vulnerability_category', - 'WASC': 'wasc', - 'Web Application Name': 'web_application_name'} - def __init__( - self, - config=None, - db_name='report_tracker.db', - purge=False, - verbose=None, - debug=False, - username=None, - password=None, - ): - - super(vulnWhispererQualys, self).__init__(config=config) - self.logger = logging.getLogger('vulnWhispererQualys') - if debug: - self.logger.setLevel(logging.DEBUG) - try: - 
self.qualys_scan = qualysScanReport(config=config) - except Exception as e: - self.logger.error("Unable to establish connection with Qualys scanner. Reason: {}".format(e)) - return False - self.latest_scans = self.qualys_scan.qw.get_all_scans() - self.directory_check() - self.scans_to_process = None - - def whisper_reports(self, - report_id=None, - launched_date=None, - scan_name=None, - scan_reference=None, - output_format='json', - cleanup=True): - """ - report_id: App ID - updated_date: Last time scan was ran for app_id - """ - vuln_ready = None - - try: - if 'Z' in launched_date: - launched_date = self.qualys_scan.utils.iso_to_epoch(launched_date) - report_name = 'qualys_web_' + str(report_id) \ - + '_{last_updated}'.format(last_updated=launched_date) \ - + '.{extension}'.format(extension=output_format) - - relative_path_name = self.path_check(report_name).encode('utf8') - - if os.path.isfile(relative_path_name): - #TODO Possibly make this optional to sync directories - file_length = len(open(relative_path_name).readlines()) - record_meta = ( - scan_name, - scan_reference, - launched_date, - report_name, - time.time(), - file_length, - self.CONFIG_SECTION, - report_id, - 1, - 0, - ) - self.record_insert(record_meta) - self.logger.info('File {filename} already exist! Updating database'.format(filename=relative_path_name)) - - else: - self.logger.info('Generating report for {}'.format(report_id)) - status = self.qualys_scan.qw.create_report(report_id) - root = objectify.fromstring(status) - if root.responseCode == 'SUCCESS': - self.logger.info('Successfully generated report! ID: {}'.format(report_id)) - generated_report_id = root.data.Report.id - self.logger.info('New Report ID: {}'.format(generated_report_id)) - - vuln_ready = self.qualys_scan.process_data(path=self.write_path, file_id=str(generated_report_id)) - - vuln_ready['scan_name'] = scan_name - vuln_ready['scan_reference'] = scan_reference - vuln_ready.rename(columns=self.COLUMN_MAPPING, inplace=True) - - record_meta = ( - scan_name, - scan_reference, - launched_date, - report_name, - time.time(), - vuln_ready.shape[0], - self.CONFIG_SECTION, - report_id, - 1, - 0, - ) - self.record_insert(record_meta) - - if output_format == 'json': - with open(relative_path_name, 'w') as f: - f.write(vuln_ready.to_json(orient='records', lines=True)) - f.write('\n') - - elif output_format == 'csv': - vuln_ready.to_csv(relative_path_name, index=False, header=True) # add when timestamp occured - - self.logger.info('Report written to {}'.format(report_name)) - - if cleanup: - self.logger.info('Removing report {} from Qualys Database'.format(generated_report_id)) - cleaning_up = self.qualys_scan.qw.delete_report(generated_report_id) - os.remove(self.path_check(str(generated_report_id) + '.csv')) - self.logger.info('Deleted report from local disk: {}'.format(self.path_check(str(generated_report_id)))) - else: - self.logger.error('Could not process report ID: {}'.format(status)) - - except Exception as e: - self.logger.error('Could not process {}: {}'.format(report_id, str(e))) - return vuln_ready - - - def identify_scans_to_process(self): - if self.uuids: - self.scans_to_process = self.latest_scans[~self.latest_scans['id'].isin(self.uuids)] - else: - self.scans_to_process = self.latest_scans - self.logger.info('Identified {new} scans to be processed'.format(new=len(self.scans_to_process))) - - - def process_web_assets(self): - counter = 0 - self.identify_scans_to_process() - if self.scans_to_process.shape[0]: - for app in 
self.scans_to_process.iterrows(): - counter += 1 - r = app[1] - self.logger.info('Processing {}/{}'.format(counter, len(self.scans_to_process))) - self.whisper_reports(report_id=r['id'], - launched_date=r['launchedDate'], - scan_name=r['name'], - scan_reference=r['reference']) - else: - self.logger.info('No new scans to process. Exiting...') - self.conn.close() - return self.exit_code - - -class vulnWhispererOpenVAS(vulnWhispererBase): - CONFIG_SECTION = 'openvas' - COLUMN_MAPPING = {'IP': 'asset', - 'Hostname': 'hostname', - 'Port': 'port', - 'Port Protocol': 'protocol', - 'CVSS': 'cvss', - 'Severity': 'severity', - 'Solution Type': 'category', - 'NVT Name': 'plugin_name', - 'Summary': 'synopsis', - 'Specific Result': 'plugin_output', - 'NVT OID': 'nvt_oid', - 'Task ID': 'task_id', - 'Task Name': 'task_name', - 'Timestamp': 'timestamp', - 'Result ID': 'result_id', - 'Impact': 'description', - 'Solution': 'solution', - 'Affected Software/OS': 'affected_software', - 'Vulnerability Insight': 'vulnerability_insight', - 'Vulnerability Detection Method': 'vulnerability_detection_method', - 'Product Detection Result': 'product_detection_result', - 'BIDs': 'bids', - 'CERTs': 'certs', - 'Other References': 'see_also' - } - - def __init__( - self, - config=None, - db_name='report_tracker.db', - purge=False, - verbose=None, - debug=False, - username=None, - password=None, - ): - super(vulnWhispererOpenVAS, self).__init__(config=config) - self.logger = logging.getLogger('vulnWhispererOpenVAS') - if debug: - self.logger.setLevel(logging.DEBUG) - - self.directory_check() - self.port = int(self.config.get(self.CONFIG_SECTION, 'port')) - self.develop = True - self.purge = purge - self.scans_to_process = None - try: - self.openvas_api = OpenVAS_API(hostname=self.hostname, - port=self.port, - username=self.username, - password=self.password) - except Exception as e: - self.logger.error("Unable to establish connection with OpenVAS scanner. Reason: {}".format(e)) - return False - - def whisper_reports(self, output_format='json', launched_date=None, report_id=None, cleanup=True): - report = None - if report_id: - self.logger.info('Processing report ID: {}'.format(report_id)) - - - scan_name = report_id.replace('-', '') - report_name = 'openvas_scan_{scan_name}_{last_updated}.{extension}'.format(scan_name=scan_name, - last_updated=launched_date, - extension=output_format) - relative_path_name = self.path_check(report_name).encode('utf8') - scan_reference = report_id - - if os.path.isfile(relative_path_name): - # TODO Possibly make this optional to sync directories - file_length = len(open(relative_path_name).readlines()) - record_meta = ( - scan_name, - scan_reference, - launched_date, - report_name, - time.time(), - file_length, - self.CONFIG_SECTION, - report_id, - 1, - 0, - ) - self.record_insert(record_meta) - self.logger.info('File {filename} already exist! 
Updating database'.format(filename=relative_path_name)) - - record_meta = ( - scan_name, - scan_reference, - launched_date, - report_name, - time.time(), - file_length, - self.CONFIG_SECTION, - report_id, - 1, - ) - - else: - vuln_ready = self.openvas_api.process_report(report_id=report_id) - vuln_ready['scan_name'] = scan_name - vuln_ready['scan_reference'] = report_id - vuln_ready.rename(columns=self.COLUMN_MAPPING, inplace=True) - vuln_ready.port = vuln_ready.port.fillna(0).astype(int) - vuln_ready.fillna('', inplace=True) - if output_format == 'json': - with open(relative_path_name, 'w') as f: - f.write(vuln_ready.to_json(orient='records', lines=True)) - f.write('\n') - self.logger.info('Report written to {}'.format(report_name)) - - return report - - def identify_scans_to_process(self): - if self.uuids: - self.scans_to_process = self.openvas_api.openvas_reports[ - ~self.openvas_api.openvas_reports.report_ids.isin(self.uuids)] - else: - self.scans_to_process = self.openvas_api.openvas_reports - self.logger.info('Identified {new} scans to be processed'.format(new=len(self.scans_to_process))) - - def process_openvas_scans(self): - counter = 0 - self.identify_scans_to_process() - if self.scans_to_process.shape[0]: - for scan in self.scans_to_process.iterrows(): - counter += 1 - info = scan[1] - self.logger.info('Processing {}/{} - Report ID: {}'.format(counter, len(self.scans_to_process), info['report_ids'])) - self.whisper_reports(report_id=info['report_ids'], - launched_date=info['epoch']) - self.logger.info('Processing complete') - else: - self.logger.info('No new scans to process. Exiting...') - self.conn.close() - return self.exit_code - - -class vulnWhispererQualysVuln(vulnWhispererBase): - - CONFIG_SECTION = 'qualys_vuln' - COLUMN_MAPPING = {'cvss_base': 'cvss', - 'cvss3_base': 'cvss3', - 'cve_id': 'cve', - 'os': 'operating_system', - 'qid': 'plugin_id', - 'severity': 'risk', - 'title': 'plugin_name'} - - def __init__( - self, - config=None, - db_name='report_tracker.db', - purge=False, - verbose=None, - debug=False, - username=None, - password=None, - ): - - super(vulnWhispererQualysVuln, self).__init__(config=config) - self.logger = logging.getLogger('vulnWhispererQualysVuln') - if debug: - self.logger.setLevel(logging.DEBUG) - try: - self.qualys_scan = qualysVulnScan(config=config) - except Exception as e: - self.logger.error("Unable to create connection with Qualys. Reason: {}".format(e)) - return False - self.directory_check() - self.scans_to_process = None - - def whisper_reports(self, - report_id=None, - launched_date=None, - scan_name=None, - scan_reference=None, - output_format='json', - cleanup=True): - - if 'Z' in launched_date: - launched_date = self.qualys_scan.utils.iso_to_epoch(launched_date) - report_name = 'qualys_vuln_' + report_id.replace('/','_') \ - + '_{last_updated}'.format(last_updated=launched_date) \ - + '.json' - - relative_path_name = self.path_check(report_name).encode('utf8') - - if os.path.isfile(relative_path_name): - #TODO Possibly make this optional to sync directories - file_length = len(open(relative_path_name).readlines()) - record_meta = ( - scan_name, - scan_reference, - launched_date, - report_name, - time.time(), - file_length, - self.CONFIG_SECTION, - report_id, - 1, - 0, - ) - self.record_insert(record_meta) - self.logger.info('File {filename} already exist! 
Updating database'.format(filename=relative_path_name)) - - else: - try: - self.logger.info('Processing report ID: {}'.format(report_id)) - vuln_ready = self.qualys_scan.process_data(scan_id=report_id) - vuln_ready['scan_name'] = scan_name - vuln_ready['scan_reference'] = report_id - vuln_ready.rename(columns=self.COLUMN_MAPPING, inplace=True) - except Exception as e: - self.logger.error('Could not process {}: {}'.format(report_id, str(e))) - self.exit_code += 1 - return self.exit_code - - record_meta = ( - scan_name, - scan_reference, - launched_date, - report_name, - time.time(), - vuln_ready.shape[0], - self.CONFIG_SECTION, - report_id, - 1, - 0, - ) - self.record_insert(record_meta) - - if output_format == 'json': - with open(relative_path_name, 'w') as f: - f.write(vuln_ready.to_json(orient='records', lines=True)) - f.write('\n') - - self.logger.info('Report written to {}'.format(report_name)) - return self.exit_code - - - def identify_scans_to_process(self): - self.latest_scans = self.qualys_scan.qw.get_all_scans() - if self.uuids: - self.scans_to_process = self.latest_scans.loc[ - (~self.latest_scans['id'].isin(self.uuids)) - & (self.latest_scans['status'] == 'Finished')] - else: - self.scans_to_process = self.latest_scans - self.logger.info('Identified {new} scans to be processed'.format(new=len(self.scans_to_process))) - - - def process_vuln_scans(self): - counter = 0 - self.identify_scans_to_process() - if self.scans_to_process.shape[0]: - for app in self.scans_to_process.iterrows(): - counter += 1 - r = app[1] - self.logger.info('Processing {}/{}'.format(counter, len(self.scans_to_process))) - self.exit_code += self.whisper_reports(report_id=r['id'], - launched_date=r['date'], - scan_name=r['name'], - scan_reference=r['type']) - else: - self.logger.info('No new scans to process. 
Exiting...') - self.conn.close() - return self.exit_code - - -class vulnWhispererJIRA(vulnWhispererBase): - - CONFIG_SECTION = 'jira' - - def __init__( - self, - config=None, - db_name='report_tracker.db', - purge=False, - verbose=None, - debug=False, - username=None, - password=None, - ): - super(vulnWhispererJIRA, self).__init__(config=config) - self.logger = logging.getLogger('vulnWhispererJira') - if debug: - self.logger.setLevel(logging.DEBUG) - self.config_path = config - self.config = vwConfig(config) - self.host_resolv_cache = {} - self.host_no_resolv = [] - self.no_resolv_by_team_dict = {} - #Save locally those assets without DNS entry for flag to system owners - self.no_resolv_fname="no_resolv.txt" - if os.path.isfile(self.no_resolv_fname): - with open(self.no_resolv_fname, "r") as json_file: - self.no_resolv_by_team_dict = json.load(json_file) - self.directory_check() - - if config is not None: - try: - self.logger.info('Attempting to connect to jira...') - self.jira = \ - JiraAPI(hostname=self.hostname, - username=self.username, - password=self.password, - path=self.config.get('jira','write_path')) - self.jira_connect = True - self.logger.info('Connected to jira on {host}'.format(host=self.hostname)) - except Exception as e: - self.logger.error('Exception: {}'.format(str(e))) - raise Exception( - 'Could not connect to nessus -- Please verify your settings in {config} are correct and try again.\nReason: {e}'.format( - config=self.config.config_in, e=e)) - return False - #sys.exit(1) - - profiles = [] - profiles = self.get_scan_profiles() - - if not self.config.exists_jira_profiles(profiles): - self.config.update_jira_profiles(profiles) - self.logger.info("Jira profiles have been created in {config}, please fill the variables before rerunning the module.".format(config=self.config_path)) - sys.exit(0) - - - def get_env_variables(self, source, scan_name): - # function returns an array with [jira_project, jira_components, datafile_path] - - #Jira variables - jira_section = self.config.normalize_section("{}.{}".format(source,scan_name)) - - project = self.config.get(jira_section,'jira_project') - if project == "": - self.logger.error('JIRA project is missing on the configuration file!') - sys.exit(0) - - # check that project actually exists - if not self.jira.project_exists(project): - self.logger.error("JIRA project '{project}' doesn't exist!".format(project=project)) - sys.exit(0) - - components = self.config.get(jira_section,'components').split(',') - - #cleaning empty array from '' - if not components[0]: - components = [] - - min_critical = self.config.get(jira_section,'min_critical_to_report') - if not min_critical: - self.logger.error('"min_critical_to_report" variable on config file is empty.') - sys.exit(0) - - #datafile path - filename, reported = self.get_latest_results(source, scan_name) - fullpath = "" - - # search data files under user specified directory - for root, dirnames, filenames in os.walk(vwConfig(self.config_path).get(source,'write_path')): - if filename in filenames: - fullpath = "{}/{}".format(root,filename) - - if reported: - self.logger.warn('Last Scan of "{scan_name}" for source "{source}" has already been reported; will be skipped.'.format(scan_name=scan_name, source=source)) - return [False] * 5 - - if not fullpath: - self.logger.error('Scan of "{scan_name}" for source "{source}" has not been found. 
Please check that the scanner data files are in place.'.format(scan_name=scan_name, source=source)) - sys.exit(1) - - dns_resolv = self.config.get('jira','dns_resolv') - if dns_resolv in ('False', 'false', ''): - dns_resolv = False - elif dns_resolv in ('True', 'true'): - dns_resolv = True - else: - self.logger.error("dns_resolv variable not setup in [jira] section; will not do dns resolution") - dns_resolv = False - - return project, components, fullpath, min_critical, dns_resolv - - - def parse_nessus_vulnerabilities(self, fullpath, source, scan_name, min_critical): - - vulnerabilities = [] - - # we need to parse the CSV - risks = ['none', 'low', 'medium', 'high', 'critical'] - min_risk = int([i for i,x in enumerate(risks) if x == min_critical][0]) - - df = pd.read_csv(fullpath, delimiter=',') - - #nessus fields we want - ['Host','Protocol','Port', 'Name', 'Synopsis', 'Description', 'Solution', 'See Also'] - for index in range(len(df)): - # filtering vulnerabilities by criticality, discarding low risk - to_report = int([i for i,x in enumerate(risks) if x == df.loc[index]['Risk'].lower()][0]) - if to_report < min_risk: - continue - - if not vulnerabilities or df.loc[index]['Name'] not in [entry['title'] for entry in vulnerabilities]: - vuln = {} - #vulnerabilities should have all the info for creating all JIRA labels - vuln['source'] = source - vuln['scan_name'] = scan_name - #vulnerability variables - vuln['title'] = df.loc[index]['Name'] - vuln['diagnosis'] = df.loc[index]['Synopsis'].replace('\\n',' ') - vuln['consequence'] = df.loc[index]['Description'].replace('\\n',' ') - vuln['solution'] = df.loc[index]['Solution'].replace('\\n',' ') - vuln['ips'] = [] - vuln['ips'].append("{} - {}/{}".format(df.loc[index]['Host'], df.loc[index]['Protocol'], df.loc[index]['Port'])) - vuln['risk'] = df.loc[index]['Risk'].lower() - - # Nessus "nan" value gets automatically casted to float by python - if not (type(df.loc[index]['See Also']) is float): - vuln['references'] = df.loc[index]['See Also'].split("\\n") - else: - vuln['references'] = [] - vulnerabilities.append(vuln) - - else: - # grouping assets by vulnerability to open on single ticket, as each asset has its own nessus entry - for vuln in vulnerabilities: - if vuln['title'] == df.loc[index]['Name']: - vuln['ips'].append("{} - {}/{}".format(df.loc[index]['Host'], df.loc[index]['Protocol'], df.loc[index]['Port'])) - - return vulnerabilities - - def parse_qualys_vuln_vulnerabilities(self, fullpath, source, scan_name, min_critical, dns_resolv = False): - #parsing of the qualys vulnerabilities schema - #parse json - vulnerabilities = [] - - risks = ['info', 'low', 'medium', 'high', 'critical'] - # +1 as array is 0-4, but score is 1-5 - min_risk = int([i for i,x in enumerate(risks) if x == min_critical][0])+1 - - try: - data=[json.loads(line) for line in open(fullpath).readlines()] - except Exception as e: - self.logger.warn("Scan has no vulnerabilities, skipping.") - return vulnerabilities - - #qualys fields we want - [] - for index in range(len(data)): - if int(data[index]['risk']) < min_risk: - continue - - elif data[index]['type'] == 'Practice' or data[index]['type'] == 'Ig': - self.logger.debug("Vulnerability '{vuln}' ignored, as it is 'Practice/Potential', not verified.".format(vuln=data[index]['plugin_name'])) - continue - - if not vulnerabilities or data[index]['plugin_name'] not in [entry['title'] for entry in vulnerabilities]: - vuln = {} - #vulnerabilities should have all the info for creating all JIRA labels - vuln['source'] = source 
- vuln['scan_name'] = scan_name - #vulnerability variables - vuln['title'] = data[index]['plugin_name'] - vuln['diagnosis'] = data[index]['threat'].replace('\\n',' ') - vuln['consequence'] = data[index]['impact'].replace('\\n',' ') - vuln['solution'] = data[index]['solution'].replace('\\n',' ') - vuln['ips'] = [] - #TODO ADDED DNS RESOLUTION FROM QUALYS! \n SEPARATORS INSTEAD OF \\n! - - vuln['ips'].append("{ip} - {protocol}/{port} - {dns}".format(**self.get_asset_fields(data[index], dns_resolv))) - - #different risk system than Nessus! - vuln['risk'] = risks[int(data[index]['risk'])-1] - - # Nessus "nan" value gets automatically casted to float by python - if not (type(data[index]['vendor_reference']) is float or data[index]['vendor_reference'] == None): - vuln['references'] = data[index]['vendor_reference'].split("\\n") - else: - vuln['references'] = [] - vulnerabilities.append(vuln) - else: - # grouping assets by vulnerability to open on single ticket, as each asset has its own nessus entry - for vuln in vulnerabilities: - if vuln['title'] == data[index]['plugin_name']: - vuln['ips'].append("{ip} - {protocol}/{port} - {dns}".format(**self.get_asset_fields(data[index], dns_resolv))) - - return vulnerabilities - - def get_asset_fields(self, vuln, dns_resolv): - values = {} - values['ip'] = vuln['ip'] - values['protocol'] = vuln['protocol'] - values['port'] = vuln['port'] - values['dns'] = '' - if dns_resolv: - if vuln['dns']: - values['dns'] = vuln['dns'] - else: - if values['ip'] in self.host_resolv_cache.keys(): - self.logger.debug("Hostname from {ip} cached, retrieving from cache.".format(ip=values['ip'])) - values['dns'] = self.host_resolv_cache[values['ip']] - else: - self.logger.debug("No hostname, trying to resolve {ip}'s hostname.".format(ip=values['ip'])) - try: - values['dns'] = socket.gethostbyaddr(vuln['ip'])[0] - self.host_resolv_cache[values['ip']] = values['dns'] - self.logger.debug("Hostname found: {hostname}.".format(hostname=values['dns'])) - except: - self.host_resolv_cache[values['ip']] = '' - self.host_no_resolv.append(values['ip']) - self.logger.debug("Hostname not found for: {ip}.".format(ip=values['ip'])) - - for key in values.keys(): - if not values[key]: - values[key] = 'N/A' - - return values - - def parse_vulnerabilities(self, fullpath, source, scan_name, min_critical): - #TODO: SINGLE LOCAL SAVE FORMAT FOR ALL SCANNERS - #JIRA standard vuln format - ['source', 'scan_name', 'title', 'diagnosis', 'consequence', 'solution', 'ips', 'references'] - - return 0 - - - def jira_sync(self, source, scan_name): - self.logger.info("Jira Sync triggered for source '{source}' and scan '{scan_name}'".format(source=source, scan_name=scan_name)) - - project, components, fullpath, min_critical, dns_resolv = self.get_env_variables(source, scan_name) - - if not project: - self.logger.debug("Skipping scan for source '{source}' and scan '{scan_name}': vulnerabilities have already been reported.".format(source=source, scan_name=scan_name)) - return False - - vulnerabilities = [] - - #***Nessus parsing*** - if source == "nessus": - vulnerabilities = self.parse_nessus_vulnerabilities(fullpath, source, scan_name, min_critical) - - #***Qualys VM parsing*** - if source == "qualys_vuln": - vulnerabilities = self.parse_qualys_vuln_vulnerabilities(fullpath, source, scan_name, min_critical, dns_resolv) - - #***JIRA sync*** - if vulnerabilities: - self.logger.info('{source} data has been successfuly parsed'.format(source=source.upper())) - self.logger.info('Starting JIRA sync') - - 
self.jira.sync(vulnerabilities, project, components) - else: - self.logger.info("[{source}.{scan_name}] No vulnerabilities or vulnerabilities not parsed.".format(source=source, scan_name=scan_name)) - self.set_latest_scan_reported(fullpath.split("/")[-1]) - return False - - #writing to file those assets without DNS resolution - #if its not empty - if self.host_no_resolv: - #we will replace old list of non resolved for the new one or create if it doesn't exist already - self.no_resolv_by_team_dict[scan_name] = self.host_no_resolv - with open(self.no_resolv_fname, 'w') as outfile: - json.dump(self.no_resolv_by_team_dict, outfile) - - self.set_latest_scan_reported(fullpath.split("/")[-1]) - return True - - def sync_all(self): - autoreport_sections = self.config.get_sections_with_attribute('autoreport') - - if autoreport_sections: - for scan in autoreport_sections: - try: - self.jira_sync(self.config.get(scan, 'source'), self.config.get(scan, 'scan_name')) - except Exception as e: - self.logger.error("VulnWhisperer wasn't able to report the vulnerabilities from the '{}'s source".format(self.config.get(scan, 'source'))) - return True - return False - -class vulnWhisperer(object): - - def __init__(self, - profile=None, - verbose=None, - username=None, - password=None, - config=None, - source=None, - scanname=None): - - self.logger = logging.getLogger('vulnWhisperer') - if verbose: - self.logger.setLevel(logging.DEBUG) - self.profile = profile - self.config = config - self.username = username - self.password = password - self.verbose = verbose - self.source = source - self.scanname = scanname - self.exit_code = 0 - - - def whisper_vulnerabilities(self): - - if self.profile == 'nessus': - vw = vulnWhispererNessus(config=self.config, - profile=self.profile) - if vw: - self.exit_code += vw.whisper_nessus() - - elif self.profile == 'qualys_web': - vw = vulnWhispererQualys(config=self.config) - if vw: - self.exit_code += vw.process_web_assets() - - elif self.profile == 'openvas': - vw_openvas = vulnWhispererOpenVAS(config=self.config) - if vw: - self.exit_code += vw_openvas.process_openvas_scans() - - elif self.profile == 'tenable': - vw = vulnWhispererNessus(config=self.config, - profile=self.profile) - if vw: - self.exit_code += vw.whisper_nessus() - - elif self.profile == 'qualys_vuln': - vw = vulnWhispererQualysVuln(config=self.config) - if vw: - self.exit_code += vw.process_vuln_scans() - - elif self.profile == 'jira': - #first we check config fields are created, otherwise we create them - vw = vulnWhispererJIRA(config=self.config) - if vw: - if not (self.source and self.scanname): - self.logger.info('No source/scan_name selected, all enabled scans will be synced') - success = vw.sync_all() - if not success: - self.logger.error('All scans sync failed!') - self.logger.error('Source scanner and scan name needed!') - return 0 - else: - vw.jira_sync(self.source, self.scanname) - - return self.exit_code +#!/usr/bin/python +# -*- coding: utf-8 -*- +from __future__ import absolute_import +from six.moves import range +from functools import reduce +__author__ = 'Austin Taylor' + +from .base.config import vwConfig +from .frameworks.nessus import NessusAPI +from .frameworks.qualys_web import qualysScanReport +from .frameworks.qualys_vuln import qualysVulnScan +from .frameworks.openvas import OpenVAS_API +from .reporting.jira_api import JiraAPI +import pandas as pd +from lxml import objectify +import sys +import os +import io +import time +import sqlite3 +import json +import logging +import socket + + +class 
vulnWhispererBase(object):
+
+    CONFIG_SECTION = None
+
+    def __init__(
+        self,
+        config=None,
+        db_name='report_tracker.db',
+        purge=False,
+        verbose=None,
+        debug=False,
+        username=None,
+        password=None,
+        section=None,
+        develop=False,
+    ):
+        self.logger = logging.getLogger('vulnWhispererBase')
+        if debug:
+            self.logger.setLevel(logging.DEBUG)
+
+        if self.CONFIG_SECTION is None:
+            raise Exception('Implementing class must define CONFIG_SECTION')
+
+        self.exit_code = 0
+        self.db_name = db_name
+        self.purge = purge
+        self.develop = develop
+
+        if config is not None:
+            self.config = vwConfig(config_in=config)
+            try:
+                self.enabled = self.config.get(self.CONFIG_SECTION, 'enabled')
+            except Exception:
+                self.enabled = False
+            self.hostname = self.config.get(self.CONFIG_SECTION, 'hostname')
+            try:
+                self.username = self.config.get(self.CONFIG_SECTION, 'username')
+                self.password = self.config.get(self.CONFIG_SECTION, 'password')
+            except Exception:
+                # credentials are optional for some scanners (e.g. API-key based access)
+                self.username = None
+                self.password = None
+            self.write_path = self.config.get(self.CONFIG_SECTION, 'write_path')
+            self.db_path = self.config.get(self.CONFIG_SECTION, 'db_path')
+            self.verbose = self.config.getbool(self.CONFIG_SECTION, 'verbose')
+
+        if self.db_name is not None:
+            if self.db_path:
+                self.database = os.path.join(self.db_path, db_name)
+            else:
+                self.database = \
+                    os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                 'database', db_name))
+            if not os.path.exists(self.db_path):
+                os.makedirs(self.db_path)
+                self.logger.info('Creating directory {dir}'.format(dir=self.db_path))
+
+            if not os.path.exists(self.database):
+                with open(self.database, 'w'):
+                    self.logger.info('Creating file {dir}'.format(dir=self.database))
+
+            try:
+                self.conn = sqlite3.connect(self.database)
+                self.cur = self.conn.cursor()
+                self.logger.info('Connected to database at {loc}'.format(loc=self.database))
+            except Exception as e:
+                self.logger.error('Could not connect to database at {loc}\nReason: {e} - Please ensure the path exists'.format(
+                    e=e,
+                    loc=self.database))
+        else:
+            self.logger.error('Please specify a database to connect to!')
+            sys.exit(1)
+
+        self.table_columns = [
+            'scan_name',
+            'scan_id',
+            'last_modified',
+            'filename',
+            'download_time',
+            'record_count',
+            'source',
+            'uuid',
+            'processed',
+            'reported',
+        ]
+
+        self.init()
+        self.uuids = self.retrieve_uuids()
+        self.processed = 0
+        self.skipped = 0
+        self.scan_list = []
+
+    def create_table(self):
+        self.cur.execute(
+            'CREATE TABLE IF NOT EXISTS scan_history (id INTEGER PRIMARY KEY,'
+            ' scan_name TEXT, scan_id INTEGER, last_modified DATE, filename TEXT,'
+            ' download_time DATE, record_count INTEGER, source TEXT,'
+            ' uuid TEXT, processed INTEGER, reported INTEGER)'
+        )
+        self.conn.commit()
+
+    def delete_table(self):
+        self.cur.execute('DROP TABLE IF EXISTS scan_history')
+        self.conn.commit()
+
+    def init(self):
+        if self.purge:
+            self.delete_table()
+        self.create_table()
+
+    def cleanser(self, _data):
+        # escape literal newlines/carriage returns so one finding stays on one CSV row
+        repls = (('\n', r'\n'), ('\r', r'\r'))
+        data = reduce(lambda a, kv: a.replace(*kv), repls, _data)
+        return data
+
+    def path_check(self, _data):
+        data = _data
+        if self.write_path:
+            # join write_path and _data without doubling the separator
+            if self.write_path.endswith(('/', '\\')) or _data.startswith(('/', '\\')):
+                data = self.write_path + _data
+            else:
+                data = self.write_path + '/' + _data
+        return data
+
+    def record_insert(self, record):
+        # for backwards compatibility with older versions without the "reported" field
+        try:
+            # [-1] gets the latest column, [1] gets the column name
+            # (old version would be "processed", new "reported")
+            # TODO delete backward compatibility check after some versions
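+            # PRAGMA table_info returns one row per column as (cid, name, type,
+            # notnull, dflt_value, pk), so fetchall()[-1][1] is the name of the
+            # last column: 'reported' on current databases, 'processed' on old ones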
+            last_column_table = self.cur.execute('PRAGMA table_info(scan_history)').fetchall()[-1][1]
+            if last_column_table == self.table_columns[-1]:
+                self.cur.execute('insert into scan_history({table_columns}) values (?,?,?,?,?,?,?,?,?,?)'.format(
+                    table_columns=', '.join(self.table_columns)), record)
+            else:
+                self.cur.execute('insert into scan_history({table_columns}) values (?,?,?,?,?,?,?,?,?)'.format(
+                    table_columns=', '.join(self.table_columns[:-1])), record[:-1])
+            self.conn.commit()
+        except Exception as e:
+            self.logger.error("Failed to insert record in database. Error: {}".format(e))
+            sys.exit(1)
+
+    def set_latest_scan_reported(self, filename):
+        # the filename is used instead of source/scan_name because the filename already belongs to
+        # the latest scan, which guarantees we mark exactly the scan that was checked
+        try:
+            self.cur.execute('UPDATE scan_history SET reported = 1 WHERE filename="{}";'.format(filename))
+            self.conn.commit()
+            self.logger.info('Scan {} marked as successfully processed.'.format(filename))
+            return True
+        except Exception as e:
+            self.logger.error('Failed to mark scan with file {} as reported: {}'.format(filename, e))
+
+        return False
+
+    def retrieve_uuids(self):
+        """
+        Retrieves UUIDs from the database to determine which files still need to be processed.
+        :return: frozenset of uuids already recorded for this source
+        """
+        try:
+            self.conn.text_factory = str
+            self.cur.execute('SELECT uuid FROM scan_history where source = "{config_section}"'.format(config_section=self.CONFIG_SECTION))
+            results = frozenset([r[0] for r in self.cur.fetchall()])
+        except Exception:
+            results = frozenset()
+        return results
+
+    def directory_check(self):
+        if not os.path.exists(self.write_path):
+            os.makedirs(self.write_path)
+            self.logger.info('Creating directory {scan}'.format(
+                scan=self.write_path.encode('utf8')))
+        else:
+            self.logger.info('Directory already exists for {scan} - Skipping creation'.format(
+                scan=self.write_path.encode('utf8')))
+
+    def get_latest_results(self, source, scan_name):
+        results = []
+        reported = ""
+
+        try:
+            self.conn.text_factory = str
+            self.cur.execute('SELECT filename FROM scan_history WHERE source="{}" AND scan_name="{}" ORDER BY last_modified DESC LIMIT 1;'.format(source, scan_name))
+            # should always return just one filename
+            results = [r[0] for r in self.cur.fetchall()][0]
+
+            # [-1] gets the latest column, [1] gets the column name
+            # (old version would be "processed", new "reported")
+            # TODO delete backward compatibility check after some versions
+            last_column_table = self.cur.execute('PRAGMA table_info(scan_history)').fetchall()[-1][1]
+            if results and last_column_table == self.table_columns[-1]:
+                reported = self.cur.execute('SELECT reported FROM scan_history WHERE filename="{}"'.format(results)).fetchall()
+                reported = reported[0][0]
+                if reported:
+                    self.logger.debug("Last downloaded scan from source {source} scan_name {scan_name} has already been reported".format(source=source, scan_name=scan_name))
+
+        except Exception as e:
+            self.logger.error("Error when getting latest results from {}.{} : {}".format(source, scan_name, e))
+
+        return results, reported
+
+    def get_scan_profiles(self):
+        # Returns a list of source.scan_name elements from the database
+
+        # we get the list of sources
+        try:
+            self.conn.text_factory = str
+            self.cur.execute('SELECT DISTINCT source FROM scan_history;')
+            sources = [r[0] for r in self.cur.fetchall()]
+        except Exception:
+            sources = []
+            self.logger.error("Process failed at executing 'SELECT DISTINCT source FROM scan_history;'")
+
+        results = []
+
+        # we get the list of scans within each source
+        for source in sources:
+            scan_names = []
+            try:
+                self.conn.text_factory = str
+                self.cur.execute("SELECT DISTINCT scan_name FROM scan_history WHERE source='{}';".format(source))
+                scan_names = [r[0] for r in self.cur.fetchall()]
+                for scan in scan_names:
+                    results.append('{}.{}'.format(source, scan))
+            except Exception:
+                scan_names = []
+
+        return results
+
+class vulnWhispererNessus(vulnWhispererBase):
+
+    CONFIG_SECTION = None
+
+    def __init__(
+        self,
+        config=None,
+        db_name='report_tracker.db',
+        purge=False,
+        verbose=None,
+        debug=False,
+        username=None,
+        password=None,
+        profile='nessus'
+    ):
+        self.CONFIG_SECTION = profile
+
+        super(vulnWhispererNessus, self).__init__(config=config)
+
+        self.logger = logging.getLogger('vulnWhispererNessus')
+        if debug:
+            self.logger.setLevel(logging.DEBUG)
+        self.port = int(self.config.get(self.CONFIG_SECTION, 'port'))
+
+        self.develop = True
+        self.purge = purge
+        self.access_key = None
+        self.secret_key = None
+
+        if config is not None:
+            try:
+                self.nessus_port = self.config.get(self.CONFIG_SECTION, 'port')
+
+                self.nessus_trash = self.config.getbool(self.CONFIG_SECTION,
+                                                        'trash')
+
+                try:
+                    self.access_key = self.config.get(self.CONFIG_SECTION, 'access_key')
+                    self.secret_key = self.config.get(self.CONFIG_SECTION, 'secret_key')
+                except Exception:
+                    # API keys are optional; username/password auth is used when absent
+                    pass
+
+                try:
+                    self.logger.info('Attempting to connect to {}...'.format(self.CONFIG_SECTION))
+                    self.nessus = \
+                        NessusAPI(hostname=self.hostname,
+                                  port=self.nessus_port,
+                                  username=self.username,
+                                  password=self.password,
+                                  profile=self.CONFIG_SECTION,
+                                  access_key=self.access_key,
+                                  secret_key=self.secret_key
+                                  )
+                    self.nessus_connect = True
+                    self.logger.info('Connected to {} on {host}:{port}'.format(self.CONFIG_SECTION, host=self.hostname,
+                                                                               port=str(self.nessus_port)))
+                except Exception as e:
+                    self.logger.error('Exception: {}'.format(str(e)))
+                    raise Exception(
+                        'Could not connect to {} -- Please verify your settings in {config} are correct and try again.\nReason: {e}'.format(
+                            self.CONFIG_SECTION,
+                            config=self.config.config_in,
+                            e=e))
+            except Exception as e:
+                self.logger.error('Could not properly load your config!\nReason: {e}'.format(e=e))
+                sys.exit(1)
+
+    def scan_count(self, scans, completed=False):
+        """
+        :param scans: Pulls in available scans
+        :param completed: Only return completed scans
+        :return:
+        """
+
+        self.logger.info('Gathering all scan data... this may take a while...')
+        scan_records = []
+        for s in scans:
+            if s:
+                record = {}
+                record['scan_id'] = s['id']
+                record['scan_name'] = s.get('name', '')
+                record['owner'] = s.get('owner', '')
+                record['creation_date'] = s.get('creation_date', '')
+                record['starttime'] = s.get('starttime', '')
+                record['timezone'] = s.get('timezone', '')
+                record['folder_id'] = s.get('folder_id', '')
+                try:
+                    for h in self.nessus.get_scan_history(s['id']):
+                        record['uuid'] = h.get('uuid', '')
+                        record['status'] = h.get('status', '')
+                        record['history_id'] = h.get('history_id', '')
+                        record['last_modification_date'] = \
+                            h.get('last_modification_date', '')
+                        record['norm_time'] = \
+                            self.nessus.get_utc_from_local(
+                                int(record['last_modification_date']),
+                                local_tz=self.nessus.tz_conv(record['timezone']))
+                        scan_records.append(record.copy())
+                except Exception as e:
+                    # a NoneType in a scan's history generates an error here
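+                    # the malformed history entry is skipped so one bad scan
+                    # does not abort collection of the remaining records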
+                    pass
+
+        if completed:
+            scan_records = [s for s in scan_records if s['status'] == 'completed']
+        return scan_records
+
+    def whisper_nessus(self):
+        if self.nessus_connect:
+            scan_data = self.nessus.scans
+            folders = scan_data['folders']
+            scans = scan_data['scans'] if scan_data['scans'] else []
+            all_scans = self.scan_count(scans)
+            if self.uuids:
+                scan_list = [scan for scan in all_scans if scan['uuid']
+                             not in self.uuids and scan['status'] in ['completed', 'imported']]
+            else:
+                scan_list = all_scans
+            self.logger.info('Identified {new} scans to be processed'.format(new=len(scan_list)))
+
+            if not scan_list:
+                self.logger.warn('No new scans to process. Exiting...')
+                return self.exit_code
+
+            # Create scan subfolders
+            for f in folders:
+                if not os.path.exists(self.path_check(f['name'])):
+                    if f['name'] == 'Trash' and self.nessus_trash:
+                        os.makedirs(self.path_check(f['name']))
+                    elif f['name'] != 'Trash':
+                        os.makedirs(self.path_check(f['name']))
+                else:
+                    self.logger.info('Directory already exists for {scan} - Skipping creation'.format(
+                        scan=self.path_check(f['name']).encode('utf8')))
+
+            # try to download and save scans into the folders they belong to
+            scan_count = 0
+
+            # TODO Rewrite this part to go through the scans that have already been processed
+
+            for s in scan_list:
+                scan_count += 1
+                (
+                    scan_name,
+                    scan_id,
+                    history_id,
+                    norm_time,
+                    status,
+                    uuid,
+                ) = (
+                    s['scan_name'],
+                    s['scan_id'],
+                    s['history_id'],
+                    s['norm_time'],
+                    s['status'],
+                    s['uuid'],
+                )
+
+                # TODO Create directory sync function which scans the directory for files that exist already and populates the database
+
+                folder_id = s['folder_id']
+                if self.CONFIG_SECTION == 'tenable':
+                    folder_name = ''
+                else:
+                    folder_name = next(f['name'] for f in folders if f['id'] == folder_id)
+                if status in ['completed', 'imported']:
+                    file_name = '%s_%s_%s_%s.%s' % (scan_name, scan_id,
+                                                    history_id, norm_time, 'csv')
+                    repls = (('\\', '_'), ('/', '_'), (' ', '_'))
+                    file_name = reduce(lambda a, kv: a.replace(*kv), repls, file_name)
+                    relative_path_name = self.path_check(folder_name + '/' + file_name).encode('utf8')
+
+                    if os.path.isfile(relative_path_name):
+                        if self.develop:
+                            csv_in = pd.read_csv(relative_path_name)
+                            record_meta = (
+                                scan_name,
+                                scan_id,
+                                norm_time,
+                                file_name,
+                                time.time(),
+                                csv_in.shape[0],
+                                self.CONFIG_SECTION,
+                                uuid,
+                                1,
+                                0,
+                            )
+                            self.record_insert(record_meta)
+                            self.logger.info('File {filename} already exist! 
Updating database'.format(filename=relative_path_name)) + else: + try: + file_req = \ + self.nessus.download_scan(scan_id=scan_id, history=history_id, + export_format='csv') + except Exception as e: + self.logger.error('Could not download {} scan {}: {}'.format(self.CONFIG_SECTION, scan_id, str(e))) + self.exit_code += 1 + continue + + clean_csv = \ + pd.read_csv(io.StringIO(file_req.decode('utf-8'))) + if len(clean_csv) > 2: + self.logger.info('Processing {}/{} for scan: {}'.format(scan_count, len(scan_list), scan_name.encode('utf8'))) + columns_to_cleanse = ['CVSS','CVE','Description','Synopsis','Solution','See Also','Plugin Output', 'MAC Address'] + + for col in columns_to_cleanse: + if col in clean_csv: + clean_csv[col] = clean_csv[col].astype(str).apply(self.cleanser) + + clean_csv.to_csv(relative_path_name, index=False) + record_meta = ( + scan_name, + scan_id, + norm_time, + file_name, + time.time(), + clean_csv.shape[0], + self.CONFIG_SECTION, + uuid, + 1, + 0, + ) + self.record_insert(record_meta) + self.logger.info('{filename} records written to {path} '.format(filename=clean_csv.shape[0], + path=file_name.encode('utf8'))) + else: + record_meta = ( + scan_name, + scan_id, + norm_time, + file_name, + time.time(), + clean_csv.shape[0], + self.CONFIG_SECTION, + uuid, + 1, + 0, + ) + self.record_insert(record_meta) + self.logger.warn('{} has no host available... Updating database and skipping!'.format(file_name)) + self.conn.close() + self.logger.info('Scan aggregation complete! Connection to database closed.') + else: + self.logger.error('Failed to use scanner at {host}:{port}'.format(host=self.hostname, port=self.nessus_port)) + self.exit_code += 1 + return self.exit_code + + +class vulnWhispererQualys(vulnWhispererBase): + + CONFIG_SECTION = 'qualys_web' + COLUMN_MAPPING = {'Access Path': 'access_path', + 'Ajax Request': 'ajax_request', + 'Ajax Request ID': 'ajax_request_id', + 'Authentication': 'authentication', + 'CVSS Base': 'cvss', + 'CVSS Temporal': 'cvss_temporal', + 'CWE': 'cwe', + 'Category': 'category', + 'Content': 'content', + 'DescriptionSeverity': 'severity_description', + 'DescriptionCatSev': 'category_description', + 'Detection ID': 'detection_id', + 'Evidence #1': 'evidence_1', + 'First Time Detected': 'first_time_detected', + 'Form Entry Point': 'form_entry_point', + 'Function': 'function', + 'Groups': 'groups', + 'ID': 'id', + 'Ignore Comments': 'ignore_comments', + 'Ignore Date': 'ignore_date', + 'Ignore Reason': 'ignore_reason', + 'Ignore User': 'ignore_user', + 'Ignored': 'ignored', + 'Impact': 'impact', + 'Last Time Detected': 'last_time_detected', + 'Last Time Tested': 'last_time_tested', + 'Level': 'level', + 'OWASP': 'owasp', + 'Operating System': 'operating_system', + 'Owner': 'owner', + 'Param': 'param', + 'Payload #1': 'payload_1', + 'QID': 'plugin_id', + 'Request Headers #1': 'request_headers_1', + 'Request Method #1': 'request_method_1', + 'Request URL #1': 'request_url_1', + 'Response #1': 'response_1', + 'Scope': 'scope', + 'Severity': 'risk', + 'Severity Level': 'security_level', + 'Solution': 'solution', + 'Times Detected': 'times_detected', + 'Title': 'plugin_name', + 'URL': 'url', + 'Url': 'uri', + 'Vulnerability Category': 'vulnerability_category', + 'WASC': 'wasc', + 'Web Application Name': 'web_application_name'} + def __init__( + self, + config=None, + db_name='report_tracker.db', + purge=False, + verbose=None, + debug=False, + username=None, + password=None, + ): + + super(vulnWhispererQualys, self).__init__(config=config) + self.logger = 
logging.getLogger('vulnWhispererQualys')
+        if debug:
+            self.logger.setLevel(logging.DEBUG)
+        try:
+            self.qualys_scan = qualysScanReport(config=config)
+        except Exception as e:
+            self.logger.error("Unable to establish connection with Qualys scanner. Reason: {}".format(e))
+            raise
+        self.latest_scans = self.qualys_scan.qw.get_all_scans()
+        self.directory_check()
+        self.scans_to_process = None
+
+    def whisper_reports(self,
+                        report_id=None,
+                        launched_date=None,
+                        scan_name=None,
+                        scan_reference=None,
+                        output_format='json',
+                        cleanup=True):
+        """
+        report_id: App ID
+        launched_date: Last time the scan was run for this app_id
+        """
+        vuln_ready = None
+
+        try:
+            if 'Z' in launched_date:
+                launched_date = self.qualys_scan.utils.iso_to_epoch(launched_date)
+            report_name = 'qualys_web_' + str(report_id) \
+                          + '_{last_updated}'.format(last_updated=launched_date) \
+                          + '.{extension}'.format(extension=output_format)
+
+            relative_path_name = self.path_check(report_name).encode('utf8')
+
+            if os.path.isfile(relative_path_name):
+                # TODO Possibly make this optional to sync directories
+                file_length = len(open(relative_path_name).readlines())
+                record_meta = (
+                    scan_name,
+                    scan_reference,
+                    launched_date,
+                    report_name,
+                    time.time(),
+                    file_length,
+                    self.CONFIG_SECTION,
+                    report_id,
+                    1,
+                    0,
+                )
+                self.record_insert(record_meta)
+                self.logger.info('File {filename} already exists! Updating database'.format(filename=relative_path_name))
+
+            else:
+                self.logger.info('Generating report for {}'.format(report_id))
+                status = self.qualys_scan.qw.create_report(report_id)
+                root = objectify.fromstring(status)
+                if root.responseCode == 'SUCCESS':
+                    self.logger.info('Successfully generated report! ID: {}'.format(report_id))
+                    generated_report_id = root.data.Report.id
+                    self.logger.info('New Report ID: {}'.format(generated_report_id))
+
+                    vuln_ready = self.qualys_scan.process_data(path=self.write_path, file_id=str(generated_report_id))
+
+                    vuln_ready['scan_name'] = scan_name
+                    vuln_ready['scan_reference'] = scan_reference
+                    vuln_ready.rename(columns=self.COLUMN_MAPPING, inplace=True)
+
+                    record_meta = (
+                        scan_name,
+                        scan_reference,
+                        launched_date,
+                        report_name,
+                        time.time(),
+                        vuln_ready.shape[0],
+                        self.CONFIG_SECTION,
+                        report_id,
+                        1,
+                        0,
+                    )
+                    self.record_insert(record_meta)
+
+                    if output_format == 'json':
+                        with open(relative_path_name, 'w') as f:
+                            f.write(vuln_ready.to_json(orient='records', lines=True))
+                            f.write('\n')
+
+                    elif output_format == 'csv':
+                        vuln_ready.to_csv(relative_path_name, index=False, header=True)  # add when the timestamp occurred
+
+                    self.logger.info('Report written to {}'.format(report_name))
+
+                    if cleanup:
+                        self.logger.info('Removing report {} from Qualys Database'.format(generated_report_id))
+                        cleaning_up = self.qualys_scan.qw.delete_report(generated_report_id)
+                        os.remove(self.path_check(str(generated_report_id) + '.csv'))
+                        self.logger.info('Deleted report from local disk: {}'.format(self.path_check(str(generated_report_id))))
+                else:
+                    self.logger.error('Could not process report ID: {}'.format(status))
+
+        except Exception as e:
+            self.logger.error('Could not process {}: {}'.format(report_id, str(e)))
+        return vuln_ready
+
+    def identify_scans_to_process(self):
+        if self.uuids:
+            self.scans_to_process = self.latest_scans[~self.latest_scans['id'].isin(self.uuids)]
+        else:
+            self.scans_to_process = self.latest_scans
+        self.logger.info('Identified {new} scans to be processed'.format(new=len(self.scans_to_process)))
+
+    def process_web_assets(self):
+        counter = 0
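+        # keep only the scans whose report ids are not yet recorded in scan_history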
+        self.identify_scans_to_process()
+        if self.scans_to_process.shape[0]:
+            for app in self.scans_to_process.iterrows():
+                counter += 1
+                r = app[1]
+                self.logger.info('Processing {}/{}'.format(counter, len(self.scans_to_process)))
+                self.whisper_reports(report_id=r['id'],
+                                     launched_date=r['launchedDate'],
+                                     scan_name=r['name'],
+                                     scan_reference=r['reference'])
+        else:
+            self.logger.info('No new scans to process. Exiting...')
+        self.conn.close()
+        return self.exit_code
+
+
+class vulnWhispererOpenVAS(vulnWhispererBase):
+    CONFIG_SECTION = 'openvas'
+    COLUMN_MAPPING = {'IP': 'asset',
+                      'Hostname': 'hostname',
+                      'Port': 'port',
+                      'Port Protocol': 'protocol',
+                      'CVSS': 'cvss',
+                      'Severity': 'severity',
+                      'Solution Type': 'category',
+                      'NVT Name': 'plugin_name',
+                      'Summary': 'synopsis',
+                      'Specific Result': 'plugin_output',
+                      'NVT OID': 'nvt_oid',
+                      'Task ID': 'task_id',
+                      'Task Name': 'task_name',
+                      'Timestamp': 'timestamp',
+                      'Result ID': 'result_id',
+                      'Impact': 'description',
+                      'Solution': 'solution',
+                      'Affected Software/OS': 'affected_software',
+                      'Vulnerability Insight': 'vulnerability_insight',
+                      'Vulnerability Detection Method': 'vulnerability_detection_method',
+                      'Product Detection Result': 'product_detection_result',
+                      'BIDs': 'bids',
+                      'CERTs': 'certs',
+                      'Other References': 'see_also'
+                      }
+
+    def __init__(
+        self,
+        config=None,
+        db_name='report_tracker.db',
+        purge=False,
+        verbose=None,
+        debug=False,
+        username=None,
+        password=None,
+    ):
+        super(vulnWhispererOpenVAS, self).__init__(config=config)
+        self.logger = logging.getLogger('vulnWhispererOpenVAS')
+        if debug:
+            self.logger.setLevel(logging.DEBUG)
+
+        self.directory_check()
+        self.port = int(self.config.get(self.CONFIG_SECTION, 'port'))
+        self.develop = True
+        self.purge = purge
+        self.scans_to_process = None
+        try:
+            self.openvas_api = OpenVAS_API(hostname=self.hostname,
+                                           port=self.port,
+                                           username=self.username,
+                                           password=self.password)
+        except Exception as e:
+            self.logger.error("Unable to establish connection with OpenVAS scanner. Reason: {}".format(e))
+            raise
+
+    def whisper_reports(self, output_format='json', launched_date=None, report_id=None, cleanup=True):
+        report = None
+        if report_id:
+            self.logger.info('Processing report ID: {}'.format(report_id))
+
+            scan_name = report_id.replace('-', '')
+            report_name = 'openvas_scan_{scan_name}_{last_updated}.{extension}'.format(scan_name=scan_name,
+                                                                                       last_updated=launched_date,
+                                                                                       extension=output_format)
+            relative_path_name = self.path_check(report_name).encode('utf8')
+            scan_reference = report_id
+
+            if os.path.isfile(relative_path_name):
+                # TODO Possibly make this optional to sync directories
+                file_length = len(open(relative_path_name).readlines())
+                record_meta = (
+                    scan_name,
+                    scan_reference,
+                    launched_date,
+                    report_name,
+                    time.time(),
+                    file_length,
+                    self.CONFIG_SECTION,
+                    report_id,
+                    1,
+                    0,
+                )
+                self.record_insert(record_meta)
+                self.logger.info('File {filename} already exists! Updating database'.format(filename=relative_path_name))
+
+            else:
+                vuln_ready = self.openvas_api.process_report(report_id=report_id)
+                vuln_ready['scan_name'] = scan_name
+                vuln_ready['scan_reference'] = report_id
+                vuln_ready.rename(columns=self.COLUMN_MAPPING, inplace=True)
+                vuln_ready.port = vuln_ready.port.fillna(0).astype(int)
+                vuln_ready.fillna('', inplace=True)
+                if output_format == 'json':
+                    with open(relative_path_name, 'w') as f:
+                        f.write(vuln_ready.to_json(orient='records', lines=True))
+                        f.write('\n')
+                self.logger.info('Report written to {}'.format(report_name))
+
+        return report
+
+    def identify_scans_to_process(self):
+        if self.uuids:
+            self.scans_to_process = self.openvas_api.openvas_reports[
+                ~self.openvas_api.openvas_reports.report_ids.isin(self.uuids)]
+        else:
+            self.scans_to_process = self.openvas_api.openvas_reports
+        self.logger.info('Identified {new} scans to be processed'.format(new=len(self.scans_to_process)))
+
+    def process_openvas_scans(self):
+        counter = 0
+        self.identify_scans_to_process()
+        if self.scans_to_process.shape[0]:
+            for scan in self.scans_to_process.iterrows():
+                counter += 1
+                info = scan[1]
+                self.logger.info('Processing {}/{} - Report ID: {}'.format(counter, len(self.scans_to_process), info['report_ids']))
+                self.whisper_reports(report_id=info['report_ids'],
+                                     launched_date=info['epoch'])
+            self.logger.info('Processing complete')
+        else:
+            self.logger.info('No new scans to process. Exiting...')
+        self.conn.close()
+        return self.exit_code
+
+
+class vulnWhispererQualysVuln(vulnWhispererBase):
+
+    CONFIG_SECTION = 'qualys_vuln'
+    COLUMN_MAPPING = {'cvss_base': 'cvss',
+                      'cvss3_base': 'cvss3',
+                      'cve_id': 'cve',
+                      'os': 'operating_system',
+                      'qid': 'plugin_id',
+                      'severity': 'risk',
+                      'title': 'plugin_name'}
+
+    def __init__(
+        self,
+        config=None,
+        db_name='report_tracker.db',
+        purge=False,
+        verbose=None,
+        debug=False,
+        username=None,
+        password=None,
+    ):
+
+        super(vulnWhispererQualysVuln, self).__init__(config=config)
+        self.logger = logging.getLogger('vulnWhispererQualysVuln')
+        if debug:
+            self.logger.setLevel(logging.DEBUG)
+        try:
+            self.qualys_scan = qualysVulnScan(config=config)
+        except Exception as e:
+            self.logger.error("Unable to create connection with Qualys. Reason: {}".format(e))
+            raise
+        self.directory_check()
+        self.scans_to_process = None
+
+    def whisper_reports(self,
+                        report_id=None,
+                        launched_date=None,
+                        scan_name=None,
+                        scan_reference=None,
+                        output_format='json',
+                        cleanup=True):
+
+        if 'Z' in launched_date:
+            launched_date = self.qualys_scan.utils.iso_to_epoch(launched_date)
+        report_name = 'qualys_vuln_' + report_id.replace('/', '_') \
+                      + '_{last_updated}'.format(last_updated=launched_date) \
+                      + '.json'
+
+        relative_path_name = self.path_check(report_name).encode('utf8')
+
+        if os.path.isfile(relative_path_name):
+            # TODO Possibly make this optional to sync directories
+            file_length = len(open(relative_path_name).readlines())
+            record_meta = (
+                scan_name,
+                scan_reference,
+                launched_date,
+                report_name,
+                time.time(),
+                file_length,
+                self.CONFIG_SECTION,
+                report_id,
+                1,
+                0,
+            )
+            self.record_insert(record_meta)
+            self.logger.info('File {filename} already exists! Updating database'.format(filename=relative_path_name))
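+            # an existing report file is not re-downloaded; only its scan_history row is refreshed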
+
+        else:
+            try:
+                self.logger.info('Processing report ID: {}'.format(report_id))
+                vuln_ready = self.qualys_scan.process_data(scan_id=report_id)
+                vuln_ready['scan_name'] = scan_name
+                vuln_ready['scan_reference'] = report_id
+                vuln_ready.rename(columns=self.COLUMN_MAPPING, inplace=True)
+            except Exception as e:
+                self.logger.error('Could not process {}: {}'.format(report_id, str(e)))
+                self.exit_code += 1
+                return self.exit_code
+
+            record_meta = (
+                scan_name,
+                scan_reference,
+                launched_date,
+                report_name,
+                time.time(),
+                vuln_ready.shape[0],
+                self.CONFIG_SECTION,
+                report_id,
+                1,
+                0,
+            )
+            self.record_insert(record_meta)
+
+            if output_format == 'json':
+                with open(relative_path_name, 'w') as f:
+                    f.write(vuln_ready.to_json(orient='records', lines=True))
+                    f.write('\n')
+
+            self.logger.info('Report written to {}'.format(report_name))
+        return self.exit_code
+
+    def identify_scans_to_process(self):
+        self.latest_scans = self.qualys_scan.qw.get_all_scans()
+        if self.uuids:
+            self.scans_to_process = self.latest_scans.loc[
+                (~self.latest_scans['id'].isin(self.uuids))
+                & (self.latest_scans['status'] == 'Finished')]
+        else:
+            self.scans_to_process = self.latest_scans
+        self.logger.info('Identified {new} scans to be processed'.format(new=len(self.scans_to_process)))
+
+    def process_vuln_scans(self):
+        counter = 0
+        self.identify_scans_to_process()
+        if self.scans_to_process.shape[0]:
+            for app in self.scans_to_process.iterrows():
+                counter += 1
+                r = app[1]
+                self.logger.info('Processing {}/{}'.format(counter, len(self.scans_to_process)))
+                # whisper_reports already increments self.exit_code on failure,
+                # so its return value is not added again here
+                self.whisper_reports(report_id=r['id'],
+                                     launched_date=r['date'],
+                                     scan_name=r['name'],
+                                     scan_reference=r['type'])
+        else:
+            self.logger.info('No new scans to process. Exiting...')
+        self.conn.close()
+        return self.exit_code
+
+
+class vulnWhispererJIRA(vulnWhispererBase):
+
+    CONFIG_SECTION = 'jira'
+
+    def __init__(
+        self,
+        config=None,
+        db_name='report_tracker.db',
+        purge=False,
+        verbose=None,
+        debug=False,
+        username=None,
+        password=None,
+    ):
+        super(vulnWhispererJIRA, self).__init__(config=config)
+        self.logger = logging.getLogger('vulnWhispererJira')
+        if debug:
+            self.logger.setLevel(logging.DEBUG)
+        self.config_path = config
+        self.config = vwConfig(config)
+        self.host_resolv_cache = {}
+        self.host_no_resolv = []
+        self.no_resolv_by_team_dict = {}
+        # save locally those assets without a DNS entry, to flag them to system owners
+        self.no_resolv_fname = "no_resolv.txt"
+        if os.path.isfile(self.no_resolv_fname):
+            with open(self.no_resolv_fname, "r") as json_file:
+                self.no_resolv_by_team_dict = json.load(json_file)
+        self.directory_check()
+
+        if config is not None:
+            try:
+                self.logger.info('Attempting to connect to jira...')
+                self.jira = \
+                    JiraAPI(hostname=self.hostname,
+                            username=self.username,
+                            password=self.password,
+                            path=self.config.get('jira', 'write_path'))
+                self.jira_connect = True
+                self.logger.info('Connected to jira on {host}'.format(host=self.hostname))
+            except Exception as e:
+                self.logger.error('Exception: {}'.format(str(e)))
+                raise Exception(
+                    'Could not connect to jira -- Please verify your settings in {config} are correct and try again.\nReason: {e}'.format(
+                        config=self.config.config_in, e=e))
+
+        profiles = []
+        profiles = self.get_scan_profiles()
+
+        if not self.config.exists_jira_profiles(profiles):
+            self.config.update_jira_profiles(profiles)
+            self.logger.info("Jira profiles have been created in {config}, please fill the variables before rerunning the module.".format(config=self.config_path))
+            sys.exit(0)
+
+    def get_env_variables(self, source, scan_name):
+        # returns [jira_project, jira_components, fullpath, min_critical, dns_resolv]
+
+        # Jira variables
+        jira_section = self.config.normalize_section("{}.{}".format(source, scan_name))
+
+        project = self.config.get(jira_section, 'jira_project')
+        if project == "":
+            self.logger.error('JIRA project is missing on the configuration file!')
+            sys.exit(1)
+
+        # check that the project actually exists
+        if not self.jira.project_exists(project):
+            self.logger.error("JIRA project '{project}' doesn't exist!".format(project=project))
+            sys.exit(1)
+
+        components = self.config.get(jira_section, 'components').split(',')
+
+        # clean the empty array produced by splitting ''
+        if not components[0]:
+            components = []
+
+        min_critical = self.config.get(jira_section, 'min_critical_to_report')
+        if not min_critical:
+            self.logger.error('"min_critical_to_report" variable on config file is empty.')
+            sys.exit(1)
+
+        # datafile path
+        filename, reported = self.get_latest_results(source, scan_name)
+        fullpath = ""
+
+        # search data files under the user specified directory
+        for root, dirnames, filenames in os.walk(vwConfig(self.config_path).get(source, 'write_path')):
+            if filename in filenames:
+                fullpath = "{}/{}".format(root, filename)
+
+        if reported:
+            self.logger.warn('Last Scan of "{scan_name}" for source "{source}" has already been reported; will be skipped.'.format(scan_name=scan_name, source=source))
+            return [False] * 5
+
+        if not fullpath:
+            self.logger.error('Scan of "{scan_name}" for source "{source}" has not been found. 
Please check that the scanner data files are in place.'.format(scan_name=scan_name, source=source)) + sys.exit(1) + + dns_resolv = self.config.get('jira','dns_resolv') + if dns_resolv in ('False', 'false', ''): + dns_resolv = False + elif dns_resolv in ('True', 'true'): + dns_resolv = True + else: + self.logger.error("dns_resolv variable not setup in [jira] section; will not do dns resolution") + dns_resolv = False + + return project, components, fullpath, min_critical, dns_resolv + + + def parse_nessus_vulnerabilities(self, fullpath, source, scan_name, min_critical): + + vulnerabilities = [] + + # we need to parse the CSV + risks = ['none', 'low', 'medium', 'high', 'critical'] + min_risk = int([i for i,x in enumerate(risks) if x == min_critical][0]) + + df = pd.read_csv(fullpath, delimiter=',') + + #nessus fields we want - ['Host','Protocol','Port', 'Name', 'Synopsis', 'Description', 'Solution', 'See Also'] + for index in range(len(df)): + # filtering vulnerabilities by criticality, discarding low risk + to_report = int([i for i,x in enumerate(risks) if x == df.loc[index]['Risk'].lower()][0]) + if to_report < min_risk: + continue + + if not vulnerabilities or df.loc[index]['Name'] not in [entry['title'] for entry in vulnerabilities]: + vuln = {} + #vulnerabilities should have all the info for creating all JIRA labels + vuln['source'] = source + vuln['scan_name'] = scan_name + #vulnerability variables + vuln['title'] = df.loc[index]['Name'] + vuln['diagnosis'] = df.loc[index]['Synopsis'].replace('\\n',' ') + vuln['consequence'] = df.loc[index]['Description'].replace('\\n',' ') + vuln['solution'] = df.loc[index]['Solution'].replace('\\n',' ') + vuln['ips'] = [] + vuln['ips'].append("{} - {}/{}".format(df.loc[index]['Host'], df.loc[index]['Protocol'], df.loc[index]['Port'])) + vuln['risk'] = df.loc[index]['Risk'].lower() + + # Nessus "nan" value gets automatically casted to float by python + if not (type(df.loc[index]['See Also']) is float): + vuln['references'] = df.loc[index]['See Also'].split("\\n") + else: + vuln['references'] = [] + vulnerabilities.append(vuln) + + else: + # grouping assets by vulnerability to open on single ticket, as each asset has its own nessus entry + for vuln in vulnerabilities: + if vuln['title'] == df.loc[index]['Name']: + vuln['ips'].append("{} - {}/{}".format(df.loc[index]['Host'], df.loc[index]['Protocol'], df.loc[index]['Port'])) + + return vulnerabilities + + def parse_qualys_vuln_vulnerabilities(self, fullpath, source, scan_name, min_critical, dns_resolv = False): + #parsing of the qualys vulnerabilities schema + #parse json + vulnerabilities = [] + + risks = ['info', 'low', 'medium', 'high', 'critical'] + # +1 as array is 0-4, but score is 1-5 + min_risk = int([i for i,x in enumerate(risks) if x == min_critical][0])+1 + + try: + data=[json.loads(line) for line in open(fullpath).readlines()] + except Exception as e: + self.logger.warn("Scan has no vulnerabilities, skipping.") + return vulnerabilities + + #qualys fields we want - [] + for index in range(len(data)): + if int(data[index]['risk']) < min_risk: + continue + + elif data[index]['type'] == 'Practice' or data[index]['type'] == 'Ig': + self.logger.debug("Vulnerability '{vuln}' ignored, as it is 'Practice/Potential', not verified.".format(vuln=data[index]['plugin_name'])) + continue + + if not vulnerabilities or data[index]['plugin_name'] not in [entry['title'] for entry in vulnerabilities]: + vuln = {} + #vulnerabilities should have all the info for creating all JIRA labels + vuln['source'] = source 
+                vuln['scan_name'] = scan_name
+                # vulnerability variables
+                vuln['title'] = data[index]['plugin_name']
+                vuln['diagnosis'] = data[index]['threat'].replace('\\n', ' ')
+                vuln['consequence'] = data[index]['impact'].replace('\\n', ' ')
+                vuln['solution'] = data[index]['solution'].replace('\\n', ' ')
+                vuln['ips'] = []
+                # TODO Qualys provides DNS resolution itself and uses \n separators instead of \\n
+
+                vuln['ips'].append("{ip} - {protocol}/{port} - {dns}".format(**self.get_asset_fields(data[index], dns_resolv)))
+
+                # different risk scoring system than Nessus!
+                vuln['risk'] = risks[int(data[index]['risk']) - 1]
+
+                # a missing value ("nan") gets automatically cast to float by python
+                if not (type(data[index]['vendor_reference']) is float or data[index]['vendor_reference'] is None):
+                    vuln['references'] = data[index]['vendor_reference'].split("\\n")
+                else:
+                    vuln['references'] = []
+                vulnerabilities.append(vuln)
+            else:
+                # group assets by vulnerability to open a single ticket, as each asset has its own entry
+                for vuln in vulnerabilities:
+                    if vuln['title'] == data[index]['plugin_name']:
+                        vuln['ips'].append("{ip} - {protocol}/{port} - {dns}".format(**self.get_asset_fields(data[index], dns_resolv)))
+
+        return vulnerabilities
+
+    def get_asset_fields(self, vuln, dns_resolv):
+        values = {}
+        values['ip'] = vuln['ip']
+        values['protocol'] = vuln['protocol']
+        values['port'] = vuln['port']
+        values['dns'] = ''
+        if dns_resolv:
+            if vuln['dns']:
+                values['dns'] = vuln['dns']
+            else:
+                if values['ip'] in self.host_resolv_cache:
+                    self.logger.debug("Hostname from {ip} cached, retrieving from cache.".format(ip=values['ip']))
+                    values['dns'] = self.host_resolv_cache[values['ip']]
+                else:
+                    self.logger.debug("No hostname, trying to resolve {ip}'s hostname.".format(ip=values['ip']))
+                    try:
+                        values['dns'] = socket.gethostbyaddr(vuln['ip'])[0]
+                        self.host_resolv_cache[values['ip']] = values['dns']
+                        self.logger.debug("Hostname found: {hostname}.".format(hostname=values['dns']))
+                    except Exception:
+                        # resolution failed; cache the miss so this ip is not retried
+                        self.host_resolv_cache[values['ip']] = ''
+                        self.host_no_resolv.append(values['ip'])
+                        self.logger.debug("Hostname not found for: {ip}.".format(ip=values['ip']))
+
+        for key in values:
+            if not values[key]:
+                values[key] = 'N/A'
+
+        return values
+
+    def parse_vulnerabilities(self, fullpath, source, scan_name, min_critical):
+        # TODO: single local save format for all scanners
+        # JIRA standard vuln format - ['source', 'scan_name', 'title', 'diagnosis', 'consequence', 'solution', 'ips', 'references']
+        return 0
+
+    def jira_sync(self, source, scan_name):
+        self.logger.info("Jira Sync triggered for source '{source}' and scan '{scan_name}'".format(source=source, scan_name=scan_name))
+
+        project, components, fullpath, min_critical, dns_resolv = self.get_env_variables(source, scan_name)
+
+        if not project:
+            self.logger.debug("Skipping scan for source '{source}' and scan '{scan_name}': vulnerabilities have already been reported.".format(source=source, scan_name=scan_name))
+            return False
+
+        vulnerabilities = []
+
+        # ***Nessus parsing***
+        if source == "nessus":
+            vulnerabilities = self.parse_nessus_vulnerabilities(fullpath, source, scan_name, min_critical)
+
+        # ***Qualys VM parsing***
+        if source == "qualys_vuln":
+            vulnerabilities = self.parse_qualys_vuln_vulnerabilities(fullpath, source, scan_name, min_critical, dns_resolv)
+
+        # ***JIRA sync***
+        if vulnerabilities:
+            self.logger.info('{source} data has been successfully parsed'.format(source=source.upper()))
+            self.logger.info('Starting JIRA sync')
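+            # JiraAPI.sync (vulnwhisp/reporting/jira_api.py) is expected to open or update
+            # one ticket per vulnerability title, grouping all affected assets in that ticket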
+            self.jira.sync(vulnerabilities, project, components)
+        else:
+            self.logger.info("[{source}.{scan_name}] No vulnerabilities or vulnerabilities not parsed.".format(source=source, scan_name=scan_name))
+            self.set_latest_scan_reported(fullpath.split("/")[-1])
+            return False
+
+        # write to file those assets without DNS resolution, if any
+        if self.host_no_resolv:
+            # replace the old list of unresolved assets with the new one, or create it if missing
+            self.no_resolv_by_team_dict[scan_name] = self.host_no_resolv
+            with open(self.no_resolv_fname, 'w') as outfile:
+                json.dump(self.no_resolv_by_team_dict, outfile)
+
+        self.set_latest_scan_reported(fullpath.split("/")[-1])
+        return True
+
+    def sync_all(self):
+        autoreport_sections = self.config.get_sections_with_attribute('autoreport')
+
+        if autoreport_sections:
+            for scan in autoreport_sections:
+                try:
+                    self.jira_sync(self.config.get(scan, 'source'), self.config.get(scan, 'scan_name'))
+                except Exception as e:
+                    self.logger.error("VulnWhisperer wasn't able to report the vulnerabilities from the '{}' source".format(self.config.get(scan, 'source')))
+            return True
+        return False
+
+class vulnWhisperer(object):
+
+    def __init__(self,
+                 profile=None,
+                 verbose=None,
+                 username=None,
+                 password=None,
+                 config=None,
+                 source=None,
+                 scanname=None):
+
+        self.logger = logging.getLogger('vulnWhisperer')
+        if verbose:
+            self.logger.setLevel(logging.DEBUG)
+        self.profile = profile
+        self.config = config
+        self.username = username
+        self.password = password
+        self.verbose = verbose
+        self.source = source
+        self.scanname = scanname
+        self.exit_code = 0
+
+    def whisper_vulnerabilities(self):
+
+        if self.profile == 'nessus':
+            vw = vulnWhispererNessus(config=self.config,
+                                     profile=self.profile)
+            if vw:
+                self.exit_code += vw.whisper_nessus()
+
+        elif self.profile == 'qualys_web':
+            vw = vulnWhispererQualys(config=self.config)
+            if vw:
+                self.exit_code += vw.process_web_assets()
+
+        elif self.profile == 'openvas':
+            vw_openvas = vulnWhispererOpenVAS(config=self.config)
+            if vw_openvas:
+                self.exit_code += vw_openvas.process_openvas_scans()
+
+        elif self.profile == 'tenable':
+            vw = vulnWhispererNessus(config=self.config,
+                                     profile=self.profile)
+            if vw:
+                self.exit_code += vw.whisper_nessus()
+
+        elif self.profile == 'qualys_vuln':
+            vw = vulnWhispererQualysVuln(config=self.config)
+            if vw:
+                self.exit_code += vw.process_vuln_scans()
+
+        elif self.profile == 'jira':
+            # first we check that the config fields exist; otherwise we create them
+            vw = vulnWhispererJIRA(config=self.config)
+            if vw:
+                if not (self.source and self.scanname):
+                    self.logger.info('No source/scan_name selected, all enabled scans will be synced')
+                    success = vw.sync_all()
+                    if not success:
+                        self.logger.error('All scans sync failed!')
+                        self.logger.error('Source scanner and scan name needed!')
+                        return 0
+                else:
+                    vw.jira_sync(self.source, self.scanname)
+
+        return self.exit_code