Create cvss score from base and temporal

This commit is contained in:
pemontto
2019-04-15 13:32:31 +10:00
parent 603050e7b3
commit 275b89c94d
4 changed files with 81 additions and 80 deletions

View File

@ -25,10 +25,10 @@ class NessusAPI(object):
EXPORT_HISTORY = EXPORT + '?history_id={history_id}' EXPORT_HISTORY = EXPORT + '?history_id={history_id}'
# All column mappings should be lowercase # All column mappings should be lowercase
COLUMN_MAPPING = { COLUMN_MAPPING = {
'cvss base score': 'cvss', 'cvss base score': 'cvss_base',
'cvss temporal score': 'cvss_temporal', 'cvss temporal score': 'cvss_temporal',
'cvss temporal vector': 'cvss_temporal_vector', 'cvss temporal vector': 'cvss_temporal_vector',
'cvss3 base score': 'cvss3', 'cvss3 base score': 'cvss3_base',
'cvss3 temporal score': 'cvss3_temporal', 'cvss3 temporal score': 'cvss3_temporal',
'cvss3 temporal vector': 'cvss3_temporal_vector', 'cvss3 temporal vector': 'cvss3_temporal_vector',
'fqdn': 'dns', 'fqdn': 'dns',
@ -188,64 +188,64 @@ class NessusAPI(object):
'None': 'US/Central'} 'None': 'US/Central'}
return time_map.get(tz, None) return time_map.get(tz, None)
def normalise(self, dataframe): def normalise(self, df):
self.logger.debug('Normalising data') self.logger.debug('Normalising data')
self.map_fields(dataframe) df = self.map_fields(df)
self.transform_values(dataframe) df = self.transform_values(df)
return dataframe return df
def map_fields(self, dataframe): def map_fields(self, df):
self.logger.debug('Mapping fields') self.logger.debug('Mapping fields')
# Any specific mappings here # Any specific mappings here
if self.profile == 'tenable': if self.profile == 'tenable':
# Prefer CVSS Base Score over CVSS for tenable # Prefer CVSS Base Score over CVSS for tenable
self.logger.debug('Dropping redundant tenable fields') self.logger.debug('Dropping redundant tenable fields')
dataframe.drop('CVSS', axis=1, inplace=True) df.drop('CVSS', axis=1, inplace=True)
dataframe.drop('IP Address', axis=1, inplace=True) df.drop('IP Address', axis=1, inplace=True)
# Map fields from COLUMN_MAPPING # Map fields from COLUMN_MAPPING
fields = [x.lower() for x in dataframe.columns] fields = [x.lower() for x in df.columns]
for field, replacement in self.COLUMN_MAPPING.iteritems(): for field, replacement in self.COLUMN_MAPPING.iteritems():
if field in fields: if field in fields:
self.logger.debug('Renaming "{}" to "{}"'.format(field, replacement)) self.logger.debug('Renaming "{}" to "{}"'.format(field, replacement))
fields[fields.index(field)] = replacement fields[fields.index(field)] = replacement
fields = [x.replace(' ', '_') for x in fields] fields = [x.replace(' ', '_') for x in fields]
dataframe.columns = fields df.columns = fields
return df
return dataframe
def transform_values(self, dataframe): def transform_values(self, df):
self.logger.debug('Transforming values') self.logger.debug('Transforming values')
# upper/lowercase fields # upper/lowercase fields
self.logger.debug('Changing case of fields') self.logger.debug('Changing case of fields')
dataframe['cve'] = dataframe['cve'].str.upper() df['cve'] = df['cve'].str.upper()
dataframe['protocol'] = dataframe['protocol'].str.lower() df['protocol'] = df['protocol'].str.lower()
# Copy asset to IP # Copy asset to IP
dataframe['ip'] = dataframe['asset'] df['ip'] = df['asset']
# Map risk to a SEVERITY MAPPING value # Map risk to a SEVERITY MAPPING value
self.logger.debug('Mapping risk to severity number') self.logger.debug('Mapping risk to severity number')
dataframe['risk_number'] = dataframe['risk'].str.lower() df['risk_number'] = df['risk'].str.lower()
dataframe['risk_number'].replace(self.SEVERITY_MAPPING, inplace=True) df['risk_number'].replace(self.SEVERITY_MAPPING, inplace=True)
if self.profile == 'tenable': if self.profile == 'tenable':
self.logger.debug('Combining CVSS vectors for tenable') self.logger.debug('Combining CVSS vectors for tenable')
# Combine CVSS vectors # Combine CVSS vectors
dataframe['cvss_vector'] = ( df['cvss_vector'] = (
dataframe[['cvss_vector', 'cvss_temporal_vector']] df[['cvss_vector', 'cvss_temporal_vector']]
.apply(lambda x: '{}/{}'.format(x[0], x[1]), axis=1) .apply(lambda x: '{}/{}'.format(x[0], x[1]), axis=1)
.str.rstrip('/nan') .str.rstrip('/nan')
) )
dataframe['cvss3_vector'] = ( df['cvss3_vector'] = (
dataframe[['cvss3_vector', 'cvss3_temporal_vector']] df[['cvss3_vector', 'cvss3_temporal_vector']]
.apply(lambda x: '{}/{}'.format(x[0], x[1]), axis=1) .apply(lambda x: '{}/{}'.format(x[0], x[1]), axis=1)
.str.rstrip('/nan') .str.rstrip('/nan')
) )
# CVSS score = cvss_temporal if cvss_temporal else cvss_base
df['cvss'] = df['cvss_base']
df.loc[df['cvss_temporal'].notnull(), 'cvss'] = df['cvss_temporal']
dataframe.fillna('', inplace=True) df.fillna('', inplace=True)
return df
return dataframe

View File

@ -191,16 +191,16 @@ class OpenVAS_API(object):
merged_df = pd.merge(report_df, self.openvas_reports, on='report_ids').reset_index().drop('index', axis=1) merged_df = pd.merge(report_df, self.openvas_reports, on='report_ids').reset_index().drop('index', axis=1)
return merged_df return merged_df
def normalise(self, dataframe): def normalise(self, df):
self.logger.debug('Normalising data') self.logger.debug('Normalising data')
self.map_fields(dataframe) df = self.map_fields(df)
self.transform_values(dataframe) df = self.transform_values(df)
return dataframe return df
def map_fields(self, dataframe): def map_fields(self, df):
self.logger.debug('Mapping fields') self.logger.debug('Mapping fields')
return dataframe return df
def transform_values(self, dataframe): def transform_values(self, df):
self.logger.debug('Transforming values') self.logger.debug('Transforming values')
return dataframe return df

View File

@ -29,7 +29,7 @@ class qualysWhisperAPI(object):
def scan_xml_parser(self, xml): def scan_xml_parser(self, xml):
all_records = [] all_records = []
root = ET.XML(xml.encode("utf-8")) root = ET.XML(xml.encode('utf-8'))
for child in root.find('.//SCAN_LIST'): for child in root.find('.//SCAN_LIST'):
all_records.append({ all_records.append({
'name': child.find('TITLE').text, 'name': child.find('TITLE').text,
@ -81,8 +81,6 @@ class qualysVulnScan:
COLUMN_MAPPING = { COLUMN_MAPPING = {
'cve_id': 'cve', 'cve_id': 'cve',
'cvss_base': 'cvss',
'cvss3_base': 'cvss3',
'impact': 'synopsis', 'impact': 'synopsis',
'ip_status': 'state', 'ip_status': 'state',
'os': 'operating_system', 'os': 'operating_system',
@ -137,77 +135,80 @@ class qualysVulnScan:
return scan_report return scan_report
def normalise(self, dataframe): def normalise(self, df):
self.logger.debug('Normalising data') self.logger.debug('Normalising data')
self.map_fields(dataframe) df = self.map_fields(df)
self.transform_values(dataframe) df = self.transform_values(df)
return dataframe return df
def map_fields(self, dataframe): def map_fields(self, df):
self.logger.info('Mapping fields') self.logger.info('Mapping fields')
# Map fields from COLUMN_MAPPING # Map fields from COLUMN_MAPPING
fields = [x.lower() for x in dataframe.columns] fields = [x.lower() for x in df.columns]
for field, replacement in self.COLUMN_MAPPING.iteritems(): for field, replacement in self.COLUMN_MAPPING.iteritems():
if field in fields: if field in fields:
self.logger.info('Renaming "{}" to "{}"'.format(field, replacement)) self.logger.info('Renaming "{}" to "{}"'.format(field, replacement))
fields[fields.index(field)] = replacement fields[fields.index(field)] = replacement
fields = [x.replace(' ', '_') for x in fields] fields = [x.replace(' ', '_') for x in fields]
dataframe.columns = fields df.columns = fields
return dataframe return df
def transform_values(self, dataframe): def transform_values(self, df):
self.logger.info('Transforming values') self.logger.info('Transforming values')
# upper/lowercase fields # upper/lowercase fields
self.logger.info('Changing case of fields') self.logger.info('Changing case of fields')
dataframe['cve'] = dataframe['cve'].str.upper() df['cve'] = df['cve'].str.upper()
dataframe['protocol'] = dataframe['protocol'].str.lower() df['protocol'] = df['protocol'].str.lower()
# Construct the CVSS vector # Construct the CVSS vector
dataframe['cvss_vector'] = '' df['cvss_vector'] = ''
dataframe.loc[dataframe["cvss"].notnull(), "cvss_vector"] = ( df.loc[df['cvss_base'].notnull(), 'cvss_vector'] = (
dataframe.loc[dataframe["cvss"].notnull(), "cvss"] df.loc[df['cvss_base'].notnull(), 'cvss_base']
.str.split() .str.split()
.apply(lambda x: x[1]) .apply(lambda x: x[1])
.str.replace("(", "") .str.replace('(', '')
.str.replace(")", "") .str.replace(')', '')
) )
dataframe.loc[dataframe["cvss"].notnull(), "cvss"] = ( df.loc[df['cvss_base'].notnull(), 'cvss_base'] = (
dataframe.loc[dataframe["cvss"].notnull(), "cvss"] df.loc[df['cvss_base'].notnull(), 'cvss_base']
.str.split() .str.split()
.apply(lambda x: x[0]) .apply(lambda x: x[0])
) )
dataframe['cvss_temporal_vector'] = '' df['cvss_temporal_vector'] = ''
dataframe.loc[dataframe["cvss_temporal"].notnull(), "cvss_temporal_vector"] = ( df.loc[df['cvss_temporal'].notnull(), 'cvss_temporal_vector'] = (
dataframe.loc[dataframe["cvss_temporal"].notnull(), "cvss_temporal"] df.loc[df['cvss_temporal'].notnull(), 'cvss_temporal']
.str.split() .str.split()
.apply(lambda x: x[1]) .apply(lambda x: x[1])
.str.replace("(", "") .str.replace('(', '')
.str.replace(")", "") .str.replace(')', '')
) )
dataframe.loc[dataframe["cvss_temporal"].notnull(), "cvss_temporal"] = ( df.loc[df['cvss_temporal'].notnull(), 'cvss_temporal'] = (
dataframe.loc[dataframe["cvss_temporal"].notnull(), "cvss_temporal"] df.loc[df['cvss_temporal'].notnull(), 'cvss_temporal']
.str.split() .str.split()
.apply(lambda x: x[0]) .apply(lambda x: x[0])
.fillna('') .fillna('')
) )
# Combine base and temporal # Combine base and temporal
dataframe["cvss_vector"] = ( df['cvss_vector'] = (
dataframe[["cvss_vector", "cvss_temporal_vector"]] df[['cvss_vector', 'cvss_temporal_vector']]
.apply(lambda x: "{}/{}".format(x[0], x[1]), axis=1) .apply(lambda x: '{}/{}'.format(x[0], x[1]), axis=1)
.str.rstrip("/nan") .str.rstrip('/nan')
.fillna("") .fillna('')
) )
dataframe.drop('cvss_temporal_vector', axis=1, inplace=True) df.drop('cvss_temporal_vector', axis=1, inplace=True)
# Convert Qualys severity to standardised risk number # Convert Qualys severity to standardised risk number
dataframe['risk_number'] = dataframe['severity'].astype(int)-1 df['risk_number'] = df['severity'].astype(int)-1
dataframe.fillna('', inplace=True) df['cvss'] = df['cvss_base']
df.loc[df['cvss_temporal'].notnull(), 'cvss'] = df['cvss_temporal']
return dataframe df.fillna('', inplace=True)
return df

View File

@ -464,16 +464,16 @@ class qualysScanReport:
return merged_data return merged_data
def normalise(self, dataframe): def normalise(self, df):
self.logger.debug('Normalising data') self.logger.debug('Normalising data')
self.map_fields(dataframe) df = self.map_fields(df)
self.transform_values(dataframe) df = self.transform_values(df)
return dataframe return df
def map_fields(self, dataframe): def map_fields(self, df):
self.logger.debug('Mapping fields') self.logger.debug('Mapping fields')
return dataframe return df
def transform_values(self, dataframe): def transform_values(self, df):
self.logger.debug('Transforming values') self.logger.debug('Transforming values')
return dataframe return df