Implemented callback functions and result handling using the new probe API in
OpenSCAP. Relies on a patch to OpenSCAP which has not yet been accepted
upstream. Also fixed a few other small problems.
---
src/secstate/main.py | 182 +------------------------------------------------
src/secstate/util.py | 98 +++++++++++++++++++++++++++
2 files changed, 102 insertions(+), 178 deletions(-)
diff --git a/src/secstate/main.py b/src/secstate/main.py
index 0e1ab5b..c3b84d7 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -358,80 +358,6 @@ class Secstate:
self.log.debug("Succesfully added %(name)s to target model"
% {'name':oscap.oval_definition_get_id(defn)})
return True
- def create_result_model(self, def_model):
- if def_model == None:
- self.log.error("No definition model specified")
- return None
-
- sys_model = oscap.oval_syschar_model_new(def_model)
- if sys_model == None:
- self.log.error("Error creating system characteristics model")
- return None
-
- sess = oscap.oval_probe_session_new(sys_model)
- oscap.oval_psess_probe_sysinfo(sess)
- oscap.oval_psess_probe_objects(sess)
-
- res_model = oscap.oval_results_model_new(def_model, [sys_model, None])
- if res_model == None:
- self.log.error("Error creating results model")
- oscap.oval_syschar_model_free(sys_model)
- return None
-
- return res_model
-
- def create_result_directives(self, res_model):
- res_direct = oscap.oval_result_directives_new(res_model)
- if res_direct == None:
- self.log.error("Error creating directives")
- oscap.oval_syschar_model_free(sys_model)
- oscap.oval_results_model_free(res_model)
- return None
-
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_INVALID,
1)
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_TRUE, 1)
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_FALSE,
1)
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_UNKNOWN,
1)
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_ERROR,
1)
- oscap.oval_result_directives_set_reported(res_direct,
oscap.OVAL_RESULT_NOT_EVALUATED, 1)
- oscap.oval_result_directives_set_reported(res_direct,
oscap.OVAL_RESULT_NOT_APPLICABLE, 1)
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_FALSE,
oscap.OVAL_DIRECTIVE_CONTENT_FULL)
- oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_TRUE,
oscap.OVAL_DIRECTIVE_CONTENT_FULL)
-
- return res_direct
-
- def output_results(self, res_model, res_directives, xml, html):
- if xml:
- if not oscap.oval_results_model_export(res_model, res_directives, xml):
- self.log.error("Error exporting results to %(file)s" %
{'file':xml})
- return False
-
- if html:
- tmp = tempfile.mktemp()
- if not oscap.oval_results_model_export(res_model, res_directives, tmp):
- self.log.error("Error exporting results to %(file)s" %
{'file':html})
- return False
-
- import libxml2
- import libxslt
-
- styledoc = libxml2.parseFile(self.config.get('secstate',
'results_stylesheet'))
- style = libxslt.parseStylesheetDoc(styledoc)
- doc = libxml2.parseFile(tmp)
- result = style.applyStylesheet(doc, None)
- if not style.saveResultToFilename(html, result, 0):
- self.log.error("Error exporting results to %(file)s" %
{'file':html})
- style.freeStylesheet()
- doc.freeDoc()
- result.freeDoc()
- return False
-
- style.freeStylesheet()
- doc.freeDoc()
- result.freeDoc()
-
- return True
-
def audit(self, interpreter, args, oval_only, def_models=None, verbose=False,
xml=None, html=None):
"""
Function: Run an audit on the system agains the given definition model
@@ -455,17 +381,8 @@ class Secstate:
oscap.oval_definition_model_free(tmp_model)
if interpreter == "openscap":
- res_model = self.create_result_model(def_model)
- oscap.oval_results_model_eval(res_model)
-
- (def_results, test_results) = self.get_results(res_model)
- self.parse_results(def_results, test_results)
-
- if xml or html:
- res_direct = self.create_result_directives(res_model)
- self.output_results(res_model, res_direct, xml, html)
-
- oscap.oval_definition_model_free(def_model)
+ sess = oscap.oval_agent_new_session(def_model)
+ return evaluate_oval(def_model, sess, xml, html, verbose)
else:
self.log.error("Unsupported interpreter specified: %(inter)s" %
{'inter':interpreter})
@@ -489,97 +406,6 @@ class Secstate:
sess = oscap.oval_agent_new_session(def_model)
return evaluate_xccdf(benchmark, def_model, benchmark_path, sess,
f_xml=xml, f_html=html)
- def get_result_definitions(self, result_model):
- """
- Function: Get all definition results from an OVAL results model
- Input: oval_results_model
- Output: list of oval_result_definition's
- """
- definitions = []
- # FIXME: This might be unsafe as we are accessing iterators without calling
has_more
- system =
oscap.oval_result_system_iterator_next(oscap.oval_results_model_get_systems(result_model))
- defs = oscap.oval_result_system_get_definitions(system)
- for definition in oval_result_definition_generator(defs):
- definitions.append(definition)
-
- return definitions
-
- def get_result_tests(self, result_model):
- """
- Function: Get all test results from an OVAL results model
- Input: oval_results_model
- Output: list of oval_result_test's
- """
- test_results = []
- system =
oscap.oval_result_system_iterator_next(oscap.oval_results_model_get_systems(result_model))
- tests = oscap.oval_result_system_get_tests(system)
- for result in oval_result_test_generator(tests):
- test_results.append(result)
-
- return test_results
-
- def get_results(self, target_model):
- """
- Function: Get both results and tests from an OVAL test model, mapped to their
corresponding result
- Input: oval_results_model
- Output: Retuns two dicts with definitions and tests mapped to the result of
the definition or test
- """
- def_results = {}
- defs = self.get_result_definitions(target_model)
- for defn in defs:
- result = oscap.oval_result_definition_get_result(defn)
- if def_results.has_key(result):
- def_results[result].append(defn)
- else:
- def_results[result] = [defn]
-
- test_results = {}
- tests = self.get_result_tests(target_model)
- for defn in tests:
- result = oscap.oval_result_test_get_result(defn)
- if test_results.has_key(result):
- test_results[result].append(defn)
- else:
- test_results[result] = [defn]
-
- return def_results, test_results
-
- def parse_results(self, definitions, tests = None):
- """
- Function: Parse the results and print out information regarding the
results
- Input: Two dicts with oval_result_t as key to
oval_result_definition/test
- Output: None
- Side Effects: Prints out the number of definitions/tests that passed or
failed.
- """
- if definitions.has_key(oscap.OVAL_RESULT_INVALID):
- print "%d definition(s) were invalid" %
definitions[oscap.OVAL_RESULT_INVALID].__len__()
- for defn in definitions[oscap.OVAL_RESULT_INVALID]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
- if definitions.has_key(oscap.OVAL_RESULT_TRUE):
- print "%d definition(s) passed" %
definitions[oscap.OVAL_RESULT_TRUE].__len__()
- for defn in definitions[oscap.OVAL_RESULT_TRUE]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
- if definitions.has_key(oscap.OVAL_RESULT_FALSE):
- print "%d definition(s) failed" %
definitions[oscap.OVAL_RESULT_FALSE].__len__()
- for defn in definitions[oscap.OVAL_RESULT_FALSE]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
- if definitions.has_key(oscap.OVAL_RESULT_UNKNOWN):
- print "%d definition(s) were unknown" %
definitions[oscap.OVAL_RESULT_UNKNOWN].__len__()
- for defn in definitions[oscap.OVAL_RESULT_UNKNOWN]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
- if definitions.has_key(oscap.OVAL_RESULT_ERROR):
- print "%d definition(s) errored" %
definitions[oscap.OVAL_RESULT_ERROR].__len__()
- for defn in definitions[oscap.OVAL_RESULT_ERROR]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
- if definitions.has_key(oscap.OVAL_RESULT_NOT_EVALUATED):
- print "%d definition(s) not evaluated" %
definitions[oscap.OVAL_RESULT_NOT_EVALUATED].__len__()
- for defn in definitions[oscap.OVAL_RESULT_NOT_EVALUATED]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
- if definitions.has_key(oscap.OVAL_RESULT_NOT_APPLICABLE):
- print "%d definition(s) not applicable" %
definitions[oscap.OVAL_RESULT_NOT_APPLICABLE].__len__()
- for defn in definitions[oscap.OVAL_RESULT_NOT_APPLICABLE]:
- print "\t%s" %
oscap.oval_definition_get_id(oscap.oval_result_definition_get_definition(defn))
-
def search(self, search_string, verbose=False):
"""
Function: Searches though all imported benchmarks for a string of text
@@ -776,13 +602,13 @@ class Secstate:
self.log.error("Benchmark was None")
return False
try:
- puppet_content = self.parse_puppet_fixes(benchmark, passing_ids)
+ puppet_content = parse_puppet_fixes(benchmark, passing_ids)
except SecstateException, se:
sys.stderr.write('Error: %s\n' % str(se))
return False
else:
handle, fname = tempfile.mkstemp(suffix='.sh')
- os.write(handle, template % self.dict_to_external(puppet_content))
+ os.write(handle, template % dict_to_external(puppet_content))
os.close(handle)
os.chmod(fname, 755)
puppet_args = ['/usr/bin/puppet', '--external_node',
fname, '--node_terminus', 'exec', puppet_lib]
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 203a190..5e84a4a 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -22,6 +22,7 @@
import sys
import xml.dom.minidom
import time
+import re
import openscap as oscap
@@ -235,6 +236,103 @@ def evaluate_xccdf(benchmark, def_model, url_XCCDF, sess,
s_profile=None, f_xml=
#oscap.xccdf_policy_model_free(policy_model)
return 0
def output_oval_results(res_model, res_directives, xml, html, stylesheet=None):
    """
    Function: Export an OVAL results model to XML and/or HTML files
    Input: oval_results_model, oval_result_directives, xml output path (or
           None to skip), html output path (or None to skip), and an optional
           path to the XSL stylesheet used for the HTML transform
    Output: True on success, False on any export error
    """
    # Local import: util.py does not import tempfile at module level.
    import tempfile

    if xml:
        if not oscap.oval_results_model_export(res_model, res_directives, xml):
            sys.stderr.write("Error exporting results to %(file)s\n" % {'file':xml})
            return False

    if html:
        # The original code read self.config here, but this is a module-level
        # function with no 'self' (it always raised NameError); the stylesheet
        # must now be supplied explicitly by the caller.
        if stylesheet is None:
            sys.stderr.write("Error: no XSL stylesheet supplied for HTML export\n")
            return False

        # NamedTemporaryFile instead of the race-prone tempfile.mktemp();
        # the file is removed automatically when the handle is closed, so
        # the intermediate XML no longer leaks into the temp directory.
        tmp_file = tempfile.NamedTemporaryFile(suffix='.xml')
        tmp = tmp_file.name
        try:
            if not oscap.oval_results_model_export(res_model, res_directives, tmp):
                sys.stderr.write("Error exporting results to %(file)s\n" % {'file':html})
                return False

            import libxml2
            import libxslt

            styledoc = libxml2.parseFile(stylesheet)
            style = libxslt.parseStylesheetDoc(styledoc)
            doc = libxml2.parseFile(tmp)
            result = style.applyStylesheet(doc, None)
            try:
                if not style.saveResultToFilename(html, result, 0):
                    sys.stderr.write("Error exporting results to %(file)s\n" % {'file':html})
                    return False
            finally:
                # Free libxml2/libxslt objects on every exit path, not just
                # the two the original happened to cover.
                style.freeStylesheet()
                doc.freeDoc()
                result.freeDoc()
        finally:
            tmp_file.close()

    return True
+
+def oval_callback(id, result, usr):
+ if usr['verbose']:
+ print "Evaluated definition %(id)s: %(res)s" % {'id':id,
'res':oscap.oval_result_get_text(result)}
+
+ if result == oscap.OVAL_RESULT_TRUE:
+ usr['true'] += 1
+ elif result == oscap.OVAL_RESULT_FALSE:
+ usr['false'] += 1
+ if result == oscap.OVAL_RESULT_INVALID:
+ usr['invalid'] += 1
+ if result == oscap.OVAL_RESULT_UNKNOWN:
+ usr['unknown'] += 1
+ if result == oscap.OVAL_RESULT_NOT_EVALUATED:
+ usr['neval'] += 1
+ if result == oscap.OVAL_RESULT_NOT_APPLICABLE:
+ usr['napp'] += 1
+
+ return 0
+
+def evaluate_oval(def_model, sess, f_xml, f_html, verbose=False):
+ res_model = oscap.oval_agent_get_results_model(sess)
+ usr = {'verbose':verbose,
+ 'true':0,
+ 'false':0,
+ 'invalid':0,
+ 'unknown':0,
+ 'neval':0,
+ 'napp':0}
+ ret = oscap.oval_agent_eval_system_py(sess, oval_callback, usr)
+
+ if verbose:
+ print "Evaluation Completed"
+
+ if ret == -1:
+ if oscap.oscap_err():
+ sys.stderr.write("Error: (%(code)d) %(desc)s" %
{'code':oscap.oscap_err_code(),
+
'desc':oscap.oscap_err_desc()})
+ return False
+
+ print "Results:"
+ print "True:\t%d" % usr['true']
+ print "False:\t%d" % usr['false']
+ print "Invalid:\t%d" % usr['invalid']
+ print "Unknown:\t%d" % usr['unknown']
+ print "Not Evaluated:\t%d" % usr['neval']
+ print "Not Applicable:\t%d" % usr['napp']
+
+ if f_xml or f_html:
+ res_direct = oscap.oval_result_directives_new(res_model)
+ oscap.oval_result_directives_set_reported(res_direct, oscap.OVAL_RESULT_INVALID
|
+ oscap.OVAL_RESULT_TRUE |
+ oscap.OVAL_RESULT_FALSE |
+ oscap.OVAL_RESULT_UNKNOWN |
+ oscap.OVAL_RESULT_ERROR |
+ oscap.OVAL_RESULT_NOT_EVALUATED |
+ oscap.OVAL_RESULT_NOT_APPLICABLE,
True)
+ oscap.oval_result_directives_set_content(res_direct, oscap.OVAL_RESULT_FALSE,
oscap.OVAL_DIRECTIVE_CONTENT_FULL)
+ oscap.oval_result_directives_set_content(res_direct, oscap.OVAL_RESULT_TRUE,
oscap.OVAL_DIRECTIVE_CONTENT_FULL)
+
+ output_oval_results(res_model, res_direct, f_xml, f_html)
+ oscap.oval_result_directives_free(res_direct)
+
+ oscap.oval_agent_destroy_session(sess)
+ return ret
+
def is_benchmark(benchmark):
tree = xml.dom.minidom.parse(benchmark)
return (tree.getElementsByTagName("Benchmark") != [])
--
1.7.0.1