Config files and a dict mapping items to their selection status are now
attached to benchmark objects on import. Also allow the user to specify
active profile on import.
---
src/bin/secstate | 6 +-
src/secstate/main.py | 253 +++++++++++++++++++++++---------------------------
src/secstate/util.py | 99 +++++++++++---------
3 files changed, 173 insertions(+), 185 deletions(-)
diff --git a/src/bin/secstate b/src/bin/secstate
index 9692c0e..2348591 100644
--- a/src/bin/secstate
+++ b/src/bin/secstate
@@ -114,14 +114,14 @@ def import_content(arguments):
help="Imports the specified CPE content")
parser.add_option('-p', '--puppet', action='store_true', dest='puppet', default=False,
help="Imports the specified puppet content")
+ parser.add_option('--profile', action='store', type='string', dest='profile', default="__None__",
+ help="Imports the specified benchmark and sets the active profile")
(options, args) = parser.parse_args(arguments)
for arg in args:
- (benchmark, def_model) = sec_instance.import_content(arg, options.cpe, options.puppet, save=True)
+ (benchmark, def_model) = sec_instance.import_content(arg, options.cpe, options.puppet, save=True, active_profile=options.profile)
if (benchmark == None) and (def_model == None):
return -1
- oscap.oval.definition_model_free(def_model)
- oscap.xccdf.benchmark_free(benchmark)
def export(arguments):
parser = OptionParser(usage="secstate export [options] <benchmark> <file>")
diff --git a/src/secstate/main.py b/src/secstate/main.py
index 8948712..3dd93c8 100644
--- a/src/secstate/main.py
+++ b/src/secstate/main.py
@@ -31,7 +31,6 @@ import subprocess
import time
import mimetypes
import json
-import cProfile
import openscap_api as oscap
from secstate.util import *
@@ -47,8 +46,9 @@ class Secstate:
self.log.error("Could not create config directory: %(dir)s" % {'dir':self.conf_dir})
return (None, None)
- self.content = self.get_content_dict()
- self.content_configs = self.get_content_configs()
+ self.content = {}
+ self.content_configs = {}
+ self.load_content()
self.log = self.getLogger()
self.benchmark_dir = self.config.get('secstate', 'benchmark_dir')
@@ -88,28 +88,14 @@ class Secstate:
return log
- def get_content_dict(self):
- content = {}
+ def load_content(self):
conf_dir = self.config.get('secstate', 'conf_dir')
for conf_file in os.listdir(conf_dir):
id = os.path.splitext(conf_file)[0]
- config = ConfigParser.ConfigParser()
- fp = open(os.path.join(conf_dir, conf_file))
- config.readfp(fp)
+ self.content_configs[id] = os.path.join(conf_dir, conf_file)
+ config = load_config(os.path.join(conf_dir, conf_file))
content_file = config.get(id, 'file')
- content[id] = content_file
- fp.close()
-
- return content
-
- def get_content_configs(self):
- configs = {}
- conf_dir = self.config.get('secstate', 'conf_dir')
- for conf_file in os.listdir(conf_dir):
- id = os.path.splitext(conf_file)[0]
- configs[id] = os.path.join(conf_dir, conf_file)
-
- return configs
+ self.content[id] = content_file
def combine_def_models(self, target, source):
"""
@@ -174,6 +160,17 @@ class Secstate:
self.log.error("Definition model is invalid")
return (None, None)
+ oval_id = os.path.splitext(os.path.basename(oval_file))[0]
+
+ if self.content.has_key(oval_id):
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ if config.read(self.content_configs[oval_id]) == []:
+ self.log.error("Error loading config file: %(file)s" % {'file':self.content_configs[oval_id]})
+ return (None, None)
+
+ def_model.__dict__['config'] = config
+
if store_path:
if not os.path.isdir(store_path):
try:
@@ -197,7 +194,7 @@ class Secstate:
return (None, def_model)
- def import_benchmark(self, benchmark_file, oval_path="", store_path=None):
+ def import_benchmark(self, benchmark_file, oval_path="", store_path=None, changes=False, active_profile='__None__'):
"""
Function: Imports an XCCDF benchmark
Input: Source File, path to associated OVAL content
@@ -241,6 +238,32 @@ class Secstate:
profile.id = "Custom"
benchmark.add_profile(profile)
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ if self.content_configs.has_key(benchmark.id):
+ if config.read(self.content_configs[benchmark.id]) == []:
+                self.log.error("Error opening config file: %(file)s" % {'file':self.content_configs[benchmark.id]})
+ return (None, None)
+ else:
+ config.add_section(benchmark.id)
+ config.set(benchmark.id, 'profile', active_profile)
+
+ benchmark.__dict__['config'] = config
+
+ if changes:
+ benchmark = apply_changes_profile(benchmark)
+
+ benchmark.__dict__['selections'] = {}
+ for item in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
+ benchmark.selections[item.id] = item.selected
+
+ current_profile = benchmark.config.get(benchmark.id, 'profile')
+ if current_profile != '__None__':
+ profile = benchmark.get_item(current_profile).to_profile()
+ prof_sel = get_profile_selections(benchmark, profile)
+ for key,val in prof_sel.items():
+ benchmark.selections[key] = val
+
if store_path != None:
id = get_benchmark_id(benchmark_file)
directory = os.path.join(bench_dir, id)
@@ -251,12 +274,10 @@ class Secstate:
try:
os.mkdir(directory)
shutil.copy(benchmark_file, directory)
- config = ConfigParser.ConfigParser()
- config.add_section(id)
- config.set(id, 'file', os.path.join(directory, os.path.basename(benchmark_file)))
- config.set(id, 'selected', True)
+ benchmark.config.set(id, 'file', os.path.join(directory, os.path.basename(benchmark_file)))
+ benchmark.config.set(id, 'selected', True)
conf_file = open(os.path.join(self.config.get('secstate', 'conf_dir'), id + ".cfg"), 'w')
- config.write(conf_file)
+ benchmark.config.write(conf_file)
conf_file.close()
for oval in list(set(oval_files)):
@@ -268,7 +289,7 @@ class Secstate:
return (benchmark, def_model)
- def import_zipped_content(self, zip, type, store_path, puppet):
+ def import_zipped_content(self, zip, type, store_path, puppet, changes=False, active_profile='__None__'):
"""
Function: Validate and copy content from zipped file to repository
Input: Zipped file contating content and bool whether it contains puppet content
@@ -324,7 +345,7 @@ class Secstate:
self.log.error("Could not find XCCDF benchmark in archive %(file)s", {'file':zip})
return (None, None)
- (benchmark, def_model) = self.import_benchmark(os.path.join(extract_path, xccdf), extract_path, store_path)
+ (benchmark, def_model) = self.import_benchmark(os.path.join(extract_path, xccdf), extract_path, store_path, changes, active_profile=active_profile)
if benchmark == None:
return (None, None)
@@ -333,7 +354,7 @@ class Secstate:
return (benchmark, def_model)
- def import_content(self, content, cpe=False, puppet=False, changes=True, save=False):
+ def import_content(self, content, cpe=False, puppet=False, changes=True, save=False, active_profile='__None__'):
"""
Function: Validates XCCDF/OVAL content and optionally saves it to the data store
Input: File containing content
@@ -353,19 +374,15 @@ class Secstate:
return (None, None)
if self.content.has_key(content):
- (benchmark, oval) = self.import_content(os.path.join(self.benchmark_dir, content, self.content[content]), save=False)
- if changes and (benchmark != None):
- #benchmark = apply_changes(benchmark, os.path.join(self.benchmark_dir, content, str(content + ".cfg")))
- benchmark = apply_changes_profile(benchmark, self.content_configs[content])
+ return self.import_content(self.content[content], cpe, puppet, changes, active_profile=active_profile)
- return (benchmark, oval)
+ if save:
+ store_path = self.config.get('secstate', 'benchmark_dir')
file_type = mimetypes.guess_type(content)
if file_type[0] == "text/xml":
if is_benchmark(content):
xccdf = True
- if save:
- store_path = self.config.get('secstate', 'benchmark_dir')
else:
oval = True
if save:
@@ -375,10 +392,10 @@ class Secstate:
return self.import_oval(content, store_path)
if xccdf:
- return self.import_benchmark(content, store_path=store_path, oval_path=os.path.dirname(content))
+ return self.import_benchmark(content, store_path=store_path, oval_path=os.path.dirname(content), changes=changes, active_profile=active_profile)
else:
- return self.import_zipped_content(content, file_type, store_path=self.config.get('secstate', 'benchmark_dir'), puppet=puppet)
+ return self.import_zipped_content(content, file_type, store_path=store_path, puppet=puppet, changes=changes, active_profile=active_profile)
def export(self, benchmark_id, new_file, original=False):
if not self.content.has_key(benchmark_id):
@@ -416,17 +433,13 @@ class Secstate:
self.remove_content(key)
elif self.content.has_key(benchmark_id):
- cfg = ConfigParser.ConfigParser()
- conf_file = self.content_configs[benchmark_id]
- fp = open(conf_file)
- cfg.readfp(fp)
- fp.close()
+ cfg = load_config(self.content_configs[benchmark_id])
try:
if os.path.split(cfg.get(benchmark_id, "file"))[0] != self.config.get('secstate', 'oval_dir'):
shutil.rmtree(os.path.split(cfg.get(benchmark_id, "file"))[0])
else:
os.remove(cfg.get(benchmark_id, "file"))
- os.remove(conf_file)
+ os.remove(self.content_configs[benchmark_id])
except IOError,e:
self.log.error("Error removing content: %(error)s" % {'error':e})
return False
@@ -444,21 +457,7 @@ class Secstate:
Output: Succes or failure
"""
sel_dict = {'selected':selected, 'message':message}
- bench_cfg = ConfigParser.ConfigParser()
- bench_cfg.optionxform = str
- conf_path = self.content_configs[benchmark_id]
- if os.path.isfile(conf_path):
- try:
- fp = open(conf_path)
- bench_cfg.readfp(fp)
- except IOError, e:
- self.log.error("Could not open config file: %(error)s" % {'error':e})
- return False
- fp.close()
-
- if not bench_cfg.has_section('Custom'):
- bench_cfg.add_section('Custom')
-
+
if not self.content.has_key(benchmark_id):
self.log.error("No benchmark %(id)s in datastore" % {'id':benchmark_id})
return False
@@ -469,13 +468,13 @@ class Secstate:
self.log.error("Error opening benchmark: %(file)s" % {'file':benchmark_id})
return False
else:
- bench_cfg.set(benchmark_id, 'selected', selected)
+ oval.config.set(benchmark_id, 'selected', selected)
self.log.debug("Set Oval file %(file)s to %(sel)s" % {'file':benchmark_id,
'sel':selected})
else:
if item_id == benchmark_id:
- bench_cfg.set(benchmark_id, 'selected', selected)
+ benchmark.config.set(benchmark_id, 'selected', selected)
self.log.debug("Setting %(id)s to %(val)s" % {'id':benchmark_id,
'val':selected})
item = benchmark.to_item()
@@ -488,37 +487,44 @@ class Secstate:
return False
if item.type == oscap.xccdf.XCCDF_PROFILE:
- bench_cfg.set(benchmark_id, 'profile', item_id)
+ benchmark.config.set(benchmark_id, 'profile', item_id)
self.log.debug("Setting active profile to %(id)s" % {'id':item_id})
else:
- if bench_cfg.has_option(benchmark_id, 'profile'):
- active_profile = bench_cfg.get(benchmark_id, 'profile')
+ if benchmark.config.has_option(benchmark_id, 'profile'):
+ active_profile = benchmark.config.get(benchmark_id, 'profile')
if active_profile != "Custom":
- bench_cfg.set('Custom', 'extends', active_profile)
+ if not benchmark.config.has_section("Custom"):
+ benchmark.config.add_section("Custom")
+ benchmark.config.set('Custom', 'extends', active_profile)
if item.type != oscap.xccdf.XCCDF_BENCHMARK:
- bench_cfg.set('Custom', item_id, json.dumps(sel_dict))
+ benchmark.config.set('Custom', item_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':item_id,
'val':selected})
if selected:
parent = item.parent
while parent.id != benchmark_id:
- bench_cfg.set('Custom', parent_id, json.dumps(sel_dict))
+ benchmark.config.set('Custom', parent_id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':parent_id,
'val':selected})
parent = parent.parent
- bench_cfg.set(benchmark_id, 'profile', 'Custom')
+ benchmark.config.set(benchmark_id, 'profile', 'Custom')
if recurse:
if (item.type == oscap.xccdf.XCCDF_GROUP) or (item.type == oscap.xccdf.XCCDF_BENCHMARK):
for sub in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, item.content):
- bench_cfg.set('Custom', sub.id, json.dumps(sel_dict))
+ benchmark.config.set('Custom', sub.id, json.dumps(sel_dict))
self.log.debug("Setting %(id)s to %(val)s" % {'id':sub.id,
'val':selected})
- fp = open(conf_path, 'w')
- bench_cfg.write(fp)
- fp.close()
+ try:
+ fp = open(self.content_configs[benchmark_id], 'w')
+ benchmark.config.write(fp)
+ fp.close()
+ except IOError, e:
+ self.log.error("Error saving changes: %(err)s" % {'err':e})
+ return False
+
return True
def save_profile(self, benchmark_id, profile_name):
@@ -526,12 +532,7 @@ class Secstate:
self.log.error("No benchmark named %(id)s has been imported" % {'id':benchmark_id})
return False
- bench_cfg = ConfigParser.ConfigParser()
- bench_cfg.optionxform = str
- fp = open(self.content_configs[benchmark_id])
- bench_cfg.readfp(fp)
- fp.close()
-
+ bench_cfg = load_config(self.content_configs[benchmark_id])
if bench_cfg.has_section("Custom"):
bench_cfg.add_section(profile_name)
for opt,val in bench_cfg.items("Custom"):
@@ -542,9 +543,13 @@ class Secstate:
self.log.error("No changes have been made to the current profile")
return False
- fp = open(self.content_configs[benchmark_id], 'w')
- bench_cfg.write(fp)
- fp.close()
+ try:
+ fp = open(self.content_configs[benchmark_id], 'w')
+ bench_cfg.write(fp)
+ fp.close()
+ except IOError, e:
+ self.log.error("Error saving changes: %(err)s" % {'err':e})
+ return False
return True
@@ -560,7 +565,6 @@ class Secstate:
benchmark = None
res_model = None
res_benchmark = None
- config = None
if args == []:
args = self.content.keys()
@@ -573,11 +577,11 @@ class Secstate:
else:
if self.content.has_key(arg):
- config = ConfigParser.ConfigParser()
- fp = open(self.content_configs[arg])
- config.readfp(fp)
- fp.close()
- if not all and (not config.getboolean(arg, 'selected')) and (len(args) > 1):
+ if benchmark == None:
+ scanned_content = def_model
+ else:
+ scanned_content = benchmark
+ if not all and (not scanned_content.config.getboolean(arg, 'selected')) and (len(args) > 1):
print "Skipping %(id)s" % {'id':arg}
ret = True
continue
@@ -586,10 +590,10 @@ class Secstate:
sess = oscap.oval.agent_new_session(def_model)
if benchmark != None:
- # Set profile to default found in config file
- if (profile == None) and (config != None):
- if config.has_option(arg, 'profile'):
- profile = config.get(arg, 'profile')
+            # Set profile to the default stored in the benchmark's attached config
+ if (profile == None) and (benchmark.__dict__.has_key('config')):
+ if benchmark.config.has_option(arg, 'profile'):
+ profile = benchmark.config.get(arg, 'profile')
else:
profile = 'Custom'
@@ -631,14 +635,14 @@ class Secstate:
print "In benchmark %(bench)s:" % {'bench':key}
- for item in xccdf_get_itesm(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
+ for item in xccdf_get_items(benchmark, oscap.xccdf.XCCDF_ITEM, benchmark.content):
title = None
description = None
- if len(item.titles) > 0:
- title = item.titles[0].text
+ if len(item.title) > 0:
+ title = item.title[0].text
- if len(item.descriptions) > 0:
+ if len(item.description) > 0:
description = item.description[0].text
if (title != None) and (description != None):
@@ -686,20 +690,16 @@ class Secstate:
for title in item.title:
print "\tTitle: '%(title)s'" % {'title':title.text}
- for description in item.descriptions:
+ for description in item.description:
print "\tDescription: %(desc)s" % {'desc':description.text}
print "\tSelected: %(sel)s" % {'sel':item.selected}
type = item.type
if type == oscap.xccdf.XCCDF_BENCHMARK:
- bench_cfg = ConfigParser.ConfigParser()
- fp = open(self.content_configs[key])
- bench_cfg.readfp(fp)
- fp.close()
active_profile = None
- if bench_cfg.has_option(key, 'profile'):
- active_profile = bench_cfg.get(key, 'profile')
+ if benchmark.config.has_option(key, 'profile'):
+ active_profile = benchmark.config.get(key, 'profile')
if len(benchmark.profiles) > 0:
print "\tProfiles:"
for profile in benchmark.profiles:
@@ -730,14 +730,15 @@ class Secstate:
return True
- def sublist(self, benchmark, bench_cfg, def_model, arg, recurse, show_all, selects={}, tabs=0):
+ def sublist(self, benchmark, def_model, arg, recurse, show_all, tabs=0):
tabstr = "\t" * tabs
selected = ""
profile = ""
if benchmark == None:
if self.content.has_key(arg):
- if bench_cfg.getboolean(arg, 'selected'):
+            # NOTE(review): dropped stray debug statement `print dir(def_model)`
+ if def_model.config.getboolean(arg, 'selected'):
if show_all:
selected = "[X]"
else:
@@ -753,22 +754,15 @@ class Secstate:
item = None
if arg == benchmark.id:
item = benchmark.to_item()
- is_selected = bench_cfg.getboolean(arg, 'selected')
- if bench_cfg.has_option(arg, 'profile'):
- profile = ", Profile: '%s'" % bench_cfg.get(arg, 'profile')
- else:
- profile = ", Profile: None"
+ is_selected = benchmark.config.getboolean(arg, 'selected')
+ profile = ", Profile: '%s'" % benchmark.config.get(arg, 'profile')
+
else:
item = benchmark.get_item(arg)
if item == None:
- return self.sublist(None, bench_cfg, def_model, arg, recurse, show_all, selects, tabs)
-
- is_selected = item.selected
-
- try:
- is_selected = selects[item.id]
- except KeyError, e:
- pass
+ return self.sublist(None, def_model, arg, recurse, show_all, tabs)
+ else:
+ is_selected = benchmark.selections[item.id]
for title in item.title:
if show_all:
@@ -790,7 +784,7 @@ class Secstate:
type = item.type
if (type == oscap.xccdf.XCCDF_GROUP) or (type == oscap.xccdf.XCCDF_BENCHMARK):
for sub in item.content:
- self.sublist(benchmark, bench_cfg, def_model, sub.id, recurse, show_all, selects, tabs+1)
+ self.sublist(benchmark, def_model, sub.id, recurse, show_all, tabs+1)
def list_content(self, arg=None, recurse=False, show_all=False):
@@ -802,29 +796,12 @@ class Secstate:
self.log.error("Error loading benchmark: %(id)s" % {'id':key})
return False
- config = ConfigParser.ConfigParser()
- fp = open(self.content_configs[key])
- config.readfp(fp)
- fp.close()
-
- selects = {}
- if benchmark != None:
- if config.has_option(benchmark.id, 'profile'):
- prof = benchmark.get_item(config.get(benchmark.id, "profile"))
- if prof == None:
- self.log.error("Error loading profile %(prof)s" % {'prof':config.get(benchmark.id, 'profile')})
- return False
- prof = prof.to_profile()
-
- for select in prof.selects:
- selects[select.item] = select.selected
-
if (arg == None) or (arg == key):
- ret = self.sublist(benchmark, config, def_model, key, recurse, show_all, selects)
+ ret = self.sublist(benchmark, def_model, key, recurse, show_all)
else:
if not self.content.has_key(arg):
- ret = self.sublist(benchmark, config, def_model, arg, recurse, show_all, selects)
+ ret = self.sublist(benchmark, def_model, arg, recurse, show_all)
return ret
diff --git a/src/secstate/util.py b/src/secstate/util.py
index 783ed9e..c3b52c0 100644
--- a/src/secstate/util.py
+++ b/src/secstate/util.py
@@ -39,6 +39,19 @@ class SecstateException(Exception):
def __str__(self):
return str(self.reason)
+def load_config(conf_file):
+ config = ConfigParser.ConfigParser()
+ config.optionxform = str
+ try:
+ fp = open(conf_file)
+ config.readfp(fp)
+ fp.close()
+ except IOError, e:
+        sys.stderr.write("Error opening config file: %(file)s\n" % {'file':conf_file})
+ return None
+
+ return config
+
def xccdf_reporter(msg, usr):
result = oscap.common.reporter_message_get_user2num(msg)
if result == oscap.xccdf.XCCDF_RESULT_PASS:
@@ -70,7 +83,7 @@ def evaluate_xccdf(benchmark, url_XCCDF, sess, s_profile=None, all=False, verbos
if (s_profile != None):
policy = policy_model.get_policy_by_id(s_profile)
else:
- policies = policy_modea.policiesl
+ policies = policy_model.policies
if len(policies) > 0:
policy = policies[0]
@@ -99,9 +112,8 @@ def evaluate_xccdf(benchmark, url_XCCDF, sess, s_profile=None, all=False, verbos
ritem.add_title(title)
ritem.start_time = time.time()
if policy != None:
- id = policy.profile.id
- if id != None:
- ritem.set_profile(id)
+ if policy.profile != None:
+ ritem.set_profile(policy.profile.id)
oscap.oval.agent_export_sysinfo_to_xccdf_result(sess, ritem)
for model in benchmark.models:
@@ -121,7 +133,6 @@ def evaluate_xccdf(benchmark, url_XCCDF, sess, s_profile=None, all=False, verbos
"Informational:\t%(info)s\n" \
"Unknown:\t%(unknown)s\n" % res_dict
- print "HERE"
results_benchmark = benchmark.clone()
results_benchmark.add_result(oscap.xccdf.result_clone(ritem))
res_model = oscap.oval.agent_get_results_model(sess)
@@ -315,50 +326,50 @@ def xccdf_rule_get_defs(rule):
return defs
-def apply_changes_profile(benchmark, conf):
- config = ConfigParser.ConfigParser()
- config.optionxform = str
- if os.path.isfile(conf):
- try:
- fp = open(conf)
- config.readfp(fp)
- except IOError,e:
- sys.stderr.write("Error opening config file: %(err)s\n" % {'err':e})
- return None
- fp.close()
-
- for section in config.sections():
- if section != benchmark.id:
- prof = oscap.xccdf.profile_new()
- if config.has_option(section, 'extends'):
- original_prof = benchmark.get_item(config.get(section, 'extends')).to_profile()
- if len(original_prof.title) > 0:
- new_title = oscap.common.text_new()
- new_title.text = "-- Customized --" + original_prof.title[0].text
- prof.add_title(new_title)
- prof.extends = config.get(section, 'extends')
- else:
+def apply_changes_profile(benchmark):
+ for section in benchmark.config.sections():
+ if section != benchmark.id:
+ prof = oscap.xccdf.profile_new()
+ if benchmark.config.has_option(section, 'extends'):
+ original_prof = benchmark.get_item(benchmark.config.get(section, 'extends')).to_profile()
+ if len(original_prof.title) > 0:
new_title = oscap.common.text_new()
- new_title.text = "Customized profile from secstate"
+ new_title.text = "-- Customized --" + original_prof.title[0].text
prof.add_title(new_title)
- prof.id = section
-
- for id,val in config.items(section):
- if id != 'extends':
- sel_dict = json.loads(val)
- select = oscap.xccdf.select_new()
- select.item = id
- select.selected = sel_dict['selected']
- if sel_dict['message']:
- text = oscap.common.text_new()
- text.text = str(sel_dict['message'])
- select.add_remark(text)
- prof.add_select(select)
-
- benchmark.add_profile(prof)
+ prof.extends = benchmark.config.get(section, 'extends')
+ else:
+ new_title = oscap.common.text_new()
+ new_title.text = "Customized profile from secstate"
+ prof.add_title(new_title)
+ prof.id = section
+
+ for id,val in benchmark.config.items(section):
+ if id != 'extends':
+ sel_dict = json.loads(val)
+ select = oscap.xccdf.select_new()
+ select.item = id
+ select.selected = sel_dict['selected']
+ if sel_dict['message']:
+ text = oscap.common.text_new()
+ text.text = str(sel_dict['message'])
+ select.add_remark(text)
+ prof.add_select(select)
+
+ benchmark.add_profile(prof)
return benchmark
+def get_profile_selections(benchmark, profile):
+ selections = {}
+ if profile.extends != None:
+ selections.update(get_profile_selections(benchmark, benchmark.get_item(profile.extends).to_profile()))
+
+ for sel in profile.selects:
+ selections[sel.item] = sel.selected
+
+ return selections
+
+
def xccdf_get_fixes(benchmark, ignore_ids=[]):
"""
Function: Get all fixes for rules in the XCCDF document
--
1.7.2