From: Ondrej Lichtner <olichtne(a)redhat.com>
These are not used anymore in the new Python Recipes implementation.
Signed-off-by: Ondrej Lichtner <olichtne(a)redhat.com>
---
lnst/Controller/NetTestController.py | 620 ---------------------------------
lnst/Controller/RecipeParser.py | 572 -------------------------------
lnst/Controller/SlavePool.py | 648 -----------------------------------
lnst/Controller/XmlParser.py | 188 ----------
lnst/Controller/XmlProcessing.py | 235 -------------
lnst/Controller/XmlTemplates.py | 438 -----------------------
6 files changed, 2701 deletions(-)
delete mode 100644 lnst/Controller/NetTestController.py
delete mode 100644 lnst/Controller/RecipeParser.py
delete mode 100644 lnst/Controller/SlavePool.py
delete mode 100644 lnst/Controller/XmlParser.py
delete mode 100644 lnst/Controller/XmlProcessing.py
delete mode 100644 lnst/Controller/XmlTemplates.py
diff --git a/lnst/Controller/NetTestController.py b/lnst/Controller/NetTestController.py
deleted file mode 100644
index 8f41cd0..0000000
--- a/lnst/Controller/NetTestController.py
+++ /dev/null
@@ -1,620 +0,0 @@
-"""
-This module defines NetTestController class which does the controlling
-part of network testing.
-
-Copyright 2011 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-jpirko(a)redhat.com (Jiri Pirko)
-"""
-
-import logging
-import socket
-import os
-import re
-import cPickle
-import imp
-import copy
-import sys
-from time import sleep
-from lnst.Common.NetUtils import MacPool
-from lnst.Common.Utils import md5sum, dir_md5sum
-from lnst.Common.Utils import check_process_running, bool_it, get_module_tools
-from lnst.Common.NetTestCommand import str_command, CommandException
-from lnst.Controller.RecipeParser import RecipeError
-from lnst.Controller.SlavePool import SlavePool
-from lnst.Controller.Machine import MachineError, VirtualInterface
-from lnst.Controller.Machine import StaticInterface
-from lnst.Controller.CtlSecSocket import CtlSecSocket
-from lnst.Common.SecureSocket import SecSocketException
-from lnst.Common.ConnectionHandler import send_data, recv_data
-from lnst.Common.ConnectionHandler import ConnectionHandler
-from lnst.Common.Config import lnst_config
-from lnst.Common.Path import Path
-from lnst.Common.Colours import decorate_with_preset
-from lnst.Common.NetUtils import test_tcp_connection
-import lnst.Controller.Task as Task
-
-# conditional support for libvirt
-if check_process_running("libvirtd"):
- from lnst.Controller.VirtUtils import VirtNetCtl, VirtDomainCtl
-
-class NetTestError(Exception):
- pass
-
-class NoMatchError(NetTestError):
- pass
-
-def ignore_event(**kwarg):
- pass
-
-class NetTestController:
- def __init__(self, recipe_path, log_ctl,
- res_serializer=None, pool_checks=True,
- packet_capture=False,
- defined_aliases=None, reduce_sync=False,
- restrict_pools=[], multi_match=False,
- breakpoints=False):
- self._res_serializer = res_serializer
- self._remote_capture_files = {}
- self._log_ctl = log_ctl
- self._recipe_path = Path(None, recipe_path)
- self._msg_dispatcher = MessageDispatcher(log_ctl)
- self._packet_capture = packet_capture
- self._reduce_sync = reduce_sync
- self._defined_aliases = defined_aliases
- self._multi_match = multi_match
-
- self.run_mode = "run"
- self.breakpoints = breakpoints
-
- self._machines = {}
- self._network_bridges = {}
- self._tasks = []
-
-        mac_pool_range = lnst_config.get_option('environment', 'mac_pool_range')
- self._mac_pool = MacPool(mac_pool_range[0], mac_pool_range[1])
-
- conf_pools = lnst_config.get_pools()
- pools = {}
- if len(restrict_pools) > 0:
- for pool_name in restrict_pools:
- if pool_name in conf_pools:
- pools[pool_name] = conf_pools[pool_name]
- elif len(restrict_pools) == 1 and os.path.isdir(pool_name):
- pools = {"cmd_line_pool": pool_name}
- else:
- raise NetTestError("Pool %s does not exist!" % pool_name)
- else:
- pools = conf_pools
-
- sp = SlavePool(pools, pool_checks)
- self._slave_pool = sp
-
-        modules_dirs = lnst_config.get_option('environment', 'module_dirs')
- tools_dirs = lnst_config.get_option('environment', 'tool_dirs')
-
- self._resource_table = {}
- self._resource_table["module"] = self._load_test_modules(modules_dirs)
- self._resource_table["tools"] = self._load_test_tools(tools_dirs)
-
- def _get_machineinfo(self, machine_id):
- try:
- info = self._recipe["machines"][machine_id]["params"]
- except KeyError:
- msg = "Machine parameters requested, but not yet available"
- raise NetTestError(msg)
-
- return info
-
- @staticmethod
- def _session_die(session, status):
- logging.debug("%s terminated with status %s", session.command, status)
- msg = "SSH session terminated with status %s" % status
- raise NetTestError(msg)
-
- def _prepare_network(self, resource_sync=True):
- mreq = Task.get_mreq()
-
- machines = self._machines
- for m_id in machines.keys():
- self._prepare_machine(m_id, resource_sync)
-
- for machine_id, machine_data in mreq.iteritems():
- m_id = machine_id
- m = machines[m_id]
- namespaces = set()
- for if_id, iface_data in machine_data["interfaces"].iteritems():
- self._prepare_interface(m_id, if_id, iface_data)
-
- if iface_data["netns"] != None:
- namespaces.add(iface_data["netns"])
-
- if len(namespaces) > 0:
- m.disable_nm()
-
- ifaces = m.get_ordered_interfaces()
- for netns in namespaces:
- m.add_netns(netns)
-
- for iface in ifaces:
- iface.configure()
- if (m._libvirt_domain is None and
- isinstance(iface, StaticInterface)):
- driver = iface._driver
- if_id = iface._id
- mapped_machine = self._slave_pool._map['machines'][m_id]
-                    mapped_machine['interfaces'][if_id]['driver'] = driver
- for iface in ifaces:
- iface.up()
-
- m.wait_interface_init()
-
- def set_machine_requirements(self):
- mreq = Task.get_mreq()
- sp = self._slave_pool
- sp.set_machine_requirements(mreq)
-
- def provision_machines(self):
- sp = self._slave_pool
- machines = self._machines
- if not sp.provision_machines(machines):
- msg = "This setup cannot be provisioned with the current pool."
- raise NoMatchError(msg)
-
- def print_match_description(self):
- sp = self._slave_pool
- match = sp.get_match()
- logging.info("Pool match description:")
- if sp.is_setup_virtual():
- logging.info(" Setup is using virtual machines.")
- for m_id, m in sorted(match["machines"].iteritems()):
-            logging.info("  host \"%s\" uses \"%s\"" % (m_id, m["target"]))
- for if_id, match in m["interfaces"].iteritems():
- pool_id = match["target"]
-                logging.info("    interface \"%s\" matched to \"%s\"" %\
- (if_id, pool_id))
-
- def get_pool_match(self):
- return self._slave_pool.get_match()
-
- def _prepare_machine(self, m_id, resource_sync=True):
- machine = self._machines[m_id]
- address = socket.gethostbyname(machine.get_hostname())
-
- self._log_ctl.add_slave(m_id)
- machine.set_rpc(self._msg_dispatcher)
- machine.set_mac_pool(self._mac_pool)
- machine.set_network_bridges(self._network_bridges)
-
- recipe_name = os.path.basename(self._recipe_path.abs_path())
- machine.init_connection(recipe_name)
-
- def _prepare_interface(self, m_id, if_id, iface_data):
- machine = self._machines[m_id]
-
- iface = machine.get_interface(if_id)
-
- if iface_data["netns"] != None:
- iface.set_netns(iface_data["netns"])
-
- def _prepare_command(self, cmd_data):
- cmd = {"type": cmd_data["type"]}
- if "host" in cmd_data:
- cmd["host"] = cmd_data["host"]
- if cmd["host"] not in self._machines:
- msg = "Invalid host id '%s'." % cmd["host"]
- raise RecipeError(msg, cmd_data)
-
- if "netns" in cmd_data:
- cmd["netns"] = cmd_data["netns"]
-
- if "expect" in cmd_data:
- expect = cmd_data["expect"]
- if expect not in ["pass", "fail"]:
- msg = "Illegal expect attribute value."
- raise RecipeError(msg, cmd_data)
- cmd["expect"] = expect == "pass"
-
- if cmd["type"] == "test":
- cmd["module"] = cmd_data["module"]
-
- cmd_opts = {}
- if "options" in cmd_data:
- for opt in cmd_data["options"]:
- name = opt["name"]
- val = opt["value"]
-
- if name not in cmd_opts:
- cmd_opts[name] = []
-
- cmd_opts[name].append({"value": val})
- cmd["options"] = cmd_opts
- elif cmd["type"] == "exec":
- cmd["command"] = cmd_data["command"]
-
- if "from" in cmd_data:
- cmd["from"] = cmd_data["from"]
-        elif cmd["type"] in ["wait", "intr", "kill"]:
- # 'proc_id' is used to store bg_id for wait/kill/intr
- # 'bg_id' is used for test/exec
- # this is used to distinguish between the two in NetTestSlave code
- cmd["proc_id"] = cmd_data["bg_id"]
- elif cmd["type"] == "config":
- cmd["persistent"] = False
- if "persistent" in cmd_data:
- cmd["persistent"] = bool_it(cmd_data["persistent"])
-
- cmd["options"] = []
- for opt in cmd_data["options"]:
- name = opt["name"]
- value = opt["value"]
-                cmd["options"].append({"name": name, "value": value})
- elif cmd["type"] == "ctl_wait":
- cmd["seconds"] = int(cmd_data["seconds"])
- else:
- msg = "Unknown command type '%s'" % cmd["type"]
- raise RecipeError(msg, cmd_data)
-
-
- if cmd["type"] in ["test", "exec"]:
- if "bg_id" in cmd_data:
- cmd["bg_id"] = cmd_data["bg_id"]
-
- if "timeout" in cmd_data:
- try:
- cmd["timeout"] = int(cmd_data["timeout"])
- except ValueError:
- msg = "Timeout value must be an integer."
- raise RecipeError(msg, cmd_data)
-
- return cmd
-
- def _check_task(self, task):
- err = False
- bg_ids = {}
- for i, command in enumerate(task["skeleton"]):
- if command["type"] == "ctl_wait":
- continue
-
- machine_id = command["host"]
- if not machine_id in bg_ids:
- bg_ids[machine_id] = set()
-
- cmd_type = command["type"]
- if cmd_type in ["wait", "intr", "kill"]:
- bg_id = command["proc_id"]
- if bg_id in bg_ids[machine_id]:
- bg_ids[machine_id].remove(bg_id)
- else:
-                    logging.error("Found command \"%s\" for bg_id \"%s\" on "
-                                  "host \"%s\" which was not previously "
-                                  "defined", cmd_type, bg_id, machine_id)
- err = True
-
- if "bg_id" in command:
- bg_id = command["bg_id"]
- if not bg_id in bg_ids[machine_id]:
- bg_ids[machine_id].add(bg_id)
- else:
-                        logging.error("Command \"%d\" uses bg_id \"%s\" on host"
- "\"%s\" which is already used",
- i, bg_id, machine_id)
- err = True
-
- for machine_id in bg_ids:
- for bg_id in bg_ids[machine_id]:
-                logging.error("bg_id \"%s\" on host \"%s\" has no kill/wait "
- "command to it", bg_id, machine_id)
- err = True
-
- return err
-
- def _cleanup_slaves(self):
- if self._machines == None:
- return
-
- for machine_id, machine in self._machines.iteritems():
- if machine.is_configured():
- try:
- machine.cleanup()
- except:
- pass
-
- #clean-up slave logger
- self._log_ctl.remove_slave(machine_id)
-
- for m_id in list(self._machines.keys()):
- del self._machines[m_id]
-
- # remove dynamically created bridges
- for bridge in self._network_bridges.itervalues():
- bridge.cleanup()
- self._network_bridges = {}
-
- def match_setup(self):
- self.run_mode = "match_setup"
- res = self._run_python_task()
- return {"passed": True}
-
- def run_recipe(self):
- try:
- res = self._run_recipe()
- except Exception as exc:
-            logging.error("Recipe execution terminated by unexpected exception")
- raise
- finally:
- if self._packet_capture:
- self._stop_packet_capture()
- self._gather_capture_files()
- self._cleanup_slaves()
-
- return res
-
- def prepare_test_env(self):
- try:
- self.provision_machines()
- self.print_match_description()
- if self.run_mode == "match_setup":
- return True
- self._prepare_network()
- Task.ctl.init_hosts(self._machines)
- return True
- except (NoMatchError) as exc:
- self._cleanup_slaves()
- return False
- except (KeyboardInterrupt, Exception) as exc:
- msg = "Exception raised during configuration."
- logging.error(msg)
- self._cleanup_slaves()
- raise
-
- def _run_recipe(self):
- overall_res = {"passed": True}
-
- try:
- self._res_serializer.add_task()
- res = self._run_python_task()
- except CommandException as exc:
- logging.debug(exc)
- overall_res["passed"] = False
- overall_res["err_msg"] = "Command exception raised."
-
- for machine in self._machines.itervalues():
- machine.restore_system_config()
-
- # task failed
- if not res:
- overall_res["passed"] = False
- overall_res["err_msg"] = "At least one command failed."
-
- return overall_res
-
- def init_taskapi(self):
- Task.ctl = Task.ControllerAPI(self)
-
- def _run_python_task(self):
- #backup of resource table
- res_table_bkp = copy.deepcopy(self._resource_table)
-
- cwd = os.getcwd()
- task_path = self._recipe_path
- name = os.path.basename(task_path.abs_path()).split(".")[0]
- sys.path.append(os.path.dirname(task_path.resolve()))
- os.chdir(os.path.dirname(task_path.resolve()))
- imp.load_source(name, task_path.resolve())
- os.chdir(cwd)
- sys.path.remove(os.path.dirname(task_path.resolve()))
-
- #restore resource table
- self._resource_table = res_table_bkp
-
- return Task.ctl._result
-
- def _run_command(self, command):
- logging.info("Executing command: [%s]", str_command(command))
-
- if "desc" in command:
- logging.info("Cmd description: %s", command["desc"])
-
- if command["type"] == "ctl_wait":
- sleep(command["seconds"])
- cmd_res = {"passed": True,
-                       "res_header": "%-9s%ss" % ("ctl_wait",
- command["seconds"]),
- "msg": "",
- "res_data": None}
- if self._res_serializer:
- self._res_serializer.add_cmd_result(command, cmd_res)
- return cmd_res
-
- machine_id = command["host"]
- machine = self._machines[machine_id]
-
- try:
- cmd_res = machine.run_command(command)
- except Exception as exc:
- cmd_res = {"passed": False,
- "res_data": {"Exception": str(exc)},
- "msg": "Exception raised.",
- "res_header": "EXCEPTION",
- "report": str(exc)}
- raise
- finally:
- if self._res_serializer:
- self._res_serializer.add_cmd_result(command, cmd_res)
-
- if cmd_res["passed"]:
- res_str = decorate_with_preset("PASS", "pass")
- else:
- res_str = decorate_with_preset("FAIL", "fail")
- logging.info("Result: %s" % res_str)
-        if "report" in cmd_res and cmd_res["report"] != "":
- logging.info("Result data:")
- for line in cmd_res["report"].splitlines():
- logging.info(4*" " + line)
- if "msg" in cmd_res and cmd_res["msg"] != "":
-            logging.info("Status message from slave: \"%s\"" % cmd_res["msg"])
-
- return cmd_res
-
- def _start_packet_capture(self):
- logging.info("Starting packet capture")
- for machine_id, machine in self._machines.iteritems():
- capture_files = machine.start_packet_capture()
- self._remote_capture_files[machine_id] = capture_files
-
- def _stop_packet_capture(self):
- logging.info("Stopping packet capture")
- for machine_id, machine in self._machines.iteritems():
- machine.stop_packet_capture()
-
- # TODO: Move this function to logging
- def _gather_capture_files(self):
- logging_root = self._log_ctl.get_recipe_log_path()
- logging_root = os.path.abspath(logging_root)
- logging.info("Retrieving capture files from slaves")
- for machine_id, machine in self._machines.iteritems():
- slave_logging_dir = os.path.join(logging_root, machine_id + "/")
- try:
- os.mkdir(slave_logging_dir)
- except OSError as err:
- if err.errno != 17:
- msg = "Cannot access the logging directory %s" \
- % slave_logging_dir
- raise NetTestError(msg)
-
- capture_files = self._remote_capture_files[machine_id]
- for if_id, remote_path in capture_files.iteritems():
- filename = "%s.pcap" % if_id
- local_path = os.path.join(slave_logging_dir, filename)
- machine.copy_file_from_machine(remote_path, local_path)
-
- logging.info("pcap files from machine %s stored at %s",
- machine_id, slave_logging_dir)
-
- def _load_test_modules(self, dirs):
- modules = {}
- for dir_name in dirs:
- files = os.listdir(dir_name)
- for f in files:
- test_path = os.path.abspath("%s/%s" % (dir_name, f))
- if os.path.isfile(test_path):
- match = re.match("(.+)\.py$", f)
- if match:
- test_name = match.group(1)
- test_hash = md5sum(test_path)
-
- if test_name in modules:
-                            msg = "Overriding previously defined test '%s' " \
- "from %s with a different one located in " \
- "%s" % (test_name, test_path,
- modules[test_name]["path"])
- logging.warn(msg)
-
- modules[test_name] = {"path": test_path,
- "hash": test_hash}
- return modules
-
- def _load_test_tools(self, dirs):
- packages = {}
- for dir_name in dirs:
- files = os.listdir(dir_name)
- for f in files:
- pkg_path = os.path.abspath("%s/%s" % (dir_name, f))
- if os.path.isdir(pkg_path):
- pkg_name = os.path.basename(pkg_path.rstrip("/"))
- pkg_hash = dir_md5sum(pkg_path)
-
- if pkg_name in packages:
- msg = "Overriding previously defined tools " \
-                              "package '%s' from %s with a different " \
- "one located in %s" % (pkg_name, pkg_path,
- packages[pkg_name]["path"])
- logging.warn(msg)
-
- packages[pkg_name] = {"path": pkg_path,
- "hash": pkg_hash}
- return packages
-
- def _get_alias(self, alias):
- if alias in self._defined_aliases:
- return self._defined_aliases[alias]
-
- def _get_aliases(self):
- return self._defined_aliases
-
-class MessageDispatcher(ConnectionHandler):
- def __init__(self, log_ctl):
- super(MessageDispatcher, self).__init__()
- self._log_ctl = log_ctl
- self._machines = dict()
-
- def add_slave(self, machine, connection):
- machine_id = machine.get_id()
- self._machines[machine_id] = machine
- self.add_connection(machine_id, connection)
-
- def send_message(self, machine_id, data):
- soc = self.get_connection(machine_id)
-
- if send_data(soc, data) == False:
- msg = "Connection error from slave %s" % machine_id
- raise NetTestError(msg)
-
- def wait_for_result(self, machine_id):
- wait = True
- while wait:
- connected_slaves = self._connection_mapping.keys()
-
- messages = self.check_connections()
-
- remaining_slaves = self._connection_mapping.keys()
-
- for msg in messages:
-                if msg[1]["type"] == "result" and msg[0] == machine_id:
- wait = False
- result = msg[1]["result"]
- else:
- self._process_message(msg)
-
- if connected_slaves != remaining_slaves:
- disconnected_slaves = set(connected_slaves) -\
- set(remaining_slaves)
- msg = "Slaves " + str(list(disconnected_slaves)) + \
- " disconnected from the controller."
- raise NetTestError(msg)
-
- return result
-
- def _process_message(self, message):
- if message[1]["type"] == "log":
- record = message[1]["record"]
- self._log_ctl.add_client_log(message[0], record)
- elif message[1]["type"] == "result":
-            msg = "Recieved result message from different slave %s" % message[0]
- logging.debug(msg)
- elif message[1]["type"] == "if_update":
- machine = self._machines[message[0]]
- machine.interface_update(message[1])
- elif message[1]["type"] == "if_deleted":
- machine = self._machines[message[0]]
- machine.dev_db_delete(message[1])
- elif message[1]["type"] == "exception":
-            msg = "Slave %s: %s" % (message[0], message[1]["Exception"])
- raise CommandException(msg)
- elif message[1]["type"] == "error":
- msg = "Recieved an error message from slave %s: %s" %\
- (message[0], message[1]["err"])
- raise CommandException(msg)
- else:
- msg = "Unknown message type: %s" % message[1]["type"]
- raise NetTestError(msg)
-
- def disconnect_slave(self, machine_id):
- soc = self.get_connection(machine_id)
- self.remove_connection(soc)
- del self._machines[machine_id]
diff --git a/lnst/Controller/RecipeParser.py b/lnst/Controller/RecipeParser.py
deleted file mode 100644
index 09233a7..0000000
--- a/lnst/Controller/RecipeParser.py
+++ /dev/null
@@ -1,572 +0,0 @@
-"""
-This module defines RecipeParser class useful to parse xml recipes
-
-Copyright 2013 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-rpazdera(a)redhat.com (Radek Pazdera)
-"""
-
-import os
-from lnst.Common.Path import Path
-from lnst.Controller.XmlParser import XmlParser
-from lnst.Controller.XmlProcessing import XmlProcessingError, XmlData
-from lnst.Controller.XmlProcessing import XmlCollection
-
-class RecipeError(XmlProcessingError):
- pass
-
-class RecipeParser(XmlParser):
- def __init__(self, recipe_path):
- recipe_path = Path(None, recipe_path).abs_path()
- super(RecipeParser, self).__init__("schema-recipe.rng", recipe_path)
-
- def _process(self, lnst_recipe):
- recipe = XmlData(lnst_recipe)
-
- # machines
- machines_tag = lnst_recipe.find("network")
- if machines_tag is not None:
- machines = recipe["machines"] = XmlCollection(machines_tag)
- for machine_tag in machines_tag:
- machines.append(self._process_machine(machine_tag))
-
- # tasks
- tasks = recipe["tasks"] = XmlCollection()
- task_tags = lnst_recipe.findall("task")
- for task_tag in task_tags:
- tasks.append(self._process_task(task_tag))
-
- return recipe
-
- def _process_machine(self, machine_tag):
- machine = XmlData(machine_tag)
- machine["id"] = self._get_attribute(machine_tag, "id")
-
- # params
- params_tag = machine_tag.find("params")
- params = self._process_params(params_tag)
- if len(params) > 0:
- machine["params"] = params
-
- # interfaces
- interfaces_tag = machine_tag.find("interfaces")
- if interfaces_tag is not None and len(interfaces_tag) > 0:
- machine["interfaces"] = XmlCollection(interfaces_tag)
-
- lo_netns = []
- unique_ids = []
- for interface_tag in interfaces_tag:
- interfaces = self._process_interface(interface_tag)
-
- for interface in interfaces:
- if interface['id'] in unique_ids:
-                        msg = "Interface with ID \"%s\" has already been "\
-                              "defined for this machine." % interface['id']
- raise RecipeError(msg, interface_tag)
- else:
- unique_ids.append(interface['id'])
-
- if interface['type'] == 'lo':
- if interface['netns'] in lo_netns:
- msg = "Only one loopback device per netns "\
- "is allowed."
- raise RecipeError(msg, interface_tag)
- else:
- lo_netns.append(interface['netns'])
- elif interface['type'] == "ovs_bridge":
- ovs_conf = interface["ovs_conf"]
-                        for i in ovs_conf["tunnels"] + ovs_conf["internals"]:
- if i['id'] in unique_ids:
- msg = "Interface with ID \"%s\" has
already "\
- "been defined for this machine." %\
- i['id']
- raise RecipeError(msg, i)
- else:
- unique_ids.append(i['id'])
-
- machine["interfaces"].extend(interfaces)
-
- return machine
-
- def _process_params(self, params_tag):
- params = XmlCollection(params_tag)
- if params_tag is not None:
- for param_tag in params_tag:
- param = XmlData(param_tag)
-                param["name"] = self._get_attribute(param_tag, "name")
-                param["value"] = self._get_attribute(param_tag, "value")
- params.append(param)
-
- return params
-
- def _process_interface(self, iface_tag):
- iface = XmlData(iface_tag)
- iface["type"] = iface_tag.tag
-
- if iface["type"] == "veth_pair":
- iface = self._process_interface(iface_tag[0])[0]
- iface2 = self._process_interface(iface_tag[1])[0]
-
- iface["peer"] = iface2["id"]
- iface2["peer"] = iface["id"]
-
- return [iface, iface2]
-
- iface["id"] = self._get_attribute(iface_tag, "id")
-
- iface["netns"] = None
- if self._has_attribute(iface_tag, "netns"):
- iface["netns"] = self._get_attribute(iface_tag, "netns")
-
- # netem
- netem_tag = iface_tag.find("netem")
- if netem_tag is not None:
- iface["netem"] = self._process_netem(netem_tag)
-
- # params
- params_tag = iface_tag.find("params")
- params = self._process_params(params_tag)
- if len(params) > 0:
- iface["params"] = params
-
- # addresses
- addresses_tag = iface_tag.find("addresses")
- addrs = self._process_addresses(addresses_tag)
- iface["addresses"] = addrs
-
- if iface["type"] == "eth":
-            iface["network"] = self._get_attribute(iface_tag, "label")
-        elif iface["type"] in ["bond", "bridge", "macvlan", "team"]:
- # slaves
- slaves_tag = iface_tag.find("slaves")
- if slaves_tag is not None and len(slaves_tag) > 0:
- iface["slaves"] = XmlCollection(slaves_tag)
- for slave_tag in slaves_tag:
- slave = XmlData(slave_tag)
- slave["id"] = self._get_attribute(slave_tag,
"id")
-
- # slave options
- opts_tag = slave_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- slave["options"] = opts
-
- iface["slaves"].append(slave)
-
- # interface options
- opts_tag = iface_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- iface["options"] = opts
- elif iface["type"] in ["vti", "vti6"]:
- # interface options
- opts_tag = iface_tag.find("options")
- opts = self._process_options(opts_tag)
- iface["options"] = opts
- elif iface["type"] in ["vlan"]:
- # real_dev of the VLAN interface
- slaves_tag = iface_tag.find("slaves")
- if slaves_tag is None or len(slaves_tag) != 1:
- msg = "VLAN '%s' need exactly one slave definition."\
- % iface["id"]
- raise RecipeError(msg, iface_tag)
-
- iface["slaves"] = XmlCollection(slaves_tag)
-
- slave_tag = slaves_tag[0]
- slave = XmlData(slave_tag)
- slave["id"] = self._get_attribute(slave_tag, "id")
-
- iface["slaves"].append(slave)
-
- # interface options
- opts_tag = iface_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- iface["options"] = opts
- elif iface["type"] in ["vxlan"]:
- # real_dev of the VXLAN interface
- slaves_tag = iface_tag.find("slaves")
- if slaves_tag is not None and len(slaves_tag) > 1:
- msg = "VXLAN '%s' needs one or no slave definition."\
- % iface["id"]
- raise RecipeError(msg, iface_tag)
-
- if slaves_tag:
- iface["slaves"] = XmlCollection(slaves_tag)
- slave_tag = slaves_tag[0]
- slave = XmlData(slave_tag)
- slave["id"] = self._get_attribute(slave_tag, "id")
- iface["slaves"].append(slave)
-
- # interface options
- opts_tag = iface_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- iface["options"] = opts
- elif iface["type"] == "ovs_bridge":
- slaves_tag = iface_tag.find("slaves")
- iface["slaves"] = XmlCollection(slaves_tag)
- ovsb_slaves = []
-
- iface["ovs_conf"] = XmlData(slaves_tag)
- if slaves_tag is not None:
- for slave_tag in slaves_tag:
- slave = XmlData(slave_tag)
-                    slave["id"] = str(self._get_attribute(slave_tag, "id"))
- ovsb_slaves.append(slave["id"])
-
- opts_tag = slave_tag.find("options")
- opts = self._process_options(opts_tag)
- slave["options"] = opts
-
- iface["slaves"].append(slave)
-
- vlan_elems = iface_tag.findall("vlan")
- vlans = iface["ovs_conf"]["vlans"] = XmlData(slaves_tag)
- for vlan in vlan_elems:
- vlan_tag = str(self._get_attribute(vlan, "tag"))
- if vlan_tag in vlans:
- msg = "VLAN '%s' already defined for "\
- "this ovs_bridge." % vlan_tag
- raise RecipeError(msg, vlan)
-
- vlans[vlan_tag] = XmlData(vlan)
- vlans[vlan_tag]["slaves"] = XmlCollection(vlan)
- vlan_slaves = vlans[vlan_tag]["slaves"]
-
- slaves_tag = vlan.find("slaves")
- for slave_tag in slaves_tag:
- slave_id = str(self._get_attribute(slave_tag, "id"))
- if slave_id not in ovsb_slaves:
- msg = "No port with id '%s' defined for "\
- "this ovs_bridge." % slave_id
- raise RecipeError(msg, slave_tag)
-
- if slave_id in vlan_slaves:
- msg = "Port '%s' already a member of vlan %s"\
- % (slave_id, vlan_tag)
- raise RecipeError(msg, slave_tag)
- else:
- vlan_slaves.append(slave_id)
-
- bonded_slaves = {}
- bond_elems = iface_tag.findall("bond")
- bonds = iface["ovs_conf"]["bonds"] = XmlData(slaves_tag)
- for bond_tag in bond_elems:
- bond_id = str(self._get_attribute(bond_tag, "id"))
- if bond_id in bonds:
- msg = "Bond with id '%s' already defined for "\
- "this ovs_bridge." % bond_id
- raise RecipeError(msg, bond_tag)
- bonds[bond_id] = XmlData(bond_tag)
- bond_slaves = bonds[bond_id]["slaves"] =
XmlCollection(bond_tag)
-
- slaves_tag = bond_tag.find("slaves")
- for slave_tag in slaves_tag:
- slave_id = str(self._get_attribute(slave_tag, "id"))
- if slave_id not in ovsb_slaves:
- msg = "No port with id '%s' defined for "\
- "this ovs_bridge." % slave_id
- raise RecipeError(msg, slave_tag)
-
- if slave_id in bonded_slaves:
- msg = "Port with id '%s' already in bond with id
'%s'"\
- % (slave_id, bonded_slaves[slave_id])
- raise RecipeError(msg, slave_tag)
- else:
- bonded_slaves[slave_id] = bond_id
-
- bond_slaves.append(slave_id)
-
- opts_tag = bond_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- bonds[bond_id]["options"] = opts
-
- unique_ids = []
- tunnels = iface["ovs_conf"]["tunnels"] =
XmlCollection(slaves_tag)
- tunnel_elems = iface_tag.findall("tunnel")
- for tunnel_elem in tunnel_elems:
- tunnels.append(XmlData(tunnel_elem))
- tunnel = tunnels[-1]
- tunnel["id"] = str(self._get_attribute(tunnel_elem,
"id"))
- if tunnel["id"] in unique_ids:
- msg = "Tunnel with id '%s' already defined for "\
- "this ovs_bridge." % tunnel["id"]
- raise RecipeError(msg, tunnel_elem)
- else:
- unique_ids.append(tunnel["id"])
-
- t = str(self._get_attribute(tunnel_elem, "type"))
- tunnel["type"] = t
-
- opts_elem = tunnel_elem.find("options")
- opts = self._process_options(opts_elem)
- if len(opts) > 0:
- tunnel["options"] = opts
-
- # addresses
- addresses_tag = tunnel_elem.find("addresses")
- addrs = self._process_addresses(addresses_tag)
- tunnel["addresses"] = addrs
-
- iface["ovs_conf"]["internals"] =
XmlCollection(slaves_tag)
- internals = iface["ovs_conf"]["internals"]
- internal_elems = iface_tag.findall("internal")
- for internal_elem in internal_elems:
- internals.append(XmlData(internal_elem))
- internal = internals[-1]
- internal["id"] = str(self._get_attribute(internal_elem,
"id"))
- if internal["id"] in unique_ids:
- msg = "Internal id '%s' already defined for "\
- "this ovs_bridge." % internal["id"]
- raise RecipeError(msg, internal_elem)
- else:
- unique_ids.append(internal["id"])
-
- opts_elem = internal_elem.find("options")
- opts = self._process_options(opts_elem)
- if len(opts) > 0:
- internal["options"] = opts
-
- # addresses
- addresses_tag = internal_elem.find("addresses")
- addrs = self._process_addresses(addresses_tag)
- internal["addresses"] = addrs
-
- iface["ovs_conf"]["flow_entries"] =
XmlCollection(slaves_tag)
- flow_entries = iface["ovs_conf"]["flow_entries"]
- flow_elems = iface_tag.findall("flow_entries")
- if len(flow_elems) == 1:
- entries = flow_elems[0].findall("entry")
- for entry in entries:
- if self._has_attribute(entry, "value"):
- flow_entries.append(self._get_attribute(entry,
- "value"))
- else:
- flow_entries.append(self._get_content(entry))
-
- return [iface]
-
- def _process_addresses(self, addresses_tag):
- addresses = XmlCollection(addresses_tag)
- if addresses_tag is not None and len(addresses_tag) > 0:
- for addr_tag in addresses_tag:
- if self._has_attribute(addr_tag, "value"):
- addr = self._get_attribute(addr_tag, "value")
- else:
- addr = self._get_content(addr_tag)
- addresses.append(addr)
- return addresses
-
- def _process_options(self, opts_tag):
- options = XmlCollection(opts_tag)
- if opts_tag is not None:
- for opt_tag in opts_tag:
- opt = XmlData(opt_tag)
- opt["name"] = self._get_attribute(opt_tag, "name")
- if self._has_attribute(opt_tag, "value"):
- opt["value"] = self._get_attribute(opt_tag,
"value")
- else:
- opt["value"] = self._get_content(opt_tag)
- options.append(opt)
-
- return options
-
- def _validate_netem(self, options, netem_op, netem_tag):
- if netem_op == "delay":
- valid = False
- jitter = False
- correlation = False
- distribution = False
- valid_distributions = ["normal", "uniform",
"pareto", "paretonormal"]
- for opt in options:
- if "time" in opt.values():
- valid = True
- elif "distribution" in opt.values():
- if opt["value"] not in valid_distributions:
- raise RecipeError("netem: invalid distribution type",
netem_tag)
- else:
- distribution = True
- elif "jitter" in opt.values():
- jitter = True
- elif "correlation" in opt.values():
- correlation = True
- if not jitter:
- if correlation or distribution:
- raise RecipeError("netem: jitter option is mandatory when using
<correlation> or <distribution>", netem_tag)
- if not valid:
- raise RecipeError("netem: time option is mandatory for
<delay>", netem_tag)
- elif netem_op == "loss":
- for opt in options:
- if "percent" in opt.values():
- return
- raise RecipeError("netem: percent option is mandatory for
<loss>", netem_tag)
- elif netem_op == "duplication":
- for opt in options:
- if "percent" in opt.values():
- return
- raise RecipeError("netem: percent option is mandatory for
<duplication>", netem_tag)
- elif netem_op == "corrupt":
- for opt in options:
- if "percent" in opt.values():
- return
- raise RecipeError("netem: percent option is mandatory for
<corrupt>", netem_tag)
- elif netem_op == "reordering":
- for opt in options:
- if "percent" in opt.values():
- return
- raise RecipeError("netem: percent option is mandatory for
<reordering>", netem_tag)
-
- def _process_netem(self, netem_tag):
- interface = XmlData(netem_tag)
- # params
- for netem_op in ["delay", "loss", "duplication",
"corrupt", "reordering"]:
- netem_op_tag = netem_tag.find(netem_op)
- if netem_op_tag is not None:
- options_tag = netem_op_tag.find("options")
- options = self._process_options(options_tag)
- if len(options) > 0:
- self._validate_netem(options, netem_op, netem_tag)
- interface[netem_op] = options
- return interface
-
- def _process_task(self, task_tag):
- task = XmlData(task_tag)
-
- if self._has_attribute(task_tag, "quit_on_fail"):
- task["quit_on_fail"] = self._get_attribute(task_tag,
"quit_on_fail")
-
- if self._has_attribute(task_tag, "module_dir"):
- base_dir = os.path.dirname(task_tag.attrib["__file"])
- dir_path = str(self._get_attribute(task_tag, "module_dir"))
- exp_path = os.path.expanduser(dir_path)
- abs_path = os.path.join(base_dir, exp_path)
- norm_path = os.path.normpath(abs_path)
- task["module_dir"] = norm_path
-
- if self._has_attribute(task_tag, "tools_dir"):
- base_dir = os.path.dirname(task_tag.attrib["__file"])
- dir_path = str(self._get_attribute(task_tag, "tools_dir"))
- exp_path = os.path.expanduser(dir_path)
- abs_path = os.path.join(base_dir, exp_path)
- norm_path = os.path.normpath(abs_path)
- task["tools_dir"] = norm_path
-
- if self._has_attribute(task_tag, "python"):
- task["python"] = self._get_attribute(task_tag, "python")
- return task
-
- if len(task_tag) > 0:
- task["commands"] = XmlCollection(task_tag)
- for cmd_tag in task_tag:
- if cmd_tag.tag == "run":
- cmd = self._process_run_cmd(cmd_tag)
- elif cmd_tag.tag == "config":
- cmd = self._process_config_cmd(cmd_tag)
- elif cmd_tag.tag == "ctl_wait":
- cmd = self._process_ctl_wait_cmd(cmd_tag)
- elif cmd_tag.tag in ["wait", "intr",
"kill"]:
- cmd = self._process_signal_cmd(cmd_tag)
- else:
- msg = "Unknown command '%s'." % cmd_tag.tag
- raise RecipeError(msg, cmd_tag)
-
- task["commands"].append(cmd)
-
- return task
-
- def _process_run_cmd(self, cmd_tag):
- cmd = XmlData(cmd_tag)
- cmd["host"] = self._get_attribute(cmd_tag, "host")
-
- cmd["netns"] = None
- if self._has_attribute(cmd_tag, "netns"):
- cmd["netns"] = self._get_attribute(cmd_tag, "netns")
-
- has_module = self._has_attribute(cmd_tag, "module")
- has_command = self._has_attribute(cmd_tag, "command")
- has_from = self._has_attribute(cmd_tag, "from")
-
- if (has_module and has_command) or (has_module and has_from):
- msg = "Invalid combination of attributes."
- raise RecipeError(msg, cmd)
-
- if has_module:
- cmd["type"] = "test"
- cmd["module"] = self._get_attribute(cmd_tag, "module")
-
- # options
- opts_tag = cmd_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- cmd["options"] = opts
- elif has_command:
- cmd["type"] = "exec"
- cmd["command"] = self._get_attribute(cmd_tag, "command")
-
- if self._has_attribute(cmd_tag, "from"):
- cmd["from"] = self._get_attribute(cmd_tag, "from")
-
- if self._has_attribute(cmd_tag, "bg_id"):
- cmd["bg_id"] = self._get_attribute(cmd_tag, "bg_id")
-
- if self._has_attribute(cmd_tag, "timeout"):
- cmd["timeout"] = self._get_attribute(cmd_tag, "timeout")
-
- if self._has_attribute(cmd_tag, "expect"):
- cmd["expect"] = self._get_attribute(cmd_tag, "expect")
-
- return cmd
-
- def _process_config_cmd(self, cmd_tag):
- cmd = XmlData(cmd_tag)
- cmd["type"] = "config"
- cmd["host"] = self._get_attribute(cmd_tag, "host")
-
- cmd["netns"] = None
- if self._has_attribute(cmd_tag, "netns"):
- cmd["netns"] = self._get_attribute(cmd_tag, "netns")
-
- if self._has_attribute(cmd_tag, "persistent"):
- cmd["persistent"] = self._get_attribute(cmd_tag,
"persistent")
-
- # inline option
- if self._has_attribute(cmd_tag, "option"):
- cmd["options"] = XmlCollection(cmd_tag)
- if self._has_attribute(cmd_tag, "value"):
- opt = XmlData(cmd_tag)
- opt["name"] = self._get_attribute(cmd_tag, "option")
- opt["value"] = self._get_attribute(cmd_tag, "value")
-
- cmd["options"] = XmlCollection(cmd_tag)
- cmd["options"].append(opt)
- else:
- raise RecipeError("Missing option value.", cmd)
- else:
- # options
- opts_tag = cmd_tag.find("options")
- opts = self._process_options(opts_tag)
- if len(opts) > 0:
- cmd["options"] = opts
-
- return cmd
-
- def _process_ctl_wait_cmd(self, cmd_tag):
- cmd = XmlData(cmd_tag)
- cmd["type"] = "ctl_wait"
- cmd["seconds"] = self._get_attribute(cmd_tag, "seconds")
- return cmd
-
- def _process_signal_cmd(self, cmd_tag):
- cmd = XmlData(cmd_tag)
- cmd["type"] = cmd_tag.tag
- cmd["host"] = self._get_attribute(cmd_tag, "host")
- cmd["bg_id"] = self._get_attribute(cmd_tag, "bg_id")
- cmd["netns"] = None
- return cmd
diff --git a/lnst/Controller/SlavePool.py b/lnst/Controller/SlavePool.py
deleted file mode 100644
index 13cc34e..0000000
--- a/lnst/Controller/SlavePool.py
+++ /dev/null
@@ -1,648 +0,0 @@
-"""
-This module contains implementaion of SlavePool class that
-can be used to maintain a cluster of test machines.
-
-These machines can be provisioned and used in test recipes.
-
-Copyright 2012 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-rpazdera(a)redhat.com (Radek Pazdera)
-"""
-
-import logging
-import os
-import re
-import socket
-import select
-from lnst.Common.Config import lnst_config
-from lnst.Common.NetUtils import normalize_hwaddr
-from lnst.Controller.Machine import Machine
-from lnst.Controller.SlaveMachineParser import SlaveMachineParser
-from lnst.Controller.SlaveMachineParser import SlaveMachineError
-from lnst.Common.Colours import decorate_with_preset
-from lnst.Common.Utils import check_process_running
-
-class SlavePool:
- """
- This class is responsible for managing test machines that
- are available at the controler and can be used for testing.
- """
- def __init__(self, pools, pool_checks=True):
- self._map = {}
- self._pools = {}
- self._pool = {}
-
- self._machine_matches = []
- self._network_matches = []
-
- self._allow_virt = lnst_config.get_option("environment",
- "allow_virtual")
- self._allow_virt &= check_process_running("libvirtd")
- self._pool_checks = pool_checks
-
- self._mapper = SetupMapper()
- self._mreqs = None
-
- logging.info("Checking machine pool availability.")
- for pool_name, pool_dir in pools.items():
- self._pools[pool_name] = {}
- self.add_dir(pool_name, pool_dir)
- if len(self._pools[pool_name]) == 0:
- del self._pools[pool_name]
-
- self._mapper.set_pools(self._pools)
- logging.info("Finished loading pools.")
-
- def get_pools(self):
- return self._pools
-
- def add_dir(self, pool_name, dir_path):
- logging.info("Processing pool '%s', directory '%s'" %
(pool_name,
- dir_path))
- pool = self._pools[pool_name]
-
- try:
- dentries = os.listdir(dir_path)
- except OSError:
- logging.warn("Directory '%s' does not exist for pool
'%s'" %
- (dir_path,
- pool_name))
- return
-
- for dirent in dentries:
- m_id, m = self.add_file(pool_name, dir_path, dirent)
- if m_id != None and m != None:
- pool[m_id] = m
-
- if len(pool) == 0:
- logging.warn("No machines found in pool '%s', directory
'%s'" %
- (pool_name,
- dir_path))
-
- max_len = 0
- for m_id in pool.keys():
- if len(m_id) > max_len:
- max_len = len(m_id)
-
- if self._pool_checks:
- check_sockets = {}
- for m_id, m in sorted(pool.iteritems()):
- hostname = m["params"]["hostname"]
- if "rpc_port" in m["params"]:
- port = m["params"]["rpc_port"]
- else:
- port = lnst_config.get_option('environment',
'rpcport')
-
- logging.debug("Querying machine '%s': %s:%s" %\
- (m_id, hostname, port))
-
- s = socket.socket()
- s.settimeout(0)
- try:
- s.connect((hostname, port))
- except:
- pass
- check_sockets[s] = m_id
-
- while len(check_sockets) > 0:
- rl, wl, el = select.select([], check_sockets.keys(), [])
- for s in wl:
- err = s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
- m_id = check_sockets[s]
- if err == 0:
- pool[m_id]["available"] = True
- s.shutdown(socket.SHUT_RDWR)
- s.close()
- del check_sockets[s]
- else:
- pool[m_id]["available"] = False
- s.close()
- del check_sockets[s]
- else:
- for m_id in pool.keys():
- pool[m_id]["available"] = True
-
- for m_id in sorted(list(pool.keys())):
- m = pool[m_id]
- if m["available"]:
- if 'libvirt_domain' in m['params']:
- libvirt_msg = " libvirt_domain: %s" %\
- m['params']['libvirt_domain']
- else:
- libvirt_msg = ""
- msg = "%s%s [%s] %s" % (m_id, (max_len - len(m_id)) * "
",
- decorate_with_preset("UP",
"pass"),
- libvirt_msg)
- else:
- msg = "%s%s [%s]" % (m_id, (max_len - len(m_id)) * "
",
- decorate_with_preset("DOWN",
"fail"))
- del pool[m_id]
-
- logging.info(msg)
-
- def add_file(self, pool_name, dir_path, dirent):
- filepath = dir_path + "/" + dirent
- pool = self._pools[pool_name]
- if os.path.isfile(filepath) and re.search("\.xml$", filepath, re.I):
- dirname, basename = os.path.split(filepath)
- m_id = re.sub("\.[xX][mM][lL]$", "", basename)
-
- parser = SlaveMachineParser(filepath)
- xml_data = parser.parse()
- machine_spec = self._process_machine_xml_data(m_id, xml_data)
-
- if 'libvirt_domain' in machine_spec['params'] and \
- not self._allow_virt:
- logging.debug("libvirtd not running or allow_virtual "\
- "disabled. Removing libvirt_domain from "\
- "machine '%s'" % m_id)
- del machine_spec['params']['libvirt_domain']
-
- # Check if there isn't any machine with the same
- # hostname or libvirt_domain already in the pool
- for pm_id, m in pool.iteritems():
- pm = m["params"]
- rm = machine_spec["params"]
- if pm["hostname"] == rm["hostname"]:
- msg = "You have the same machine listed twice in " \
- "your pool ('%s' and '%s')." % (m_id,
pm_id)
- raise SlaveMachineError(msg)
-
- if "libvirt_domain" in rm and "libvirt_domain" in pm
and \
- pm["libvirt_domain"] == rm["libvirt_domain"]:
- msg = "You have the same libvirt_domain listed twice in "
\
- "your pool ('%s' and '%s')." % (m_id,
pm_id)
- raise SlaveMachineError(msg)
-
- return (m_id, machine_spec)
- return (None, None)
-
- def _process_machine_xml_data(self, m_id, machine_xml_data):
- machine_spec = {"interfaces": {}, "params":{},
"security": {}}
-
- # process parameters
- if "params" in machine_xml_data:
- for param in machine_xml_data["params"]:
- name = str(param["name"])
- value = str(param["value"])
-
- if name == "rpc_port":
- machine_spec["params"][name] = int(value)
- else:
- machine_spec["params"][name] = value
-
- mandatory_params = ["hostname"]
- for p in mandatory_params:
- if p not in machine_spec["params"]:
- msg = "Mandatory parameter '%s' missing for machine
%s." \
- % (p, m_id)
- raise SlaveMachineError(msg, machine_xml_data["params"])
-
- # process interfaces
- if "interfaces" in machine_xml_data:
- for iface in machine_xml_data["interfaces"]:
- if_id = iface["id"]
- iface_spec = self._process_iface_xml_data(m_id, iface)
-
- if if_id not in machine_spec["interfaces"]:
- machine_spec["interfaces"][if_id] = iface_spec
- else:
- msg = "Duplicate interface id '%s'." % if_id
- raise SlaveMachineError(msg, iface)
- else:
- if "libvirt_domain" not in machine_spec["params"]:
- msg = "Machine '%s' has no testing interfaces. " \
- "This setup is supported only for virtual slaves." \
- % m_id
- raise SlaveMachineError(msg, machine_xml_data)
-
- machine_spec["security"] = machine_xml_data["security"]
-
- return machine_spec
-
- def _process_iface_xml_data(self, m_id, iface):
- if_id = iface["id"]
- iface_spec = {"params": {}}
- iface_spec["network"] = iface["network"]
-
- for param in iface["params"]:
- name = str(param["name"])
- value = str(param["value"])
-
- if name == "hwaddr":
- iface_spec["params"][name] = normalize_hwaddr(value)
- else:
- iface_spec["params"][name] = value
-
- mandatory_params = ["hwaddr"]
- for p in mandatory_params:
- if p not in iface_spec["params"]:
- msg = "Mandatory parameter '%s' missing for machine %s,
" \
- "interface '%s'." % (p, m_id, if_id)
- raise SlaveMachineError(msg, iface["params"])
-
- return iface_spec
-
- def set_machine_requirements(self, mreqs):
- self._mreqs = mreqs
- self._mapper.set_requirements(mreqs)
- self._mapper.reset_match_state()
-
- def provision_machines(self, machines):
- """
- This method will try to map a dictionary of machines'
- requirements to a pool of machines that is available to
- this instance.
-
- :param templates: Setup request (dict of required machines)
- :type templates: dict
-
- :return: XML machineconfigs of requested machines
- :rtype: dict
- """
- mapper = self._mapper
- logging.info("Matching machines, without virtuals.")
- res = mapper.match()
-
- if not res and not mapper.get_virtual() and self._allow_virt:
- logging.info("Match failed for normal machines, falling back "\
- "to matching virtual machines.")
- mapper.set_virtual(self._allow_virt)
- mapper.reset_match_state()
- res = mapper.match()
-
- if res:
- self._map = mapper.get_mapping()
- else:
- self._map = {}
-
- if self._map == {}:
- self._pool = {}
- return False
- else:
- self._pool = self._pools[self._map["pool_name"]]
-
- if self._map["virtual"]:
- mreqs = self._mreqs
- for m_id in self._map["machines"]:
- machines[m_id] = self._prepare_virtual_slave(m_id, mreqs[m_id])
- else:
- for m_id in self._map["machines"]:
- machines[m_id] = self._get_mapped_slave(m_id)
-
- return True
-
- def is_setup_virtual(self):
- return self._map["virtual"]
-
- def get_match(self):
- return self._map
-
- def _get_machine_mapping(self, m_id):
- return self._map["machines"][m_id]["target"]
-
- def _get_interface_mapping(self, m_id, if_id):
- return self._map["machines"][m_id]["interfaces"][if_id]
-
- def _get_network_mapping(self, net_id):
- return self._map["networks"][net_id]
-
- def _get_mapped_slave(self, tm_id):
- pm_id = self._get_machine_mapping(tm_id)
- pm = self._pool[pm_id]
-
- hostname = pm["params"]["hostname"]
-
- rpcport = None
- if "rpc_port" in pm["params"]:
- rpcport = pm["params"]["rpc_port"]
-
- machine = Machine(tm_id, hostname, None, rpcport, pm["security"])
-
- used = []
- if_map = self._map["machines"][tm_id]["interfaces"]
- for t_if, p_if in if_map.iteritems():
- pool_id = p_if["target"]
- used.append(pool_id)
- if_data = pm["interfaces"][pool_id]
-
- iface = machine.new_static_interface(t_if, "eth")
- iface.set_hwaddr(if_data["params"]["hwaddr"])
-
- for t_net, p_net in self._map["networks"].iteritems():
- if pm["interfaces"][pool_id]["network"] == p_net:
- iface.set_network(t_net)
- break
-
- for if_id, if_data in pm["interfaces"].iteritems():
- if if_id not in used:
- iface = machine.new_unused_interface("eth")
- iface.set_hwaddr(if_data["params"]["hwaddr"])
- iface.set_network(None)
-
- return machine
-
- def _prepare_virtual_slave(self, tm_id, tm):
- pm_id = self._get_machine_mapping(tm_id)
- pm = self._pool[pm_id]
-
- hostname = pm["params"]["hostname"]
- libvirt_domain = pm["params"]["libvirt_domain"]
-
- rpcport = None
- if "rpc_port" in pm["params"]:
- rpcport = pm["params"]["rpc_port"]
-
- machine = Machine(tm_id, hostname, libvirt_domain, rpcport,
- pm["security"])
-
- # make all the existing unused
- for if_id, if_data in pm["interfaces"].iteritems():
- iface = machine.new_unused_interface("eth")
- iface.set_hwaddr(if_data["params"]["hwaddr"])
- iface.set_network(None)
-
- # add all the other devices
- for if_id, if_data in tm["interfaces"].iteritems():
- iface = machine.new_virtual_interface(if_id, "eth")
- iface.set_network(if_data["network"])
- if "hwaddr" in if_data["params"]:
- iface.set_hwaddr(if_data["params"]["hwaddr"])
- if "driver" in if_data["params"]:
- iface.set_driver(if_data["params"]["driver"])
-
- return machine
-
-class MapperError(Exception):
- pass
-
-class SetupMapper(object):
- def __init__(self):
- self._pools = {}
- self._pool_stack = []
- self._pool = {}
- self._pool_name = None
- self._mreqs = {}
- self._unmatched_req_machines = []
- self._matched_pool_machines = []
- self._machine_stack = []
- self._net_label_mapping = {}
- self._virtual_matching = False
-
- def set_requirements(self, mreqs):
- self._mreqs = mreqs
-
- def set_pools(self, pools):
- self._pools = pools
-
- def set_virtual(self, virt_value):
- self._virtual_matching = virt_value
-
- for m_id, m in self._mreqs.iteritems():
- for if_id, interface in m["interfaces"].iteritems():
- if "params" in interface:
- for name, val in interface["params"].iteritems():
- if name not in ["hwaddr", "driver"]:
- msg = "Dynamically created interfaces "\
- "only support the 'hwaddr' and
'driver' "\
- "option. '%s=%s' found on machine
'%s' "\
- "interface '%s'" % (name, val,
- m_id, if_id)
- raise MapperError(msg)
-
- def get_virtual(self):
- return self._virtual_matching
-
- def reset_match_state(self):
- self._net_label_mapping = {}
- self._machine_stack = []
- self._unmatched_req_machines = sorted(self._mreqs.keys(), reverse=True)
-
- self._pool_stack = list(self._pools.keys())
- if len(self._pool_stack) > 0:
- self._pool_name = self._pool_stack.pop()
- self._pool = self._pools[self._pool_name]
-
- self._unmatched_pool_machines = []
- for p_id, p_machine in sorted(self._pool.iteritems(), reverse=True):
- if self._virtual_matching:
- if "libvirt_domain" in p_machine["params"]:
- self._unmatched_pool_machines.append(p_id)
- else:
- self._unmatched_pool_machines.append(p_id)
-
- if len(self._pool) > 0 and len(self._mreqs) > 0:
- self._push_machine_stack()
-
- def match(self):
- logging.info("Trying match with pool: %s" % self._pool_name)
- while len(self._machine_stack)>0:
- stack_top = self._machine_stack[-1]
- if self._virtual_matching and stack_top["virt_matched"]:
- if stack_top["current_match"] != None:
- cur_match = stack_top["current_match"]
- self._unmatched_pool_machines.append(cur_match)
- stack_top["current_match"] = None
- stack_top["virt_matched"] = False
-
- if self._if_match():
- if len(self._unmatched_req_machines) > 0:
- self._push_machine_stack()
- continue
- else:
- return True
- else:
- #unmap the pool machine
- if stack_top["current_match"] != None:
- cur_match = stack_top["current_match"]
- self._unmatched_pool_machines.append(cur_match)
- stack_top["current_match"] = None
-
- mreq_m_id = stack_top["m_id"]
- while len(stack_top["remaining_matches"]) > 0:
- pool_m_id = stack_top["remaining_matches"].pop()
- if self._check_machine_compatibility(mreq_m_id, pool_m_id):
- #map compatible pool machine
- stack_top["current_match"] = pool_m_id
- stack_top["unmatched_pool_ifs"] = \
- sorted(self._pool[pool_m_id]["interfaces"].keys(),
- reverse=True)
- self._unmatched_pool_machines.remove(pool_m_id)
- break
-
- if stack_top["current_match"] != None:
- #clear if mapping
- stack_top["if_stack"] = []
- #next iteration will match the interfaces
- if not self._virtual_matching:
- self._push_if_stack()
- continue
- else:
- self._pop_machine_stack()
- if len(self._machine_stack) == 0 and\
- len(self._pool_stack) > 0:
- logging.info("Match with pool %s not found." %
- self._pool_name)
- self._pool_name = self._pool_stack.pop()
- self._pool = self._pools[self._pool_name]
- logging.info("Trying match with pool: %s" %
- self._pool_name)
-
- self._unmatched_pool_machines = []
- for p_id, p_machine in sorted(self._pool.iteritems(),
reverse=True):
- if self._virtual_matching:
- if "libvirt_domain" in
p_machine["params"]:
- self._unmatched_pool_machines.append(p_id)
- else:
- self._unmatched_pool_machines.append(p_id)
-
- if len(self._pool) > 0 and len(self._mreqs) > 0:
- self._push_machine_stack()
- continue
- return False
-
- def _if_match(self):
- m_stack_top = self._machine_stack[-1]
- if_stack = m_stack_top["if_stack"]
-
- if self._virtual_matching:
- if m_stack_top["current_match"] != None:
- m_stack_top["virt_matched"] = True
- return True
- else:
- return False
-
- while len(if_stack) > 0:
- stack_top = if_stack[-1]
-
- req_m = self._mreqs[m_stack_top["m_id"]]
- pool_m = self._pool[m_stack_top["current_match"]]
- req_if = req_m["interfaces"][stack_top["if_id"]]
- req_net_label = req_if["network"]
-
- if stack_top["current_match"] != None:
- cur_match = stack_top["current_match"]
- m_stack_top["unmatched_pool_ifs"].append(cur_match)
- pool_if = pool_m["interfaces"][cur_match]
- pool_net_label = pool_if["network"]
- net_label_mapping = self._net_label_mapping[req_net_label]
- if net_label_mapping == (pool_net_label, m_stack_top["m_id"],
- stack_top["if_id"]):
- del self._net_label_mapping[req_net_label]
- stack_top["current_match"] = None
-
- while len(stack_top["remaining_matches"]) > 0:
- pool_if_id = stack_top["remaining_matches"].pop()
- pool_if = pool_m["interfaces"][pool_if_id]
- if self._check_interface_compatibility(req_if, pool_if):
- #map compatible interfaces
- stack_top["current_match"] = pool_if_id
- if req_net_label not in self._net_label_mapping:
- self._net_label_mapping[req_net_label] =\
- (pool_if["network"],
- m_stack_top["m_id"],
- stack_top["if_id"])
- m_stack_top["unmatched_pool_ifs"].remove(pool_if_id)
- break
-
- if stack_top["current_match"] != None:
- if len(m_stack_top["unmatched_ifs"]) > 0:
- self._push_if_stack()
- continue
- else:
- return True
- else:
- self._pop_if_stack()
- continue
- return False
-
- def _push_machine_stack(self):
- machine_match = {}
- machine_match["m_id"] = self._unmatched_req_machines.pop()
- machine_match["current_match"] = None
- machine_match["remaining_matches"] =
list(self._unmatched_pool_machines)
- machine_match["if_stack"] = []
-
- machine = self._mreqs[machine_match["m_id"]]
- machine_match["unmatched_ifs"] =
sorted(machine["interfaces"].keys(),
- reverse=True)
- machine_match["unmatched_pool_ifs"] = []
-
- if self._virtual_matching:
- machine_match["virt_matched"] = False
-
- self._machine_stack.append(machine_match)
-
- def _pop_machine_stack(self):
- stack_top = self._machine_stack.pop()
- self._unmatched_req_machines.append(stack_top["m_id"])
-
- def _push_if_stack(self):
- m_stack_top = self._machine_stack[-1]
- if_match = {}
- if_match["if_id"] = m_stack_top["unmatched_ifs"].pop()
- if_match["current_match"] = None
- if_match["remaining_matches"] =
list(m_stack_top["unmatched_pool_ifs"])
-
- m_stack_top["if_stack"].append(if_match)
-
- def _pop_if_stack(self):
- m_stack_top = self._machine_stack[-1]
- if_stack_top = m_stack_top["if_stack"].pop()
- m_stack_top["unmatched_ifs"].append(if_stack_top["if_id"])
-
- def _check_machine_compatibility(self, req_id, pool_id):
- req_machine = self._mreqs[req_id]
- pool_machine = self._pool[pool_id]
- for param, value in req_machine["params"].iteritems():
- if param not in pool_machine["params"] or\
- value != pool_machine["params"][param]:
- return False
- return True
-
- def _check_interface_compatibility(self, req_if, pool_if):
- label_mapping = self._net_label_mapping
- for req_label, mapping in label_mapping.iteritems():
- if req_label == req_if["network"] and\
- mapping[0] != pool_if["network"]:
- return False
- if mapping[0] == pool_if["network"] and\
- req_label != req_if["network"]:
- return False
- for param, value in req_if["params"].iteritems():
- if param not in pool_if["params"] or\
- value != pool_if["params"][param]:
- return False
- return True
-
- def get_mapping(self):
- mapping = {"machines": {}, "networks": {},
"virtual": False,
- "pool_name": self._pool_name}
-
- for req_label, label_map in self._net_label_mapping.iteritems():
- mapping["networks"][req_label] = label_map[0]
-
- for machine in self._machine_stack:
- m_map = mapping["machines"][machine["m_id"]] = {}
-
- m_map["target"] = machine["current_match"]
-
- hostname =
self._pool[m_map["target"]]["params"]["hostname"]
- m_map["hostname"] = hostname
-
- interfaces = m_map["interfaces"] = {}
- if_stack = machine["if_stack"]
- for interface in if_stack:
- i = interfaces[interface["if_id"]] = {}
- i["target"] = interface["current_match"]
- pool_if =
self._pool[m_map["target"]]["interfaces"][i["target"]]
- i["hwaddr"] = pool_if["params"]["hwaddr"]
-
-
- if self._virtual_matching:
- mapping["virtual"] = True
- return mapping
diff --git a/lnst/Controller/XmlParser.py b/lnst/Controller/XmlParser.py
deleted file mode 100644
index 355b5e8..0000000
--- a/lnst/Controller/XmlParser.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-This module contains the XmlParser and LnstParser classes.
-
-Copyright 2013 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-rpazdera(a)redhat.com (Radek Pazdera)
-"""
-
-import os
-import re
-import sys
-import copy
-from lxml import etree
-from urllib2 import urlopen
-from lnst.Common.Config import lnst_config
-from lnst.Controller.XmlTemplates import XmlTemplates
-from lnst.Controller.XmlProcessing import XmlProcessingError
-
-class XmlParser(object):
- XINCLUDE_RE = r"\{http\:\/\/www\.w3\.org\/[0-9]{4}\/XInclude\}include"
-
- def __init__(self, schema_file, xml_path):
- # locate the schema file
- # try git path
- dirname = os.path.dirname(sys.argv[0])
- schema_path = os.path.join(dirname, schema_file)
- if not os.path.exists(schema_path):
- # try configuration
- res_dir = lnst_config.get_option("environment",
"resource_dir")
- schema_path = os.path.join(res_dir, schema_file)
-
- if not os.path.exists(schema_path):
- raise Exception("The recipe schema file was not found. " + \
- "Your LNST installation is corrupt!")
-
- self._template_proc = XmlTemplates()
-
- self._path = xml_path
- relaxng_doc = etree.parse(schema_path)
- self._schema = etree.RelaxNG(relaxng_doc)
-
- def parse(self):
- doc = self._parse(self._path)
- self._remove_comments(doc)
-
- # Due to a weird implementation of XInclude in lxml, the
- # XmlParser resolves included documents on it's own.
- #
- # To be able to tell later on where each tag was located
- # in the XML document, we add a '__file' attribute to
- # each element of the tree during the parsing.
- #
- # However, these special attributes are of course not
- # valid according to our schemas. To solve this, a copy of
- # the tree is made and the '__file' attributes are removed
- # before validation.
- #
- # XXX This is a *EXTREMELY* dirty hack. Ideas/proposals
- # for cleaner solutions are more than welcome!
- root_tag = self._init_loc(doc.getroot(), self._path)
- self._expand_xinclude(root_tag, os.path.dirname(self._path))
-
- self._template_proc.process_aliases(root_tag)
-
- try:
- self._validate(doc)
- except:
- err = self._schema.error_log[0]
- loc = {"file": os.path.basename(err.filename),
- "line": err.line, "col": err.column}
- exc = XmlProcessingError(err.message)
- exc.set_loc(loc)
- raise exc
-
- return self._process(root_tag)
-
- def _parse(self, path):
- try:
- if path.startswith('https'):
- doc = etree.parse(urlopen(path))
- else:
- doc = etree.parse(path)
- except etree.LxmlError as err:
- # A workaround for cases when lxml (quite strangely)
- # sets the filename to <string>.
- if err.error_log[0].filename == "<string>":
- filename = self._path
- else:
- filename = err.error_log[0].filename
- loc = {"file": os.path.basename(filename),
- "line": err.error_log[0].line,
- "col": err.error_log[0].column}
- exc = XmlProcessingError(err.error_log[0].message)
- exc.set_loc(loc)
- raise exc
- except Exception as err:
- loc = {"file": os.path.basename(self._path),
- "line": None,
- "col": None}
- exc = XmlProcessingError(str(err))
- exc.set_loc(loc)
- raise exc
-
- return doc
-
- def _process(self, root_tag):
- pass
-
- def set_machines(self, machines):
- self._template_proc.set_machines(machines)
-
- def set_aliases(self, defined, overriden):
- self._template_proc.set_aliases(defined, overriden)
-
- def _has_attribute(self, element, attr):
- return attr in element.attrib
-
- def _get_attribute(self, element, attr):
- text = element.attrib[attr].strip()
- return self._template_proc.expand_functions(text)
-
- def _get_content(self, element):
- text = etree.tostring(element, method="text").strip()
- return self._template_proc.expand_functions(text)
-
- def _expand_xinclude(self, elem, base_url=""):
- for e in elem:
- if re.match(self.XINCLUDE_RE, str(e.tag)):
- href = os.path.join(base_url, e.get("href"))
- filename = os.path.basename(href)
-
- doc = self._parse(href)
- self._remove_comments(doc)
- node = doc.getroot()
-
- node = self._init_loc(node, href)
-
- if e.tail:
- node.tail = (node.tail or "") + e.tail
- self._expand_xinclude(node, os.path.dirname(href))
-
- parent = e.getparent()
- if parent is None:
- return node
-
- parent.replace(e, node)
- else:
- self._expand_xinclude(e, base_url)
- return elem
-
- def _remove_comments(self, doc):
- comments = doc.xpath('//comment()')
- for c in comments:
- p = c.getparent()
- if p is not None:
- p.remove(c)
-
- def _init_loc(self, elem, filename):
- """ Remove all coment tags from the tree """
-
- elem.attrib["__file"] = filename
- for e in elem:
- self._init_loc(e, filename)
-
- return elem
-
- def _validate(self, original):
- """
- Make a copy of the tree, remove the '__file' attributes
- and validate against the appropriate schema.
-
- Very unfortunate solution.
- """
- doc = copy.deepcopy(original)
- root = doc.getroot()
-
- self._prepare_tree_for_validation(root)
- self._schema.assertValid(doc)
-
- def _prepare_tree_for_validation(self, elem):
- if "__file" in elem.attrib:
- del elem.attrib["__file"]
- for e in elem:
- self._prepare_tree_for_validation(e)
diff --git a/lnst/Controller/XmlProcessing.py b/lnst/Controller/XmlProcessing.py
deleted file mode 100644
index b80c3a3..0000000
--- a/lnst/Controller/XmlProcessing.py
+++ /dev/null
@@ -1,235 +0,0 @@
-"""
-This module contains code code for XML parsing and processing.
-
-Copyright 2012 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-rpazdera(a)redhat.com (Radek Pazdera)
-"""
-
-import os
-
-class XmlProcessingError(Exception):
- """ Exception thrown on parsing errors """
-
- _filename = None
- _line = None
- _col = None
-
- def __init__(self, msg, obj=None):
- super(XmlProcessingError, self).__init__()
- self._msg = msg
-
- if obj is not None:
- if hasattr(obj, "loc"):
- self.set_loc(obj.loc)
- elif hasattr(obj, "attrib") and "__file" in obj.attrib:
- loc = {}
- loc["file"] = obj.attrib["__file"]
- if hasattr(obj, "sourceline"):
- loc["line"] = obj.sourceline
- self.set_loc(loc)
- elif hasattr(obj, "base") and obj.base != None:
- loc = {}
- loc["file"] = os.path.basename(obj.base)
- if hasattr(obj, "sourceline"):
- loc["line"] = obj.sourceline
- self.set_loc(loc)
-
-
- def set_loc(self, loc):
- self._filename = loc["file"]
- self._line = loc["line"]
- if "col" in loc:
- self._col = loc["col"]
-
- def __str__(self):
- line = ""
- col = ""
- sep = ""
- loc = ""
- filename = "<unknown>"
-
- if self._filename:
- filename = self._filename
-
- if self._line:
- line = "%d" % self._line
- sep = ":"
-
- if self._col:
- col = "%s%d" % (sep, self._col)
-
- if self._line or self._col:
- loc = "%s%s:" % (line, col)
-
- return "Parser error: %s:%s %s" % (filename, loc, self._msg)
-
-class XmlDataIterator:
- def __init__(self, iterator):
- self._iterator = iterator
-
- def __iter__(self):
- return self
-
- def next(self):
- n = self._iterator.next()
-
- # For normal iterators
- if type(n) == XmlTemplateString:
- return str(n)
-
- # For iteritems() iterators
- if type(n) == tuple and len(n) == 2 and type(n[1]) == XmlTemplateString:
- return (n[0], str(n[1]))
-
- return n
-
-class XmlCollection(list):
- def __init__(self, node=None):
- super(XmlCollection, self).__init__()
- if node is not None:
- if hasattr(node, "loc"):
- self.loc = node.loc
- elif "__file" in node.attrib:
- loc = {}
- loc["file"] = node.attrib["__file"]
- if hasattr(node, "sourceline"):
- loc["line"] = node.sourceline
- self.loc = loc
- elif hasattr(node, "base") and node.base != None:
- loc = {}
- loc["file"] = os.path.basename(node.base)
- if hasattr(node, "sourceline"):
- loc["line"] = node.sourceline
- self.loc = loc
-
- def __getitem__(self, key):
- value = super(XmlCollection, self).__getitem__(key)
- if type(value) == XmlData or type(value) == XmlCollection:
- return value
-
- return str(value)
-
- def __iter__(self):
- it = super(XmlCollection, self).__iter__()
- return XmlDataIterator(it)
-
- def to_list(self):
- new_list = list()
- for value in self:
- if isinstance(value, XmlData):
- new_val = value.to_dict()
- elif isinstance(value, XmlCollection):
- new_val = value.to_list()
- elif isinstance(value, XmlTemplateString):
- new_val = str(value)
- else:
- new_val = value
- new_list.append(new_val)
-
- return new_list
-
-class XmlData(dict):
- def __init__(self, node=None):
- super(XmlData, self).__init__()
- if node is not None:
- if hasattr(node, "loc"):
- self.loc = node.loc
- elif "__file" in node.attrib:
- loc = {}
- loc["file"] = node.attrib["__file"]
- if hasattr(node, "sourceline"):
- loc["line"] = node.sourceline
- self.loc = loc
- elif hasattr(node, "base") and node.base != None:
- loc = {}
- loc["file"] = os.path.basename(node.base)
- if hasattr(node, "sourceline"):
- loc["line"] = node.sourceline
- self.loc = loc
-
- def __getitem__(self, key):
- value = super(XmlData, self).__getitem__(key)
- if type(value) == XmlData or type(value) == XmlCollection\
- or value == None:
- return value
-
- return str(value)
-
- def __iter__(self):
- it = super(XmlData, self).__iter__()
- return XmlDataIterator(it)
-
- def iteritems(self):
- it = super(XmlData, self).iteritems()
- return XmlDataIterator(it)
-
- def iterkeys(self):
- it = super(XmlData, self).iterkeys()
- return XmlDataIterator(it)
-
- def itervalues(self):
- it = super(XmlData, self).itervalues()
- return XmlDataIterator(it)
-
- def to_dict(self):
- new_dict = dict()
- for key, value in self.iteritems():
- if isinstance(value, XmlData):
- new_val = value.to_dict()
- elif isinstance(value, XmlCollection):
- new_val = value.to_list()
- elif isinstance(value, XmlTemplateString):
- new_val = str(value)
- else:
- new_val = value
- new_dict[key] = new_val
-
- return new_dict
-
-class XmlTemplateString(object):
- def __init__(self, param=None, node=None):
- if type(param) == str:
- self._parts = [param]
- elif type(param) == list:
- self._parts = param
- else:
- self._parts = []
-
- if node and hasattr(node, "loc"):
- self.loc = node.loc
-
- def __add__(self, other):
- if type(other) is str:
- self.add_part(other)
- elif type(other) is self.__class__:
- self._parts += other._parts
- else:
- raise XmlProcessingError("Cannot concatenate %s and %s" % \
- str(type(self)), str(type(other)))
- return self
-
- def __str__(self):
- string = ""
- for part in self._parts:
- string += str(part)
- return string
-
- def __hash__(self):
- return hash(str(self))
-
- def __eq__(self, other):
- return str(self) == str(other)
-
- def __ne__(self, other):
- return str(self) != str(other)
-
- def __len__(self):
- return len(str(self))
-
- def add_part(self, part):
- self._parts.append(part)
diff --git a/lnst/Controller/XmlTemplates.py b/lnst/Controller/XmlTemplates.py
deleted file mode 100644
index a1541db..0000000
--- a/lnst/Controller/XmlTemplates.py
+++ /dev/null
@@ -1,438 +0,0 @@
-"""
-This module contains code to aid processing templates in XML files/recipes
-while they're being parsed.
-
-Templates are strings enclosed in curly braces {} and can be present
-in all text elements of the XML file (this includes tag values or
-attribute values). Templates cannot be used as a stubstitution for tag
-names, attribute names or any other structural elements of the document.
-
-There are two supported types of templates:
-
- * aliases - $alias_name
- * functions - function_name(param1, param2)
-
-Copyright 2012 Red Hat, Inc.
-Licensed under the GNU General Public License, version 2 as
-published by the Free Software Foundation; see COPYING for details.
-"""
-
-__author__ = """
-rpazdera(a)redhat.com (Radek Pazdera)
-"""
-
-import re
-from lxml import etree
-from lnst.Controller.XmlProcessing import XmlTemplateString
-from lnst.Controller.Machine import MachineError, PrefixMissingError
-
-class XmlTemplateError(Exception):
- pass
-
-class TemplateFunc(object):
- def __init__(self, args, machines):
- self._check_args(args)
- self._args = args
-
- self._machines = machines
-
- def __str__(self):
- return self._implementation()
-
- def _check_args(self, args):
- pass
-
- def _implementation(self):
- pass
-
-class IpFunc(TemplateFunc):
- def _check_args(self, args):
- if len(args) > 3:
- msg = "Function ip() takes at most 3 arguments, %d passed" \
- % len(args)
- raise XmlTemplateError(msg)
- if len(args) < 2:
- msg = "Function ip() must have at least 2 arguments, %d passed" \
- % len(args)
- raise XmlTemplateError(msg)
-
- if len(args) == 3:
- try:
- int(args[2])
- except ValueError:
- msg = "The third argument of ip() function must be an integer"
- raise XmlTemplateError(msg)
-
- def _implementation(self):
- m_id = self._args[0]
- if_id = self._args[1]
- addr = 0
- if len(self._args) == 3:
- addr = self._args[2]
-
- try:
- machine = self._machines[m_id]
- except KeyError:
- msg = "First parameter of function ip() is invalid: " \
- "Machine %s does not exist." % m_id
- raise XmlTemplateError(msg)
-
- try:
- iface = machine.get_interface(if_id)
- except MachineError:
- msg = "Second parameter of function ip() is invalid: "\
- "Interface %s does not exist." % if_id
- raise XmlTemplateError(msg)
-
- try:
- return iface.get_address(int(addr))
- except IndexError:
- msg = "There is no address with index %s on machine %s, " \
- "interface %s." % (addr, m_id, if_id)
- raise XmlTemplateError(msg)
-
-class DevnameFunc(TemplateFunc):
- def _check_args(self, args):
- if len(args) != 2:
- msg = "Function devname() takes 2 arguments, %d passed." %
len(args)
- raise XmlTemplateError(msg)
-
- def _implementation(self):
- m_id = self._args[0]
- if_id = self._args[1]
-
- try:
- machine = self._machines[m_id]
- except KeyError:
- msg = "First parameter of function devname() is invalid: " \
- "Machine %s does not exist." % m_id
- raise XmlTemplateError(msg)
-
- try:
- iface = machine.get_interface(if_id)
- except MachineError:
- msg = "Second parameter of function devname() is invalid: "\
- "Interface %s does not exist." % if_id
- raise XmlTemplateError(msg)
-
- try:
- return iface.get_devname()
- except MachineError:
- msg = "Devname not availablefor interface '%s' on machine
'%s'." \
- % (m_id, if_id)
- raise XmlTemplateError(msg)
-
-class PrefixFunc(TemplateFunc):
- def _check_args(self, args):
- if len(args) > 3:
- msg = "Function prefix() takes at most 3 arguments, %d passed" \
- % len(args)
- raise XmlTemplateError(msg)
- if len(args) < 2:
- msg = "Function prefix() must have at least 2 arguments, %d " \
- "passed" % len(args)
- raise XmlTemplateError(msg)
-
- if len(args) == 3:
- try:
- int(args[2])
- except ValueError:
- msg = "The third argument of prefix() function must be an " \
- "integer"
- raise XmlTemplateError(msg)
-
- def _implementation(self):
- m_id = self._args[0]
- if_id = self._args[1]
- addr = 0
- if len(self._args) == 3:
- addr = self._args[2]
-
- try:
- machine = self._machines[m_id]
- except KeyError:
- msg = "First parameter of function prefix() is invalid: " \
- "Machine %s does not exist." % m_id
- raise XmlTemplateError(msg)
-
- try:
- iface = machine.get_interface(if_id)
- except MachineError:
- msg = "Second parameter of function prefix() is invalid: "\
- "Interface %s does not exist." % if_id
- raise XmlTemplateError(msg)
-
- try:
- return iface.get_prefix(int(addr))
- except IndexError:
- msg = "There is no address with index %s on machine %s, " \
- "interface %s." % (addr, m_id, if_id)
- raise XmlTemplateError(msg)
- except PrefixMissingError:
- msg = "Address with the index %s for the interface %s on machine"
\
- "%s does not contain any prefix" % (addr, m_id, if_id)
-
-class HwaddrFunc(TemplateFunc):
- def _check_args(self, args):
- if len(args) != 2:
- msg = "Function hwaddr() takes 2 arguments, %d passed." %
len(args)
- raise XmlTemplateError(msg)
-
- def _implementation(self):
- m_id = self._args[0]
- if_id = self._args[1]
-
- try:
- machine = self._machines[m_id]
- except KeyError:
- msg = "First parameter of function hwaddr() is invalid: " \
- "Machine %s does not exist." % m_id
- raise XmlTemplateError(msg)
-
- try:
- iface = machine.get_interface(if_id)
- except MachineError:
- msg = "Second parameter of function hwaddr() is invalid: "\
- "Interface %s does not exist." % if_id
- raise XmlTemplateError(msg)
-
- try:
- return iface.get_hwaddr()
- except MachineError:
- msg = "Hwaddr not availablefor interface '%s' on machine
'%s'." \
- % (m_id, if_id)
- raise XmlTemplateError(msg)
-
-class XmlTemplates:
- """ This class serves as template processor """
-
- _alias_re = "\{\$([a-zA-Z0-9_]+)\}"
- _func_re = "\{([a-zA-Z0-9_]+)\(([^\(\)]*)\)\}"
-
- _func_map = {"ip": IpFunc, "hwaddr": HwaddrFunc,
"devname": DevnameFunc, \
- "prefix": PrefixFunc }
-
- def __init__(self, definitions=None):
- if definitions:
- self._definitions = [definitions]
- else:
- self._definitions = [{}]
-
- self._machines = {}
- self._reserved_aliases = []
-
- def set_definitions(self, defs):
- """ Set alias definitions
-
- All existing definitions and namespace levels are
- destroyed and replaced with new definitions.
- """
- del self._definitions
- self._definitions = [defs]
-
- def get_definitions(self):
- """ Return definitions dict
-
- Definitions are returned as a single dictionary of
- all currently defined aliases, regardless the internal
- division to namespace levels.
- """
- defs = {}
- for level in self._definitions:
- for name, val in level.iteritems():
- defs[name] = val
-
- return defs
-
- def set_machines(self, machines):
- """ Assign machine information
-
- XmlTemplates use these information about the machines
- to resolve template functions within the recipe.
- """
- self._machines = machines
-
- def set_aliases(self, defined, overriden):
- """ Set aliases defined or overriden from CLI """
-
- for name, value in defined.iteritems():
- self.define_alias(name, value)
-
- self._overriden_aliases = overriden
-
- def define_alias(self, name, value):
- """ Associate an alias name with some value
-
- The value can be of an atomic type or an array. The
- definition is added to the current namespace level.
- """
-
- if not name in self._reserved_aliases:
- self._definitions[-1][name] = value
- else:
- raise XmlTemplateError("Alias name '%s' is reserved" %
name)
-
- def add_namespace_level(self):
- """ Create new namespace level
-
- This method will create a new level for definitions on
- the stack. All aliases, that will be defined after this
- call will be dropped as soon as `drop_namespace_level'
- is called.
- """
- self._definitions.append({})
-
- def drop_namespace_level(self):
- """ Remove one namespace level
-
- This method will erease all defined aliases since the
- last call of `add_namespace_level' method. All aliases,
- that were defined beforehand will be kept.
- """
- self._definitions.pop()
-
- def _find_definition(self, name):
- if name in self._overriden_aliases:
- return self._overriden_aliases[name]
-
- for level in reversed(self._definitions):
- if name in level:
- return level[name]
-
- err = "Alias '%s' is not defined here" % name
- raise XmlTemplateError(err)
-
- def _dump_definitions(self):
- dump = self._overriden_aliases.copy()
-
- for level in self._definitions:
- for name in level:
- if not name in dump:
- dump[name] = level[name]
-
- return dump
-
- def process_aliases(self, element):
- """ Expand aliases within an element and its children
-
- This method will iterate through the element tree that is
- passed and expand aliases in all the text content and
- attributes.
- """
- if element.text != None:
- element.text = self.expand_aliases(element.text)
-
- if element.tail != None:
- element.tail = self.expand_aliases(element.tail)
-
- for name, value in element.attrib.iteritems():
- element.set(name, self.expand_aliases(value))
-
- if element.tag == "define":
- for alias in element.getchildren():
- name = alias.attrib["name"].strip()
- if "value" in alias.attrib:
- value = alias.attrib["value"].strip()
- else:
- value = etree.tostring(element, method="text").strip()
- self.define_alias(name, value)
- parent = element.getparent()
- parent.remove(element)
- return
-
- self.add_namespace_level()
-
- for child in element.getchildren():
- self.process_aliases(child)
-
- # do not drop alias definitions when at top-level so that python
- # tasks are able to access them
- if element.tag != "lnstrecipe":
- self.drop_namespace_level()
-
- def expand_aliases(self, string):
- while True:
- alias_match = re.search(self._alias_re, string)
-
- if alias_match:
- template = alias_match.group(0)
- result = self._process_alias_template(template)
- string = string.replace(template, result)
- else:
- break
-
- return string
-
- def _process_alias_template(self, string):
- result = None
-
- alias_match = re.match(self._alias_re, string)
- if alias_match:
- alias_name = alias_match.group(1)
- result = self._find_definition(alias_name)
-
- return result
-
- def expand_functions(self, string, node=None):
- """ Process a string and expand it into a XmlTemplateString
"""
-
- parts = self._partition_string(string)
- value = XmlTemplateString(node=node)
-
- for part in parts:
- value.add_part(part)
-
- return value
-
- def _partition_string(self, string):
- """ Process templates in a string
-
- This method will process and expand all template functions
- in a string.
-
- The function returns an array of string partitions and
- unresolved template functions for further processing.
- """
-
- result = None
-
- func_match = re.search(self._func_re, string)
- if func_match:
- prefix = string[0:func_match.start(0)]
- suffix = string[func_match.end(0):]
-
- template = func_match.group(0)
- func = self._process_func_template(template)
-
- return self._partition_string(prefix) + [func] + \
- self._partition_string(suffix)
-
- return [string]
-
- def _process_func_template(self, string):
- func_match = re.match(self._func_re, string)
- if func_match:
- func_name = func_match.group(1)
- func_args = func_match.group(2)
-
- if func_args == None:
- func_args = []
- else:
- func_args = func_args.split(",")
-
- param_values = []
- for param in func_args:
- param = param.strip()
- if re.match(self._alias_re, param):
- param = self._process_alias_template(param)
- param_values.append(param)
-
- if func_name not in self._func_map:
- msg = "Unknown template function '%s'." % func_name
- raise XmlTemplateError(msg)
-
- func = self._func_map[func_name](param_values, self._machines)
- return func
- else:
- msg = "The passed string is not a template function."
- raise XmlTemplateError(msg)
--
2.13.0