[copr] master: [backend] dispatcher: handle None value obtained from job_queue (a3fc2ff)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit a3fc2ff0b4639acfde435a8410f54037b8b669b8
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Nov 28 14:21:03 2014 +0100
[backend] dispatcher: handle None value obtained from job_queue
>---------------------------------------------------------------
backend/backend/dispatcher.py | 2 ++
backend/tests/test_dispatcher.py | 12 ++++++++++++
2 files changed, 14 insertions(+), 0 deletions(-)
diff --git a/backend/backend/dispatcher.py b/backend/backend/dispatcher.py
index 13fcb5d..ead03c9 100644
--- a/backend/backend/dispatcher.py
+++ b/backend/backend/dispatcher.py
@@ -446,6 +446,8 @@ class Worker(multiprocessing.Process):
task = self.task_queue.dequeue()
except TypeError:
return
+ if not task:
+ return
# import ipdb; ipdb.set_trace()
job = BuildJob(task.data, self.opts)
diff --git a/backend/tests/test_dispatcher.py b/backend/tests/test_dispatcher.py
index 044500b..9f15282 100644
--- a/backend/tests/test_dispatcher.py
+++ b/backend/tests/test_dispatcher.py
@@ -617,6 +617,18 @@ class TestDispatcher(object):
assert not self.worker.starting_build.called
assert not self.worker.pkg_built_before.called
+ def test_obtain_job_dequeue_none_result(self, init_worker):
+ mc_tq = MagicMock()
+ self.worker.task_queue = mc_tq
+ self.worker.starting_build = MagicMock()
+ self.worker.pkg_built_before = MagicMock()
+ self.worker.pkg_built_before.return_value = False
+
+ mc_tq.dequeue.return_value = None
+ assert self.worker.obtain_job() is None
+ assert not self.worker.starting_build.called
+ assert not self.worker.pkg_built_before.called
+
def test_obtain_job_on_starting_build(self, init_worker):
mc_tq = MagicMock()
self.worker.task_queue = mc_tq
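Note: retask's Queue.dequeue() returns None when the queue is empty (as I
read retask's API; the pre-existing "except TypeError" covers a payload that
fails to decode), so without the new guard the worker would crash with an
AttributeError on task.data. A minimal standalone sketch of the guarded
pattern, under that assumption:

    from retask.queue import Queue

    queue = Queue("copr-be-0")   # same naming scheme the backend uses
    queue.connect()              # needs a running Redis
    task = queue.dequeue()       # None when nothing is waiting
    if not task:
        pass                     # obtain_job() now returns early here
    else:
        print(task.data)         # safe: only a real Task is dereferenced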
[copr] master: [backend] split copr-be.py into more files (5c46b75)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 5c46b75de4667e6c0ea527ab9cbc97f50ea1c702
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Nov 28 14:04:52 2014 +0100
[backend] split copr-be.py into more files
>---------------------------------------------------------------
backend/backend/daemons/__init__.py | 5 +
backend/backend/daemons/backend.py | 182 +++++++++++++++++++
backend/backend/daemons/job_grab.py | 97 ++++++++++
backend/backend/daemons/log.py | 68 +++++++
backend/run/copr-be.py | 335 +----------------------------------
5 files changed, 354 insertions(+), 333 deletions(-)
diff --git a/backend/backend/daemons/__init__.py b/backend/backend/daemons/__init__.py
new file mode 100644
index 0000000..d60155d
--- /dev/null
+++ b/backend/backend/daemons/__init__.py
@@ -0,0 +1,5 @@
+# coding: utf-8
+
+from .job_grab import CoprJobGrab
+from .log import CoprLog
+from .backend import CoprBackend, run_backend
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
new file mode 100644
index 0000000..ff942c8
--- /dev/null
+++ b/backend/backend/daemons/backend.py
@@ -0,0 +1,182 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+import grp
+import multiprocessing
+import os
+import pwd
+import signal
+import sys
+import time
+from collections import defaultdict
+
+import lockfile
+import daemon
+from retask.queue import Queue
+from retask import ConnectionError
+
+from ..exceptions import CoprBackendError
+from ..dispatcher import Worker
+from ..helpers import BackendConfigReader
+from . import CoprJobGrab, CoprLog
+
+
+class CoprBackend(object):
+
+ """
+ Core process - starts/stops/initializes workers
+ """
+
+ def __init__(self, config_file=None, ext_opts=None):
+ # read in config file
+ # put all the config items into a single self.opts bunch
+
+ if not config_file:
+ raise CoprBackendError("Must specify config_file")
+
+ self.config_file = config_file
+ self.ext_opts = ext_opts # to stow our cli options for read_conf()
+ self.workers_by_group_id = defaultdict(list)
+ self.max_worker_num_by_group_id = defaultdict(int)
+
+ self.config_reader = BackendConfigReader(self.config_file, self.ext_opts)
+ self.opts = None
+ self.update_conf()
+
+ self.lock = multiprocessing.Lock()
+
+ self.task_queues = []
+ try:
+ for group in self.opts.build_groups:
+ group_id = group["id"]
+ self.task_queues.append(Queue("copr-be-{0}".format(group_id)))
+ self.task_queues[group_id].connect()
+ except ConnectionError:
+ raise CoprBackendError(
+ "Could not connect to a task queue. Is Redis running?")
+
+ # make sure there is nothing in our task queues
+ self.clean_task_queues()
+
+ self.events = multiprocessing.Queue()
+ # event format is a dict {when:time, who:[worker|logger|job|main],
+ # what:str}
+
+ # create logger
+ self._logger = CoprLog(self.opts, self.events)
+ self._logger.start()
+
+ self.event("Starting up Job Grabber")
+ # create job grabber
+ self._jobgrab = CoprJobGrab(self.opts, self.events, self.lock)
+ self._jobgrab.start()
+ self.abort = False
+
+ if not os.path.exists(self.opts.worker_logdir):
+ os.makedirs(self.opts.worker_logdir, mode=0o750)
+
+ def event(self, what):
+ self.events.put({"when": time.time(), "who": "main", "what": what})
+
+ def update_conf(self):
+ self.opts = self.config_reader.read()
+
+ def clean_task_queues(self):
+ try:
+ for queue in self.task_queues:
+ while queue.length:
+ queue.dequeue()
+ except ConnectionError:
+ raise CoprBackendError(
+ "Could not connect to a task queue. Is Redis running?")
+
+ def run(self):
+ self.abort = False
+ while not self.abort:
+ # re-read config into opts
+ self.update_conf()
+
+ for group in self.opts.build_groups:
+ group_id = group["id"]
+ self.event(
+ "# jobs in {0} queue: {1}"
+ .format(group["name"], self.task_queues[group_id].length)
+ )
+ # this handles starting/growing the number of workers
+ if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
+ self.event("Spinning up more workers")
+ for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
+ self.max_worker_num_by_group_id[group_id] += 1
+ w = Worker(
+ self.opts, self.events,
+ self.max_worker_num_by_group_id[group_id],
+ group_id, lock=self.lock
+ )
+
+ self.workers_by_group_id[group_id].append(w)
+ w.start()
+ self.event("Finished starting worker processes")
+ # FIXME - prune out workers
+ # if len(self.workers) > self.opts.num_workers:
+ # killnum = len(self.workers) - self.opts.num_workers
+ # for w in self.workers[:killnum]:
+ # insert a poison pill? Kill after something? I dunno.
+ # FIXME - if a worker bombs out - we need to check them
+ # and startup a new one if it happens
+ # check for dead workers and abort
+ preserved_workers = []
+ for w in self.workers_by_group_id[group_id]:
+ if not w.is_alive():
+ self.event("Worker {0} died unexpectedly".format(w.worker_num))
+ if self.opts.exit_on_worker:
+ raise CoprBackendError(
+ "Worker died unexpectedly, exiting")
+ else:
+ w.terminate() # kill it with a fire
+ else:
+ preserved_workers.append(w)
+ self.workers_by_group_id[group_id] = preserved_workers
+
+ time.sleep(self.opts.sleeptime)
+
+ def terminate(self):
+ """
+ Cleanup backend processes (just workers for now)
+ And also clean all task queues as they would survive copr restart
+ """
+
+ self.abort = True
+ for group in self.opts.build_groups:
+ group_id = group["id"]
+ for w in self.workers_by_group_id[group_id]:
+ self.workers_by_group_id[group_id].remove(w)
+ w.terminate()
+ self.clean_task_queues()
+
+
+def run_backend(opts):
+ try:
+ context = daemon.DaemonContext(
+ pidfile=lockfile.FileLock(opts.pidfile),
+ gid=grp.getgrnam("copr").gr_gid,
+ uid=pwd.getpwnam("copr").pw_uid,
+ detach_process=opts.daemonize,
+ umask=0o22,
+ stderr=sys.stderr,
+ signal_map={
+ signal.SIGTERM: "terminate",
+ signal.SIGHUP: "terminate",
+ },
+ )
+ with context:
+ cbe = CoprBackend(opts.config_file, ext_opts=opts)
+ cbe.run()
+ except (Exception, KeyboardInterrupt):
+ sys.stderr.write("Killing/Dying\n")
+ if "cbe" in locals():
+ cbe.terminate()
+ raise
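run_backend() wraps the whole backend in python-daemon's DaemonContext. One
detail worth noting: string values in signal_map name attributes of the
context object itself (per python-daemon's documented behaviour, to the best
of my reading), so SIGTERM and SIGHUP invoke DaemonContext.terminate(),
which raises SystemExit to unwind the "with context:" block. A minimal
sketch of just that wiring:

    import signal
    import daemon

    context = daemon.DaemonContext(
        signal_map={
            signal.SIGTERM: "terminate",  # string -> context.terminate()
            signal.SIGHUP: "terminate",
        },
    )
    # entering the context installs the handlers; a later SIGTERM then
    # raises SystemExit inside the daemonized process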
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
new file mode 100644
index 0000000..c9accd4
--- /dev/null
+++ b/backend/backend/daemons/job_grab.py
@@ -0,0 +1,97 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+import multiprocessing
+import time
+import setproctitle
+
+import requests
+from retask.task import Task
+from retask.queue import Queue
+
+from backend.actions import Action
+from backend.frontend import FrontendClient
+
+
+class CoprJobGrab(multiprocessing.Process):
+
+ """
+ Fetch jobs from the Frontend
+ - submit them to the jobs queue for workers
+ """
+
+ def __init__(self, opts, events, lock):
+ # base class initialization
+ multiprocessing.Process.__init__(self, name="jobgrab")
+
+ self.opts = opts
+ self.events = events
+ self.task_queues = []
+ for group in self.opts.build_groups:
+ self.task_queues.append(Queue("copr-be-{0}".format(group["id"])))
+ self.task_queues[group["id"]].connect()
+ self.added_jobs = []
+ self.lock = lock
+
+ def event(self, what):
+ self.events.put({"when": time.time(), "who": "jobgrab", "what": what})
+
+ def load_tasks(self):
+ try:
+ r = requests.get(
+ "{0}/waiting/".format(self.opts.frontend_url),
+ auth=("user", self.opts.frontend_auth))
+ r_json = r.json()
+
+ except requests.RequestException as e:
+ self.event("Error retrieving jobs from {0}: {1}".format(
+ self.opts.frontend_url, e))
+ return
+
+ except ValueError as e:
+ self.event("Error getting JSON build list from FE {0}"
+ .format(e))
+ return
+
+ if "builds" in r_json and r_json["builds"]:
+ self.event("{0} jobs returned".format(len(r_json["builds"])))
+ count = 0
+ for task in r_json["builds"]:
+ if "task_id" in task and task["task_id"] not in self.added_jobs:
+ # this will ignore and throw away unconfigured architectures
+ # FIXME: don't do ^
+ arch = task["chroot"].split("-")[2]
+ for group in self.opts.build_groups:
+ if arch in group["archs"]:
+ self.added_jobs.append(task["task_id"])
+ task_obj = Task(task)
+ self.task_queues[group["id"]].enqueue(task_obj)
+ count += 1
+ break
+ if count:
+ self.event("New jobs: %s" % count)
+
+ if "actions" in r_json and r_json["actions"]:
+ self.event("{0} actions returned".format(
+ len(r_json["actions"])))
+
+ for action in r_json["actions"]:
+ ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
+ frontend_callback=FrontendClient(self.opts, self.events),
+ front_url=self.opts.frontend_base_url,
+ results_root_url=self.opts.results_baseurl)
+ ao.run()
+
+ def run(self):
+ setproctitle.setproctitle("CoprJobGrab")
+ abort = False
+ try:
+ while not abort:
+ self.load_tasks()
+ time.sleep(self.opts.sleeptime)
+ except KeyboardInterrupt:
+ return
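CoprJobGrab routes each incoming build to a group by the architecture
embedded in its chroot name. A small illustration of the parsing in
load_tasks() above (the field names are from the code; the payload itself
is made up):

    task = {"task_id": 42, "chroot": "fedora-20-x86_64"}
    arch = task["chroot"].split("-")[2]   # -> "x86_64"
    # assumes exactly two dashes before the arch: "epel-7-x86_64" parses
    # fine, but a distro name containing a dash would pick the wrong token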
diff --git a/backend/backend/daemons/log.py b/backend/backend/daemons/log.py
new file mode 100644
index 0000000..f8a128c
--- /dev/null
+++ b/backend/backend/daemons/log.py
@@ -0,0 +1,68 @@
+# coding: utf-8
+
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+import logging
+import multiprocessing
+import os
+import sys
+import time
+import setproctitle
+
+
+class CoprLog(multiprocessing.Process):
+
+ """log mechanism where items from the events queue get recorded"""
+
+ def __init__(self, opts, events):
+
+ # base class initialization
+ multiprocessing.Process.__init__(self, name="logger")
+
+ self.opts = opts
+ self.events = events
+
+ logdir = os.path.dirname(self.opts.logfile)
+ if not os.path.exists(logdir):
+ os.makedirs(logdir, mode=0o750)
+
+ def setup_log_handler(self):
+ sys.stderr.write("Running setup handler {} \n".format(self.opts))
+ # setup a log file to write to
+ logging.basicConfig(filename=self.opts.logfile, level=logging.DEBUG)
+
+ self.log({"when": time.time(), "who": self.__class__.__name__, "what": "Logger iniated"})
+
+ def log(self, event):
+
+ when = time.strftime("%F %T", time.gmtime(event["when"]))
+ msg = "{0} : {1}: {2}".format(when,
+ event["who"],
+ event["what"].strip())
+ try:
+ if self.opts.verbose:
+ sys.stderr.write("{0}\n".format(msg))
+ sys.stderr.flush()
+ logging.debug(msg)
+
+ except (IOError, OSError) as e:
+
+ sys.stderr.write("Could not write to logfile {0} - {1}\n".format(
+ self.logfile, e))
+
+ # event format is a dict {when:time, who:[worker|logger|job|main],
+ # what:str}
+ def run(self):
+ setproctitle.setproctitle("CoprLog")
+ self.setup_log_handler()
+ abort = False
+ try:
+ while not abort:
+ e = self.events.get()
+ if "when" in e and "who" in e and "what" in e:
+ self.log(e)
+ except KeyboardInterrupt:
+ return
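The daemons talk over a single multiprocessing.Queue using the dict protocol
noted in the comments; CoprLog.run() silently drops anything missing one of
the three keys. (One caveat visible above: the fallback branch in log()
references self.logfile, an attribute CoprLog never sets - the configured
path lives in self.opts.logfile - so the fallback itself would raise
AttributeError.) A minimal sketch of the producer side:

    import multiprocessing
    import time

    events = multiprocessing.Queue()
    # the exact shape CoprLog.log() expects:
    events.put({"when": time.time(), "who": "main", "what": "Starting up"})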
diff --git a/backend/run/copr-be.py b/backend/run/copr-be.py
index bdca6e8..61f62b8 100755
--- a/backend/run/copr-be.py
+++ b/backend/run/copr-be.py
@@ -5,323 +5,13 @@ from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
-import ConfigParser
-import grp
-import lockfile
-import logging
-import multiprocessing
import optparse
import os
-import pwd
-import signal
import sys
-import time
-from collections import defaultdict
-import daemon
-import requests
-import setproctitle
from bunch import Bunch
-from retask.task import Task
-from retask.queue import Queue
-from retask import ConnectionError
-from backend.exceptions import CoprBackendError
-from backend.dispatcher import Worker
-from backend.actions import Action
-from backend.frontend import FrontendClient
-from backend.helpers import BackendConfigReader
-
-
-def _get_conf(cp, section, option, default, mode=None):
- """
- To make returning items from config parser less irritating
-
- :param mode: convert obtained value, possible modes:
- - None (default): do nothing
- - "bool" or "boolean"
- - "int"
- - "float"
- """
-
- if cp.has_section(section) and cp.has_option(section, option):
- if mode is None:
- return cp.get(section, option)
- elif mode in ["bool", "boolean"]:
- return cp.getboolean(section, option)
- elif mode == "int":
- return cp.getint(section, option)
- elif mode == "float":
- return cp.getfloat(section, option)
- return default
-
-
-class CoprJobGrab(multiprocessing.Process):
-
- """
- Fetch jobs from the Frontend
- - submit them to the jobs queue for workers
- """
-
- def __init__(self, opts, events, lock):
- # base class initialization
- multiprocessing.Process.__init__(self, name="jobgrab")
-
- self.opts = opts
- self.events = events
- self.task_queues = []
- for group in self.opts.build_groups:
- self.task_queues.append(Queue("copr-be-{0}".format(group["id"])))
- self.task_queues[group["id"]].connect()
- self.added_jobs = []
- self.lock = lock
-
- def event(self, what):
- self.events.put({"when": time.time(), "who": "jobgrab", "what": what})
-
- def load_tasks(self):
- try:
- r = requests.get(
- "{0}/waiting/".format(self.opts.frontend_url),
- auth=("user", self.opts.frontend_auth))
- r_json = r.json()
-
- except requests.RequestException as e:
- self.event("Error retrieving jobs from {0}: {1}".format(
- self.opts.frontend_url, e))
- return
-
- except ValueError as e:
- self.event("Error getting JSON build list from FE {0}"
- .format(e))
- return
-
- if "builds" in r_json and r_json["builds"]:
- self.event("{0} jobs returned".format(len(r_json["builds"])))
- count = 0
- for task in r_json["builds"]:
- if "task_id" in task and task["task_id"] not in self.added_jobs:
- # this will ignore and throw away unconfigured architectures
- # FIXME: don't do ^
- arch = task["chroot"].split("-")[2]
- for group in self.opts.build_groups:
- if arch in group["archs"]:
- self.added_jobs.append(task["task_id"])
- task_obj = Task(task)
- self.task_queues[group["id"]].enqueue(task_obj)
- count += 1
- break
- if count:
- self.event("New jobs: %s" % count)
-
- if "actions" in r_json and r_json["actions"]:
- self.event("{0} actions returned".format(
- len(r_json["actions"])))
-
- for action in r_json["actions"]:
- ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
- frontend_callback=FrontendClient(self.opts, self.events),
- front_url=self.opts.frontend_base_url,
- results_root_url=self.opts.results_baseurl)
- ao.run()
-
- def run(self):
- setproctitle.setproctitle("CoprJobGrab")
- abort = False
- try:
- while not abort:
- self.load_tasks()
- time.sleep(self.opts.sleeptime)
- except KeyboardInterrupt:
- return
-
-
-class CoprLog(multiprocessing.Process):
-
- """log mechanism where items from the events queue get recorded"""
-
- def __init__(self, opts, events):
-
- # base class initialization
- multiprocessing.Process.__init__(self, name="logger")
-
- self.opts = opts
- self.events = events
-
- logdir = os.path.dirname(self.opts.logfile)
- if not os.path.exists(logdir):
- os.makedirs(logdir, mode=0o750)
-
- def setup_log_handler(self):
- sys.stderr.write("Running setup handler {} \n".format(self.opts))
- # setup a log file to write to
- logging.basicConfig(filename=self.opts.logfile, level=logging.DEBUG)
-
- self.log({"when": time.time(), "who": self.__class__.__name__, "what": "Logger iniated"})
-
- def log(self, event):
-
- when = time.strftime("%F %T", time.gmtime(event["when"]))
- msg = "{0} : {1}: {2}".format(when,
- event["who"],
- event["what"].strip())
- try:
- if self.opts.verbose:
- sys.stderr.write("{0}\n".format(msg))
- sys.stderr.flush()
- logging.debug(msg)
-
- except (IOError, OSError) as e:
-
- sys.stderr.write("Could not write to logfile {0} - {1}\n".format(
- self.logfile, e))
-
- # event format is a dict {when:time, who:[worker|logger|job|main],
- # what:str}
- def run(self):
- setproctitle.setproctitle("CoprLog")
- self.setup_log_handler()
- abort = False
- try:
- while not abort:
- e = self.events.get()
- if "when" in e and "who" in e and "what" in e:
- self.log(e)
- except KeyboardInterrupt:
- return
-
-
-class CoprBackend(object):
-
- """
- Core process - starts/stops/initializes workers
- """
-
- def __init__(self, config_file=None, ext_opts=None):
- # read in config file
- # put all the config items into a single self.opts bunch
-
- if not config_file:
- raise CoprBackendError("Must specify config_file")
-
- self.config_file = config_file
- self.ext_opts = ext_opts # to stow our cli options for read_conf()
- self.workers_by_group_id = defaultdict(list)
- self.max_worker_num_by_group_id = defaultdict(int)
-
- self.config_reader = BackendConfigReader(self.config_file, self.ext_opts)
- self.opts = None
- self.update_conf()
-
- self.lock = multiprocessing.Lock()
-
- self.task_queues = []
- try:
- for group in self.opts.build_groups:
- group_id = group["id"]
- self.task_queues.append(Queue("copr-be-{0}".format(group_id)))
- self.task_queues[group_id].connect()
- except ConnectionError:
- raise CoprBackendError(
- "Could not connect to a task queue. Is Redis running?")
-
- # make sure there is nothing in our task queues
- self.clean_task_queues()
-
- self.events = multiprocessing.Queue()
- # event format is a dict {when:time, who:[worker|logger|job|main],
- # what:str}
-
- # create logger
- self._logger = CoprLog(self.opts, self.events)
- self._logger.start()
-
- self.event("Starting up Job Grabber")
- # create job grabber
- self._jobgrab = CoprJobGrab(self.opts, self.events, self.lock)
- self._jobgrab.start()
- self.abort = False
-
- if not os.path.exists(self.opts.worker_logdir):
- os.makedirs(self.opts.worker_logdir, mode=0o750)
-
- def event(self, what):
- self.events.put({"when": time.time(), "who": "main", "what": what})
-
- def update_conf(self):
- self.opts = self.config_reader.read()
-
- def clean_task_queues(self):
- try:
- for queue in self.task_queues:
- while queue.length:
- queue.dequeue()
- except ConnectionError:
- raise CoprBackendError(
- "Could not connect to a task queue. Is Redis running?")
-
- def run(self):
- self.abort = False
- while not self.abort:
- # re-read config into opts
- self.update_conf()
-
- for group in self.opts.build_groups:
- group_id = group["id"]
- self.event(
- "# jobs in {0} queue: {1}"
- .format(group["name"], self.task_queues[group_id].length)
- )
- # this handles starting/growing the number of workers
- if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
- self.event("Spinning up more workers")
- for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
- self.max_worker_num_by_group_id[group_id] += 1
- w = Worker(
- self.opts, self.events,
- self.max_worker_num_by_group_id[group_id],
- group_id, lock=self.lock
- )
-
- self.workers_by_group_id[group_id].append(w)
- w.start()
- self.event("Finished starting worker processes")
- # FIXME - prune out workers
- # if len(self.workers) > self.opts.num_workers:
- # killnum = len(self.workers) - self.opts.num_workers
- # for w in self.workers[:killnum]:
- # insert a poison pill? Kill after something? I dunno.
- # FIXME - if a worker bombs out - we need to check them
- # and startup a new one if it happens
- # check for dead workers and abort
- preserved_workers = []
- for w in self.workers_by_group_id[group_id]:
- if not w.is_alive():
- self.event("Worker {0} died unexpectedly".format(w.worker_num))
- if self.opts.exit_on_worker:
- raise CoprBackendError(
- "Worker died unexpectedly, exiting")
- else:
- w.terminate() # kill it with a fire
- else:
- preserved_workers.append(w)
- self.workers_by_group_id[group_id] = preserved_workers
-
- time.sleep(self.opts.sleeptime)
-
- def terminate(self):
- """
- Cleanup backend processes (just workers for now)
- And also clean all task queues as they would survive copr restart
- """
-
- self.abort = True
- for group in self.opts.build_groups:
- group_id = group["id"]
- for w in self.workers_by_group_id[group_id]:
- self.workers_by_group_id[group_id].remove(w)
- w.terminate()
- self.clean_task_queues()
+from backend.daemons import run_backend
def parse_args(args):
@@ -356,28 +46,7 @@ def parse_args(args):
def main(args):
opts = parse_args(args)
-
- try:
- context = daemon.DaemonContext(
- pidfile=lockfile.FileLock(opts.pidfile),
- gid=grp.getgrnam("copr").gr_gid,
- uid=pwd.getpwnam("copr").pw_uid,
- detach_process=opts.daemonize,
- umask=0o22,
- stderr=sys.stderr,
- signal_map={
- signal.SIGTERM: "terminate",
- signal.SIGHUP: "terminate",
- },
- )
- with context:
- cbe = CoprBackend(opts.config_file, ext_opts=opts)
- cbe.run()
- except (Exception, KeyboardInterrupt):
- sys.stderr.write("Killing/Dying\n")
- if "cbe" in locals():
- cbe.terminate()
- raise
+ run_backend(opts)
if __name__ == "__main__":
try:
[copr] master: [backend] unittest for frontendclient (b37a5f8)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit b37a5f84b95c9aa69ac87fd4ba2eade4e480c1af
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Nov 28 13:47:01 2014 +0100
[backend] unittest for frontendclient
>---------------------------------------------------------------
backend/backend/frontend.py | 28 +++-----
backend/tests/test_frontend.py | 136 ++++++++++++++++++++++++++++++++++++++++
2 files changed, 147 insertions(+), 17 deletions(-)
diff --git a/backend/backend/frontend.py b/backend/backend/frontend.py
index c34857a..51551f4 100644
--- a/backend/backend/frontend.py
+++ b/backend/backend/frontend.py
@@ -1,5 +1,5 @@
import json
-import requests
+from requests import post, RequestException
import time
@@ -27,13 +27,12 @@ class FrontendClient(object):
self.msg = None
try:
- response = requests.post(url, data=json.dumps(data), auth=auth,
- headers=headers)
- if response.status_code != 200:
+ response = post(url, data=json.dumps(data), auth=auth, headers=headers)
+ if response.status_code >= 400:
self.msg = "Failed to submit to frontend: {0}: {1}".format(
response.status_code, response.text)
- raise requests.RequestException(self.msg)
- except requests.RequestException as e:
+ raise RequestException(self.msg)
+ except RequestException as e:
self.msg = "Post request failed: {0}".format(e)
raise
return response
@@ -42,18 +41,13 @@ class FrontendClient(object):
"""
Make a request max_repeats-time to the frontend
"""
- repeats = 0
- while repeats <= max_repeats:
+ for i in range(max_repeats):
try:
- response = self._post_to_frontend(data, url_path)
- break
- except requests.RequestException:
-
- if repeats == max_repeats:
- raise
- repeats += 1
+ return self._post_to_frontend(data, url_path)
+ except RequestException:
time.sleep(5)
- return response
+ else:
+ raise RequestException("Failed to post to frontend for {} times".format(max_repeats))
def update(self, data):
"""
@@ -70,5 +64,5 @@ class FrontendClient(object):
data = {"build_id": build_id, "chroot": chroot_name}
response = self._post_to_frontend_repeatedly(data, "starting_build")
if "can_start" not in response.json():
- raise requests.RequestException("Bad respond from the frontend")
+ raise RequestException("Bad respond from the frontend")
return response.json()["can_start"]
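The rewritten retry helper leans on Python's for/else: because the loop body
either returns the response or swallows the exception and sleeps, the else
clause runs exactly when all attempts have failed (with no break in the loop
it is equivalent to code placed after the loop, but it documents the
intent). The rewrite also tightens the count to exactly max_repeats
attempts, where the old while loop allowed one extra. A standalone sketch of
the same pattern, with do_post as a hypothetical stand-in for
_post_to_frontend and a made-up default of 10:

    import time
    from requests import RequestException

    def post_repeatedly(do_post, max_repeats=10):
        for _ in range(max_repeats):
            try:
                return do_post()      # success: hand the response back
            except RequestException:
                time.sleep(5)         # wait, then try again
        else:
            raise RequestException(
                "Failed to post to frontend for {} times".format(max_repeats))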
diff --git a/backend/tests/test_frontend.py b/backend/tests/test_frontend.py
new file mode 100644
index 0000000..c18d1f6
--- /dev/null
+++ b/backend/tests/test_frontend.py
@@ -0,0 +1,136 @@
+# coding: utf-8
+import copy
+
+from collections import defaultdict
+import json
+import multiprocessing
+from pprint import pprint
+from bunch import Bunch
+from requests import RequestException
+from backend.exceptions import BuilderError, BuilderTimeOutError, MockRemoteError, CoprSignError
+
+import tempfile
+import shutil
+import os
+
+import six
+from backend.frontend import FrontendClient
+
+if six.PY3:
+ from unittest import mock
+ from unittest.mock import patch, MagicMock
+else:
+ import mock
+ from mock import patch, MagicMock
+
+import pytest
+
+
+@pytest.yield_fixture
+def post_req():
+ with mock.patch("backend.frontend.post") as obj:
+ yield obj
+
+
+@pytest.yield_fixture
+def mc_time():
+ with mock.patch("backend.frontend.time") as obj:
+ yield obj
+
+class TestFrontendClient(object):
+
+ def setup_method(self, method):
+ self.opts = Bunch(
+ frontend_url="http://example.com/",
+ frontend_auth="12345678",
+ )
+ self.events = multiprocessing.Queue()
+ self.fc = FrontendClient(self.opts, self.events)
+
+ self.data = {
+ "foo": "bar",
+ "bar": [1, 3, 5],
+ }
+ self.url_path = "sub_path"
+
+ self.build_id = 12345
+ self.chroot_name = "fedora-20-x86_64"
+
+ @pytest.fixture
+ def mask_post_to_fe(self):
+ self.ptf = MagicMock()
+ self.fc._post_to_frontend = self.ptf
+
+ def test_post_to_frontend(self, post_req):
+ post_req.return_value.status_code = 200
+ self.fc._post_to_frontend(self.data, self.url_path)
+
+ assert post_req.called
+
+ def test_post_to_frontend_not_200(self, post_req):
+ post_req.return_value.status_code = 501
+ with pytest.raises(RequestException):
+ self.fc._post_to_frontend(self.data, self.url_path)
+
+ assert post_req.called
+
+ def test_post_to_frontend_post_error(self, post_req):
+ post_req.side_effect = RequestException()
+ with pytest.raises(RequestException):
+ self.fc._post_to_frontend(self.data, self.url_path)
+
+ assert post_req.called
+
+ def test_post_to_frontend_repeated_first_try_ok(self, mask_post_to_fe, mc_time):
+ response = "ok\n"
+ self.ptf.return_value = response
+
+ assert self.fc._post_to_frontend_repeatedly(self.data, self.url_path) == response
+ assert not mc_time.sleep.called
+
+ def test_post_to_frontend_repeated_second_try_ok(self, mask_post_to_fe, mc_time):
+ response = "ok\n"
+ self.ptf.side_effect = [
+ RequestException(),
+ response,
+ ]
+
+ assert self.fc._post_to_frontend_repeatedly(self.data, self.url_path) == response
+ assert mc_time.sleep.called
+
+ def test_post_to_frontend_repeated_all_attempts_failed(self, mask_post_to_fe, mc_time):
+ self.ptf.side_effect = RequestException()
+
+ with pytest.raises(RequestException):
+ self.fc._post_to_frontend_repeatedly(self.data, self.url_path)
+
+ assert mc_time.sleep.called
+
+ def test_update(self):
+ ptfr = MagicMock()
+ self.fc._post_to_frontend_repeatedly = ptfr
+ self.fc.update(self.data)
+ assert ptfr.call_args == mock.call(self.data, "update")
+
+ def test_starting_build(self):
+ ptfr = MagicMock()
+ self.fc._post_to_frontend_repeatedly = ptfr
+ for val in [True, False]:
+ ptfr.return_value.json.return_value = {"can_start": val}
+
+ assert self.fc.starting_build(self.build_id, self.chroot_name) == val
+
+ def test_starting_build_err(self):
+ ptfr = MagicMock()
+ self.fc._post_to_frontend_repeatedly = ptfr
+
+ with pytest.raises(RequestException):
+ self.fc.starting_build(self.build_id, self.chroot_name)
+
+ def test_starting_build_err_2(self):
+ ptfr = MagicMock()
+ self.fc._post_to_frontend_repeatedly = ptfr
+ ptfr.return_value.json.return_value = {}
+
+ with pytest.raises(RequestException):
+ self.fc.starting_build(self.build_id, self.chroot_name)
[copr] master: [backend] renamed FrontendCallback to FrontendClient (1afb7bd)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 1afb7bd9830d976bdc373d7ba7e86ab2f307f6ec
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Fri Nov 28 12:47:25 2014 +0100
[backend] renamed FrontendCallback to FrontendClient
>---------------------------------------------------------------
backend/backend/dispatcher.py | 14 +++++++-------
backend/backend/{callback.py => frontend.py} | 4 ++--
backend/run/copr-be.py | 4 ++--
backend/tests/test_dispatcher.py | 2 --
4 files changed, 11 insertions(+), 13 deletions(-)
diff --git a/backend/backend/dispatcher.py b/backend/backend/dispatcher.py
index b17173a..13fcb5d 100644
--- a/backend/backend/dispatcher.py
+++ b/backend/backend/dispatcher.py
@@ -23,7 +23,7 @@ from .exceptions import MockRemoteError, CoprWorkerError, CoprWorkerSpawnFailErr
from .job import BuildJob
from .mockremote import MockRemote
-from .callback import FrontendCallback
+from .frontend import FrontendClient
from .constants import BuildStatus
ansible_playbook = "ansible-playbook"
@@ -81,7 +81,7 @@ class Worker(multiprocessing.Process):
self.lock = lock
self.spawn_in_advance = self.opts.spawn_in_advance
- self.frontend_callback = FrontendCallback(opts, events)
+ self.frontend_callback = FrontendClient(opts, events)
self.callback = callback
if not self.callback:
@@ -573,8 +573,8 @@ class Worker(multiprocessing.Process):
self.terminate_instance(vm_ip)
vm_ip = None
- # TODO: since spawn requires job object to create vm
- # it's possible to have spawned VM with incorrect configuration
- # disabling spawn in advance for now
- # if self.spawn_in_advance:
- # vm_ip = self.spawn_instance_with_check(job)
+ # TODO: since spawn requires job object to create vm
+ # it's possible to have spawned VM with incorrect configuration
+ # disabling spawn in advance for now
+ # if self.spawn_in_advance:
+ # vm_ip = self.spawn_instance_with_check(job)
diff --git a/backend/backend/callback.py b/backend/backend/frontend.py
similarity index 96%
rename from backend/backend/callback.py
rename to backend/backend/frontend.py
index f76f5fd..c34857a 100644
--- a/backend/backend/callback.py
+++ b/backend/backend/frontend.py
@@ -3,13 +3,13 @@ import requests
import time
-class FrontendCallback(object):
+class FrontendClient(object):
"""
Object to send data back to fronted
"""
def __init__(self, opts, events):
- super(FrontendCallback, self).__init__()
+ super(FrontendClient, self).__init__()
self.frontend_url = opts.frontend_url
self.frontend_auth = opts.frontend_auth
diff --git a/backend/run/copr-be.py b/backend/run/copr-be.py
index fe1a65c..bdca6e8 100755
--- a/backend/run/copr-be.py
+++ b/backend/run/copr-be.py
@@ -29,7 +29,7 @@ from retask import ConnectionError
from backend.exceptions import CoprBackendError
from backend.dispatcher import Worker
from backend.actions import Action
-from backend.callback import FrontendCallback
+from backend.frontend import FrontendClient
from backend.helpers import BackendConfigReader
@@ -120,7 +120,7 @@ class CoprJobGrab(multiprocessing.Process):
for action in r_json["actions"]:
ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
- frontend_callback=FrontendCallback(self.opts, self.events),
+ frontend_callback=FrontendClient(self.opts, self.events),
front_url=self.opts.frontend_base_url,
results_root_url=self.opts.results_baseurl)
ao.run()
diff --git a/backend/tests/test_dispatcher.py b/backend/tests/test_dispatcher.py
index 77bbb6d..044500b 100644
--- a/backend/tests/test_dispatcher.py
+++ b/backend/tests/test_dispatcher.py
@@ -678,8 +678,6 @@ class TestDispatcher(object):
assert self.worker.obtain_job.called
assert self.worker.terminate_instance.called
-
-
@mock.patch("backend.dispatcher.time")
def test_run_no_job(self, mc_time, init_worker):
self.worker.init_fedmsg = MagicMock()
[copr] master: [backend] refactoring/unittest dispatcher (151e19d)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 151e19d510a4df5ad061d4c81dd150f9f16c16d1
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Thu Nov 27 09:23:00 2014 +0100
[backend] refactoring/unittest dispatcher
>---------------------------------------------------------------
Diff suppressed because of size. To see it, use:
git diff --patch-with-stat --no-color --find-copies-harder --ignore-space-at-eol ^151e19d510a4df5ad061d4c81dd150f9f16c16d1~1 151e19d510a4df5ad061d4c81dd150f9f16c16d1
[copr] master: [backend] PEP8 name convention (a6329a1)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit a6329a1e98cdce7babb680cc33b47f0bbce7f9f3
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Wed Nov 26 16:07:28 2014 +0100
[backend] PEP8 name convention
>---------------------------------------------------------------
backend/backend/dispatcher.py | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/backend/backend/dispatcher.py b/backend/backend/dispatcher.py
index c267941..8beb8ab 100644
--- a/backend/backend/dispatcher.py
+++ b/backend/backend/dispatcher.py
@@ -429,7 +429,7 @@ class Worker(multiprocessing.Process):
return True
return False
- def __spawn_with_check(self, job):
+ def spawn_instance_with_check(self, job):
""" Wrapper around self.spawn_instance() with exception checking """
try:
ip = self.spawn_instance(job)
@@ -508,7 +508,7 @@ class Worker(multiprocessing.Process):
if self.create and not self.ip:
if not self.spawn_in_advance:
- ip = self.__spawn_with_check(job)
+ ip = self.spawn_instance_with_check(job)
# else we get ip from similar calling at the enf of this while-loop
else:
ip = self.ip
@@ -611,4 +611,4 @@ class Worker(multiprocessing.Process):
if self.create:
self.terminate_instance(ip)
if self.create and not self.ip and self.spawn_in_advance:
- ip = self.__spawn_with_check(job)
+ ip = self.spawn_instance_with_check(job)
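Beyond style, the rename has a practical effect: the leading double
underscore triggered Python's name mangling, so from outside the class the
method was only reachable as _Worker__spawn_with_check and awkward to mock
in tests. A small sketch of the difference:

    class Worker(object):
        def __spawn_with_check(self):      # mangled to _Worker__spawn_with_check
            return "hidden"

        def spawn_instance_with_check(self):
            return "plain"

    w = Worker()
    w._Worker__spawn_with_check()   # the only external spelling that works
    w.spawn_instance_with_check()   # ordinary attribute: easy to patch
    # w.__spawn_with_check()        # AttributeError outside the class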
[copr] master: [backend] unittests for backend/sign (d3a9455)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit d3a945542024c4d2d8b8f2167ad32a616b5699f3
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Tue Nov 25 18:18:26 2014 +0100
[backend] unittests for backend/sign
>---------------------------------------------------------------
backend/backend/sign.py | 9 +-
backend/requirements.txt | 1 +
backend/tests/test_sign.py | 296 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 303 insertions(+), 3 deletions(-)
diff --git a/backend/backend/sign.py b/backend/backend/sign.py
index 334beb9..21b6cf4 100755
--- a/backend/backend/sign.py
+++ b/backend/backend/sign.py
@@ -113,9 +113,12 @@ def sign_rpms_in_dir(username, projectname, path, opts, callback=None):
rpm_list = [
os.path.join(path, filename)
for filename in os.listdir(path)
- if filename.endswith("rpm")
+ if filename.endswith(".rpm")
]
+ if not rpm_list:
+ return
+
try:
get_pubkey(username, projectname)
except CoprSignNoKeyError:
@@ -135,8 +138,8 @@ def sign_rpms_in_dir(username, projectname, path, opts, callback=None):
errors.append((rpm, e))
if errors:
- raise MockRemoteError("Rpm sign failed, affected rpms: {}"
- .format([err[0] for err in errors]))
+ raise CoprSignError("Rpm sign failed, affected rpms: {}"
+ .format([err[0] for err in errors]))
def create_user_keys(username, projectname, opts):
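One fix above tightens the rpm detection: endswith("rpm") also matches any
filename that merely ends in those three letters, and the new test data
(note "morebadrpm" below) covers exactly that case:

    "foo.rpm".endswith("rpm")       # True
    "morebadrpm".endswith("rpm")    # True  -> was wrongly picked up before
    "morebadrpm".endswith(".rpm")   # False -> correctly skipped now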
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 253297c..9576e53 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -5,3 +5,4 @@ redis
retask
python-daemon
bunch
+IPy
diff --git a/backend/tests/test_sign.py b/backend/tests/test_sign.py
new file mode 100644
index 0000000..7271c3c
--- /dev/null
+++ b/backend/tests/test_sign.py
@@ -0,0 +1,296 @@
+import os
+
+from collections import defaultdict
+import json
+from pprint import pprint
+from _pytest.capture import capsys
+from bunch import Bunch
+import pytest
+import copy
+import tempfile
+import shutil
+
+import six
+import time
+from backend.exceptions import CoprSignError, CoprSignNoKeyError, CoprKeygenRequestError
+
+if six.PY3:
+ from unittest import mock
+ from unittest.mock import MagicMock
+else:
+ import mock
+ from mock import MagicMock
+
+from backend.sign import get_pubkey, _sign_one, sign_rpms_in_dir, create_user_keys
+
+
+STDOUT = "stdout"
+STDERR = "stderr"
+
+
+class TestSign(object):
+
+ def setup_method(self, method):
+ self.username = "foo"
+ self.projectname = "bar"
+
+ self.usermail = "foo_bar(a)copr.fedorahosted.org"
+ self.test_time = time.time()
+ self.tmp_dir_path = None
+
+ self.opts = Bunch(keygen_host="example.com")
+
+ def teardown_method(self, method):
+ if self.tmp_dir_path:
+ shutil.rmtree(self.tmp_dir_path)
+
+ @pytest.fixture
+ def tmp_dir(self):
+ subdir = "test_createrepo_{}".format(time.time())
+ self.tmp_dir_path = os.path.join(tempfile.gettempdir(), subdir)
+ os.mkdir(self.tmp_dir_path)
+
+ @pytest.fixture
+ def tmp_files(self):
+ # ! require tmp_dir created before
+ self.file_names = ["foo.rpm", "bar.rpm", "bad", "morebadrpm"]
+ for name in self.file_names:
+ path = os.path.join(self.tmp_dir_path, name)
+ with open(path, "w") as handle:
+ handle.write("1")
+
+ @mock.patch("backend.sign.Popen")
+ def test_get_pubkey(self, mc_popen):
+ mc_handle = MagicMock()
+ mc_handle.communicate.return_value = (STDOUT, STDERR)
+ mc_handle.returncode = 0
+ mc_popen.return_value = mc_handle
+
+ result = get_pubkey(self.username, self.projectname)
+ assert result == STDOUT
+ assert mc_popen.call_args[0][0] == ['sudo', '/bin/sign', '-u', self.usermail, '-p']
+
+
+ @mock.patch("backend.sign.Popen")
+ def test_get_pubkey_error(self, mc_popen):
+ mc_popen.side_effect = IOError(STDERR)
+
+ with pytest.raises(CoprSignError):
+ get_pubkey(self.username, self.projectname)
+
+
+ @mock.patch("backend.sign.Popen")
+ def test_get_pubkey_unknown_key(self, mc_popen):
+ mc_handle = MagicMock()
+ mc_handle.communicate.return_value = (STDOUT, "unknown key: foobar")
+ mc_handle.returncode = 1
+ mc_popen.return_value = mc_handle
+
+ with pytest.raises(CoprSignNoKeyError):
+ get_pubkey(self.username, self.projectname)
+
+ @mock.patch("backend.sign.Popen")
+ def test_get_pubkey_unknown_error(self, mc_popen):
+ mc_handle = MagicMock()
+ mc_handle.communicate.return_value = (STDOUT, STDERR)
+ mc_handle.returncode = 1
+ mc_popen.return_value = mc_handle
+
+ with pytest.raises(CoprSignError):
+ get_pubkey(self.username, self.projectname)
+
+ @mock.patch("backend.sign.Popen")
+ def test_get_pubkey_outfile(self, mc_popen, tmp_dir):
+ mc_handle = MagicMock()
+ mc_handle.communicate.return_value = (STDOUT, STDERR)
+ mc_handle.returncode = 0
+ mc_popen.return_value = mc_handle
+
+ outfile_path = os.path.join(self.tmp_dir_path, "out.pub")
+ assert not os.path.exists(outfile_path)
+ result = get_pubkey(self.username, self.projectname, outfile_path)
+ assert result == STDOUT
+ assert os.path.exists(outfile_path)
+ with open(outfile_path) as handle:
+ content = handle.read()
+ assert STDOUT == content
+
+ @mock.patch("backend.sign.Popen")
+ def test_sign_one(self, mc_popen):
+ mc_handle = MagicMock()
+ mc_handle.communicate.return_value = (STDOUT, STDERR)
+ mc_handle.returncode = 0
+ mc_popen.return_value = mc_handle
+
+ fake_path = "/tmp/pkg.rpm"
+ result = _sign_one(fake_path, self.usermail)
+ assert STDOUT, STDERR == result
+
+ expected_cmd = ['sudo', '/bin/sign', '-u', self.usermail, '-r', fake_path]
+ assert mc_popen.call_args[0][0] == expected_cmd
+
+ @mock.patch("backend.sign.Popen")
+ def test_sign_one_popen_error(self, mc_popen):
+ mc_popen.side_effect = IOError()
+
+ fake_path = "/tmp/pkg.rpm"
+ with pytest.raises(CoprSignError):
+ _sign_one(fake_path, self.usermail)
+
+ mc_cb = MagicMock()
+
+ with pytest.raises(CoprSignError):
+ _sign_one(fake_path, self.usermail, mc_cb)
+ assert isinstance(mc_cb.error.call_args[0][0], CoprSignError)
+
+ @mock.patch("backend.sign.Popen")
+ def test_sign_one_cmd_erro(self, mc_popen):
+ mc_handle = MagicMock()
+ mc_handle.communicate.return_value = (STDOUT, STDERR)
+ mc_handle.returncode = 1
+ mc_popen.return_value = mc_handle
+
+
+ fake_path = "/tmp/pkg.rpm"
+ with pytest.raises(CoprSignError):
+ _sign_one(fake_path, self.usermail)
+
+ mc_cb = MagicMock()
+
+ with pytest.raises(CoprSignError):
+ _sign_one(fake_path, self.usermail, mc_cb)
+ assert isinstance(mc_cb.error.call_args[0][0], CoprSignError)
+
+ @mock.patch("backend.sign.request")
+ def test_create_user_keys(self, mc_request):
+ mc_request.return_value.status_code = 200
+ create_user_keys(self.username, self.projectname, self.opts)
+
+ assert mc_request.called
+ expected_call = mock.call(
+ url="http://example.com/gen_key",
+ data='{"name_real": "foo_bar", "name_email": "foo_bar(a)copr.fedorahosted.org"}',
+ method="post"
+ )
+ assert mc_request.call_args == expected_call
+
+ @mock.patch("backend.sign.request")
+ def test_create_user_keys_error_1(self, mc_request):
+ mc_request.side_effect = IOError()
+ with pytest.raises(CoprKeygenRequestError):
+ create_user_keys(self.username, self.projectname, self.opts)
+
+
+ @mock.patch("backend.sign.request")
+ def test_create_user_keys(self, mc_request):
+ for code in [400, 401, 404, 500, 599]:
+ mc_request.return_value.status_code = code
+
+ with pytest.raises(CoprKeygenRequestError):
+ create_user_keys(self.username, self.projectname, self.opts)
+
+
+ @mock.patch("backend.sign._sign_one")
+ @mock.patch("backend.sign.create_user_keys")
+ @mock.patch("backend.sign.get_pubkey")
+ def test_sign_rpms_id_dir_nothing(self, mc_gp, mc_cuk, mc_so,
+ tmp_dir):
+ # empty target dir doesn't produce error
+ sign_rpms_in_dir(self.username, self.projectname,
+ self.tmp_dir_path, self.opts)
+
+ assert not mc_gp.called
+ assert not mc_cuk.called
+ assert not mc_so.called
+
+ @mock.patch("backend.sign._sign_one")
+ @mock.patch("backend.sign.create_user_keys")
+ @mock.patch("backend.sign.get_pubkey")
+ def test_sign_rpms_id_dir_ok(self, mc_gp, mc_cuk, mc_so,
+ tmp_dir, tmp_files):
+ mc_cb = MagicMock()
+ sign_rpms_in_dir(self.username, self.projectname,
+ self.tmp_dir_path, self.opts, callback=mc_cb)
+
+ assert mc_gp.called
+ assert not mc_cuk.called
+ assert mc_so.called
+
+ pathes = [call[0][0] for call in mc_so.call_args_list]
+ count = 0
+ for name in self.file_names:
+ if name.endswith(".rpm"):
+ count += 1
+ assert os.path.join(self.tmp_dir_path, name) in pathes
+ assert len(pathes) == count
+
+ @mock.patch("backend.sign._sign_one")
+ @mock.patch("backend.sign.create_user_keys")
+ @mock.patch("backend.sign.get_pubkey")
+ def test_sign_rpms_id_dir_error_on_pubkey(
+ self, mc_gp, mc_cuk, mc_so, tmp_dir, tmp_files):
+
+ mc_gp.side_effect = CoprSignError("foobar")
+ mc_cb = MagicMock()
+ with pytest.raises(CoprSignError):
+ sign_rpms_in_dir(self.username, self.projectname,
+ self.tmp_dir_path, self.opts, callback=mc_cb)
+
+ assert mc_gp.called
+ assert not mc_cuk.called
+ assert not mc_so.called
+
+ @mock.patch("backend.sign._sign_one")
+ @mock.patch("backend.sign.create_user_keys")
+ @mock.patch("backend.sign.get_pubkey")
+ def test_sign_rpms_id_dir_no_pub_key(
+ self, mc_gp, mc_cuk, mc_so, tmp_dir, tmp_files):
+
+ mc_gp.side_effect = CoprSignNoKeyError("foobar")
+ mc_cb = MagicMock()
+
+ sign_rpms_in_dir(self.username, self.projectname,
+ self.tmp_dir_path, self.opts, callback=mc_cb)
+
+ assert mc_gp.called
+ assert mc_cuk.called
+ assert mc_so.called
+
+ @mock.patch("backend.sign._sign_one")
+ @mock.patch("backend.sign.create_user_keys")
+ @mock.patch("backend.sign.get_pubkey")
+ def test_sign_rpms_id_dir_sign_error_one(
+ self, mc_gp, mc_cuk, mc_so, tmp_dir, tmp_files):
+
+ mc_cb = MagicMock()
+
+ mc_so.side_effect = [
+ None, CoprSignError("foobar"), None
+ ]
+ with pytest.raises(CoprSignError):
+ sign_rpms_in_dir(self.username, self.projectname,
+ self.tmp_dir_path, self.opts, callback=mc_cb)
+
+ assert mc_gp.called
+ assert not mc_cuk.called
+
+ assert mc_so.called
+
+ @mock.patch("backend.sign._sign_one")
+ @mock.patch("backend.sign.create_user_keys")
+ @mock.patch("backend.sign.get_pubkey")
+ def test_sign_rpms_id_dir_sign_error_all(
+ self, mc_gp, mc_cuk, mc_so, tmp_dir, tmp_files):
+
+ mc_cb = MagicMock()
+
+ mc_so.side_effect = CoprSignError("foobar")
+ with pytest.raises(CoprSignError):
+ sign_rpms_in_dir(self.username, self.projectname,
+ self.tmp_dir_path, self.opts, callback=mc_cb)
+
+ assert mc_gp.called
+ assert not mc_cuk.called
+
+ assert mc_so.called
+
[copr] master: [backend] backend.sign: discover `keygen_host` from backend config file (853f547)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 853f547326eee205a11c0b567a1d81b285051f5a
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Tue Nov 25 19:13:50 2014 +0100
[backend] backend.sign: discover `keygen_host` from backend config file
>---------------------------------------------------------------
backend/backend/helpers.py | 3 +++
backend/backend/mockremote/__init__.py | 3 ++-
backend/backend/sign.py | 24 ++++++++++++++++--------
backend/conf/copr-be.conf.example | 6 +++++-
backend/conf/copr-be.local.conf | 3 ++-
5 files changed, 28 insertions(+), 11 deletions(-)
diff --git a/backend/backend/helpers.py b/backend/backend/helpers.py
index 116bb54..a862db2 100644
--- a/backend/backend/helpers.py
+++ b/backend/backend/helpers.py
@@ -105,6 +105,9 @@ class BackendConfigReader(object):
opts.do_sign = _get_conf(
cp, "backend", "do_sign", False, mode="bool")
+ opts.keygen_host = _get_conf(
+ cp, "backend", "keygen_host", "copr-keygen.cloud.fedoraproject.org")
+
opts.build_user = _get_conf(
cp, "backend", "build_user", DEF_BUILD_USER)
diff --git a/backend/backend/mockremote/__init__.py b/backend/backend/mockremote/__init__.py
index d66c058..61c4233 100755
--- a/backend/backend/mockremote/__init__.py
+++ b/backend/backend/mockremote/__init__.py
@@ -211,7 +211,8 @@ class MockRemote(object):
sign_rpms_in_dir(self.job.project_owner,
self.job.project_name,
get_target_dir(self.chroot_dir, self.pkg),
- callback=self.callback)
+ opts=self.opts,
+ callback=self.callback,)
except Exception as e:
self.callback.error(
"failed to sign packages "
diff --git a/backend/backend/sign.py b/backend/backend/sign.py
index 5049988..334beb9 100755
--- a/backend/backend/sign.py
+++ b/backend/backend/sign.py
@@ -19,10 +19,6 @@ from .exceptions import CoprSignError, CoprSignNoKeyError, \
SIGN_BINARY = "/bin/sign"
DOMAIN = "fedorahosted.org"
-# TODO: discover from config
-# COPR_KEYGEN_URL = "http://127.0.0.1:3872/gen_key"
-COPR_KEYGEN_URL = "http://209.132.184.124/gen_key"
-
def create_gpg_email(username, projectname):
"""
@@ -97,7 +93,7 @@ def _sign_one(path, email, callback=None):
return stdout, stderr
-def sign_rpms_in_dir(username, projectname, path, callback=None):
+def sign_rpms_in_dir(username, projectname, path, opts, callback=None):
"""
Signs rpms using obs-signd.
@@ -105,9 +101,11 @@ def sign_rpms_in_dir(username, projectname, path, callback=None):
but we continue to try sign other pkgs.
+
:param username: copr username
:param projectname: copr projectname
:param path: directory with rpms to be signed
+ :param Bunch opts: backend config
:param .mockremote.DefaultCallBack callback: object to log progress,
two methods are utilised: ``log`` and ``error``
@@ -121,7 +119,7 @@ def sign_rpms_in_dir(username, projectname, path, callback=None):
try:
get_pubkey(username, projectname)
except CoprSignNoKeyError:
- create_user_keys(username, projectname)
+ create_user_keys(username, projectname, opts)
errors = [] # tuples (rpm_filepath, exception)
for rpm in rpm_list:
@@ -141,13 +139,23 @@ def sign_rpms_in_dir(username, projectname, path, callback=None):
.format([err[0] for err in errors]))
-def create_user_keys(username, projectname):
+def create_user_keys(username, projectname, opts):
+ """
+ Generate a new key-pair at sign host
+
+ :param username:
+ :param projectname:
+ :param opts: backend config
+
+ :return: None
+ """
data = json.dumps({
"name_real": "{}_{}".format(username, projectname),
"name_email": create_gpg_email(username, projectname)
})
- query = dict(url=COPR_KEYGEN_URL, data=data, method="post")
+ keygen_url = "http://{}/gen_key".format(opts.keygen_host)
+ query = dict(url=keygen_url, data=data, method="post")
try:
response = request(**query)
except Exception as e:
diff --git a/backend/conf/copr-be.conf.example b/backend/conf/copr-be.conf.example
index 69bcf7b..81b8333 100644
--- a/backend/conf/copr-be.conf.example
+++ b/backend/conf/copr-be.conf.example
@@ -94,7 +94,11 @@ worker_logdir=/var/log/copr/workers/
# enable package signing, require configured
# signer host and correct /etc/sign.conf
-#do_sign=false
+# do_sign=false
+
+# host or ip of machine with copr-keygen
+# usually the same as in /etc/sign.conf
+# keygen_host=example.com
# minimum age for builds to be pruned
prune_days=14
diff --git a/backend/conf/copr-be.local.conf b/backend/conf/copr-be.local.conf
index 76df89c..f59fb71 100644
--- a/backend/conf/copr-be.local.conf
+++ b/backend/conf/copr-be.local.conf
@@ -94,7 +94,8 @@ worker_logdir=/tmp/log/copr/workers/
# enable package signing, require configured
# signer host and correct /etc/sign.conf
-#do_sign=false
+# do_sign=false
+# keygen_host=example.com
verbose=true
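With this change the keygen endpoint is derived from the backend config
rather than a hard-coded IP, defaulting to
copr-keygen.cloud.fedoraproject.org when unset. A hypothetical copr-be.conf
fragment enabling signing against a local keygen box (hostname invented):

    [backend]
    do_sign=true
    keygen_host=keygen.example.com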
[copr] master: Fix mismatch between documentation and actual API in new build (c1d6550)
by Miroslav Suchý
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit c1d6550daa0ea98be895b7f2b6010a25653957b8
Author: Michael Simacek <msimacek(a)redhat.com>
Date: Tue Nov 25 17:17:27 2014 +0100
Fix mismatch between documentation and actual API in new build
Current implementation returns multiple ids, not just one
>---------------------------------------------------------------
frontend/coprs_frontend/coprs/templates/api.html | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/frontend/coprs_frontend/coprs/templates/api.html b/frontend/coprs_frontend/coprs/templates/api.html
index 2157aa0..a38c389 100644
--- a/frontend/coprs_frontend/coprs/templates/api.html
+++ b/frontend/coprs_frontend/coprs/templates/api.html
@@ -277,7 +277,7 @@ copr_url = {{ ('https://' + config['PUBLIC_COPR_HOSTNAME'])| fix_url_https_front
{
"output": "ok",
"message": "Build was added to log4j.",
- "id": 5
+ "ids": [5]
}
</pre>
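Client code should therefore treat the field as a list. A short sketch of
consuming the corrected response shape (the JSON literal is the documented
example above):

    import json

    payload = '{"output": "ok", "message": "Build was added to log4j.", "ids": [5]}'
    response = json.loads(payload)
    for build_id in response["ids"]:
        print("submitted build {0}".format(build_id))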