[copr] master: [backend] test coverage for backend.daemons.backend (fe1ad61)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit fe1ad61776590ffb0cd7892dde8dfc20367f16b8
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Mon Dec 8 19:34:30 2014 +0100
[backend] test coverage for backend.daemons.backend
>---------------------------------------------------------------
backend/backend/daemons/backend.py | 44 ++---
backend/tests/deamons/test_backend.py | 348 +++++++++++++++++++++++++++++++++
2 files changed, 369 insertions(+), 23 deletions(-)
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
index 3c73794..264dccb 100644
--- a/backend/backend/daemons/backend.py
+++ b/backend/backend/daemons/backend.py
@@ -15,7 +15,7 @@ import time
from collections import defaultdict
import lockfile
-import daemon
+from daemon import DaemonContext
from retask.queue import Queue
from retask import ConnectionError
@@ -60,7 +60,6 @@ class CoprBackend(object):
# what:str}
self.abort = False
-
if not os.path.exists(self.opts.worker_logdir):
os.makedirs(self.opts.worker_logdir, mode=0o750)
@@ -144,7 +143,7 @@ class CoprBackend(object):
self.workers_by_group_id[group_id].append(w)
w.start()
- def prune_dead_workers_by_group(self, group):
+ def prune_dead_workers_by_group_id(self, group_id):
""" Removes dead workers from the pool
:return list: alive workers
@@ -153,20 +152,32 @@ class CoprBackend(object):
:py:class:`~backend.exceptions.CoprBackendError` when got dead worker and
option "exit_on_worker" is enabled
"""
- group_id = group["id"]
preserved_workers = []
for w in self.workers_by_group_id[group_id]:
if not w.is_alive():
self.event("Worker {0} died unexpectedly".format(w.worker_num))
+ w.terminate() # kill it with a fire
if self.opts.exit_on_worker:
raise CoprBackendError(
"Worker died unexpectedly, exiting")
- else:
- w.terminate() # kill it with a fire
else:
preserved_workers.append(w)
return preserved_workers
+ def terminate(self):
+ """
+ Cleanup backend processes (just workers for now)
+ And also clean all task queues as they would survive copr restart
+ """
+
+ self.abort = True
+ for group in self.opts.build_groups:
+ group_id = group["id"]
+ for w in self.workers_by_group_id[group_id][:]:
+ self.workers_by_group_id[group_id].remove(w)
+ w.terminate()
+ self.clean_task_queues()
+
def run(self):
"""
Starts backend process. Control sub process start/stop.
@@ -194,25 +205,11 @@ class CoprBackend(object):
# FIXME - if a worker bombs out - we need to check them
# and startup a new one if it happens
# check for dead workers and abort
- preserved_workers = self.prune_dead_workers_by_group(group)
+ preserved_workers = self.prune_dead_workers_by_group_id(group_id)
self.workers_by_group_id[group_id] = preserved_workers
time.sleep(self.opts.sleeptime)
- def terminate(self):
- """
- Cleanup backend processes (just workers for now)
- And also clean all task queues as they would survive copr restart
- """
-
- self.abort = True
- for group in self.opts.build_groups:
- group_id = group["id"]
- for w in self.workers_by_group_id[group_id]:
- self.workers_by_group_id[group_id].remove(w)
- w.terminate()
- self.clean_task_queues()
-
def run_backend(opts):
"""
@@ -226,8 +223,9 @@ def run_backend(opts):
- `pidfile` - path to the backend pidfile
"""
+ cbe = None
try:
- context = daemon.DaemonContext(
+ context = DaemonContext(
pidfile=lockfile.FileLock(opts.pidfile),
gid=grp.getgrnam("copr").gr_gid,
uid=pwd.getpwnam("copr").pw_uid,
@@ -244,6 +242,6 @@ def run_backend(opts):
cbe.run()
except (Exception, KeyboardInterrupt):
sys.stderr.write("Killing/Dying\n")
- if "cbe" in locals():
+ if cbe is not None:
cbe.terminate()
raise
diff --git a/backend/tests/deamons/test_backend.py b/backend/tests/deamons/test_backend.py
index e69de29..4fb81b2 100644
--- a/backend/tests/deamons/test_backend.py
+++ b/backend/tests/deamons/test_backend.py
@@ -0,0 +1,348 @@
+import os
+import tempfile
+import shutil
+import time
+
+from bunch import Bunch
+import pytest
+import retask
+from retask import ConnectionError
+import six
+import sys
+
+from backend.daemons import CoprBackend, run_backend
+from backend.exceptions import CoprBackendError
+
+if six.PY3:
+ from unittest import mock
+ from unittest.mock import MagicMock
+else:
+ import mock
+ from mock import MagicMock
+
+
+STDOUT = "stdout"
+STDERR = "stderr"
+COPR_OWNER = "copr_owner"
+COPR_NAME = "copr_name"
+COPR_VENDOR = "vendor"
+
+
+@pytest.yield_fixture
+def mc_rt_queue():
+ with mock.patch("backend.daemons.backend.Queue") as mc_queue:
+ yield mc_queue
+
+@pytest.yield_fixture
+def mc_worker():
+ with mock.patch("backend.daemons.backend.Worker") as worker:
+ yield worker
+
+@pytest.yield_fixture
+def mc_time():
+ with mock.patch("backend.daemons.backend.time") as time_:
+ yield time_
+
+@pytest.yield_fixture
+def mc_be():
+ with mock.patch("backend.daemons.backend.CoprBackend") as obj:
+ yield obj
+
+@pytest.yield_fixture
+def mc_daemon_context():
+ with mock.patch("backend.daemons.backend.DaemonContext") as obj:
+ yield obj
+
+
+class TestBackend(object):
+
+ def setup_method(self, method):
+ self.test_time = time.time()
+ subdir = "test_createrepo_{}".format(time.time())
+ self.tmp_dir_path = os.path.join(tempfile.gettempdir(), subdir)
+ os.mkdir(self.tmp_dir_path)
+
+ self.bc_patcher = mock.patch("backend.daemons.backend.BackendConfigReader")
+ self.bc = self.bc_patcher.start()
+
+ self.mp_patcher = mock.patch("backend.daemons.backend.multiprocessing")
+ self.mc_mp = self.mp_patcher.start()
+
+ self.worker_logdir = os.path.join(self.tmp_dir_path, "workers")
+ self.config_file = "/dev/null/copr.conf"
+ self.ext_opts = {}
+
+ # effective config options
+ self.bc_obj = MagicMock()
+
+ self.opts = Bunch(
+ worker_logdir=self.worker_logdir,
+ build_groups=[
+ {
+ "id": 0,
+ "name": "PC",
+ "archs": ["i386", "x86_64"],
+ "max_workers": 2
+ },
+ {
+ "id": 1,
+ "name": "ARM",
+ "archs": ["armv7"],
+ "max_workers": 3
+ },
+ ],
+ exit_on_worker=False,
+ sleeptime=1,
+
+ )
+ self.bc_obj.read.return_value = self.opts
+ self.bc.return_value = self.bc_obj
+
+ # for run backend
+ self.pidfile_path = os.path.join(self.tmp_dir_path, "backend.pid")
+ self.grp_patcher = mock.patch("backend.daemons.backend.grp")
+ self.pwd_patcher = mock.patch("backend.daemons.backend.pwd")
+ self.grp = self.grp_patcher.start()
+ self.pwd = self.pwd_patcher.start()
+
+ self.run_opts = Bunch(
+ daemonize=True,
+ pidfile=self.pidfile_path,
+ config_file=self.config_file,
+ )
+
+
+ @pytest.fixture
+ def init_be(self):
+ self.be = CoprBackend(self.config_file, self.ext_opts)
+
+ def teardown_method(self, method):
+ # print("\nremove: {}".format(self.tmp_dir_path))
+ shutil.rmtree(self.tmp_dir_path)
+ self.bc_patcher.stop()
+ self.grp_patcher.stop()
+ self.pwd_patcher.stop()
+
+ def test_constructor_no_config(self):
+ with pytest.raises(CoprBackendError):
+ self.be = CoprBackend(None, self.ext_opts)
+
+ def test_constructor(self):
+
+ assert not os.path.exists(self.worker_logdir)
+ self.init_be()
+ assert os.path.exists(self.worker_logdir)
+ # import ipdb; ipdb.set_trace()
+
+ assert self.be.config_reader == self.bc_obj
+ assert self.bc_obj.read.called
+
+ def test_clean_task_queue_error(self, init_be):
+ mc_queue = MagicMock(length=1)
+ mc_queue.dequeue.side_effect = retask.ConnectionError()
+ self.be.task_queues[0] = mc_queue
+
+ with pytest.raises(CoprBackendError):
+ self.be.clean_task_queues()
+
+ def test_clean_task_queue_ok(self, init_be):
+ mc_queue = MagicMock(length=5)
+ def decr():
+ mc_queue.length -= 1
+
+ mc_queue.dequeue.side_effect = decr
+ self.be.task_queues[0] = mc_queue
+ self.be.clean_task_queues()
+
+ assert len(mc_queue.dequeue.call_args_list) == 5
+
+ def test_init_task_queues(self, mc_rt_queue, init_be):
+
+ mc_rt_queue.side_effect = lambda name: MagicMock(name=name)
+ self.be.clean_task_queues = MagicMock()
+ self.be.init_task_queues()
+
+ assert mc_rt_queue.call_args_list == \
+ [mock.call("copr-be-0"), mock.call("copr-be-1")]
+ assert self.be.task_queues[0].connect.called
+ assert self.be.task_queues[1].connect.called
+
+ def test_init_task_queues_error(self, mc_rt_queue, init_be):
+
+ mc_rt_queue.return_value.connect.side_effect = ConnectionError()
+ self.be.clean_task_queues = MagicMock()
+
+ with pytest.raises(CoprBackendError):
+ self.be.init_task_queues()
+
+ @mock.patch("backend.daemons.backend.CoprBackendLog")
+ @mock.patch("backend.daemons.backend.CoprJobGrab")
+ def test_init_sub_process(self, mc_jobgrab, mc_logger, init_be):
+
+ self.be.init_sub_process()
+ assert mc_logger.called
+ assert mc_logger.call_args == mock.call(self.be.opts, self.be.events)
+ assert mc_logger.return_value.start.called
+ assert mc_jobgrab.called
+ assert mc_jobgrab.call_args == mock.call(self.be.opts, self.be.events, self.be.lock)
+ assert mc_jobgrab.return_value.start.called
+
+ def test_event(self, mc_time, init_be):
+ mc_time.time.return_value = self.test_time
+
+ self.be.events = MagicMock()
+ self.be.event("foobar")
+
+ self.be.events.put.call_args == mock.call({
+ "what": "foobar", "when": self.test_time, "who": "main"
+ })
+
+ def test_update_conf(self, init_be):
+ test_obj = MagicMock()
+ self.bc_obj.read.return_value = test_obj
+
+ self.be.update_conf()
+ assert self.bc_obj.read.called
+ assert self.be.opts == test_obj
+
+ def test_spin_up_workers_by_group(self, mc_worker, init_be):
+ worker = MagicMock()
+ mc_worker.return_value = worker
+
+ group = self.opts.build_groups[0]
+ self.be.spin_up_workers_by_group(group)
+
+ assert mc_worker.called
+ assert len(mc_worker.call_args_list) == group["max_workers"]
+ assert worker.start.called
+ assert len(worker.start.call_args_list) == group["max_workers"]
+ assert len(self.be.workers_by_group_id[0]) == group["max_workers"]
+
+ def test_spin_up_workers_by_group_partial(self, mc_worker, init_be):
+ worker = MagicMock()
+ mc_worker.return_value = worker
+
+ group = self.opts.build_groups[1]
+
+ self.be.workers_by_group_id[1].append(worker)
+ self.be.spin_up_workers_by_group(group)
+
+ assert mc_worker.called
+ assert len(mc_worker.call_args_list) == group["max_workers"] - 1
+ assert worker.start.called
+ assert len(worker.start.call_args_list) == group["max_workers"] - 1
+ assert len(self.be.workers_by_group_id[1]) == group["max_workers"]
+
+ def test_prune_dead_workers_by_group(self, init_be):
+ worker_alive = MagicMock()
+ worker_alive.is_alive.return_value = True
+ worker_dead = MagicMock()
+ worker_dead.is_alive.return_value = False
+
+
+ self.be.workers_by_group_id[0].append(worker_alive)
+ self.be.workers_by_group_id[0].append(worker_dead)
+
+ self.be.prune_dead_workers_by_group_id(0)
+
+ assert len(self.be.workers_by_group_id) == 1
+ assert worker_dead.terminate.called
+ assert not worker_alive.terminate.called
+
+ def test_prune_dead_workers_by_group_terminate(self, init_be):
+ worker_alive = MagicMock()
+ worker_alive.is_alive.return_value = True
+ worker_dead = MagicMock()
+ worker_dead.is_alive.return_value = False
+
+ self.be.workers_by_group_id[0].append(worker_alive)
+ self.be.workers_by_group_id[0].append(worker_dead)
+
+ self.be.opts.exit_on_worker = True
+
+ with pytest.raises(CoprBackendError):
+ self.be.prune_dead_workers_by_group_id(0)
+
+ assert len(self.be.workers_by_group_id) == 1
+ assert worker_dead.terminate.called
+ assert not worker_alive.terminate.called
+
+ def test_terminate(self, init_be):
+ worker_alive = MagicMock()
+ worker_alive.is_alive.return_value = True
+ worker_dead = MagicMock()
+ worker_dead.is_alive.return_value = False
+
+ self.be.workers_by_group_id[0].append(worker_alive)
+ self.be.workers_by_group_id[0].append(worker_dead)
+
+ self.be.clean_task_queues = MagicMock()
+
+ self.be.terminate()
+
+ assert self.be.clean_task_queues.called
+ assert self.be.abort
+ assert worker_alive.terminate.called
+ assert worker_dead.terminate.called
+
+ def test_run(self, mc_time, mc_rt_queue, init_be):
+ worker_alive = MagicMock()
+ worker_alive.is_alive.return_value = True
+ worker_dead = MagicMock()
+ worker_dead.is_alive.return_value = False
+
+ self.be.clean_task_queues = MagicMock()
+ self.be.init_sub_process = MagicMock()
+ # self.be.init_task_queues = MagicMock()
+ self.be.update_conf = MagicMock()
+ self.be.spin_up_workers_by_group = MagicMock()
+
+ def spin_up():
+ self.be.workers_by_group_id[0].append(worker_alive)
+ self.be.workers_by_group_id[0].append(worker_dead)
+ self.be.workers_by_group_id[1].append(worker_alive)
+ self.be.workers_by_group_id[1].append(worker_dead)
+
+ self.be.spin_up_workers_by_group = MagicMock()
+ self.be.spin_up_workers_by_group.side_effect = lambda foo: spin_up()
+ mc_time.sleep.side_effect = lambda foo: self.be.terminate()
+
+ self.be.run()
+ assert self.be.spin_up_workers_by_group.call_args_list == [
+ mock.call(self.opts.build_groups[0]),
+ mock.call(self.opts.build_groups[1]),
+ ]
+ assert self.be.update_conf.called
+ assert self.be.abort
+ assert not self.be.workers_by_group_id[0]
+ assert not self.be.workers_by_group_id[1]
+
+ def test_run_backend_basic(self, mc_be, mc_daemon_context):
+ self.grp.getgrnam.return_value.gr_gid = 7
+ self.pwd.getpwnam.return_value.pw_uid = 9
+
+ run_backend(self.run_opts)
+
+ print()
+ ddc = mc_daemon_context.call_args[1]
+
+ assert ddc["signal_map"] == {1: u'terminate', 15: u'terminate'}
+ assert ddc["umask"] == 0o22
+ assert ddc["gid"] == 7
+ assert ddc["uid"] == 9
+ assert ddc["stderr"] == sys.stderr
+
+ assert mc_be.called
+ expected_call = mock.call(self.config_file, ext_opts=self.run_opts)
+ assert mc_be.call_args == expected_call
+ # umask=18,
+ # gid=7, stderr=<open file '<stderr>', mode 'w' at 0x7f6b7902c1e0>, detach_process=True, pidfile=<LinkLockFile: '/tmp/test_createrepo_1418062487.37/heaven-79051740.3723-4115117572267162726' -- '/tmp/test_createrepo_1418062487.37/backend.pid'>, uid=<MagicMock name='pwd.getpwnam().pw_uid' id='56906832'>)
+
+ def test_run_backend_keyboard_interrupt(self, mc_be, mc_daemon_context, capsys):
+ mc_be.return_value.run.side_effect = KeyboardInterrupt()
+
+ with pytest.raises(KeyboardInterrupt):
+ run_backend(self.run_opts)
+
+ stdout, stderr = capsys.readouterr()
+ assert "Killing/Dying" in stderr
9 years, 5 months
[copr] master: [backend] minor changes in sphinx docs; more docstrings (66bb2ae)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 66bb2ae10a956525cd686270c1c8fdf63fa8fdd4
Author: Valentin Gologuzov <vgologuz@redhat.com>
Date: Mon Dec 8 11:17:14 2014 +0100
[backend] minor changes in sphinx docs; more docstrings
>---------------------------------------------------------------
backend/backend/actions.py | 5 +-
backend/backend/daemons/backend.py | 85 ++++++++++++------
backend/backend/daemons/dispatcher.py | 91 ++++++++++++++++----
backend/backend/daemons/job_grab.py | 59 ++++++++++---
backend/backend/daemons/log.py | 14 ++--
backend/docs/source/Autodoc.rst | 2 +-
backend/docs/source/conf.py | 2 +-
backend/docs/source/package/daemons/dispatcher.rst | 6 ++
backend/docs/source/package/dispatcher.rst | 6 --
.../tests/deamons/test_backend.py | 0
backend/tests/{ => deamons}/test_dispatcher.py | 0
11 files changed, 196 insertions(+), 74 deletions(-)
diff --git a/backend/backend/actions.py b/backend/backend/actions.py
index 23e0168..2a79fc5 100644
--- a/backend/backend/actions.py
+++ b/backend/backend/actions.py
@@ -18,7 +18,10 @@ class Action(object):
:param str destdir: filepath with build results
- :param dict action: action job, fields:
+ :param dict action: dict-like object with action task
+
+ Expected **action** keys:
+
- action_type: main field determining what action to apply
# TODO: describe actions
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
index a8fba1d..3c73794 100644
--- a/backend/backend/daemons/backend.py
+++ b/backend/backend/daemons/backend.py
@@ -30,14 +30,13 @@ class CoprBackend(object):
"""
Core process - starts/stops/initializes workers and other backend components
+
+
+ :param config_file: path to the backend configuration file
+ :param ext_opts: additional options for backend
"""
def __init__(self, config_file=None, ext_opts=None):
- """
-
- :param config_file: path to the backend configuration file
- :param ext_opts: additional options for backend
- """
# read in config file
# put all the config items into a single self.opts bunch
@@ -55,35 +54,56 @@ class CoprBackend(object):
self.lock = multiprocessing.Lock()
- self.task_queues = []
+ self.task_queues = {}
+ self.events = multiprocessing.Queue()
+ # event format is a dict {when:time, who:[worker|logger|job|main],
+ # what:str}
+
+ self.abort = False
+
+ if not os.path.exists(self.opts.worker_logdir):
+ os.makedirs(self.opts.worker_logdir, mode=0o750)
+
+ def clean_task_queues(self):
+ """
+ Make sure there is nothing in our task queues
+ """
+ try:
+ for queue in self.task_queues.values():
+ while queue.length:
+ queue.dequeue()
+ except ConnectionError:
+ raise CoprBackendError(
+ "Could not connect to a task queue. Is Redis running?")
+
+ def init_task_queues(self):
+ """
+ Connect to the retask.Queue for each group_id. Remove old tasks from queues.
+ """
try:
for group in self.opts.build_groups:
group_id = group["id"]
- self.task_queues.append(Queue("copr-be-{0}".format(group_id)))
- self.task_queues[group_id].connect()
+ queue = Queue("copr-be-{0}".format(group_id))
+ queue.connect()
+ self.task_queues[group_id] = queue
except ConnectionError:
raise CoprBackendError(
"Could not connect to a task queue. Is Redis running?")
- # make sure there is nothing in our task queues
self.clean_task_queues()
- self.events = multiprocessing.Queue()
- # event format is a dict {when:time, who:[worker|logger|job|main],
- # what:str}
-
- # create logger
+ def init_sub_process(self):
+ """
+ - Create backend logger
+ - Create job grabber
+ """
self._logger = CoprBackendLog(self.opts, self.events)
self._logger.start()
self.event("Starting up Job Grabber")
- # create job grabber
+
self._jobgrab = CoprJobGrab(self.opts, self.events, self.lock)
self._jobgrab.start()
- self.abort = False
-
- if not os.path.exists(self.opts.worker_logdir):
- os.makedirs(self.opts.worker_logdir, mode=0o750)
def event(self, what):
"""
@@ -98,18 +118,19 @@ class CoprBackend(object):
"""
self.opts = self.config_reader.read()
- def clean_task_queues(self):
- try:
- for queue in self.task_queues:
- while queue.length:
- queue.dequeue()
- except ConnectionError:
- raise CoprBackendError(
- "Could not connect to a task queue. Is Redis running?")
-
def spin_up_workers_by_group(self, group):
+ """
+ Handles starting/growing the number of workers
+
+ :param dict group: Builders group
+
+ Utilized keys:
+ - **id**
+ - **max_workers**
+
+ """
group_id = group["id"]
- # this handles starting/growing the number of workers
+
if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
self.event("Spinning up more workers")
for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
@@ -147,6 +168,12 @@ class CoprBackend(object):
return preserved_workers
def run(self):
+ """
+ Starts backend process. Control sub process start/stop.
+ """
+ self.init_sub_process()
+ self.init_task_queues()
+
self.abort = False
while not self.abort:
# re-read config into opts
diff --git a/backend/backend/daemons/dispatcher.py b/backend/backend/daemons/dispatcher.py
index e1c90a7..9b826fd 100644
--- a/backend/backend/daemons/dispatcher.py
+++ b/backend/backend/daemons/dispatcher.py
@@ -44,11 +44,21 @@ def ans_extra_vars_encode(extra_vars, name):
class WorkerCallback(object):
+ """
+ Callback class for worker. Now used only for message logging
+
+ :param logfile: path to the log file
+ """
def __init__(self, logfile=None):
self.logfile = logfile
def log(self, msg):
+ """
+ Safely writes msg to the logfile
+
+ :param str msg: message to be logged
+ """
if self.logfile:
now = time.strftime("%F %T")
try:
@@ -63,6 +73,21 @@ class WorkerCallback(object):
class Worker(multiprocessing.Process):
+ """
+ Worker process dispatches building tasks. Backend spin-up multiple workers, each
+ worker associated to one group_id and process one task at the each moment.
+
+ Worker listens for the new tasks from :py:class:`retask.Queue` associated with its group_id
+
+ :param Bunch opts: backend config
+ :param queue: (:py:class:`multiprocessing.Queue`) queue to announce new events
+ :param int worker_num: worker number
+ :param int group_id: group_id from the set of groups defined in config
+ :param callback: callback object to handle internal workers events. Should implement method ``log(msg)``.
+ :param lock: (:py:class:`multiprocessing.Lock`) global backend lock
+
+ """
+
def __init__(self, opts, events, worker_num, group_id,
callback=None, lock=None):
@@ -168,7 +193,8 @@ class Worker(multiprocessing.Process):
def run_ansible_playbook(self, args, name="running playbook", attempts=9):
"""
- call ansible playbook
+ Call ansible playbook:
+
- well mostly we run out of space in OpenStack so we rather try
multiple times (attempts param)
- dump any attempt failure
@@ -198,6 +224,12 @@ class Worker(multiprocessing.Process):
return result
def validate_new_vm(self, ipaddr):
+ """
+ Test connectivity to the VM
+
+ :param ipaddr: ip address to the newly created VM
+ :raises: :py:class:`~backend.exceptions.CoprWorkerSpawnFailError`: validation fails
+ """
# we were getting some dead instances
# that's why I'm testing the connectivity here
connection = ansible.runner.Runner(
@@ -258,14 +290,15 @@ class Worker(multiprocessing.Process):
def spawn_instance(self, job):
"""
- call the spawn playbook to startup/provision a building instance
- get an IP and test if the builder responds
- repeat this until you get an IP of working builder
+ Spawn new VM, executing the following steps:
+
+ - call the spawn playbook to startup/provision a building instance
+ - get an IP and test if the builder responds
+ - repeat this until you get an IP of working builder
:param BuildJob job:
- :return:
- :ip of created VM
- :None couldn't find playbook to spin ip VM
+ :return ip: of created VM
+ :return None: if couldn't find playbook to spin ip VM
"""
start = time.time()
@@ -309,7 +342,9 @@ class Worker(multiprocessing.Process):
.format(exception.msg))
def terminate_instance(self, instance_ip):
- """call the terminate playbook to destroy the building instance"""
+ """
+ Call the terminate playbook to destroy the building instance
+ """
term_args = {}
if "ip" in self.opts.terminate_vars:
@@ -368,8 +403,9 @@ class Worker(multiprocessing.Process):
def starting_build(self, job):
"""
Announce to the frontend that a build is starting.
- Return: True if the build can start
- False if the build can not start (build is cancelled)
+
+ :return True: if the build can start
+ :return False: if the build can not start (build is cancelled)
"""
try:
@@ -396,13 +432,16 @@ class Worker(multiprocessing.Process):
return False
def spawn_instance_with_check(self, job):
- """ Wrapper around self.spawn_instance() with exception checking
- :param BuildJob job:
+ """
+ Wrapper around self.spawn_instance() with exception checking
+
+ :param BuildJob job:
- :return str: ip of spawned vm
- :raises:
- CoprWorkError: spawn function doesn't return ip
- AnsibleError: failure during anible command execution
+ :return str: ip of spawned vm
+ :raises:
+
+ - :py:class:`~backend.exceptions.CoprWorkerError`: spawn function doesn't return ip
+ - :py:class:`AnsibleError`: failure during ansible command execution
"""
try:
ip = self.spawn_instance(job)
@@ -416,8 +455,11 @@ class Worker(multiprocessing.Process):
return ip
def init_fedmsg(self):
- # Initialize Fedmsg
- # (this assumes there are certs and a fedmsg config on disk)
+ """
+ Initialize Fedmsg
+ (this assumes there are certs and a fedmsg config on disk)
+ """
+
if not (self.opts.fedmsg_enabled and fedmsg):
return
@@ -428,6 +470,9 @@ class Worker(multiprocessing.Process):
"failed to initialize fedmsg: {0}".format(e))
def on_pkg_skip(self, job):
+ """
+ Handle package skip
+ """
self._announce_start(job)
self.callback.log(
"Skipping: package {0} has been already built before."
@@ -436,6 +481,10 @@ class Worker(multiprocessing.Process):
self._announce_end(job)
def obtain_job(self):
+ """
+ Retrieves new build task from queue.
+ Checks if the new job can be started and not skipped.
+ """
setproctitle("worker-{0} {1} No task".format(
self.opts.build_groups[self.group_id]["name"],
self.worker_num))
@@ -474,6 +523,12 @@ class Worker(multiprocessing.Process):
return job
def do_job(self, ip, job):
+ """
+ Executes new job.
+
+ :param ip: ip address of the builder VM
+ :param job: :py:class:`~backend.job.BuildJob`
+ """
self._announce_start(job, ip)
status = BuildStatus.SUCCEEDED
chroot_destdir = os.path.normpath(job.destdir + '/' + job.chroot)
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
index 6ce7493..6353682 100644
--- a/backend/backend/daemons/job_grab.py
+++ b/backend/backend/daemons/job_grab.py
@@ -25,17 +25,17 @@ class CoprJobGrab(Process):
"""
Fetch jobs from the Frontend
- - submit them to the jobs queue for workers
- """
- def connect_queues(self):
- # TODO: better extract connection into the dedicated method
- for group in self.opts.build_groups:
- queue = Queue("copr-be-{0}".format(group["id"]))
- queue.connect()
+ - submit build task to the jobs queue for workers
+ - run Action handler for action tasks
- for arch in group["archs"]:
- self.task_queues_by_arch[arch] = queue
+
+ :param Bunch opts: backend config
+ :param events: :py:class:`multiprocessing.Queue` to listen
+ for events from other backend components
+ :param lock: :py:class:`multiprocessing.Lock` global backend lock
+
+ """
def __init__(self, opts, events, lock):
# base class initialization
@@ -48,14 +48,40 @@ class CoprJobGrab(Process):
self.added_jobs = set()
self.lock = lock
+ def connect_queues(self):
+ """
+ Connects to the retask queues. One queue per builders group.
+ """
+ for group in self.opts.build_groups:
+ queue = Queue("copr-be-{0}".format(group["id"]))
+ queue.connect()
+
+ for arch in group["archs"]:
+ self.task_queues_by_arch[arch] = queue
+
def event(self, what):
+ """
+ Put new event into the event queue
+
+ :param what: message to put into the queue
+ """
self.events.put({"when": time.time(), "who": "jobgrab", "what": what})
def process_build_task(self, task):
+ """
+ Route build task to the appropriate queue.
+ :param task: dict-like object which represent build task
+
+ Utilized **task** keys:
+
+ - ``task_id``
+ - ``chroot``
+ - ``arch``
+
+ :return int: Count of the successfully routed tasks
+ """
count = 0
if "task_id" in task and task["task_id"] not in self.added_jobs:
- # this will ignore and throw away unconfigured architectures
- # FIXME: don't do ^
# TODO: produces memory leak!
self.added_jobs.add(task["task_id"])
@@ -70,6 +96,11 @@ class CoprJobGrab(Process):
return count
def process_action(self, action):
+ """
+ Run action task handler, see :py:class:`~backend.action.Action`
+
+ :param action: dict-like object with action task
+ """
ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
frontend_callback=FrontendClient(self.opts, self.events),
front_url=self.opts.frontend_base_url,
@@ -77,6 +108,9 @@ class CoprJobGrab(Process):
ao.run()
def load_tasks(self):
+ """
+ Retrieve tasks from frontend and runs appropriate handlers
+ """
try:
r = get("{0}/waiting/".format(self.opts.frontend_url),
auth=("user", self.opts.frontend_auth))
@@ -111,6 +145,9 @@ class CoprJobGrab(Process):
self.process_action(action)
def run(self):
+ """
+ Starts job grabber process
+ """
setproctitle("CoprJobGrab")
self.connect_queues()
try:
diff --git a/backend/backend/daemons/log.py b/backend/backend/daemons/log.py
index 6452fe7..fb9a1ee 100644
--- a/backend/backend/daemons/log.py
+++ b/backend/backend/daemons/log.py
@@ -15,16 +15,16 @@ from setproctitle import setproctitle
class CoprBackendLog(Process):
- """Log mechanism where items from the events queue get recorded"""
+ """Log mechanism where items from the events queue get recorded
- def __init__(self, opts, events):
- """
+ :param Bunch opts: backend config
+ :param events: multiprocessing.Queue to listen
+ for events from other backend components
- :param Bunch opts: backend config
- :param events: multiprocessing.Queue to listen
- for events from other backend components
+ """
+
+ def __init__(self, opts, events):
- """
# base class initialization
Process.__init__(self, name="logger")
diff --git a/backend/docs/source/Autodoc.rst b/backend/docs/source/Autodoc.rst
index 5c49cd7..9baa116 100644
--- a/backend/docs/source/Autodoc.rst
+++ b/backend/docs/source/Autodoc.rst
@@ -6,7 +6,6 @@ backend.
Root backend modules
.. toctree::
- package/dispatcher
package/actions
package/job
package/frontend
@@ -23,6 +22,7 @@ Backend daemons, started by copr-be.py
.. toctree::
package/daemons/backend.rst
+ package/daemons/dispatcher.rst
package/daemons/job_grab.rst
package/daemons/log.rst
diff --git a/backend/docs/source/conf.py b/backend/docs/source/conf.py
index 1f6cfd2..0d00883 100644
--- a/backend/docs/source/conf.py
+++ b/backend/docs/source/conf.py
@@ -251,6 +251,6 @@ autodoc_default_flags = [
'members',
'undoc-members',
'private-members',
- 'special-members',
+ # 'special-members',
# 'inherited-members'
]
diff --git a/backend/docs/source/package/daemons/dispatcher.rst b/backend/docs/source/package/daemons/dispatcher.rst
new file mode 100644
index 0000000..4b991af
--- /dev/null
+++ b/backend/docs/source/package/daemons/dispatcher.rst
@@ -0,0 +1,6 @@
+backend.daemons.dispatcher
+==========================
+
+.. automodule:: backend.daemons.dispatcher
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/dispatcher.rst b/backend/docs/source/package/dispatcher.rst
deleted file mode 100644
index 5d185f4..0000000
--- a/backend/docs/source/package/dispatcher.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-backend.dispatcher
-==================
-
-.. automodule:: backend.dispatcher
- :members:
- :undoc-members:
diff --git a/frontend/coprs_frontend/coprs/logic/__init__.py b/backend/tests/deamons/test_backend.py
similarity index 100%
copy from frontend/coprs_frontend/coprs/logic/__init__.py
copy to backend/tests/deamons/test_backend.py
diff --git a/backend/tests/test_dispatcher.py b/backend/tests/deamons/test_dispatcher.py
similarity index 100%
rename from backend/tests/test_dispatcher.py
rename to backend/tests/deamons/test_dispatcher.py
9 years, 5 months
[copr] master: initial version of playbook to prepare AMI image (a529ced)
by Miroslav Suchý
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit a529ced7674d25423186277e8be056f83f1bfb49
Author: Miroslav Suchý <msuchy@redhat.com>
Date: Fri Dec 5 11:16:18 2014 +0100
initial version of playbook to prepare AMI image
will continue once new mock really reach testing repo
>---------------------------------------------------------------
backend/conf/playbooks/prepare-builder-image.yml | 43 ++++++++++++++++++++++
1 files changed, 43 insertions(+), 0 deletions(-)
diff --git a/backend/conf/playbooks/prepare-builder-image.yml b/backend/conf/playbooks/prepare-builder-image.yml
new file mode 100644
index 0000000..f4468ea
--- /dev/null
+++ b/backend/conf/playbooks/prepare-builder-image.yml
@@ -0,0 +1,43 @@
+---
+- name: check/create instance
+ hosts: localhost
+ user: root
+ gather_facts: False
+
+ tasks:
+ - name: install pkgs
+ action: yum state=present pkg={{ item }}
+ with_items:
+ - http://mirror.oss.ou.edu/epel/7/x86_64/e/epel-release-7-2.noarch.rpm
+ - mock
+ - mock-lvm
+ - yum-utils
+ - rsync
+ - openssh-clients
+
+ - name: make sure newest rpm
+ action: yum name=rpm state=latest
+
+ # sometime we need, sometimes not. If you do not need it, just comment it out
+ - yum: name=mock enablerepo=epel-testing state=latest
+
+ - name: mockbuilder user
+ action: user name=mockbuilder groups=mock
+
+ - name: mockbuilder .ssh
+ action: file state=directory path=/home/mockbuilder/.ssh mode=0700 owner=mockbuilder group=mockbuilder
+
+ - name: prepare caches
+ action: command mock -r {{ item }} --init
+ with_items:
+ - epel-5-i386
+ - epel-5-x86_64
+ - epel-6-i386
+ - epel-6-x86_64
+ - epel-7-x86_64
+ - fedora-20-i386
+ - fedora-20-x86_64
+ - fedora-21-i386
+ - fedora-21-x86_64
+ - fedora-rawhide-i386
+ - fedora-rawhide-x86_64
9 years, 5 months
[copr] master: [backend] unittest for backend.daemons.job_grab; minor fixes (75e958e)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 75e958ee268247dd51121198101cdb7a272ac4f7
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Thu Dec 4 15:40:11 2014 +0100
[backend] unittest for backend.daemons.job_grab; minor fixes
>---------------------------------------------------------------
backend/backend/daemons/job_grab.py | 86 ++++++----
backend/backend/exceptions.py | 4 +
backend/conf/copr-be.conf.example | 1 +
backend/tests/deamons/test_job_grab.py | 283 ++++++++++++++++++++++++++++++++
backend/tests/deamons/test_log.py | 3 +-
backend/tests/test_sign.py | 3 +-
6 files changed, 343 insertions(+), 37 deletions(-)
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
index fc93348..6ce7493 100644
--- a/backend/backend/daemons/job_grab.py
+++ b/backend/backend/daemons/job_grab.py
@@ -4,37 +4,48 @@ from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
+from collections import defaultdict
-import multiprocessing
+from multiprocessing import Process
import time
-import setproctitle
+from setproctitle import setproctitle
-import requests
+from requests import get, RequestException
from retask.task import Task
from retask.queue import Queue
-from backend.actions import Action
-from backend.frontend import FrontendClient
+from ..actions import Action
+from ..exceptions import CoprJobGrabError
+from ..frontend import FrontendClient
-class CoprJobGrab(multiprocessing.Process):
+# TODO: Replace entire model with asynchronous queue, so that frontend push task,
+# and workers listen for them
+class CoprJobGrab(Process):
"""
Fetch jobs from the Frontend
- submit them to the jobs queue for workers
"""
+ def connect_queues(self):
+ # TODO: better extract connection into the dedicated method
+ for group in self.opts.build_groups:
+ queue = Queue("copr-be-{0}".format(group["id"]))
+ queue.connect()
+
+ for arch in group["archs"]:
+ self.task_queues_by_arch[arch] = queue
+
def __init__(self, opts, events, lock):
# base class initialization
- multiprocessing.Process.__init__(self, name="jobgrab")
+ Process.__init__(self, name="jobgrab")
self.opts = opts
self.events = events
- self.task_queues = []
- for group in self.opts.build_groups:
- self.task_queues.append(Queue("copr-be-{0}".format(group["id"])))
- self.task_queues[group["id"]].connect()
- self.added_jobs = []
+ self.task_queues_by_arch = {}
+
+ self.added_jobs = set()
self.lock = lock
def event(self, what):
@@ -45,14 +56,17 @@ class CoprJobGrab(multiprocessing.Process):
if "task_id" in task and task["task_id"] not in self.added_jobs:
# this will ignore and throw away unconfigured architectures
# FIXME: don't do ^
+
+ # TODO: produces memory leak!
+ self.added_jobs.add(task["task_id"])
arch = task["chroot"].split("-")[2]
- for group in self.opts.build_groups:
- if arch in group["archs"]:
- self.added_jobs.append(task["task_id"])
- task_obj = Task(task)
- self.task_queues[group["id"]].enqueue(task_obj)
- count += 1
- break
+ if arch not in self.task_queues_by_arch:
+ raise CoprJobGrabError("No builder group for architecture: {}, task: {}"
+ .format(arch, task))
+
+ task_obj = Task(task)
+ self.task_queues_by_arch[arch].enqueue(task_obj)
+ count += 1
return count
def process_action(self, action):
@@ -64,39 +78,43 @@ class CoprJobGrab(multiprocessing.Process):
def load_tasks(self):
try:
- r = requests.get("{0}/waiting/".format(self.opts.frontend_url),
- auth=("user", self.opts.frontend_auth))
- r_json = r.json()
-
- except requests.RequestException as e:
- self.event("Error retrieving jobs from {0}: {1}".format(
- self.opts.frontend_url, e))
+ r = get("{0}/waiting/".format(self.opts.frontend_url),
+ auth=("user", self.opts.frontend_auth))
+ except RequestException as e:
+ self.event("Error retrieving jobs from {0}: {1}"
+ .format(self.opts.frontend_url, e))
return
+ try:
+ r_json = r.json()
except ValueError as e:
- self.event("Error getting JSON build list from FE {0}"
- .format(e))
+ self.event("Error getting JSON build list from FE {0}".format(e))
return
- if "builds" in r_json and r_json["builds"]:
+ if r_json.get("builds"):
self.event("{0} jobs returned".format(len(r_json["builds"])))
count = 0
for task in r_json["builds"]:
- count += self.process_build_task(task)
+ try:
+ count += self.process_build_task(task)
+ except CoprJobGrabError as err:
+ self.event("Failed to enqueue new job: {} with error: {}"
+ .format(task, err))
+
if count:
self.event("New jobs: %s" % count)
- if "actions" in r_json and r_json["actions"]:
+ if r_json.get("actions"):
self.event("{0} actions returned".format(len(r_json["actions"])))
for action in r_json["actions"]:
self.process_action(action)
def run(self):
- setproctitle.setproctitle("CoprJobGrab")
- abort = False
+ setproctitle("CoprJobGrab")
+ self.connect_queues()
try:
- while not abort:
+ while True:
self.load_tasks()
time.sleep(self.opts.sleeptime)
except KeyboardInterrupt:
diff --git a/backend/backend/exceptions.py b/backend/backend/exceptions.py
index 84ccd04..7efa807 100644
--- a/backend/backend/exceptions.py
+++ b/backend/backend/exceptions.py
@@ -81,6 +81,10 @@ class CoprBackendError(Exception):
return self.msg
+class CoprJobGrabError(CoprBackendError):
+ pass
+
+
class CoprWorkerError(CoprBackendError):
pass
diff --git a/backend/conf/copr-be.conf.example b/backend/conf/copr-be.conf.example
index 81b8333..fe76c37 100644
--- a/backend/conf/copr-be.conf.example
+++ b/backend/conf/copr-be.conf.example
@@ -28,6 +28,7 @@ build_groups=1
# max_workers - maximum number of workers in this group
#
# Use prefix groupX where X is number of group starting from zero.
+# Warning: any arch should be used once, so no two groups to build the same arch
#
# Example: (and also default values)
# group0_name=PC
diff --git a/backend/tests/deamons/test_job_grab.py b/backend/tests/deamons/test_job_grab.py
new file mode 100644
index 0000000..5e85a01
--- /dev/null
+++ b/backend/tests/deamons/test_job_grab.py
@@ -0,0 +1,283 @@
+# coding: utf-8
+import copy
+
+from collections import defaultdict
+import logging
+from pprint import pprint
+from bunch import Bunch
+import time
+import requests
+
+from backend.exceptions import BuilderError, BuilderTimeOutError, CoprJobGrabError
+
+from retask.queue import Queue
+
+import tempfile
+import shutil
+import os
+
+import six
+
+if six.PY3:
+ from unittest import mock
+ from unittest.mock import patch, MagickMock, call
+else:
+ import mock
+ from mock import patch, MagicMock, call
+
+import pytest
+
+import backend.daemons.log as log_module
+from backend.daemons.job_grab import CoprJobGrab
+import backend.actions
+
+@pytest.yield_fixture
+def mc_logging():
+ with mock.patch("backend.daemons.job_grab.logging") as mc_logging:
+ yield mc_logging
+
+
+@pytest.yield_fixture
+def mc_setproctitle():
+ with mock.patch("backend.daemons.job_grab.setproctitle") as mc_spt:
+ yield mc_spt
+
+
+@pytest.yield_fixture
+def mc_retask_queue():
+ with mock.patch("backend.daemons.job_grab.Queue") as mc_queue:
+ def make_queue(*args, **kwargs):
+ updated_kwargs = copy.deepcopy(kwargs)
+ updated_kwargs["spec"] = Queue
+ mc = MagicMock(**updated_kwargs)
+ return mc
+
+ mc_queue.side_effect = make_queue
+ yield mc_queue
+
+
+class TestJobGrab(object):
+
+ def setup_method(self, method):
+
+ self.mc_mpp_patcher = mock.patch("backend.daemons.job_grab.Process")
+ self.mc_mpp = self.mc_mpp_patcher.start()
+
+ self.test_time = time.time()
+ subdir = "test_createrepo_{}".format(time.time())
+ self.tmp_dir_path = os.path.join(tempfile.gettempdir(), subdir)
+ os.mkdir(self.tmp_dir_path)
+
+ self.log_dir = os.path.join(self.tmp_dir_path, "copr")
+ self.log_file = os.path.join(self.log_dir, "copr.log")
+
+ self.opts = Bunch(
+ logfile=self.log_file,
+ verbose=False,
+ build_groups=[
+ {"id": 0, "name": "x86", "archs": ["i386", "i686", "x86_64"]},
+ {"id": 1, "name": "arm", "archs": ["armv7"]},
+ ],
+ destdir="/dev/null",
+ frontend_base_url="http://example.com/",
+ frontend_url="http://example.com/backend",
+ frontend_auth="foobar",
+ results_baseurl="http://example.com/results/",
+ sleeptime=1,
+ )
+
+ self.queue = MagicMock()
+ self.lock = MagicMock()
+
+ self.task_dict_1 = dict(
+ task_id=12345,
+ chroot="fedora-20-x86_64",
+ )
+ self.task_dict_2 = dict(
+ task_id=12346,
+ chroot="fedora-20-armv7",
+ )
+ self.task_dict_bad_arch = dict(
+ task_id=12346,
+ chroot="fedora-20-s390x",
+ )
+
+ def teardown_method(self, method):
+ self.mc_mpp_patcher.stop()
+
+ shutil.rmtree(self.tmp_dir_path)
+ if hasattr(self, "cbl"):
+ del self.cbl
+
+ for handler in logging.root.handlers[:]:
+ logging.root.removeHandler(handler)
+
+ @pytest.yield_fixture
+ def mc_time(self):
+ with mock.patch("backend.daemons.job_grab.time") as mc_time:
+ mc_time.time.return_value = self.test_time
+ yield mc_time
+
+ @pytest.fixture
+ def init_jg(self, mc_retask_queue):
+ self.jg = CoprJobGrab(self.opts, self.queue, self.lock)
+ self.jg.connect_queues()
+
+ def test_connect_queues(self, mc_retask_queue):
+ self.jg = CoprJobGrab(self.opts, self.queue, self.lock)
+
+ assert len(self.jg.task_queues_by_arch) == 0
+ self.jg.connect_queues()
+
+ # created retask queue
+ expected = [call(u'copr-be-0'), call(u'copr-be-1')]
+ assert mc_retask_queue.call_args_list == expected
+ # connected to them
+ for obj in self.jg.task_queues_by_arch.values():
+ assert obj.connect.called
+
+ def test_event(self, init_jg, mc_time):
+ # adds an event with current time into the queue
+ content = "foobar"
+ self.jg.event(content)
+
+ assert self.queue.put.call_args == call({
+ u'what': 'foobar', u'who': u'jobgrab', u'when': self.test_time})
+
+ def test_process_build_task_skip_added(self, init_jg):
+ self.jg.added_jobs.add(12345)
+ self.jg.added_jobs.add(12346)
+
+ assert self.jg.process_build_task(self.task_dict_1) == 0
+ assert self.jg.process_build_task(self.task_dict_2) == 0
+ for obj in self.jg.task_queues_by_arch.values():
+ assert not obj.enqueue.called
+
+ def test_process_build_task_correct_group_1(self, init_jg):
+
+ assert self.jg.process_build_task(self.task_dict_1) == 1
+ assert self.jg.task_queues_by_arch["x86_64"].enqueue.called
+ assert not self.jg.task_queues_by_arch["armv7"].enqueue.called
+
+ def test_process_build_task_correct_group_2(self, init_jg):
+
+ assert self.jg.process_build_task(self.task_dict_2) == 1
+ assert not self.jg.task_queues_by_arch["x86_64"].enqueue.called
+ assert self.jg.task_queues_by_arch["armv7"].enqueue.called
+
+ def test_process_build_task_correct_group_error(self, init_jg):
+
+ with pytest.raises(CoprJobGrabError) as err:
+ self.jg.process_build_task(self.task_dict_bad_arch)
+
+ assert not self.jg.task_queues_by_arch["x86_64"].enqueue.called
+ assert not self.jg.task_queues_by_arch["armv7"].enqueue.called
+
+ @mock.patch("backend.daemons.job_grab.FrontendClient")
+ @mock.patch("backend.daemons.job_grab.Action", spec=backend.actions.Action)
+ def test_process_action(self, mc_action, mc_fe_c, init_jg):
+ test_action = MagicMock()
+
+ self.jg.process_action(test_action)
+
+ expected_call = call(
+ self.queue, test_action, self.lock,
+ destdir=self.opts.destdir,
+ frontend_callback=mc_fe_c(self.opts, self.queue),
+ front_url=self.opts.frontend_base_url,
+ results_root_url=self.opts.results_baseurl
+ )
+ assert expected_call == mc_action.call_args
+ assert mc_action.return_value.run.called
+
+ @mock.patch("backend.daemons.job_grab.get")
+ def test_load_tasks_error_request(self, mc_get, init_jg):
+ mc_get.side_effect = requests.RequestException()
+
+ self.jg.process_build_task = MagicMock()
+ self.jg.event = MagicMock()
+ self.jg.process_action = MagicMock()
+
+ assert self.jg.load_tasks() is None
+
+ assert not self.jg.process_build_task.called
+ assert not self.jg.process_action.called
+
+ assert "Error retrieving jobs from" in self.jg.event.call_args[0][0]
+
+ @mock.patch("backend.daemons.job_grab.get")
+ def test_load_tasks_error_request_json(self, mc_get, init_jg):
+ mc_get.return_value.json.side_effect = ValueError()
+
+ self.jg.process_build_task = MagicMock()
+ self.jg.event = MagicMock()
+ self.jg.process_action = MagicMock()
+
+ assert self.jg.load_tasks() is None
+
+ assert not self.jg.process_build_task.called
+ assert not self.jg.process_action.called
+
+ assert "Error getting JSON" in self.jg.event.call_args[0][0]
+
+ @mock.patch("backend.daemons.job_grab.get")
+ def test_load_tasks_builds(self, mc_get, init_jg):
+ mc_get.return_value.json.return_value = {
+ "builds": [
+ self.task_dict_1,
+ self.task_dict_2,
+ self.task_dict_2
+ ]
+ }
+
+ self.jg.process_build_task = MagicMock()
+ self.jg.process_build_task.side_effect = [
+ 1,
+ 1,
+ CoprJobGrabError("foobar"),
+ ]
+ self.jg.event = MagicMock()
+ self.jg.process_action = MagicMock()
+
+ self.jg.load_tasks()
+
+ assert len(self.jg.process_build_task.call_args_list) == 3
+ assert not self.jg.process_action.called
+
+ assert any(["New jobs: 2" in cl[0][0] for cl in self.jg.event.call_args_list])
+ assert any(["Failed to enqueue" in cl[0][0] for cl in self.jg.event.call_args_list])
+
+ @mock.patch("backend.daemons.job_grab.get")
+ def test_load_tasks_actions(self, mc_get, init_jg):
+ action_1 = MagicMock()
+ action_2 = MagicMock()
+ mc_get.return_value.json.return_value = {
+ "actions": [
+ action_1,
+ action_2,
+ ],
+ "builds": [],
+ }
+
+ self.jg.process_build_task = MagicMock()
+ self.jg.event = MagicMock()
+ self.jg.process_action = MagicMock()
+
+ self.jg.load_tasks()
+
+ expected_calls = [call(action_1), call(action_2)]
+ assert self.jg.process_action.call_args_list == expected_calls
+
+ def test_run(self, mc_time, mc_setproctitle, init_jg):
+ self.jg.connect_queues = MagicMock()
+ self.jg.load_tasks = MagicMock()
+ self.jg.load_tasks.side_effect = [
+ None,
+ KeyboardInterrupt
+ ]
+
+ self.jg.run()
+
+ assert mc_setproctitle.called
+ assert self.jg.connect_queues.called_once
+ assert self.jg.load_tasks.called
diff --git a/backend/tests/deamons/test_log.py b/backend/tests/deamons/test_log.py
index c1ff665..8b4bcd9 100644
--- a/backend/tests/deamons/test_log.py
+++ b/backend/tests/deamons/test_log.py
@@ -27,17 +27,18 @@ import backend.daemons.log as log_module
from backend.daemons.log import CoprBackendLog
-
@pytest.yield_fixture
def mc_logging():
with mock.patch("backend.daemons.log.logging") as mc_logging:
yield mc_logging
+
@pytest.yield_fixture
def mc_setproctitle():
with mock.patch("backend.daemons.log.setproctitle") as mc_spt:
yield mc_spt
+
class TestLog(object):
def setup_method(self, method):
diff --git a/backend/tests/test_sign.py b/backend/tests/test_sign.py
index 0d5436b..fe9a63c 100644
--- a/backend/tests/test_sign.py
+++ b/backend/tests/test_sign.py
@@ -195,8 +195,7 @@ class TestSign(object):
with pytest.raises(CoprKeygenRequestError) as err:
create_user_keys(self.username, self.projectname, self.opts)
-
- print(str(err))
+ assert "Failed to create key-pair for user: foo, project:bar" in str(err)
@mock.patch("backend.sign._sign_one")
@mock.patch("backend.sign.create_user_keys")
9 years, 5 months
[copr] master: [backend] moved backend.dispatcher -> backend.daemons.dispatcher (fc298d6)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit fc298d6b06f1eca68fdc76979c7a72da3d9c774f
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Thu Dec 4 12:44:41 2014 +0100
[backend] moved backend.dispatcher -> backend.daemons.dispatcher
>---------------------------------------------------------------
backend/backend/daemons/__init__.py | 1 +
backend/backend/daemons/backend.py | 5 ++-
backend/backend/{ => daemons}/dispatcher.py | 13 ++++----
backend/backend/helpers.py | 5 ++-
backend/tests/test_dispatcher.py | 44 +++++++++++++-------------
5 files changed, 36 insertions(+), 32 deletions(-)
diff --git a/backend/backend/daemons/__init__.py b/backend/backend/daemons/__init__.py
index fa27477..0baeca5 100644
--- a/backend/backend/daemons/__init__.py
+++ b/backend/backend/daemons/__init__.py
@@ -2,4 +2,5 @@
from .job_grab import CoprJobGrab
from .log import CoprBackendLog
+from .dispatcher import Worker
from .backend import CoprBackend, run_backend
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
index 14a3105..a8fba1d 100644
--- a/backend/backend/daemons/backend.py
+++ b/backend/backend/daemons/backend.py
@@ -20,9 +20,10 @@ from retask.queue import Queue
from retask import ConnectionError
from ..exceptions import CoprBackendError
-from ..dispatcher import Worker
from ..helpers import BackendConfigReader
-from . import CoprJobGrab, CoprBackendLog
+from .job_grab import CoprJobGrab
+from .log import CoprBackendLog
+from .dispatcher import Worker
class CoprBackend(object):
diff --git a/backend/backend/dispatcher.py b/backend/backend/daemons/dispatcher.py
similarity index 98%
rename from backend/backend/dispatcher.py
rename to backend/backend/daemons/dispatcher.py
index ead03c9..e1c90a7 100644
--- a/backend/backend/dispatcher.py
+++ b/backend/backend/daemons/dispatcher.py
@@ -17,14 +17,15 @@ from ansible.errors import AnsibleError
from setproctitle import setproctitle
from IPy import IP
from retask.queue import Queue
-from backend.mockremote.callback import CliLogCallBack
-from .exceptions import MockRemoteError, CoprWorkerError, CoprWorkerSpawnFailError
-from .job import BuildJob
+from ..mockremote.callback import CliLogCallBack
-from .mockremote import MockRemote
-from .frontend import FrontendClient
-from .constants import BuildStatus
+from ..exceptions import MockRemoteError, CoprWorkerError, CoprWorkerSpawnFailError
+from ..job import BuildJob
+
+from ..mockremote import MockRemote
+from ..frontend import FrontendClient
+from ..constants import BuildStatus
ansible_playbook = "ansible-playbook"
diff --git a/backend/backend/helpers.py b/backend/backend/helpers.py
index 1786694..5948888 100644
--- a/backend/backend/helpers.py
+++ b/backend/backend/helpers.py
@@ -13,10 +13,11 @@ import time
from bunch import Bunch
import datetime
-from backend.constants import DEF_BUILD_USER
+
from copr.client import CoprClient
-from .exceptions import CoprBackendError
+from backend.constants import DEF_BUILD_USER
+from backend.exceptions import CoprBackendError
class SortedOptParser(optparse.OptionParser):
diff --git a/backend/tests/test_dispatcher.py b/backend/tests/test_dispatcher.py
index 9f15282..1b6a2a2 100644
--- a/backend/tests/test_dispatcher.py
+++ b/backend/tests/test_dispatcher.py
@@ -30,7 +30,7 @@ else:
from mock import MagicMock
-from backend.dispatcher import Worker, WorkerCallback
+from backend.daemons.dispatcher import Worker, WorkerCallback
STDOUT = "stdout"
STDERR = "stderr"
@@ -269,7 +269,7 @@ class TestDispatcher(object):
with pytest.raises(CoprWorkerSpawnFailError):
self.worker.try_spawn(self.try_spawn_args)
- @mock.patch("backend.dispatcher.ansible.runner.Runner")
+ @mock.patch("backend.daemons.dispatcher.ansible.runner.Runner")
def test_validate_new_vm(self, mc_runner, init_worker):
mc_ans_conn = MagicMock()
mc_ans_conn.run.return_value = {"contacted": {self.vm_ip: "ok"}}
@@ -278,7 +278,7 @@ class TestDispatcher(object):
self.worker.validate_new_vm(self.vm_ip)
assert mc_ans_conn.run.called
- @mock.patch("backend.dispatcher.ansible.runner.Runner")
+ @mock.patch("backend.daemons.dispatcher.ansible.runner.Runner")
def test_validate_new_vm_ans_error(self, mc_runner, init_worker):
mc_ans_conn = MagicMock()
mc_ans_conn.run.side_effect = IOError()
@@ -289,7 +289,7 @@ class TestDispatcher(object):
assert mc_ans_conn.run.called
- @mock.patch("backend.dispatcher.ansible.runner.Runner")
+ @mock.patch("backend.daemons.dispatcher.ansible.runner.Runner")
def test_validate_new_vm_bad_response(self, mc_runner, init_worker):
mc_ans_conn = MagicMock()
mc_ans_conn.run.return_value = {"contacted": {}}
@@ -351,7 +351,7 @@ class TestDispatcher(object):
self.worker.terminate_instance(self.vm_ip)
assert not mc_run_ans.called
- @mock.patch("backend.dispatcher.fedmsg")
+ @mock.patch("backend.daemons.dispatcher.fedmsg")
def test_event(self, mc_fedmsg, init_worker):
template = "foo: {foo}, bar: {bar}"
content = {"foo": "foo", "bar": "bar"}
@@ -364,7 +364,7 @@ class TestDispatcher(object):
assert el["who"] == "worker-2"
assert el["what"] == "foo: foo, bar: bar"
- @mock.patch("backend.dispatcher.fedmsg")
+ @mock.patch("backend.daemons.dispatcher.fedmsg")
def test_event_error(self, mc_fedmsg, init_worker):
template = "foo: {foo}, bar: {bar}"
content = {"foo": "foo", "bar": "bar"}
@@ -378,7 +378,7 @@ class TestDispatcher(object):
assert el["who"] == "worker-2"
assert el["what"] == "foo: foo, bar: bar"
- @mock.patch("backend.dispatcher.fedmsg")
+ @mock.patch("backend.daemons.dispatcher.fedmsg")
def test_event_disable_fedmsg(self, mc_fedmsg, init_worker):
template = "foo: {foo}, bar: {bar}"
content = {"foo": "foo", "bar": "bar"}
@@ -388,7 +388,7 @@ class TestDispatcher(object):
self.worker.event(topic, template, content)
assert not mc_fedmsg.publish.called
- @mock.patch("backend.dispatcher.subprocess")
+ @mock.patch("backend.daemons.dispatcher.subprocess")
def test_run_ansible_playbook_first_try_ok(self, mc_subprocess, init_worker):
exptected_result = "ok"
mc_subprocess.check_output.return_value = exptected_result
@@ -403,8 +403,8 @@ class TestDispatcher(object):
- @mock.patch("backend.dispatcher.time")
- @mock.patch("backend.dispatcher.subprocess")
+ @mock.patch("backend.daemons.dispatcher.time")
+ @mock.patch("backend.daemons.dispatcher.subprocess")
def test_run_ansible_playbook_first_second_ok(self, mc_subprocess,
mc_time, init_worker, capsys):
expected_result = "ok"
@@ -417,8 +417,8 @@ class TestDispatcher(object):
stdout, stderr = capsys.readouterr()
assert len(mc_subprocess.check_output.call_args_list) == 2
- @mock.patch("backend.dispatcher.time")
- @mock.patch("backend.dispatcher.subprocess")
+ @mock.patch("backend.daemons.dispatcher.time")
+ @mock.patch("backend.daemons.dispatcher.subprocess")
def test_run_ansible_playbook_all_attempts_failed(self, mc_subprocess,
mc_time, init_worker, capsys):
expected_result = "ok"
@@ -443,7 +443,7 @@ class TestDispatcher(object):
obtained = handle.read()
assert msg in obtained
- @mock.patch("backend.dispatcher.open", create=True)
+ @mock.patch("backend.daemons.dispatcher.open", create=True)
def test_worker_callback_error(self, mc_open, capsys):
wc = WorkerCallback(self.logfile_path)
mc_open.side_effect = IOError()
@@ -528,8 +528,8 @@ class TestDispatcher(object):
with pytest.raises(CoprWorkerError):
self.worker.starting_build(self.job)
- @mock.patch("backend.dispatcher.MockRemote")
- @mock.patch("backend.dispatcher.os")
+ @mock.patch("backend.daemons.dispatcher.MockRemote")
+ @mock.patch("backend.daemons.dispatcher.os")
def test_do_job_failure_on_mkdirs(self, mc_os, mc_mr, init_worker):
mc_os.path.exists.return_value = False
mc_os.makedirs.side_effect = IOError()
@@ -539,7 +539,7 @@ class TestDispatcher(object):
assert self.job.status == BuildStatus.FAILURE
assert not mc_mr.called
- @mock.patch("backend.dispatcher.MockRemote")
+ @mock.patch("backend.daemons.dispatcher.MockRemote")
def test_do_job(self, mc_mr_class, init_worker):
assert not os.path.exists(self.DESTDIR_CHROOT)
@@ -548,7 +548,7 @@ class TestDispatcher(object):
assert self.job.status == BuildStatus.SUCCEEDED
assert os.path.exists(self.DESTDIR_CHROOT)
- @mock.patch("backend.dispatcher.MockRemote")
+ @mock.patch("backend.daemons.dispatcher.MockRemote")
def test_do_job_updates_details(self, mc_mr_class, init_worker):
assert not os.path.exists(self.DESTDIR_CHROOT)
mc_mr_class.return_value.build_pkg.return_value = {
@@ -561,7 +561,7 @@ class TestDispatcher(object):
assert self.job.results == self.test_time
assert os.path.exists(self.DESTDIR_CHROOT)
- @mock.patch("backend.dispatcher.MockRemote")
+ @mock.patch("backend.daemons.dispatcher.MockRemote")
def test_do_job_mr_error(self, mc_mr_class, init_worker):
mc_mr_class.return_value.build_pkg.side_effect = MockRemoteError("foobar")
@@ -569,7 +569,7 @@ class TestDispatcher(object):
self.worker.do_job(self.vm_ip, self.job)
assert self.job.status == BuildStatus.FAILURE
- @mock.patch("backend.dispatcher.fedmsg")
+ @mock.patch("backend.daemons.dispatcher.fedmsg")
def test_init_fedmsg(self, mc_fedmsg, init_worker):
self.worker.init_fedmsg()
assert not mc_fedmsg.init.called
@@ -641,7 +641,7 @@ class TestDispatcher(object):
assert self.worker.obtain_job() is None
assert not self.worker.pkg_built_before.called
- @mock.patch("backend.dispatcher.time")
+ @mock.patch("backend.daemons.dispatcher.time")
def test_run(self, mc_time, init_worker):
self.worker.init_fedmsg = MagicMock()
self.worker.obtain_job = MagicMock()
@@ -664,7 +664,7 @@ class TestDispatcher(object):
assert self.worker.obtain_job.called
assert self.worker.terminate_instance.called
- @mock.patch("backend.dispatcher.time")
+ @mock.patch("backend.daemons.dispatcher.time")
def test_run_finalize(self, mc_time, init_worker):
self.worker.init_fedmsg = MagicMock()
self.worker.obtain_job = MagicMock()
@@ -690,7 +690,7 @@ class TestDispatcher(object):
assert self.worker.obtain_job.called
assert self.worker.terminate_instance.called
- @mock.patch("backend.dispatcher.time")
+ @mock.patch("backend.daemons.dispatcher.time")
def test_run_no_job(self, mc_time, init_worker):
self.worker.init_fedmsg = MagicMock()
self.worker.obtain_job = MagicMock()
9 years, 5 months
[copr] master: [backend] improved tests to cover some exceptions __str__ methods, fixed related bug (9e96af0)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 9e96af0040df036626dfd9752772bdc283c96f45
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Wed Dec 3 17:39:38 2014 +0100
[backend] improved tests to cover some exceptions __str__ methods, fixed related bug
>---------------------------------------------------------------
backend/backend/exceptions.py | 4 ++--
backend/tests/test_sign.py | 16 ++++++++++++----
2 files changed, 14 insertions(+), 6 deletions(-)
diff --git a/backend/backend/exceptions.py b/backend/backend/exceptions.py
index 61f7447..84ccd04 100644
--- a/backend/backend/exceptions.py
+++ b/backend/backend/exceptions.py
@@ -65,9 +65,9 @@ class CoprKeygenRequestError(MockRemoteError):
def __str__(self):
out = super(CoprKeygenRequestError, self).__str__()
- out += "\nrequest to copr-keygen: {0}\n".format(self.request)
+ out += "\nrequest to copr-keygen: {}\n".format(self.request)
if self.response:
- out += "status code: {1}\n" "response content: {2}\n" \
+ out += "status code: {}\n" "response content: {}\n" \
.format(self.response.status_code, self.response.content)
return out
diff --git a/backend/tests/test_sign.py b/backend/tests/test_sign.py
index 7271c3c..0d5436b 100644
--- a/backend/tests/test_sign.py
+++ b/backend/tests/test_sign.py
@@ -86,9 +86,11 @@ class TestSign(object):
mc_handle.returncode = 1
mc_popen.return_value = mc_handle
- with pytest.raises(CoprSignNoKeyError):
+ with pytest.raises(CoprSignNoKeyError) as err:
get_pubkey(self.username, self.projectname)
+ assert "There are no gpg keys for user foo in keyring" in str(err)
+
@mock.patch("backend.sign.Popen")
def test_get_pubkey_unknown_error(self, mc_popen):
mc_handle = MagicMock()
@@ -96,9 +98,11 @@ class TestSign(object):
mc_handle.returncode = 1
mc_popen.return_value = mc_handle
- with pytest.raises(CoprSignError):
+ with pytest.raises(CoprSignError) as err:
get_pubkey(self.username, self.projectname)
+ assert "Failed to get user pubkey" in str(err)
+
@mock.patch("backend.sign.Popen")
def test_get_pubkey_outfile(self, mc_popen, tmp_dir):
mc_handle = MagicMock()
@@ -177,18 +181,22 @@ class TestSign(object):
@mock.patch("backend.sign.request")
def test_create_user_keys_error_1(self, mc_request):
mc_request.side_effect = IOError()
- with pytest.raises(CoprKeygenRequestError):
+ with pytest.raises(CoprKeygenRequestError) as err:
create_user_keys(self.username, self.projectname, self.opts)
+ assert "Failed to create key-pair" in str(err)
+
@mock.patch("backend.sign.request")
def test_create_user_keys(self, mc_request):
for code in [400, 401, 404, 500, 599]:
mc_request.return_value.status_code = code
+ mc_request.return_value.content = "error: {}".format(code)
- with pytest.raises(CoprKeygenRequestError):
+ with pytest.raises(CoprKeygenRequestError) as err:
create_user_keys(self.username, self.projectname, self.opts)
+ print(str(err))
@mock.patch("backend.sign._sign_one")
@mock.patch("backend.sign.create_user_keys")
9 years, 5 months
[copr] master: [backend] unittest for backend.daemons.log (fa4e54a)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit fa4e54a24a7f52d16d49149c8a99b11c12f18294
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Wed Dec 3 17:21:48 2014 +0100
[backend] unittest for backend.daemons.log
>---------------------------------------------------------------
backend/backend/daemons/log.py | 42 +++++++--
backend/{ => docs}/requirements.txt | 4 +-
backend/docs/source/README.rst | 4 +
backend/docs/source/conf.py | 12 +++
backend/requirements.txt | 1 +
backend/tests/deamons/test_log.py | 170 +++++++++++++++++++++++++++++++++++
6 files changed, 222 insertions(+), 11 deletions(-)
diff --git a/backend/backend/daemons/log.py b/backend/backend/daemons/log.py
index f46d489..6452fe7 100644
--- a/backend/backend/daemons/log.py
+++ b/backend/backend/daemons/log.py
@@ -6,21 +6,27 @@ from __future__ import division
from __future__ import absolute_import
import logging
-import multiprocessing
+from multiprocessing import Process
import os
import sys
import time
-import setproctitle
+from setproctitle import setproctitle
-class CoprBackendLog(multiprocessing.Process):
+class CoprBackendLog(Process):
- """log mechanism where items from the events queue get recorded"""
+ """Log mechanism where items from the events queue get recorded"""
def __init__(self, opts, events):
+ """
+ :param Bunch opts: backend config
+ :param events: multiprocessing.Queue to listen
+ for events from other backend components
+
+ """
# base class initialization
- multiprocessing.Process.__init__(self, name="logger")
+ Process.__init__(self, name="logger")
self.opts = opts
self.events = events
@@ -30,14 +36,27 @@ class CoprBackendLog(multiprocessing.Process):
os.makedirs(logdir, mode=0o750)
def setup_log_handler(self):
+ """
+ Configures standard python logger
+ """
sys.stderr.write("Running setup handler {} \n".format(self.opts))
# setup a log file to write to
logging.basicConfig(filename=self.opts.logfile, level=logging.DEBUG)
- self.log({"when": time.time(), "who": self.__class__.__name__, "what": "Logger iniated"})
+ self.log({"when": time.time(), "who": self.__class__.__name__,
+ "what": "Logger initiated"})
def log(self, event):
+ """
+ Format event into the log message
+
+ :param event: dict-like object
+ Expected **event** keys:
+ - `when`: unixtime
+ - `who`: event producer [worker|logger|job|main]
+ - `what`: content
+ """
when = time.strftime("%F %T", time.gmtime(event["when"]))
msg = "{0} : {1}: {2}".format(when,
event["who"],
@@ -51,16 +70,19 @@ class CoprBackendLog(multiprocessing.Process):
except (IOError, OSError) as e:
sys.stderr.write("Could not write to logfile {0} - {1}\n".format(
- self.logfile, e))
+ self.opts.logfile, e))
# event format is a dict {when:time, who:[worker|logger|job|main],
# what:str}
def run(self):
- setproctitle.setproctitle("CoprLog")
+ """
+ Starts logger process
+ """
+ setproctitle("CoprLog")
self.setup_log_handler()
- abort = False
+
try:
- while not abort:
+ while True:
event = self.events.get()
if "when" in event and "who" in event and "what" in event:
self.log(event)
diff --git a/backend/requirements.txt b/backend/docs/requirements.txt
similarity index 90%
copy from backend/requirements.txt
copy to backend/docs/requirements.txt
index f98e8fd..a8cf351 100644
--- a/backend/requirements.txt
+++ b/backend/docs/requirements.txt
@@ -1,11 +1,13 @@
PyYAML
-# ansible
+ansible
setproctitle
redis
retask
python-daemon
bunch
IPy
+
+
# documentation
sphinx
sphinx-argparse
diff --git a/backend/docs/source/README.rst b/backend/docs/source/README.rst
index 1ca375a..07cff11 100644
--- a/backend/docs/source/README.rst
+++ b/backend/docs/source/README.rst
@@ -1,3 +1,7 @@
README
======
+COPR is a lightweight build system. It allows you to create a new project in the WebUI,
+submit new builds, and COPR will create a yum repository from the latest builds.
+
+This package contains the backend. Copr-backend is responsible for the build process, package signing, and managing project repositories.
diff --git a/backend/docs/source/conf.py b/backend/docs/source/conf.py
index f8fdf6c..1f6cfd2 100644
--- a/backend/docs/source/conf.py
+++ b/backend/docs/source/conf.py
@@ -242,3 +242,15 @@ texinfo_documents = [
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
+
+# ---
+# enabling __init__ for automodule
+
+autodoc_member_order = "bysource"
+autodoc_default_flags = [
+ 'members',
+ 'undoc-members',
+ 'private-members',
+ 'special-members',
+ # 'inherited-members'
+]
diff --git a/backend/requirements.txt b/backend/requirements.txt
index f98e8fd..71e1cdf 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -1,3 +1,4 @@
+setproctitle
PyYAML
# ansible
setproctitle
diff --git a/backend/tests/deamons/test_log.py b/backend/tests/deamons/test_log.py
new file mode 100644
index 0000000..c1ff665
--- /dev/null
+++ b/backend/tests/deamons/test_log.py
@@ -0,0 +1,170 @@
+# coding: utf-8
+import copy
+
+from collections import defaultdict
+import logging
+from pprint import pprint
+from bunch import Bunch
+import time
+from backend.exceptions import BuilderError, BuilderTimeOutError
+
+import tempfile
+import shutil
+import os
+
+import six
+
+if six.PY3:
+ from unittest import mock
+ from unittest.mock import patch, MagicMock
+else:
+ import mock
+ from mock import patch, MagicMock
+
+import pytest
+
+import backend.daemons.log as log_module
+from backend.daemons.log import CoprBackendLog
+
+
+
+@pytest.yield_fixture
+def mc_logging():
+ with mock.patch("backend.daemons.log.logging") as mc_logging:
+ yield mc_logging
+
+@pytest.yield_fixture
+def mc_setproctitle():
+ with mock.patch("backend.daemons.log.setproctitle") as mc_spt:
+ yield mc_spt
+
+class TestLog(object):
+
+ def setup_method(self, method):
+
+ self.mc_mpp_patcher = mock.patch("backend.daemons.log.Process")
+ self.mc_mpp = self.mc_mpp_patcher.start()
+
+ self.test_time = time.time()
+ subdir = "test_createrepo_{}".format(time.time())
+ self.tmp_dir_path = os.path.join(tempfile.gettempdir(), subdir)
+ os.mkdir(self.tmp_dir_path)
+
+ self.log_dir = os.path.join(self.tmp_dir_path, "copr")
+ self.log_file = os.path.join(self.log_dir, "copr.log")
+ self.opts = Bunch(
+ logfile=self.log_file,
+ verbose=False,
+ )
+
+ self.queue = MagicMock()
+
+ def teardown_method(self, method):
+ self.mc_mpp_patcher.stop()
+
+ shutil.rmtree(self.tmp_dir_path)
+ if hasattr(self, "cbl"):
+ del self.cbl
+
+ for handler in logging.root.handlers[:]:
+ logging.root.removeHandler(handler)
+
+ @pytest.fixture
+ def init_log(self):
+ self.cbl = CoprBackendLog(self.opts, self.queue)
+
+ def test_constructor(self):
+ # with mock.patch("backend.daemons.log.Process.__init__") as mc_pi:
+ assert not os.path.exists(self.log_file)
+ assert not os.path.exists(self.log_dir)
+ cbl = CoprBackendLog(self.opts, self.queue)
+
+ # creates log dir
+ assert os.path.exists(self.log_dir)
+
+ # calls parent init
+ # TODO: it's tricky to check call to __init__
+
+ def test_setup_log_handler(self, init_log, mc_logging, capsys):
+ self.cbl.log = MagicMock()
+
+ self.cbl.setup_log_handler()
+
+ stdout, stderr = capsys.readouterr()
+
+ assert "Running setup handler" in stderr
+ assert mc_logging.basicConfig.called
+ assert mc_logging.basicConfig.call_args[1]["filename"] == self.log_file
+ assert self.cbl.log.called
+
+ def test_log(self, init_log, capsys):
+ event = {"who": "main", "when": self.test_time, "what": "foobar"}
+
+ self.cbl.setup_log_handler()
+ self.cbl.log(event)
+
+ stdout, stderr = capsys.readouterr()
+ assert not stdout
+ assert "Running setup handler" in stderr
+ assert "foobar" not in stderr
+
+ assert os.path.exists(self.log_file)
+ with open(self.log_file) as handle:
+ data = handle.read()
+ assert "Logger initiated" in data
+ assert "foobar" in data
+
+ def test_log_verbose(self, init_log, capsys):
+ self.cbl.opts.verbose = True
+ event = {"who": "main", "when": self.test_time, "what": "foobar"}
+
+ self.cbl.setup_log_handler()
+ self.cbl.log(event)
+
+ stdout, stderr = capsys.readouterr()
+ assert not stdout
+ assert "Running setup handler" in stderr
+ assert "foobar" in stderr
+
+ assert os.path.exists(self.log_file)
+ with open(self.log_file) as handle:
+ data = handle.read()
+ assert "Logger initiated" in data
+ assert "foobar" in data
+
+ def test_log_error(self, init_log, mc_logging, capsys):
+ mc_logging.debug.side_effect = IOError("error_message")
+ event = {"who": "main", "when": self.test_time, "what": "foobar"}
+
+ self.cbl.setup_log_handler()
+ self.cbl.log(event)
+
+ stdout, stderr = capsys.readouterr()
+ assert not stdout
+ assert "Running setup handler" in stderr
+ assert "foobar" not in stderr
+ assert "Could not write to logfile" in stderr
+
+ def test_run(self, init_log, mc_setproctitle, capsys):
+ self.cbl.setup_log_handler = MagicMock()
+
+ mc_log = MagicMock()
+ self.cbl.log = mc_log
+
+ self.queue.get.side_effect = [
+ {"who": "main", "when": self.test_time, "what": "foobar"},
+ {"who": "main", "what": "foobar"},
+ {"who": "main", "when": self.test_time + 1, "what": "foobar"},
+ ]
+
+ mc_log.side_effect = [
+ None,
+ KeyboardInterrupt()
+ ]
+ self.cbl.run()
+
+ expected = [
+ mock.call({'what': 'foobar', 'who': 'main', 'when': self.test_time}),
+ mock.call({'what': 'foobar', 'who': 'main', 'when': self.test_time + 1})
+ ]
+ assert expected == mc_log.call_args_list
9 years, 5 months
[copr] master: [backend] adding sphinx documentation, initial commit (666fc1f)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 666fc1f2ef0865b9adb6ee6bc4e02ffef4b1e8e2
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Wed Dec 3 14:14:16 2014 +0100
[backend] adding sphinx documentation, initial commit
>---------------------------------------------------------------
.gitignore | 2 +
backend/backend/daemons/backend.py | 30 ++++++++++++++--
backend/backend/sign.py | 7 +++-
{python => backend}/docs/Makefile | 14 ++++----
backend/docs/source/Autodoc.rst | 38 ++++++++++++++++++++
backend/docs/source/Configuration.rst | 10 +++++
backend/docs/source/INSTALL.rst | 2 +
backend/docs/source/README.rst | 3 ++
{python/docs => backend/docs/source}/conf.py | 38 +++++++++----------
backend/docs/source/index.rst | 28 ++++++++++++++
backend/docs/source/package/actions.rst | 6 +++
backend/docs/source/package/constants.rst | 6 +++
backend/docs/source/package/createrepo.rst | 6 +++
backend/docs/source/package/daemons/backend.rst | 6 +++
backend/docs/source/package/daemons/job_grab.rst | 6 +++
backend/docs/source/package/daemons/log.rst | 6 +++
backend/docs/source/package/dispatcher.rst | 6 +++
backend/docs/source/package/exceptions.rst | 6 +++
backend/docs/source/package/frontend.rst | 6 +++
backend/docs/source/package/helpers.rst | 6 +++
backend/docs/source/package/job.rst | 6 +++
.../docs/source/package/mockremote/__init__.rst | 6 +++
backend/docs/source/package/mockremote/builder.rst | 6 +++
.../docs/source/package/mockremote/callback.rst | 6 +++
backend/docs/source/package/sign.rst | 6 +++
backend/docs/source/run_scripts.rst | 6 +++
backend/docs/source/scripts/copr_be.rst | 9 +++++
backend/docs/source/scripts/prune_repo.rst | 19 ++++++++++
backend/requirements.txt | 3 ++
29 files changed, 267 insertions(+), 32 deletions(-)
diff --git a/.gitignore b/.gitignore
index 7467ee8..3a6d37e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,5 @@ documentation/python-doc
_venv/
_report/
_tmp/
+
+backend/docs/build
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
index da1f13e..14a3105 100644
--- a/backend/backend/daemons/backend.py
+++ b/backend/backend/daemons/backend.py
@@ -28,10 +28,15 @@ from . import CoprJobGrab, CoprBackendLog
class CoprBackend(object):
"""
- Core process - starts/stops/initializes workers
+ Core process - starts/stops/initializes workers and other backend components
"""
def __init__(self, config_file=None, ext_opts=None):
+ """
+
+ :param config_file: path to the backend configuration file
+ :param ext_opts: additional options for backend
+ """
# read in config file
# put all the config items into a single self.opts bunch
@@ -80,9 +85,16 @@ class CoprBackend(object):
os.makedirs(self.opts.worker_logdir, mode=0o750)
def event(self, what):
+ """
+ Put a new event into the queue
+ :param what: Event content
+ """
self.events.put({"when": time.time(), "who": "main", "what": what})
def update_conf(self):
+ """
+ Update backend config from config file
+ """
self.opts = self.config_reader.read()
def clean_task_queues(self):
@@ -114,9 +126,10 @@ class CoprBackend(object):
""" Removes dead workers from the pool
:return list: alive workers
+
:raises:
- :CoprBackendError: when got dead worker and
- has option "exit_on_worker" enabled
+ :py:class:`~backend.exceptions.CoprBackendError` when got dead worker and
+ option "exit_on_worker" is enabled
"""
group_id = group["id"]
preserved_workers = []
@@ -174,6 +187,17 @@ class CoprBackend(object):
def run_backend(opts):
+ """
+ Start main backend daemon
+
+ :param opts: Bunch object with command line options
+
+ Expected **opts** fields:
+ - `config_file` - path to the backend config file
+ - `daemonize` - boolean flag to enable daemon mode
+ - `pidfile` - path to the backend pidfile
+
+ """
try:
context = daemon.DaemonContext(
pidfile=lockfile.FileLock(opts.pidfile),
diff --git a/backend/backend/sign.py b/backend/backend/sign.py
index 21b6cf4..e44b60f 100755
--- a/backend/backend/sign.py
+++ b/backend/backend/sign.py
@@ -34,7 +34,8 @@ def get_pubkey(username, projectname, outfile=None):
:param outfile: [optional] file to write obtained key
:return: public keys
- :raises: CoprSignError or CoprSignNoKeyError
+ :raises CoprSignError: failed to retrieve key, see error message
+ :raises CoprSignNoKeyError: if there are no such user in keyring
"""
usermail = create_gpg_email(username, projectname)
cmd = ["sudo", SIGN_BINARY, "-u", usermail, "-p"]
@@ -107,8 +108,10 @@ def sign_rpms_in_dir(username, projectname, path, opts, callback=None):
:param path: directory with rpms to be signed
:param Bunch opts: backend config
- :param .mockremote.DefaultCallBack callback: object to log progress,
+ :param callback: :py:class:`backend.mockremote.DefaultCallBack` object to log progress,
two methods are utilised: ``log`` and ``error``
+
+ :raises: :py:class:`backend.exceptions.CoprSignError` failed to sign at least one package
"""
rpm_list = [
os.path.join(path, filename)
diff --git a/python/docs/Makefile b/backend/docs/Makefile
similarity index 94%
copy from python/docs/Makefile
copy to backend/docs/Makefile
index 217ec49..87a774f 100644
--- a/python/docs/Makefile
+++ b/backend/docs/Makefile
@@ -5,14 +5,14 @@
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
-BUILDDIR = _build
+BUILDDIR = build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
@@ -77,17 +77,17 @@ qthelp:
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
- @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/python-copr.qhcp"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/copr-backend.qhcp"
@echo "To view the help file:"
- @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/python-copr.qhc"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/copr-backend.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
- @echo "# mkdir -p $$HOME/.local/share/devhelp/python-copr"
- @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/python-copr"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/copr-backend"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/copr-backend"
@echo "# devhelp"
epub:
diff --git a/backend/docs/source/Autodoc.rst b/backend/docs/source/Autodoc.rst
new file mode 100644
index 0000000..5c49cd7
--- /dev/null
+++ b/backend/docs/source/Autodoc.rst
@@ -0,0 +1,38 @@
+Auto documentation
+==================
+
+backend.
+--------
+Root backend modules
+
+.. toctree::
+ package/dispatcher
+ package/actions
+ package/job
+ package/frontend
+ package/constants
+ package/sign
+ package/createrepo
+ package/helpers
+ package/exceptions
+
+
+backend.daemons.
+----------------
+Backend daemons, started by copr-be.py
+
+.. toctree::
+ package/daemons/backend.rst
+ package/daemons/job_grab.rst
+ package/daemons/log.rst
+
+backend.mockremote.
+-------------------
+Package dedicated to executing job builds on remote VMs.
+
+.. toctree::
+ package/mockremote/__init__
+ package/mockremote/builder
+ package/mockremote/callback
+
+
diff --git a/backend/docs/source/Configuration.rst b/backend/docs/source/Configuration.rst
new file mode 100644
index 0000000..5f449f4
--- /dev/null
+++ b/backend/docs/source/Configuration.rst
@@ -0,0 +1,10 @@
+Configuration
+=============
+
+Copr backend is configured by `.ini` file.
+Default location is: ``/etc/copr/copr-be.conf``
+
+Example config shows all available options.
+
+.. literalinclude:: ../../conf/copr-be.conf.example
+ :language: ini
diff --git a/backend/docs/source/INSTALL.rst b/backend/docs/source/INSTALL.rst
new file mode 100644
index 0000000..622363a
--- /dev/null
+++ b/backend/docs/source/INSTALL.rst
@@ -0,0 +1,2 @@
+INSTALL
+=======
diff --git a/backend/docs/source/README.rst b/backend/docs/source/README.rst
new file mode 100644
index 0000000..1ca375a
--- /dev/null
+++ b/backend/docs/source/README.rst
@@ -0,0 +1,3 @@
+README
+======
+
diff --git a/python/docs/conf.py b/backend/docs/source/conf.py
similarity index 90%
copy from python/docs/conf.py
copy to backend/docs/source/conf.py
index ff2001e..f8fdf6c 100644
--- a/python/docs/conf.py
+++ b/backend/docs/source/conf.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
#
-# python-copr documentation build configuration file, created by
-# sphinx-quickstart on Thu Sep 4 16:44:28 2014.
+# copr-backend documentation build configuration file, created by
+# sphinx-quickstart on Tue Dec 2 15:51:01 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
@@ -13,12 +13,10 @@
import sys, os
-
-
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('..'))
+sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
@@ -27,7 +25,9 @@ sys.path.insert(0, os.path.abspath('..'))
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
+ 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
+extensions += ['sphinxarg.ext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -42,17 +42,17 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'python-copr'
-copyright = u'2014, Valentin Gologuzov'
+project = u'copr-backend'
+copyright = u'2014, Copr dev team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
-version = '1.52'
+version = '0.48'
# The full version, including alpha/beta/rc tags.
-release = '1.52'
+release = '0.48'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -66,7 +66,7 @@ release = '1.52'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
@@ -166,7 +166,7 @@ html_static_path = ['_static']
#html_file_suffix = None
# Output file base name for HTML help builder.
-htmlhelp_basename = 'python-coprdoc'
+htmlhelp_basename = 'copr-backenddoc'
# -- Options for LaTeX output --------------------------------------------------
@@ -185,8 +185,8 @@ latex_elements = {
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
- ('index', 'python-copr.tex', u'python-copr Documentation',
- u'Valentin Gologuzov', 'manual'),
+ ('index', 'copr-backend.tex', u'copr-backend Documentation',
+ u'Copr dev team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -215,8 +215,8 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
- ('index', 'python-copr', u'python-copr Documentation',
- [u'Valentin Gologuzov'], 1)
+ ('index', 'copr-backend', u'copr-backend Documentation',
+ [u'Copr dev team'], 1)
]
# If true, show URL addresses after external links.
@@ -229,8 +229,8 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- ('index', 'python-copr', u'python-copr Documentation',
- u'Valentin Gologuzov', 'python-copr', 'One line description of project.',
+ ('index', 'copr-backend', u'copr-backend Documentation',
+ u'Copr dev team', 'copr-backend', 'One line description of project.',
'Miscellaneous'),
]
@@ -242,5 +242,3 @@ texinfo_documents = [
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
-
-#autodoc_member_order = 'bysource'
diff --git a/backend/docs/source/index.rst b/backend/docs/source/index.rst
new file mode 100644
index 0000000..58df8e7
--- /dev/null
+++ b/backend/docs/source/index.rst
@@ -0,0 +1,28 @@
+.. copr-backend documentation master file, created by
+ sphinx-quickstart on Tue Dec 2 15:51:01 2014.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to copr-backend's documentation!
+========================================
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ README
+ INSTALL
+ Configuration
+ Autodoc
+ run_scripts
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/backend/docs/source/package/actions.rst b/backend/docs/source/package/actions.rst
new file mode 100644
index 0000000..804994d
--- /dev/null
+++ b/backend/docs/source/package/actions.rst
@@ -0,0 +1,6 @@
+backend.actions
+==================
+
+.. automodule:: backend.actions
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/constants.rst b/backend/docs/source/package/constants.rst
new file mode 100644
index 0000000..ad9b418
--- /dev/null
+++ b/backend/docs/source/package/constants.rst
@@ -0,0 +1,6 @@
+backend.constants
+==================
+
+.. automodule:: backend.constants
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/createrepo.rst b/backend/docs/source/package/createrepo.rst
new file mode 100644
index 0000000..98fbdeb
--- /dev/null
+++ b/backend/docs/source/package/createrepo.rst
@@ -0,0 +1,6 @@
+backend.createrepo
+==================
+
+.. automodule:: backend.createrepo
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/daemons/backend.rst b/backend/docs/source/package/daemons/backend.rst
new file mode 100644
index 0000000..c4b15dc
--- /dev/null
+++ b/backend/docs/source/package/daemons/backend.rst
@@ -0,0 +1,6 @@
+backend.daemons.backend
+=======================
+
+.. automodule:: backend.daemons.backend
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/daemons/job_grab.rst b/backend/docs/source/package/daemons/job_grab.rst
new file mode 100644
index 0000000..43793a2
--- /dev/null
+++ b/backend/docs/source/package/daemons/job_grab.rst
@@ -0,0 +1,6 @@
+backend.daemons.job_grab
+========================
+
+.. automodule:: backend.daemons.job_grab
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/daemons/log.rst b/backend/docs/source/package/daemons/log.rst
new file mode 100644
index 0000000..aa112c6
--- /dev/null
+++ b/backend/docs/source/package/daemons/log.rst
@@ -0,0 +1,6 @@
+backend.daemons.log
+===================
+
+.. automodule:: backend.daemons.log
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/dispatcher.rst b/backend/docs/source/package/dispatcher.rst
new file mode 100644
index 0000000..5d185f4
--- /dev/null
+++ b/backend/docs/source/package/dispatcher.rst
@@ -0,0 +1,6 @@
+backend.dispatcher
+==================
+
+.. automodule:: backend.dispatcher
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/exceptions.rst b/backend/docs/source/package/exceptions.rst
new file mode 100644
index 0000000..97a0580
--- /dev/null
+++ b/backend/docs/source/package/exceptions.rst
@@ -0,0 +1,6 @@
+backend.exceptions
+==================
+
+.. automodule:: backend.exceptions
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/frontend.rst b/backend/docs/source/package/frontend.rst
new file mode 100644
index 0000000..169ad18
--- /dev/null
+++ b/backend/docs/source/package/frontend.rst
@@ -0,0 +1,6 @@
+backend.frontend
+==================
+
+.. automodule:: backend.frontend
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/helpers.rst b/backend/docs/source/package/helpers.rst
new file mode 100644
index 0000000..016f183
--- /dev/null
+++ b/backend/docs/source/package/helpers.rst
@@ -0,0 +1,6 @@
+backend.helpers
+==================
+
+.. automodule:: backend.helpers
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/job.rst b/backend/docs/source/package/job.rst
new file mode 100644
index 0000000..d134f1d
--- /dev/null
+++ b/backend/docs/source/package/job.rst
@@ -0,0 +1,6 @@
+backend.job
+==================
+
+.. automodule:: backend.job
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/mockremote/__init__.rst b/backend/docs/source/package/mockremote/__init__.rst
new file mode 100644
index 0000000..c0e8d5b
--- /dev/null
+++ b/backend/docs/source/package/mockremote/__init__.rst
@@ -0,0 +1,6 @@
+backend.mockremote.__init__
+===========================
+
+.. automodule:: backend.mockremote.__init__
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/mockremote/builder.rst b/backend/docs/source/package/mockremote/builder.rst
new file mode 100644
index 0000000..5289572
--- /dev/null
+++ b/backend/docs/source/package/mockremote/builder.rst
@@ -0,0 +1,6 @@
+backend.mockremote.builder
+==========================
+
+.. automodule:: backend.mockremote.builder
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/mockremote/callback.rst b/backend/docs/source/package/mockremote/callback.rst
new file mode 100644
index 0000000..33f271f
--- /dev/null
+++ b/backend/docs/source/package/mockremote/callback.rst
@@ -0,0 +1,6 @@
+backend.mockremote.callback
+===========================
+
+.. automodule:: backend.mockremote.callback
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/package/sign.rst b/backend/docs/source/package/sign.rst
new file mode 100644
index 0000000..8d02748
--- /dev/null
+++ b/backend/docs/source/package/sign.rst
@@ -0,0 +1,6 @@
+backend.sign
+==================
+
+.. automodule:: backend.sign
+ :members:
+ :undoc-members:
diff --git a/backend/docs/source/run_scripts.rst b/backend/docs/source/run_scripts.rst
new file mode 100644
index 0000000..a5afb17
--- /dev/null
+++ b/backend/docs/source/run_scripts.rst
@@ -0,0 +1,6 @@
+
+Run scripts
+-----------
+.. toctree::
+ scripts/copr_be.rst
+ scripts/prune_repo.rst
diff --git a/backend/docs/source/scripts/copr_be.rst b/backend/docs/source/scripts/copr_be.rst
new file mode 100644
index 0000000..a305fdc
--- /dev/null
+++ b/backend/docs/source/scripts/copr_be.rst
@@ -0,0 +1,9 @@
+Start copr backend
+==================
+Main script to run copr-backend: ``/usr/bin/copr-be.py``.
+It uses the backend config, whose default location is ``/etc/copr/copr-be.conf``.
+
+
+TODO: rename to copr_be.py; use `http://sphinx-argparse.readthedocs.org/en/latest/`
+
+
diff --git a/backend/docs/source/scripts/prune_repo.rst b/backend/docs/source/scripts/prune_repo.rst
new file mode 100644
index 0000000..c55d1d7
--- /dev/null
+++ b/backend/docs/source/scripts/prune_repo.rst
@@ -0,0 +1,19 @@
+Prune repository
+================
+
+To prune result builds use ``run/copr_prune_results.py``.
+Internally it would invoke sh script ``run/copr_prune_old_builds.sh``
+
+copr_prune_results.py
+---------------------
+Cleans up old builds. Doesn't affect projects with the ``auto_createrepo`` option disabled.
+
+
+Doesn't have startup options. Uses backend config with default location ``/etc/copr/copr-be.conf``.
+Can be changed by setting environment variable **BACKEND_CONFIG**
+
+copr_prune_old_builds.sh
+------------------------
+
+
+copr_prune_old_builds.sh REPOPATH DAYS
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 9576e53..f98e8fd 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -6,3 +6,6 @@ retask
python-daemon
bunch
IPy
+# documentation
+sphinx
+sphinx-argparse
9 years, 5 months
[copr] master: [backend] refactored backend/daemons (713ea55)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 713ea55dea78d5f58a223c6b1807f7153626fd53
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Tue Dec 2 15:33:36 2014 +0100
[backend] refactored backend/daemons
>---------------------------------------------------------------
backend/backend/daemons/__init__.py | 2 +-
backend/backend/daemons/backend.py | 75 +++++++++++++++++++++--------------
backend/backend/daemons/job_grab.py | 48 ++++++++++++----------
backend/backend/daemons/log.py | 8 ++--
4 files changed, 77 insertions(+), 56 deletions(-)
diff --git a/backend/backend/daemons/__init__.py b/backend/backend/daemons/__init__.py
index d60155d..fa27477 100644
--- a/backend/backend/daemons/__init__.py
+++ b/backend/backend/daemons/__init__.py
@@ -1,5 +1,5 @@
# coding: utf-8
from .job_grab import CoprJobGrab
-from .log import CoprLog
+from .log import CoprBackendLog
from .backend import CoprBackend, run_backend
diff --git a/backend/backend/daemons/backend.py b/backend/backend/daemons/backend.py
index ff942c8..da1f13e 100644
--- a/backend/backend/daemons/backend.py
+++ b/backend/backend/daemons/backend.py
@@ -22,7 +22,7 @@ from retask import ConnectionError
from ..exceptions import CoprBackendError
from ..dispatcher import Worker
from ..helpers import BackendConfigReader
-from . import CoprJobGrab, CoprLog
+from . import CoprJobGrab, CoprBackendLog
class CoprBackend(object):
@@ -67,7 +67,7 @@ class CoprBackend(object):
# what:str}
# create logger
- self._logger = CoprLog(self.opts, self.events)
+ self._logger = CoprBackendLog(self.opts, self.events)
self._logger.start()
self.event("Starting up Job Grabber")
@@ -94,6 +94,44 @@ class CoprBackend(object):
raise CoprBackendError(
"Could not connect to a task queue. Is Redis running?")
+ def spin_up_workers_by_group(self, group):
+ group_id = group["id"]
+ # this handles starting/growing the number of workers
+ if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
+ self.event("Spinning up more workers")
+ for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
+ self.max_worker_num_by_group_id[group_id] += 1
+ w = Worker(
+ self.opts, self.events,
+ self.max_worker_num_by_group_id[group_id],
+ group_id, lock=self.lock
+ )
+
+ self.workers_by_group_id[group_id].append(w)
+ w.start()
+
+ def prune_dead_workers_by_group(self, group):
+ """ Removes dead workers from the pool
+
+ :return list: alive workers
+ :raises:
+ :CoprBackendError: when got dead worker and
+ has option "exit_on_worker" enabled
+ """
+ group_id = group["id"]
+ preserved_workers = []
+ for w in self.workers_by_group_id[group_id]:
+ if not w.is_alive():
+ self.event("Worker {0} died unexpectedly".format(w.worker_num))
+ if self.opts.exit_on_worker:
+ raise CoprBackendError(
+ "Worker died unexpectedly, exiting")
+ else:
+ w.terminate() # kill it with a fire
+ else:
+ preserved_workers.append(w)
+ return preserved_workers
+
def run(self):
self.abort = False
while not self.abort:
@@ -102,24 +140,11 @@ class CoprBackend(object):
for group in self.opts.build_groups:
group_id = group["id"]
- self.event(
- "# jobs in {0} queue: {1}"
- .format(group["name"], self.task_queues[group_id].length)
- )
- # this handles starting/growing the number of workers
- if len(self.workers_by_group_id[group_id]) < group["max_workers"]:
- self.event("Spinning up more workers")
- for _ in range(group["max_workers"] - len(self.workers_by_group_id[group_id])):
- self.max_worker_num_by_group_id[group_id] += 1
- w = Worker(
- self.opts, self.events,
- self.max_worker_num_by_group_id[group_id],
- group_id, lock=self.lock
- )
-
- self.workers_by_group_id[group_id].append(w)
- w.start()
+ self.event("# jobs in {0} queue: {1}"
+ .format(group["name"], self.task_queues[group_id].length))
+ self.spin_up_workers_by_group(group)
self.event("Finished starting worker processes")
+
# FIXME - prune out workers
# if len(self.workers) > self.opts.num_workers:
# killnum = len(self.workers) - self.opts.num_workers
@@ -128,17 +153,7 @@ class CoprBackend(object):
# FIXME - if a worker bombs out - we need to check them
# and startup a new one if it happens
# check for dead workers and abort
- preserved_workers = []
- for w in self.workers_by_group_id[group_id]:
- if not w.is_alive():
- self.event("Worker {0} died unexpectedly".format(w.worker_num))
- if self.opts.exit_on_worker:
- raise CoprBackendError(
- "Worker died unexpectedly, exiting")
- else:
- w.terminate() # kill it with a fire
- else:
- preserved_workers.append(w)
+ preserved_workers = self.prune_dead_workers_by_group(group)
self.workers_by_group_id[group_id] = preserved_workers
time.sleep(self.opts.sleeptime)
diff --git a/backend/backend/daemons/job_grab.py b/backend/backend/daemons/job_grab.py
index c9accd4..fc93348 100644
--- a/backend/backend/daemons/job_grab.py
+++ b/backend/backend/daemons/job_grab.py
@@ -40,11 +40,32 @@ class CoprJobGrab(multiprocessing.Process):
def event(self, what):
self.events.put({"when": time.time(), "who": "jobgrab", "what": what})
+ def process_build_task(self, task):
+ count = 0
+ if "task_id" in task and task["task_id"] not in self.added_jobs:
+ # this will ignore and throw away unconfigured architectures
+ # FIXME: don't do ^
+ arch = task["chroot"].split("-")[2]
+ for group in self.opts.build_groups:
+ if arch in group["archs"]:
+ self.added_jobs.append(task["task_id"])
+ task_obj = Task(task)
+ self.task_queues[group["id"]].enqueue(task_obj)
+ count += 1
+ break
+ return count
+
+ def process_action(self, action):
+ ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
+ frontend_callback=FrontendClient(self.opts, self.events),
+ front_url=self.opts.frontend_base_url,
+ results_root_url=self.opts.results_baseurl)
+ ao.run()
+
def load_tasks(self):
try:
- r = requests.get(
- "{0}/waiting/".format(self.opts.frontend_url),
- auth=("user", self.opts.frontend_auth))
+ r = requests.get("{0}/waiting/".format(self.opts.frontend_url),
+ auth=("user", self.opts.frontend_auth))
r_json = r.json()
except requests.RequestException as e:
@@ -61,30 +82,15 @@ class CoprJobGrab(multiprocessing.Process):
self.event("{0} jobs returned".format(len(r_json["builds"])))
count = 0
for task in r_json["builds"]:
- if "task_id" in task and task["task_id"] not in self.added_jobs:
- # this will ignore and throw away unconfigured architectures
- # FIXME: don't do ^
- arch = task["chroot"].split("-")[2]
- for group in self.opts.build_groups:
- if arch in group["archs"]:
- self.added_jobs.append(task["task_id"])
- task_obj = Task(task)
- self.task_queues[group["id"]].enqueue(task_obj)
- count += 1
- break
+ count += self.process_build_task(task)
if count:
self.event("New jobs: %s" % count)
if "actions" in r_json and r_json["actions"]:
- self.event("{0} actions returned".format(
- len(r_json["actions"])))
+ self.event("{0} actions returned".format(len(r_json["actions"])))
for action in r_json["actions"]:
- ao = Action(self.events, action, self.lock, destdir=self.opts.destdir,
- frontend_callback=FrontendClient(self.opts, self.events),
- front_url=self.opts.frontend_base_url,
- results_root_url=self.opts.results_baseurl)
- ao.run()
+ self.process_action(action)
def run(self):
setproctitle.setproctitle("CoprJobGrab")
diff --git a/backend/backend/daemons/log.py b/backend/backend/daemons/log.py
index f8a128c..f46d489 100644
--- a/backend/backend/daemons/log.py
+++ b/backend/backend/daemons/log.py
@@ -13,7 +13,7 @@ import time
import setproctitle
-class CoprLog(multiprocessing.Process):
+class CoprBackendLog(multiprocessing.Process):
"""log mechanism where items from the events queue get recorded"""
@@ -61,8 +61,8 @@ class CoprLog(multiprocessing.Process):
abort = False
try:
while not abort:
- e = self.events.get()
- if "when" in e and "who" in e and "what" in e:
- self.log(e)
+ event = self.events.get()
+ if "when" in event and "who" in event and "what" in event:
+ self.log(event)
except KeyboardInterrupt:
return
9 years, 5 months
[copr] master: [frontend] presentation: inverted auto_createrepo html presentation; html changes about auto_createrepo (d5d4ce9)
by vgologuz@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit d5d4ce9b7748e142f7199d8fde0b6f9763c8b5e2
Author: Valentin Gologuzov <vgologuz(a)redhat.com>
Date: Tue Dec 2 11:33:24 2014 +0100
[frontend] presentation: inverted auto_createrepo html presentation; html changes about auto_createrepo
>---------------------------------------------------------------
frontend/coprs_frontend/coprs/forms.py | 5 +--
frontend/coprs_frontend/coprs/logic/coprs_logic.py | 4 +-
frontend/coprs_frontend/coprs/models.py | 10 +++++
.../coprs/templates/coprs/_coprs_forms.html | 15 +++++---
.../coprs/templates/coprs/detail/overview.html | 38 ++++++++++----------
.../coprs/views/api_ns/api_general.py | 15 ++++++--
.../coprs/views/coprs_ns/coprs_general.py | 5 ++-
7 files changed, 56 insertions(+), 36 deletions(-)
diff --git a/frontend/coprs_frontend/coprs/forms.py b/frontend/coprs_frontend/coprs/forms.py
index 538ff92..241a4b2 100644
--- a/frontend/coprs_frontend/coprs/forms.py
+++ b/frontend/coprs_frontend/coprs/forms.py
@@ -125,10 +125,7 @@ class CoprFormFactory(object):
validators=[UrlListValidator()],
filters=[StringListFilter()])
- # FIXME: false_values used for API, json API shouldn't not be
- # validated in the same way as plain POST requests
- auto_createrepo = wtforms.BooleanField(
- default=True, false_values=["", "false", False])
+ disable_createrepo = wtforms.BooleanField(default=False)
@property
def selected_chroots(self):
diff --git a/frontend/coprs_frontend/coprs/logic/coprs_logic.py b/frontend/coprs_frontend/coprs/logic/coprs_logic.py
index 7613b66..2dba3d0 100644
--- a/frontend/coprs_frontend/coprs/logic/coprs_logic.py
+++ b/frontend/coprs_frontend/coprs/logic/coprs_logic.py
@@ -128,14 +128,14 @@ class CoprsLogic(object):
@classmethod
def add(cls, user, name, repos, selected_chroots, description,
- instructions, auto_createrepo, check_for_duplicates=False):
+ instructions, check_for_duplicates=False, **kwargs):
copr = models.Copr(name=name,
repos=repos,
owner=user,
description=description,
instructions=instructions,
created_on=int(time.time()),
- auto_createrepo=auto_createrepo)
+ **kwargs)
# form validation checks for duplicates
CoprsLogic.new(user, copr,
diff --git a/frontend/coprs_frontend/coprs/models.py b/frontend/coprs_frontend/coprs/models.py
index 111a092..dc3239d 100644
--- a/frontend/coprs_frontend/coprs/models.py
+++ b/frontend/coprs_frontend/coprs/models.py
@@ -179,6 +179,16 @@ class Copr(db.Model, helpers.Serializer):
return len(self.builds)
+ @property
+ def disable_createrepo(self):
+
+ return not self.auto_createrepo
+
+ @disable_createrepo.setter
+ def disable_createrepo(self, value):
+
+ self.auto_createrepo = not bool(value)
+
def check_copr_chroot(self, chroot):
"""
Return object of chroot, if is related to our copr or None
diff --git a/frontend/coprs_frontend/coprs/templates/coprs/_coprs_forms.html b/frontend/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
index 81c9193..0165f39 100644
--- a/frontend/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
+++ b/frontend/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
@@ -59,15 +59,18 @@
{{ render_field(form.initial_pkgs, rows=5, cols=50, placeholder='Optional - list of src.rpm to build initially. Can be skipped and submitted later.') }}
{% endif %}
- <dt class="field-label">
- <label for="auto_createrepo">Build release options</label>
- </dt>
+ <dt><b>Package repositories</b></dt>
+ <dd class="field-label-help">
+ Repository metadata is refreshed after each build. <br />
+ You can disable this with the option below. <br />
+ You will still be able to regenerate your repositories manually on the overview page.
+ </dd>
<dd>
- Create repository metadata automatically:
- {{ form.auto_createrepo }}
+ <!-- Create repository metadata automatically: -->
+ Disable automatic repository metadata generation
+ {{ form.disable_createrepo }}
</dd>
-
<dt><input type="submit" value="{% if copr %}Update{% else %}Create{% endif %}"></dt>
</dl>
</form>
diff --git a/frontend/coprs_frontend/coprs/templates/coprs/detail/overview.html b/frontend/coprs_frontend/coprs/templates/coprs/detail/overview.html
index a88dc9a..6f02e8c 100644
--- a/frontend/coprs_frontend/coprs/templates/coprs/detail/overview.html
+++ b/frontend/coprs_frontend/coprs/templates/coprs/detail/overview.html
@@ -50,11 +50,12 @@
<a id="modified-chroot-{{mock_chroot.name}}">[modified]</a>
{% endif %}
</td>
+ <td>
{% if mock_chroot.os_release != copr.active_chroots[loop.index0 - 1].os_release or
mock_chroot.os_version != copr.active_chroots[loop.index0 - 1].os_version or
loop.index0 == 0 %}
{# previous os_release-os_version were different or this is the first one #}
- <td>
+
<a href="{{
url_for(
'coprs_ns.generate_repo_file',
@@ -66,11 +67,8 @@
)|fix_url_https_frontend}}">
{{ copr.owner.name }}-{{ copr.name }}-{{mock_chroot.os_release+"-"+mock_chroot.os_version}}.repo
</a>
- </td>
- {% else %}
- <td></td>
{% endif %}
-
+ </td>
</tr>
{% else %}
<tr colspan="2"><td>No active releases</td></tr>
@@ -85,21 +83,23 @@
</ul>
{% endif %}
- {% if g.user and g.user.can_edit(copr) and not copr.auto_createrepo %}
- <div>
-
- <h2>Release options</h2>
+ {% if g.user and g.user.can_edit(copr) and copr and copr.owner and not copr.auto_createrepo %}
+ <dt>
+ <!--<h2>Release options</h2>-->
+ </dt>
+ <dd>
+ <form action="{{
+ url_for(
+ 'coprs_ns.copr_createrepo',
+ username=copr.owner.name,
+ coprname=copr.name
+ ) }}" method="post" >
- <form action="{{
- url_for(
- 'coprs_ns.copr_createrepo',
- username=copr.owner.name,
- coprname=copr.name
- ) }}" method="post" >
- Create repository metodata forcible:
- <button type="submit">Run createrepo</button>
- </form>
- </div>
+ Automatic repository metadata generation is disabled.
+ You can regenerate it manually or enable automatic refresh on the edit page.
+ <button type="submit">Regenerate repositories</button>
+ </form>
+ </dd>
{% endif %}
{% for chroot in copr.modified_chroots %}
diff --git a/frontend/coprs_frontend/coprs/views/api_ns/api_general.py b/frontend/coprs_frontend/coprs/views/api_ns/api_general.py
index cae3495..faf08cc 100644
--- a/frontend/coprs_frontend/coprs/views/api_ns/api_general.py
+++ b/frontend/coprs_frontend/coprs/views/api_ns/api_general.py
@@ -1,5 +1,6 @@
import base64
import datetime
+import json
import urlparse
import flask
@@ -72,6 +73,7 @@ def api_new_copr(username):
httpcode = 200
# are there any arguments in POST which our form doesn't know?
+ # TODO: don't use WTFform for parsing and validation here
if any([post_key not in form.__dict__.keys()
for post_key in flask.request.form.keys()]):
output = {"output": "notok",
@@ -80,9 +82,16 @@ def api_new_copr(username):
elif form.validate_on_submit():
infos = []
- auto_createrepo = form.auto_createrepo.data
- if "auto_createrepo" not in flask.request.data:
- auto_createrepo = True
+
+ auto_createrepo = True
+ dct = json.loads(flask.request.data)
+ if "auto_createrepo" in dct:
+ val = dct["auto_createrepo"]
+ if isinstance(val, bool):
+ auto_createrepo = val
+ elif str(val).lower() in ["false", "no"]:
+ auto_createrepo = False
+
try:
copr = coprs_logic.CoprsLogic.add(
name=form.name.data.strip(),
diff --git a/frontend/coprs_frontend/coprs/views/coprs_ns/coprs_general.py b/frontend/coprs_frontend/coprs/views/coprs_ns/coprs_general.py
index 85ddc3d..c5e765b 100644
--- a/frontend/coprs_frontend/coprs/views/coprs_ns/coprs_general.py
+++ b/frontend/coprs_frontend/coprs/views/coprs_ns/coprs_general.py
@@ -133,7 +133,7 @@ def copr_new(username):
selected_chroots=form.selected_chroots,
description=form.description.data,
instructions=form.instructions.data,
- auto_createrepo=form.auto_createrepo.data,
+ disable_createrepo=form.disable_createrepo.data,
)
db.session.commit()
@@ -257,7 +257,7 @@ def copr_update(username, coprname):
copr.repos = form.repos.data.replace("\n", " ")
copr.description = form.description.data
copr.instructions = form.instructions.data
- copr.auto_createrepo = form.auto_createrepo.data
+ copr.disable_createrepo = form.disable_createrepo.data
coprs_logic.CoprChrootsLogic.update_from_names(
flask.g.user, copr, form.selected_chroots)
@@ -414,6 +414,7 @@ def copr_createrepo(username, coprname):
chroots=chroots)
db.session.commit()
+ flask.flash("Repository metadata will be regenerated in a few minutes ...")
return flask.redirect(flask.url_for("coprs_ns.copr_detail",
username=copr.owner.name,
coprname=copr.name))
9 years, 5 months