[mirrormanager] master: model: let host.category_urls() return private URLs too (d86cda6)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit d86cda6d3a138401b19570036c7210c020fd3923
Author: Matt Domsch <matt@domsch.com>
Date: Mon Jul 22 16:36:23 2013 -0500
model: let host.category_urls() return private URLs too
The crawler is the only thing that uses this function, and it needs
access to private URLs (e.g. private rsync) as well as public ones. So
remove the private=False select condition.
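For context, SQLObject's selectBy() maps each keyword argument to an
equality test in the WHERE clause, so dropping the keyword drops the
filter entirely. A minimal before/after sketch:

    # before: only public URLs (rows with private=False) were returned
    urls = [hcurl.url for hcurl in
            HostCategoryUrl.selectBy(host_category=hc, private=False)]
    # after: no private filter, so private rsync URLs come back too
    urls = [hcurl.url for hcurl in
            HostCategoryUrl.selectBy(host_category=hc)]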
>---------------------------------------------------------------
server/mirrormanager/model.py | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/server/mirrormanager/model.py b/server/mirrormanager/model.py
index 60a62ae..5ac8d27 100644
--- a/server/mirrormanager/model.py
+++ b/server/mirrormanager/model.py
@@ -394,7 +394,7 @@ class Host(SQLObject):
def category_urls(self, cname):
for hc in self.categories:
if hc.category.name == cname:
- return [hcurl.url for hcurl in HostCategoryUrl.selectBy(host_category=hc, private=False)]
+ return [hcurl.url for hcurl in HostCategoryUrl.selectBy(host_category=hc)]
def directory_urls(self, directory, category):
"""Given what we know about the host and the categories it carries
[mirrormanager] master: mv-instrepos: fix for rawhide (again) (325ec06)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 325ec0639e40ac8efe2367fcc9cf5c40a42d1cb9
Author: Matt Domsch <matt@domsch.com>
Date: Mon Jul 22 07:55:48 2013 -0500
mv-instrepos: fix for rawhide (again)
So, there are already Repository pointers for the directories that
fedora-install-rawhide would have pointed to. Adding the new
Repository pointers causes the cache pickle to blow up (need to figure
out why...).
Instead, we need a RepositoryRedirect in here, with
fromRepo=u'fedora-install-rawhide' and toRepo=u'rawhide', and we use
the unique constraint in the database to be sure we don't get several.
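The committed code below guards the duplicate insert with a bare
except; a slightly narrower sketch of the same idiom, assuming
SQLObject reports the unique-constraint violation as
dberrors.DuplicateEntryError (the exact exception class is an
assumption here):

    from sqlobject import dberrors

    fromRepo, toRepo = u'fedora-install-rawhide', u'rawhide'
    try:
        RepositoryRedirect(fromRepo=fromRepo, toRepo=toRepo)
    except dberrors.DuplicateEntryError:
        # the unique constraint fired: the redirect already exists
        print "Repository Redirect %s -> %s already exists, ignoring." % (fromRepo, toRepo)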
>---------------------------------------------------------------
server/mv-instrepos | 51 ++++++++++++++++++++++++++-------------------------
1 files changed, 26 insertions(+), 25 deletions(-)
diff --git a/server/mv-instrepos b/server/mv-instrepos
index 2170d07..af75737 100755
--- a/server/mv-instrepos
+++ b/server/mv-instrepos
@@ -30,7 +30,16 @@ def doit():
print "destroying %s" % r
r.destroySelf()
- if options.version != 'development': # yeah, development is ver.name, displayname is rawhide
+ if options.version == 'development': # yeah, development is ver.name, displayname is rawhide
+ # We need a RepositoryRedirect here instead, as Repositories already exist to these directories.
+ fromRepo = u'fedora-install-rawhide'
+ toRepo = u'rawhide'
+ try:
+ rr = RepositoryRedirect(fromRepo = fromRepo, toRepo = toRepo)
+ except:
+ print "Repository Redirect %s -> %s already exists, ignoring." % (fromRepo, toRepo)
+ pass
+ else:
prefix=u'fedora-install-%s' % ver.name
if a.primaryArch:
d = u'pub/fedora/linux/releases/%s/Fedora/%s/os' % (ver.name, a.name)
@@ -38,31 +47,23 @@ def doit():
else:
d = u'pub/fedora-secondary/releases/%s/Fedora/%s/os' % (ver.name, a.name)
category = Category.byName(u'Fedora Secondary Arches')
- else:
- prefix=u'fedora-install-rawhide'
- if a.primaryArch:
- d = u'pub/fedora/linux/development/rawhide/%s/os' % (a.name)
- category = Category.byName(u'Fedora Linux')
- else:
- d = u'pub/fedora-secondary/development/rawhide/%s/os' % (a.name)
- category = Category.byName(u'Fedora Secondary Arches')
-
- repos = Repository.selectBy(name=d)
- for r in repos:
- print "destroying %s" % r
- r.destroySelf()
- if not os.path.isdir(os.path.join('/', d)):
- print "directory %s does not exist on disk, skipping creation of a repository there" % d
- continue
-
- try:
- d = Directory.byName(d)
- except sqlobject.SQLObjectNotFound:
- print "directory %s exists on disk, but not in the database yet, skipping creation of a repository there until after the next UMDL run."
- continue
- print "creating %s repo for arch %s" % (prefix, a.name)
- r = Repository(name=d.name, prefix=prefix, arch=a, directory=d, version=ver, category=category)
+ repos = Repository.selectBy(name=d)
+ for r in repos:
+ print "destroying %s" % r
+ r.destroySelf()
+
+ if not os.path.isdir(os.path.join('/', d)):
+ print "directory %s does not exist on disk, skipping creation of a repository there" % d
+ continue
+
+ try:
+ d = Directory.byName(d)
+ except sqlobject.SQLObjectNotFound:
+ print "directory %s exists on disk, but not in the database yet, skipping creation of a repository there until after the next UMDL run."
+ continue
+ print "creating %s repo for arch %s" % (prefix, a.name)
+ r = Repository(name=d.name, prefix=prefix, arch=a, directory=d, version=ver, category=category)
def main():
[mirrormanager] master: mv-instrepos: add --version=rawhide handling (90a97f8)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 90a97f8ddddbd784a083f9a7b64e34bd681c0d2f
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 21:35:01 2013 -0500
mv-instrepos: add --version=rawhide handling
>---------------------------------------------------------------
server/mv-instrepos | 26 ++++++++++++++++++--------
1 files changed, 18 insertions(+), 8 deletions(-)
diff --git a/server/mv-instrepos b/server/mv-instrepos
index 7170b76..2170d07 100755
--- a/server/mv-instrepos
+++ b/server/mv-instrepos
@@ -30,12 +30,22 @@ def doit():
print "destroying %s" % r
r.destroySelf()
- if a.primaryArch:
- d = u'pub/fedora/linux/releases/%s/Fedora/%s/os' % (ver.name, a.name)
- category = Category.byName(u'Fedora Linux')
+ if options.version != 'development': # yeah, development is ver.name, displayname is rawhide
+ prefix=u'fedora-install-%s' % ver.name
+ if a.primaryArch:
+ d = u'pub/fedora/linux/releases/%s/Fedora/%s/os' % (ver.name, a.name)
+ category = Category.byName(u'Fedora Linux')
+ else:
+ d = u'pub/fedora-secondary/releases/%s/Fedora/%s/os' % (ver.name, a.name)
+ category = Category.byName(u'Fedora Secondary Arches')
else:
- d = u'pub/fedora-secondary/releases/%s/Fedora/%s/os' % (ver.name, a.name)
- category = Category.byName(u'Fedora Secondary Arches')
+ prefix=u'fedora-install-rawhide'
+ if a.primaryArch:
+ d = u'pub/fedora/linux/development/rawhide/%s/os' % (a.name)
+ category = Category.byName(u'Fedora Linux')
+ else:
+ d = u'pub/fedora-secondary/development/rawhide/%s/os' % (a.name)
+ category = Category.byName(u'Fedora Secondary Arches')
repos = Repository.selectBy(name=d)
for r in repos:
@@ -43,7 +53,7 @@ def doit():
r.destroySelf()
if not os.path.isdir(os.path.join('/', d)):
- print "directory %s does not exist on disk, skipping creation of a repository there"
+ print "directory %s does not exist on disk, skipping creation of a repository there" % d
continue
try:
@@ -51,8 +61,8 @@ def doit():
except sqlobject.SQLObjectNotFound:
print "directory %s exists on disk, but not in the database yet, skipping creation of a repository there until after the next UMDL run."
continue
- print "creating fedora-install-%s repo for arch %s" % (ver.name, a.name)
- r = Repository(name=d.name, prefix=u'fedora-install-%s' % ver.name, arch=a, directory=d, version=ver, category=category)
+ print "creating %s repo for arch %s" % (prefix, a.name)
+ r = Repository(name=d.name, prefix=prefix, arch=a, directory=d, version=ver, category=category)
def main():
[mirrormanager] master: crawler_perhost: no default timeout (8298960)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 8298960e81fcd03ed887a315678de5f5ea17bb43
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 21:27:06 2013 -0500
crawler_perhost: no default timeout
We're handling timeouts from the crawler master process, which will
kill timed-out child processes. But leave the option, I suppose...
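For reference, the option-guarded alarm pattern this leaves in place
looks roughly like the following sketch (options comes from optparse
as in the diff; the exception class here is our own):

    import signal

    class CrawlTimeout(Exception):
        pass

    def on_alarm(signum, frame):
        raise CrawlTimeout()

    # arm the per-process alarm only when a timeout was requested
    if options.timeout_minutes > 0:
        signal.signal(signal.SIGALRM, on_alarm)
        signal.alarm(options.timeout_minutes * 60)
    # ... crawl the host ...
    if options.timeout_minutes > 0:
        signal.alarm(0)  # we got this far, don't alarm out now!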
>---------------------------------------------------------------
server/crawler_perhost | 7 ++++---
1 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/server/crawler_perhost b/server/crawler_perhost
index e29aaa9..b3d15ee 100755
--- a/server/crawler_perhost
+++ b/server/crawler_perhost
@@ -773,7 +773,8 @@ def per_host(host, options):
hoststate.close()
- signal.alarm(0) # we got this far, don't alarm out now!
+ if options.timeout_minutes > 0:
+ signal.alarm(0) # we got this far, don't alarm out now!
if rc == 0:
if len(host_category_dirs) > 0:
sync_hcds(host, host_category_dirs)
@@ -799,8 +800,8 @@ def main():
help="Include hosts marked 'private' in the crawl")
parser.add_option("--timeout-minutes", type="int",
- dest="timeout_minutes", default=180,
- help="Minutes to let the crawler run before killed (default=180)")
+ dest="timeout_minutes", default=0,
+ help="Minutes to let the crawler run before killed (default=0, no timeout)")
parser.add_option("--logfile", type="string", metavar="FILE",
dest="logfile", default=None,
help="write logs to FILE")
[mirrormanager] master: crawler: default timeout 120 minutes (c1e635c)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit c1e635cb85662f263f6b2a16b9cc4d5396139f54
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 21:24:38 2013 -0500
crawler: default timeout 120 minutes
A few hosts don't quite complete within 90 minutes, particularly
when retrieving FTP listings.
>---------------------------------------------------------------
server/crawler | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/server/crawler b/server/crawler
index dde5d62..f904429 100755
--- a/server/crawler
+++ b/server/crawler
@@ -182,7 +182,7 @@ def main():
dest="logdir", default='/var/log/mirrormanager/crawler',
help="write individual host logfiles to DIR")
parser.add_option("--timeout-minutes", type="int",
- dest="timeout_minutes", default=90,
+ dest="timeout_minutes", default=120,
help="per-host timeout, in minutes")
parser.add_option("--logfile", type="string", metavar="FILE",
dest="logfile", default='/var/log/mirrormanager/crawler.log',
[mirrormanager] master: Host template: display last_crawl_duration (c2dd8e3)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit c2dd8e3fff793c1e74f5673a719dfbd0c8ab90d6
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 21:23:27 2013 -0500
Host template: display last_crawl_duration
>---------------------------------------------------------------
server/mirrormanager/templates/host.kid | 5 +++--
1 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/server/mirrormanager/templates/host.kid b/server/mirrormanager/templates/host.kid
index 6d54d7a..20a80c2 100644
--- a/server/mirrormanager/templates/host.kid
+++ b/server/mirrormanager/templates/host.kid
@@ -25,8 +25,9 @@ ${form(value=values, action=tg.url(action), disabled_fields=disabled_fields)}
<div py:if="values is not None">
-Last Checked In: ${values.lastCheckedIn}<br></br>
-Last Crawled: ${values.lastCrawled} <a href="${tg.url('/crawler/'+str(values.id)+'.log')}">[Log]</a><br></br>
+Last Checked In: ${values.lastCheckedIn}<br/>
+Last Crawled: ${values.lastCrawled} <a href="${tg.url('/crawler/'+str(values.id)+'.log')}">[Log]</a><br/>
+Last Crawl Duration: ${values.last_crawl_duration} seconds<br/>
<div py:if="is_siteadmin">
[mirrormanager] master: crawler_perhost: handle rsync connection refused / empty content (2336731)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 233673193ece9f06bfefa3617085758cf72b7ed3
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 15:11:33 2013 -0500
crawler_perhost: handle rsync connection refused / empty content
Exit without scanning the category if we get return code 10 (socket
I/O error, likely Connection Refused), or if the rsync listing is
empty.
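rsync exit code 10 means "error in socket I/O", which is how a refused
connection surfaces. A rough sketch of the two checks (run_rsync is
the existing helper in crawler_perhost; its (exit code, listing)
return shape is assumed from the diff below):

    result, listing = run_rsync(url, '--no-motd')
    if result == 10:
        # socket I/O error, most likely connection refused:
        # skip the category instead of recording it as empty
        return False
    if len(listing) == 0:
        # an empty listing is just as useless as no connection
        return False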
>---------------------------------------------------------------
server/crawler_perhost | 11 +++++++++--
1 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/server/crawler_perhost b/server/crawler_perhost
index cb04cca..e29aaa9 100755
--- a/server/crawler_perhost
+++ b/server/crawler_perhost
@@ -487,10 +487,14 @@ def try_percategory(trydirs, url, host_category_dirs, hc, host, categoryPrefixLe
rsync_start_time = datetime.utcnow()
result, listing = run_rsync(url, '--no-motd')
rsync_stop_time = datetime.utcnow()
- if result > 0:
- logger.info('rsync returned exit code %d' % result)
msg = "rsync time: %s" % str(rsync_stop_time - rsync_start_time)
logger.info(msg)
+ if result == 10:
+ # no rsync content, fail!
+ logger.warning('Connection to host %s Refused. Please check that the URL is correct and that the host has an rsync module still available.' % host.name)
+ return False
+ if result > 0:
+ logger.info('rsync returned exit code %d' % result)
# put the rsync listing in a dict for easy access
while True:
@@ -503,6 +507,9 @@ def try_percategory(trydirs, url, host_category_dirs, hc, host, categoryPrefixLe
logger.debug("invalid rsync line: %s\n" % line)
logger.debug("rsync listing has %d lines" % len(rsync))
+ if len(rsync) == 0:
+ # no rsync content, fail!
+ return False
# for all directories in this category
for d in trydirs:
if must_dienow:
[mirrormanager] master: SCHEMA CHANGE: last_crawl_duration is now BigIntCol (23fc31c)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 23fc31c37a9f51a2caa150ae4b92e64a6742bc35
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 15:03:18 2013 -0500
SCHEMA CHANGE: last_crawl_duration is now BigIntCol
First, I misnamed this field in the previous commit message. Ignore
that.
Next, last_crawl_duration is now a BigIntCol with default=0, which
contains the duration in seconds. This can be obtained using
timedelta.total_seconds() on python >= 2.7, and we fake it on older
versions. This is good enough for our purposes.
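Note that the total_seconds() fallback in the diff below drops its
return statement; a working version of the helper would be (a sketch;
microseconds are ignored, which the commit message calls good enough):

    def total_seconds(td):
        # equivalent of timedelta.total_seconds() on python < 2.7,
        # minus the sub-second component
        return td.seconds + td.days * 24 * 3600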
>---------------------------------------------------------------
server/crawler | 13 +++++++++----
server/mirrormanager/model.py | 2 +-
server/mirrormanager/schema_updates/__init__.py | 4 ++--
3 files changed, 12 insertions(+), 7 deletions(-)
diff --git a/server/crawler b/server/crawler
index 9b6716e..dde5d62 100755
--- a/server/crawler
+++ b/server/crawler
@@ -26,6 +26,9 @@ from turbogears.database import PackageHub
hub = __connection__ = None
logger = None
+def total_seconds(td):
+ td.seconds + td.days * 24 * 3600
+
class ForkingMaster:
def __init__(self, max_children = 10):
self.active_children = []
@@ -107,11 +110,13 @@ class ForkingMaster:
diff = self.timings[p.pid]['stop'] - self.timings[p.pid]['start']
host = Host.get(self.timings[p.pid]['hostid'])
logger.info('Host %s (id=%s) crawl time %s' % (host.name, host.id, str(diff)))
- host.last_crawl_duration = diff
+ try:
+ seconds = int(diff.total_seconds())
+ except AttributeError: # python < 2.7
+ seconds = total_seconds(diff)
+ host.last_crawl_duration = seconds
del self.timings[p.pid]
-
-
def doit():
master = ForkingMaster(max_children=options.threads)
commonargs = [ options.crawler_perhost, '-c', options.config]
@@ -120,7 +125,7 @@ def doit():
numhosts = Host.selectBy(private=False).count()
i = 0
- for h in list(Host.selectBy(private=False).orderBy(['-last_crawl_duration', 'id'])):
+ for h in list(Host.selectBy(private=False).orderBy('-last_crawl_duration')):
i += 1
try:
if h.id < options.startid: continue
diff --git a/server/mirrormanager/model.py b/server/mirrormanager/model.py
index 0968fe8..60a62ae 100644
--- a/server/mirrormanager/model.py
+++ b/server/mirrormanager/model.py
@@ -236,7 +236,7 @@ class Host(SQLObject):
asn = IntCol(default=None)
asn_clients = BoolCol(default=True)
max_connections = IntCol(default=1, notNone=True, unsigned=True)
- last_crawl_duration = DateTimeCol(default=None)
+ last_crawl_duration = BigIntCol(default=0)
countries_allowed = MultipleJoin('HostCountryAllowed')
netblocks = MultipleJoin('HostNetblock', orderBy='netblock')
acl_ips = MultipleJoin('HostAclIp', orderBy='ip')
diff --git a/server/mirrormanager/schema_updates/__init__.py b/server/mirrormanager/schema_updates/__init__.py
index 98d3f4c..6ad499f 100644
--- a/server/mirrormanager/schema_updates/__init__.py
+++ b/server/mirrormanager/schema_updates/__init__.py
@@ -66,7 +66,7 @@ def change_tables():
changes['host.max_connections'] = True
if 'lastCrawlDuration' not in OldHost.sqlmeta.columns:
- OldHost.sqlmeta.addColumn(IntCol("last_crawl_duration", default=None), changeSchema=True)
+ OldHost.sqlmeta.addColumn(IntCol("last_crawl_duration", default=0), changeSchema=True)
changes['host.last_crawl_duration'] = True
if 'sortorder' not in OldVersion.sqlmeta.columns and \
@@ -181,7 +181,7 @@ def fill_new_columns():
if changes.get('host.max_connections'):
h.max_connections = 1
if changes.get('host.last_crawl_duration'):
- h.last_crawl_duration = None
+ h.last_crawl_duration = 0
if changes.get('host.max_connections'):
_set_not_null(OldHost, 'max_connections')
[mirrormanager] master: crawler: don't log crawler_perhost stdout and stderr, drop --debug (69815c2)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 69815c26382dbe8480a65e51d164992ca979eaee
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 00:35:27 2013 -0500
crawler: don't log crawler_perhost stdout and stderr, drop --debug
crawler_perhost stdout is already being logged via the logging module
into a log file; no need to duplicate that. stderr isn't generating
anything useful at this point either. So don't bother keeping them on
disk.
Also, now that the crawler is working better, we don't need to pass
--debug to crawler_perhost.
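The resulting Popen call just wires the child's streams to /dev/null;
a condensed sketch (spawn_quiet is hypothetical; command and args are
the crawler_perhost invocation built by the caller):

    import os
    from subprocess import Popen

    def spawn_quiet(command, args):
        # per-host logs are written by crawler_perhost itself via the
        # logging module; discard whatever hits stdout/stderr
        devnull = open(os.devnull, 'r+')
        return Popen(args, executable=command, stdin=devnull,
                     stdout=devnull, stderr=devnull, close_fds=True)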
>---------------------------------------------------------------
server/crawler | 6 ++----
1 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/server/crawler b/server/crawler
index 8346667..9b6716e 100755
--- a/server/crawler
+++ b/server/crawler
@@ -76,9 +76,7 @@ class ForkingMaster:
def process_request(self, command, args, host):
"""Fork a new subprocess to process the request."""
logger.info("Starting crawler %s: %s" % (host.name, args))
- stdout = open(os.path.join(options.logdir, '%d-stdout.log' % host.id), 'a')
- stderr = open(os.path.join(options.logdir, '%d-stderr.log' % host.id), 'a')
- p = Popen(args, executable=command, stdin=self.devnull, stdout=stdout, stderr=stderr, close_fds=True)
+ p = Popen(args, executable=command, stdin=self.devnull, stdout=self.devnull, stderr=self.devnull, close_fds=True)
self.start_time(p, host.id)
logger.debug("Adding child pid %d" % p.pid)
self.active_children.append(p)
@@ -116,7 +114,7 @@ class ForkingMaster:
def doit():
master = ForkingMaster(max_children=options.threads)
- commonargs = [ options.crawler_perhost, '-c', options.config, '--debug']
+ commonargs = [ options.crawler_perhost, '-c', options.config]
if options.canary:
commonargs.append('--canary')
[mirrormanager] master: SCHEMA UPDATE: add Host.last_crawled_duration, use it in crawler (2760248)
by Matt Domsch
Repository : http://git.fedorahosted.org/cgit/
On branch : master
>---------------------------------------------------------------
commit 2760248a5d6296b58310a114178686e39cbeb5cf
Author: Matt Domsch <matt@domsch.com>
Date: Sun Jul 21 00:26:44 2013 -0500
SCHEMA UPDATE: add Host.last_crawled_duration, use it in crawler
We want to start the crawlers in descending order of last crawl
duration. This lets us start the crawlers that will take a long time
first, and the crawlers that will complete quickly, later. In this
order, the overall completion time should be the fastest possible, as
a long-running job won't get queued behind an arbitrarily long run of
faster jobs.
Set the last_crawled_duration in the crawler after crawler_perhost has
completed, and use the value in crawler to sort the list of hosts to
be crawled.
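The ordering itself is a one-line orderBy change; a sketch of the
longest-first idea in SQLObject (descending last_crawl_duration, ties
broken by id, matching the diff below):

    # longest previous crawl first: slow hosts start immediately and
    # quick crawls backfill the worker pool behind them
    hosts = Host.selectBy(private=False).orderBy(
        ['-last_crawl_duration', 'id'])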
>---------------------------------------------------------------
server/crawler | 3 ++-
server/mirrormanager/model.py | 1 +
server/mirrormanager/schema_updates/__init__.py | 13 ++++++++++---
3 files changed, 13 insertions(+), 4 deletions(-)
diff --git a/server/crawler b/server/crawler
index c565cd3..8346667 100755
--- a/server/crawler
+++ b/server/crawler
@@ -109,6 +109,7 @@ class ForkingMaster:
diff = self.timings[p.pid]['stop'] - self.timings[p.pid]['start']
host = Host.get(self.timings[p.pid]['hostid'])
logger.info('Host %s (id=%s) crawl time %s' % (host.name, host.id, str(diff)))
+ host.last_crawl_duration = diff
del self.timings[p.pid]
@@ -121,7 +122,7 @@ def doit():
numhosts = Host.selectBy(private=False).count()
i = 0
- for h in list(Host.selectBy(private=False).orderBy('id')):
+ for h in list(Host.selectBy(private=False).orderBy(['-last_crawl_duration', 'id'])):
i += 1
try:
if h.id < options.startid: continue
diff --git a/server/mirrormanager/model.py b/server/mirrormanager/model.py
index b988e54..0968fe8 100644
--- a/server/mirrormanager/model.py
+++ b/server/mirrormanager/model.py
@@ -236,6 +236,7 @@ class Host(SQLObject):
asn = IntCol(default=None)
asn_clients = BoolCol(default=True)
max_connections = IntCol(default=1, notNone=True, unsigned=True)
+ last_crawl_duration = DateTimeCol(default=None)
countries_allowed = MultipleJoin('HostCountryAllowed')
netblocks = MultipleJoin('HostNetblock', orderBy='netblock')
acl_ips = MultipleJoin('HostAclIp', orderBy='ip')
diff --git a/server/mirrormanager/schema_updates/__init__.py b/server/mirrormanager/schema_updates/__init__.py
index 551589b..98d3f4c 100644
--- a/server/mirrormanager/schema_updates/__init__.py
+++ b/server/mirrormanager/schema_updates/__init__.py
@@ -65,6 +65,10 @@ def change_tables():
OldHost.sqlmeta.addColumn(IntCol("max_connections", default=1), changeSchema=True)
changes['host.max_connections'] = True
+ if 'lastCrawlDuration' not in OldHost.sqlmeta.columns:
+ OldHost.sqlmeta.addColumn(IntCol("last_crawl_duration", default=None), changeSchema=True)
+ changes['host.last_crawl_duration'] = True
+
if 'sortorder' not in OldVersion.sqlmeta.columns and \
'codename' not in OldVersion.sqlmeta.columns:
OldVersion.sqlmeta.addColumn(IntCol("sortorder", default=0), changeSchema=True)
@@ -172,12 +176,15 @@ def fill_new_columns():
s.emailOnDrop=False
s.emailOnAdd=False
- if changes.get('host.max_connections'):
- for h in Host.select():
+
+ for h in Host.select():
+ if changes.get('host.max_connections'):
h.max_connections = 1
+ if changes.get('host.last_crawl_duration'):
+ h.last_crawl_duration = None
+ if changes.get('host.max_connections'):
_set_not_null(OldHost, 'max_connections')
-
if changes.get('version.sortorder_codename'):
for v in Version.select():
v.sortorder = 0