Architecture-specific change in rpms/spyder.git
by githook-noreply@fedoraproject.org
The package rpms/spyder.git has added or updated architecture-specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/spyder.git/commit/?id=0656feb7a9a...
https://src.fedoraproject.org/cgit/rpms/spyder.git/commit/?id=fff9a965af4....
Change:
+ExclusiveArch: %{qt5_qtwebengine_arches}
+ExcludeArch: %{ix86}
Thanks.
Full change:
============
commit 4fd0934c4af28f0ad6597856e45c4a642d0fa4c6
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Tue Sep 26 13:51:03 2023 -0400
Use upstream PR for useless shebangs now that it’s rebased on 5.x
diff --git a/spyder-5.4.5-useless-shebangs.patch b/21372.patch
similarity index 88%
rename from spyder-5.4.5-useless-shebangs.patch
rename to 21372.patch
index d681a51..4c434db 100644
--- a/spyder-5.4.5-useless-shebangs.patch
+++ b/21372.patch
@@ -1,4 +1,4 @@
-From 8afc036557de13b132b01c833fc77569870767e2 Mon Sep 17 00:00:00 2001
+From 7ae5b5797248765a6dfec7378b83fbd7335ac946 Mon Sep 17 00:00:00 2001
From: "Benjamin A. Beasley" <code(a)musicinmybrain.net>
Date: Mon, 25 Sep 2023 12:05:16 -0400
Subject: [PATCH 1/2] Remove useless shebang lines
@@ -16,7 +16,7 @@ script-like (has a “main routine” or other interesting side effects).
3 files changed, 3 deletions(-)
diff --git a/spyder/app/restart.py b/spyder/app/restart.py
-index de40fb917..041f89ded 100644
+index de40fb91716..041f89deddf 100644
--- a/spyder/app/restart.py
+++ b/spyder/app/restart.py
@@ -1,4 +1,3 @@
@@ -25,7 +25,7 @@ index de40fb917..041f89ded 100644
#
# Copyright © Spyder Project Contributors
diff --git a/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy b/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy
-index 05b56b8e3..8dc9efb92 100644
+index 05b56b8e33e..8dc9efb9256 100644
--- a/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy
+++ b/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy
@@ -1,4 +1,3 @@
@@ -34,7 +34,7 @@ index 05b56b8e3..8dc9efb92 100644
"""
Created on Mon Dec 14 12:25:29 2020
diff --git a/spyder/utils/external/github.py b/spyder/utils/external/github.py
-index 56182433b..b0a5032e9 100644
+index 56182433ba9..b0a5032e998 100644
--- a/spyder/utils/external/github.py
+++ b/spyder/utils/external/github.py
@@ -1,4 +1,3 @@
@@ -42,11 +42,8 @@ index 56182433b..b0a5032e9 100644
# -*-coding: utf8 -*-
"""
---
-2.41.0
-
-From 0c02c04780a983f380f94897a2783433e6b64110 Mon Sep 17 00:00:00 2001
+From 3336314909489f2115dc5ed1fb1974fbdc6954c5 Mon Sep 17 00:00:00 2001
From: "Benjamin A. Beasley" <code(a)musicinmybrain.net>
Date: Mon, 25 Sep 2023 12:07:20 -0400
Subject: [PATCH 2/2] Make tools/rm_whitespace.py executable
@@ -59,6 +56,3 @@ Subject: [PATCH 2/2] Make tools/rm_whitespace.py executable
diff --git a/tools/rm_whitespace.py b/tools/rm_whitespace.py
old mode 100644
new mode 100755
---
-2.41.0
-
diff --git a/spyder.spec b/spyder.spec
index 7a69252..4d25476 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -25,8 +25,7 @@ Patch: %{forgeurl}/pull/21367.patch
# Ensure no source files have useless shebangs
# https://github.com/spyder-ide/spyder/pull/21372
-# PR opened against master, here rebased against the 5.x branch.
-Patch: spyder-5.4.5-useless-shebangs.patch
+Patch: %{forgeurl}/pull/21372.patch
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
commit 3b056490de37d025c8d3ba4347d1a9530f9697e3
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 12:22:49 2023 -0400
Send a PR upstream for useless shebangs, replacing the downstream patch
diff --git a/spyder-5.4.5-useless-shebangs.patch b/spyder-5.4.5-useless-shebangs.patch
new file mode 100644
index 0000000..d681a51
--- /dev/null
+++ b/spyder-5.4.5-useless-shebangs.patch
@@ -0,0 +1,64 @@
+From 8afc036557de13b132b01c833fc77569870767e2 Mon Sep 17 00:00:00 2001
+From: "Benjamin A. Beasley" <code(a)musicinmybrain.net>
+Date: Mon, 25 Sep 2023 12:05:16 -0400
+Subject: [PATCH 1/2] Remove useless shebang lines
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+There is no value in having a shebang (#!) line in a file that does not
+have the executable bit set in its filesystem permissions, even if it is
+script-like (has a “main routine” or other interesting side effects).
+---
+ spyder/app/restart.py | 1 -
+ spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy | 1 -
+ spyder/utils/external/github.py | 1 -
+ 3 files changed, 3 deletions(-)
+
+diff --git a/spyder/app/restart.py b/spyder/app/restart.py
+index de40fb917..041f89ded 100644
+--- a/spyder/app/restart.py
++++ b/spyder/app/restart.py
+@@ -1,4 +1,3 @@
+-#!/usr/bin/env python
+ # -*- coding: utf-8 -*-
+ #
+ # Copyright © Spyder Project Contributors
+diff --git a/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy b/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy
+index 05b56b8e3..8dc9efb92 100644
+--- a/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy
++++ b/spyder/plugins/editor/widgets/tests/assets/ipython_file.ipy
+@@ -1,4 +1,3 @@
+-#!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ """
+ Created on Mon Dec 14 12:25:29 2020
+diff --git a/spyder/utils/external/github.py b/spyder/utils/external/github.py
+index 56182433b..b0a5032e9 100644
+--- a/spyder/utils/external/github.py
++++ b/spyder/utils/external/github.py
+@@ -1,4 +1,3 @@
+-#!/usr/bin/env python
+ # -*-coding: utf8 -*-
+
+ """
+--
+2.41.0
+
+
+From 0c02c04780a983f380f94897a2783433e6b64110 Mon Sep 17 00:00:00 2001
+From: "Benjamin A. Beasley" <code(a)musicinmybrain.net>
+Date: Mon, 25 Sep 2023 12:07:20 -0400
+Subject: [PATCH 2/2] Make tools/rm_whitespace.py executable
+
+---
+ tools/rm_whitespace.py | 0
+ 1 file changed, 0 insertions(+), 0 deletions(-)
+ mode change 100644 => 100755 tools/rm_whitespace.py
+
+diff --git a/tools/rm_whitespace.py b/tools/rm_whitespace.py
+old mode 100644
+new mode 100755
+--
+2.41.0
+
diff --git a/spyder.spec b/spyder.spec
index 0545f26..7a69252 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -23,6 +23,11 @@ Patch: %{forgeurl}/commit/285ef8a385c29ca7874027f57ab9dc44cbffae97.patch
# https://github.com/spyder-ide/spyder/pull/21367
Patch: %{forgeurl}/pull/21367.patch
+# Ensure no source files have useless shebangs
+# https://github.com/spyder-ide/spyder/pull/21372
+# PR opened against master, here rebased against the 5.x branch.
+Patch: spyder-5.4.5-useless-shebangs.patch
+
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
ExcludeArch: %{ix86}
@@ -81,10 +86,6 @@ find . -type f \( \
-name '*.rst' -o -name '*.md' -o -name '*.py' -o -name '*.css' \
\) -exec dos2unix --keepdate '{}' '+'
-# Remove shebang
-sed -i '/^#!/d' spyder/utils/external/github.py
-sed -i '/^#!/d' spyder/app/restart.py
-
%generate_buildrequires
%pyproject_buildrequires
commit 27542f0fe5a38021d18f9eddbaf19334e13b7cfe
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 13:07:49 2023 -0400
Fix mixed tabs and spaces
These were accidentally introduced in a previous commit
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index 18f4a3a..0545f26 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -34,7 +34,7 @@ BuildRequires: python3-devel
BuildRequires: dos2unix
-BuildRequires: desktop-file-utils
+BuildRequires: desktop-file-utils
# Still required by guidelines for now since Fedora uses appstream-builder
# (https://pagure.io/packaging-committee/issue/1053):
BuildRequires: libappstream-glib
@@ -63,9 +63,9 @@ components, such as the interactive console, in your own software.}
Summary: %{summary}
# For %%{_datadir}/icons
-Requires: hicolor-icon-theme
+Requires: hicolor-icon-theme
# Unbundled from spyder/plugins/help/utils/js/mathjax
-Requires: mathjax
+Requires: mathjax
%description -n python3-spyder %_description
commit d983194c4630e3313fee69c4a5ef476c265e9d21
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 11:42:58 2023 -0400
Replace downstream dep. version patch with commit/PR backports
diff --git a/21367.patch b/21367.patch
new file mode 100644
index 0000000..b056d14
--- /dev/null
+++ b/21367.patch
@@ -0,0 +1,67 @@
+From 60ae3c650d2c5778a12f5b5a3fcfec8fef63f507 Mon Sep 17 00:00:00 2001
+From: "Benjamin A. Beasley" <code(a)musicinmybrain.net>
+Date: Sun, 24 Sep 2023 08:21:13 -0400
+Subject: [PATCH] Bump jedi upper bound from <0.19.0 to <0.20.0
+
+This comes from python-lsp-server, which supports 0.19.x since 1.8.0.
+
+Fixes #21296.
+---
+ binder/environment.yml | 2 +-
+ requirements/main.yml | 2 +-
+ setup.py | 2 +-
+ spyder/dependencies.py | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/binder/environment.yml b/binder/environment.yml
+index 2e50b7f931c..9259ae23141 100644
+--- a/binder/environment.yml
++++ b/binder/environment.yml
+@@ -14,7 +14,7 @@ dependencies:
+ - diff-match-patch >=20181111
+ - intervaltree >=3.0.2
+ - ipython >=7.31.1,<9.0.0,!=8.8.0,!=8.9.0,!=8.10.0,!=8.11.0,!=8.12.0,!=8.12.1
+-- jedi >=0.17.2,<0.19.0
++- jedi >=0.17.2,<0.20.0
+ - jellyfish >=0.7
+ - jsonschema >=3.2.0
+ - keyring >=17.0.0
+diff --git a/requirements/main.yml b/requirements/main.yml
+index aa2a3ddf40e..9966d16e151 100644
+--- a/requirements/main.yml
++++ b/requirements/main.yml
+@@ -12,7 +12,7 @@ dependencies:
+ - diff-match-patch >=20181111
+ - intervaltree >=3.0.2
+ - ipython >=7.31.1,<9.0.0,!=8.8.0,!=8.9.0,!=8.10.0,!=8.11.0,!=8.12.0,!=8.12.1
+- - jedi >=0.17.2,<0.19.0
++ - jedi >=0.17.2,<0.20.0
+ - jellyfish >=0.7
+ - jsonschema >=3.2.0
+ - keyring >=17.0.0
+diff --git a/setup.py b/setup.py
+index 14655bd51a0..4e8cc34564a 100644
+--- a/setup.py
++++ b/setup.py
+@@ -210,7 +210,7 @@ def run(self):
+ 'diff-match-patch>=20181111',
+ 'intervaltree>=3.0.2',
+ 'ipython>=7.31.1,<9.0.0,!=8.8.0,!=8.9.0,!=8.10.0,!=8.11.0,!=8.12.0,!=8.12.1',
+- 'jedi>=0.17.2,<0.19.0',
++ 'jedi>=0.17.2,<0.20.0',
+ 'jellyfish>=0.7',
+ 'jsonschema>=3.2.0',
+ 'keyring>=17.0.0',
+diff --git a/spyder/dependencies.py b/spyder/dependencies.py
+index bc12e38a2fb..0689efb32ae 100644
+--- a/spyder/dependencies.py
++++ b/spyder/dependencies.py
+@@ -40,7 +40,7 @@
+ INTERVALTREE_REQVER = None if is_pynsist() else '>=3.0.2'
+ IPYTHON_REQVER = (
+ ">=7.31.1,<9.0.0,!=8.8.0,!=8.9.0,!=8.10.0,!=8.11.0,!=8.12.0,!=8.12.1")
+-JEDI_REQVER = '>=0.17.2,<0.19.0'
++JEDI_REQVER = '>=0.17.2,<0.20.0'
+ JELLYFISH_REQVER = '>=0.7'
+ JSONSCHEMA_REQVER = '>=3.2.0'
+ KEYRING_REQVER = '>=17.0.0'
diff --git a/285ef8a385c29ca7874027f57ab9dc44cbffae97.patch b/285ef8a385c29ca7874027f57ab9dc44cbffae97.patch
new file mode 100644
index 0000000..7d1194d
--- /dev/null
+++ b/285ef8a385c29ca7874027f57ab9dc44cbffae97.patch
@@ -0,0 +1,22 @@
+From 285ef8a385c29ca7874027f57ab9dc44cbffae97 Mon Sep 17 00:00:00 2001
+From: Carlos Cordoba <ccordoba12(a)gmail.com>
+Date: Thu, 14 Sep 2023 11:58:30 -0500
+Subject: [PATCH] Bump PyLSP version when using Spyder in dev mode
+
+---
+ setup.py | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/setup.py b/setup.py
+index c912a88a5ce..14655bd51a0 100644
+--- a/setup.py
++++ b/setup.py
+@@ -251,7 +251,7 @@ def run(self):
+ reqs_to_loosen = {'python-lsp-server[all]', 'qtconsole', 'spyder-kernels'}
+ install_requires = [req for req in install_requires
+ if req.split(">")[0] not in reqs_to_loosen]
+- install_requires.append('python-lsp-server[all]>=1.7.4,<1.9.0')
++ install_requires.append('python-lsp-server[all]>=1.8.0,<1.10.0')
+ install_requires.append('qtconsole>=5.4.2,<5.6.0')
+ install_requires.append('spyder-kernels>=2.4.4,<2.6.0')
+
diff --git a/2beb128b6c71eb4d4556a2f79cb385a7352d16f9.patch b/2beb128b6c71eb4d4556a2f79cb385a7352d16f9.patch
new file mode 100644
index 0000000..694c588
--- /dev/null
+++ b/2beb128b6c71eb4d4556a2f79cb385a7352d16f9.patch
@@ -0,0 +1,64 @@
+From 2beb128b6c71eb4d4556a2f79cb385a7352d16f9 Mon Sep 17 00:00:00 2001
+From: Carlos Cordoba <ccordoba12(a)gmail.com>
+Date: Wed, 13 Sep 2023 20:19:05 -0500
+Subject: [PATCH] Dependencies: Bump minimal required version of PyLSP
+
+---
+ binder/environment.yml | 2 +-
+ requirements/main.yml | 2 +-
+ setup.py | 2 +-
+ spyder/dependencies.py | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/binder/environment.yml b/binder/environment.yml
+index d15cc2a3e26..2e50b7f931c 100644
+--- a/binder/environment.yml
++++ b/binder/environment.yml
+@@ -31,7 +31,7 @@ dependencies:
+ - pyqt <5.16
+ - pyqtwebengine <5.16
+ - python-lsp-black >=1.2.0,<3.0.0
+-- python-lsp-server >=1.7.4,<1.8.0
++- python-lsp-server >=1.8.0,<1.9.0
+ - pyxdg >=0.26
+ - pyzmq >=22.1.0
+ - qdarkstyle >=3.0.2,<3.2.0
+diff --git a/requirements/main.yml b/requirements/main.yml
+index ca4f2576d0a..aa2a3ddf40e 100644
+--- a/requirements/main.yml
++++ b/requirements/main.yml
+@@ -29,7 +29,7 @@ dependencies:
+ - pyqt <5.16
+ - pyqtwebengine <5.16
+ - python-lsp-black >=1.2.0,<3.0.0
+- - python-lsp-server >=1.7.4,<1.8.0
++ - python-lsp-server >=1.8.0,<1.9.0
+ - pyzmq >=22.1.0
+ - qdarkstyle >=3.0.2,<3.2.0
+ - qstylizer >=0.2.2
+diff --git a/setup.py b/setup.py
+index a90bbc1be4b..c912a88a5ce 100644
+--- a/setup.py
++++ b/setup.py
+@@ -229,7 +229,7 @@ def run(self):
+ 'pyls-spyder>=0.4.0',
+ 'pyqt5<5.16',
+ 'pyqtwebengine<5.16',
+- 'python-lsp-server[all]>=1.7.4,<1.8.0',
++ 'python-lsp-server[all]>=1.8.0,<1.9.0',
+ 'pyxdg>=0.26;platform_system=="Linux"',
+ 'pyzmq>=22.1.0',
+ 'qdarkstyle>=3.0.2,<3.2.0',
+diff --git a/spyder/dependencies.py b/spyder/dependencies.py
+index 3a297b0edaf..bc12e38a2fb 100644
+--- a/spyder/dependencies.py
++++ b/spyder/dependencies.py
+@@ -54,7 +54,7 @@
+ PYGMENTS_REQVER = '>=2.0'
+ PYLINT_REQVER = '>=2.5.0,<3.0'
+ PYLINT_VENV_REQVER = '>=3.0.2'
+-PYLSP_REQVER = '>=1.7.4,<1.8.0'
++PYLSP_REQVER = '>=1.8.0,<1.9.0'
+ PYLSP_BLACK_REQVER = '>=1.2.0,<3.0.0'
+ PYLS_SPYDER_REQVER = '>=0.4.0'
+ PYXDG_REQVER = '>=0.26'
diff --git a/spyder.spec b/spyder.spec
index 866c4f1..18f4a3a 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -11,7 +11,17 @@ License: MIT
URL: https://www.spyder-ide.org/
Source: %forgesource
-Patch: spyder_relax_versions.patch
+# Dependencies: Bump minimal required version of PyLSP
+# https://github.com/spyder-ide/spyder/commit/2beb128b6c71eb4d4556a2f79cb38...
+Patch: %{forgeurl}/commit/2beb128b6c71eb4d4556a2f79cb385a7352d16f9.patch
+
+# Bump PyLSP version when using Spyder in dev mode
+# https://github.com/spyder-ide/spyder/commit/285ef8a385c29ca7874027f57ab9d...
+Patch: %{forgeurl}/commit/285ef8a385c29ca7874027f57ab9dc44cbffae97.patch
+
+# Bump jedi upper bound from <0.19.0 to <0.20.0
+# https://github.com/spyder-ide/spyder/pull/21367
+Patch: %{forgeurl}/pull/21367.patch
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
diff --git a/spyder_relax_versions.patch b/spyder_relax_versions.patch
deleted file mode 100644
index cacc1a8..0000000
--- a/spyder_relax_versions.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-diff --git a/setup.py b/setup.py
-index a90bbc1be..c68fbb029 100644
---- a/setup.py
-+++ b/setup.py
-@@ -210,7 +210,7 @@ def run(self):
- 'diff-match-patch>=20181111',
- 'intervaltree>=3.0.2',
- 'ipython>=7.31.1,<9.0.0,!=8.8.0,!=8.9.0,!=8.10.0,!=8.11.0,!=8.12.0,!=8.12.1',
-- 'jedi>=0.17.2,<0.19.0',
-+ 'jedi>=0.17.2',
- 'jellyfish>=0.7',
- 'jsonschema>=3.2.0',
- 'keyring>=17.0.0',
-@@ -229,7 +229,7 @@ def run(self):
- 'pyls-spyder>=0.4.0',
- 'pyqt5<5.16',
- 'pyqtwebengine<5.16',
-- 'python-lsp-server[all]>=1.7.4,<1.8.0',
-+ 'python-lsp-server>=1.7.4',
- 'pyxdg>=0.26;platform_system=="Linux"',
- 'pyzmq>=22.1.0',
- 'qdarkstyle>=3.0.2,<3.2.0',
commit 31c88b881923c05801987700054fa668133583d2
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 09:41:58 2023 -0400
Package the changelog and readme files as documentation
diff --git a/spyder.spec b/spyder.spec
index 1e7f59c..866c4f1 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -131,6 +131,9 @@ end
%files -n python3-spyder -f %{pyproject_files}
+%doc CHANGELOG.md
+%doc README.md
+
# A backed-up bundled mathjax directory from a previous upgrade may be present:
%ghost %{python3_sitelib}/spyder/plugins/help/utils/js/mathjax.rpmmoved
%{python3_sitelib}/spyder/plugins/help/utils/js/mathjax
commit 85a22a41ede37f442352fe808cebc522aa773800
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 09:38:56 2023 -0400
Use whitespace to break up the files list into logical sections
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index 4a2a9f1..1e7f59c 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -134,8 +134,10 @@ end
# A backed-up bundled mathjax directory from a previous upgrade may be present:
%ghost %{python3_sitelib}/spyder/plugins/help/utils/js/mathjax.rpmmoved
%{python3_sitelib}/spyder/plugins/help/utils/js/mathjax
+
%{_bindir}/spyder
%{_bindir}/spyder3
+
%{_metainfodir}/%{appname}.appdata.xml
%{_datadir}/applications/spyder.desktop
%{_datadir}/icons/spyder.png
commit d2d8fc41ea2b397d422be8bca0f3503246bed48e
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 09:37:29 2023 -0400
Comment the manual Requires
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index a0b8c72..4a2a9f1 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -52,8 +52,10 @@ components, such as the interactive console, in your own software.}
%package -n python3-spyder
Summary: %{summary}
-Requires: hicolor-icon-theme
-Requires: mathjax
+# For %%{_datadir}/icons
+Requires: hicolor-icon-theme
+# Unbundled from spyder/plugins/help/utils/js/mathjax
+Requires: mathjax
%description -n python3-spyder %_description
commit 17274e77fdd879621864f068ba8b0f273ce9fc21
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 11:29:40 2023 -0400
Add missing explicit BuildRequires on python3-devel
diff --git a/spyder.spec b/spyder.spec
index aa5f229..a0b8c72 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -20,6 +20,8 @@ ExcludeArch: %{ix86}
# to follow suit.
ExclusiveArch: %{qt5_qtwebengine_arches}
+BuildRequires: python3-devel
+
BuildRequires: dos2unix
BuildRequires: desktop-file-utils
commit 3cdde2809646d492af3c4570c79a5f55d420a73a
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 09:08:38 2023 -0400
Tidy up AppStream metadata handling
- Validate in check, and add a validation with appstreamcli
- Use the _metainfodir macro in paths
diff --git a/spyder.spec b/spyder.spec
index 7705d5e..aa5f229 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -22,6 +22,15 @@ ExclusiveArch: %{qt5_qtwebengine_arches}
BuildRequires: dos2unix
+BuildRequires: desktop-file-utils
+# Still required by guidelines for now since Fedora uses appstream-builder
+# (https://pagure.io/packaging-committee/issue/1053):
+BuildRequires: libappstream-glib
+# Matches what gnome-software and others use:
+BuildRequires: appstream
+
+%global appname org.spyder_ide.spyder
+
%global _description %{expand:
Spyder is a powerful scientific environment written in Python, for Python, and
designed by and for scientists, engineers and data analysts. It offers a unique
@@ -41,9 +50,6 @@ components, such as the interactive console, in your own software.}
%package -n python3-spyder
Summary: %{summary}
-BuildRequires: desktop-file-utils
-BuildRequires: libappstream-glib
-
Requires: hicolor-icon-theme
Requires: mathjax
@@ -80,9 +86,6 @@ sed -i '/^#!/d' spyder/app/restart.py
desktop-file-install --dir=%{buildroot}%{_datadir}/applications scripts/spyder.desktop
-# install appdata file
-appstream-util validate-relax --nonet %{buildroot}/%{_datadir}/metainfo/org.spyder_ide.spyder.appdata.xml
-
# cleanup
rm -rvf %{buildroot}%{_bindir}/spyder_win_post_install.py
@@ -94,6 +97,16 @@ ln -s %{_datadir}/javascript/mathjax/ \
ln -s spyder %{buildroot}%{_bindir}/spyder3
+%check
+# Still required by guidelines for now since Fedora uses appstream-builder
+# (https://pagure.io/packaging-committee/issue/1053):
+appstream-util validate-relax --nonet \
+ %{buildroot}/%{_metainfodir}/%{appname}.appdata.xml
+# Matches what gnome-software and others use:
+appstreamcli validate --nonet \
+ %{buildroot}/%{_metainfodir}/%{appname}.appdata.xml
+
+
%pretrans -n python3-spyder -p <lua>
--[[Back up any bundled mathjax directory from the old package. See:
https://docs.fedoraproject.org/en-US/packaging-guidelines/Directory_Repla...
@@ -119,7 +132,7 @@ end
%{python3_sitelib}/spyder/plugins/help/utils/js/mathjax
%{_bindir}/spyder
%{_bindir}/spyder3
-%{_datadir}/metainfo/org.spyder_ide.spyder.appdata.xml
+%{_metainfodir}/%{appname}.appdata.xml
%{_datadir}/applications/spyder.desktop
%{_datadir}/icons/spyder.png
commit 266870abc64bb388b42c005078922e828e1ef0f3
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 08:58:35 2023 -0400
Simplify fixing DOS line endings
diff --git a/spyder.spec b/spyder.spec
index 99369e3..7705d5e 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -20,6 +20,8 @@ ExcludeArch: %{ix86}
# to follow suit.
ExclusiveArch: %{qt5_qtwebengine_arches}
+BuildRequires: dos2unix
+
%global _description %{expand:
Spyder is a powerful scientific environment written in Python, for Python, and
designed by and for scientists, engineers and data analysts. It offers a unique
@@ -54,12 +56,10 @@ Requires: mathjax
# Remove bundled external dependencies
rm -rvf external-deps/ spyder/plugins/help/utils/js/mathjax
-# Remove DOS line endings
-for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
- sed "s|\r||g" $file > $file.new && \
- touch -r $file $file.new && \
- mv $file.new $file
-done
+# Fix DOS/CRNL line endings in files that may be installed
+find . -type f \( \
+ -name '*.rst' -o -name '*.md' -o -name '*.py' -o -name '*.css' \
+ \) -exec dos2unix --keepdate '{}' '+'
# Remove shebang
sed -i '/^#!/d' spyder/utils/external/github.py
commit 2950d3d968667f2caeb51397233b80ce9f69f300
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 11:25:28 2023 -0400
Simplify removing bundled dependencies
Remove mathjax with the same “rm” command as external-deps/
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index 82d7274..99369e3 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -52,7 +52,7 @@ Requires: mathjax
%forgeautosetup -p1
# Remove bundled external dependencies
-rm -rvf external-deps/
+rm -rvf external-deps/ spyder/plugins/help/utils/js/mathjax
# Remove DOS line endings
for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
@@ -61,9 +61,6 @@ for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
mv $file.new $file
done
-# remove bundled mathjax
-rm -rvf spyder/plugins/help/utils/js/mathjax
-
# Remove shebang
sed -i '/^#!/d' spyder/utils/external/github.py
sed -i '/^#!/d' spyder/app/restart.py
commit 86a65fbba3139b788d6d83da8008980cc3e7d5ae
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 11:24:00 2023 -0400
Explicitly remove bundled dependencies in prep
This was accidentally dropped in c3308d907692d513eb49d53362b570a8bbdd4dab.
diff --git a/spyder.spec b/spyder.spec
index 38f7c2d..82d7274 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -51,6 +51,9 @@ Requires: mathjax
%prep
%forgeautosetup -p1
+# Remove bundled external dependencies
+rm -rvf external-deps/
+
# Remove DOS line endings
for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
sed "s|\r||g" $file > $file.new && \
commit 71aaa81e3ee7209a0566bd0610be04d26e876a01
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 08:52:42 2023 -0400
Drop AppData XML file sed-patch
- It no longer has any effect
diff --git a/spyder.spec b/spyder.spec
index 804f04d..38f7c2d 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -51,8 +51,6 @@ Requires: mathjax
%prep
%forgeautosetup -p1
-sed -i 's/\xe2\x80\x8b//g' scripts/org.spyder_ide.spyder.appdata.xml
-
# Remove DOS line endings
for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
sed "s|\r||g" $file > $file.new && \
commit e045d216abb3988c8bf5ed1f81195f2863e60edb
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 11:21:16 2023 -0400
Reduce macro indirection in the spec file
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index a661397..804f04d 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -11,7 +11,7 @@ License: MIT
URL: https://www.spyder-ide.org/
Source: %forgesource
-Patch: %{name}_relax_versions.patch
+Patch: spyder_relax_versions.patch
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
@@ -36,7 +36,7 @@ components, such as the interactive console, in your own software.}
%description %_description
-%package -n python3-%{name}
+%package -n python3-spyder
Summary: %{summary}
BuildRequires: desktop-file-utils
@@ -45,7 +45,7 @@ BuildRequires: libappstream-glib
Requires: hicolor-icon-theme
Requires: mathjax
-%description -n python3-%{name} %_description
+%description -n python3-spyder %_description
%prep
@@ -78,9 +78,9 @@ sed -i '/^#!/d' spyder/app/restart.py
%install
%pyproject_install
-%pyproject_save_files %{name}
+%pyproject_save_files spyder
-desktop-file-install --dir=%{buildroot}%{_datadir}/applications scripts/%{name}.desktop
+desktop-file-install --dir=%{buildroot}%{_datadir}/applications scripts/spyder.desktop
# install appdata file
appstream-util validate-relax --nonet %{buildroot}/%{_datadir}/metainfo/org.spyder_ide.spyder.appdata.xml
@@ -96,7 +96,7 @@ ln -s %{_datadir}/javascript/mathjax/ \
ln -s spyder %{buildroot}%{_bindir}/spyder3
-%pretrans -n python3-%{name} -p <lua>
+%pretrans -n python3-spyder -p <lua>
--[[Back up any bundled mathjax directory from the old package. See:
https://docs.fedoraproject.org/en-US/packaging-guidelines/Directory_Repla...
]]
@@ -115,14 +115,14 @@ if st and st.type == "directory" then
end
-%files -n python3-%{name} -f %{pyproject_files}
+%files -n python3-spyder -f %{pyproject_files}
# A backed-up bundled mathjax directory from a previous upgrade may be present:
%ghost %{python3_sitelib}/spyder/plugins/help/utils/js/mathjax.rpmmoved
%{python3_sitelib}/spyder/plugins/help/utils/js/mathjax
-%{_bindir}/%{name}
-%{_bindir}/%{name}3
+%{_bindir}/spyder
+%{_bindir}/spyder3
%{_datadir}/metainfo/org.spyder_ide.spyder.appdata.xml
-%{_datadir}/applications/%{name}.desktop
+%{_datadir}/applications/spyder.desktop
%{_datadir}/icons/spyder.png
commit 7902da2b6b303e2640763650b81f4e47a4f4c569
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Mon Sep 25 11:19:03 2023 -0400
Do not number the sole Source
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index 8e3e2e1..a661397 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -9,7 +9,7 @@ Summary: Scientific Python Development Environment
%forgemeta
License: MIT
URL: https://www.spyder-ide.org/
-Source0: %forgesource
+Source: %forgesource
Patch: %{name}_relax_versions.patch
commit c9424d38258336c0d898e6ce89df7a43e05e5808
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 08:43:11 2023 -0400
Adjust whitespace in base package metadata
The goal is improved legibility.
[skip changelog]
diff --git a/spyder.spec b/spyder.spec
index e44bc57..8e3e2e1 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -5,11 +5,14 @@ Name: spyder
Version: 5.4.5
Release: %autorelease
Summary: Scientific Python Development Environment
+
%forgemeta
License: MIT
URL: https://www.spyder-ide.org/
Source0: %forgesource
+
Patch: %{name}_relax_versions.patch
+
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
ExcludeArch: %{ix86}
commit c8bce87f3f5a03bf206a6930ecb55e9dc457fa7f
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 08:41:23 2023 -0400
Make the spyder3 symlink relative
diff --git a/spyder.spec b/spyder.spec
index f236775..e44bc57 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -90,7 +90,7 @@ ln -s %{_datadir}/javascript/mathjax/ \
%{buildroot}%{python3_sitelib}/spyder/plugins/help/utils/js/mathjax
# provide spyder3 as symlink to spyder binary for continuity
-ln -s %{_bindir}/spyder %{buildroot}%{_bindir}/spyder3
+ln -s spyder %{buildroot}%{_bindir}/spyder3
%pretrans -n python3-%{name} -p <lua>
commit 824ea43a73e4c2a229558f1ae9a145a77e612df7
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 08:40:34 2023 -0400
Remove obsolete ldconfig_scriptlets macro
diff --git a/spyder.spec b/spyder.spec
index ede7aaf..f236775 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -92,8 +92,6 @@ ln -s %{_datadir}/javascript/mathjax/ \
# provide spyder3 as symlink to spyder binary for continuity
ln -s %{_bindir}/spyder %{buildroot}%{_bindir}/spyder3
-%ldconfig_scriptlets
-
%pretrans -n python3-%{name} -p <lua>
--[[Back up any bundled mathjax directory from the old package. See:
commit 93140021d1d5223b63f603a82eee4ba2f6d957d6
Author: Benjamin A. Beasley <code(a)musicinmybrain.net>
Date: Sun Sep 24 08:39:55 2023 -0400
Updated description from upstream
diff --git a/spyder.spec b/spyder.spec
index 6e828d1..ede7aaf 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -18,16 +18,17 @@ ExcludeArch: %{ix86}
ExclusiveArch: %{qt5_qtwebengine_arches}
%global _description %{expand:
-Spyder is a powerful scientific environment written in Python, for
-Python, and designed by and for scientists, engineers and data
-analysts. It features a unique combination of the advanced editing,
-analysis, debugging and profiling functionality of a comprehensive
-development tool with the data exploration, interactive execution, deep
-inspection and beautiful visualization capabilities of an analysis
-package. Furthermore, Spyder offers built-in integration with many
-popular scientific libraries, including NumPy, SciPy, Pandas, IPython,
-QtConsole, Matplotlib, SymPy, and more, and can be extended further
-with full plugin support.}
+Spyder is a powerful scientific environment written in Python, for Python, and
+designed by and for scientists, engineers and data analysts. It offers a unique
+combination of the advanced editing, analysis, debugging, and profiling
+functionality of a comprehensive development tool with the data exploration,
+interactive execution, deep inspection, and beautiful visualization
+capabilities of a scientific package.
+
+Beyond its many built-in features, its abilities can be extended even further
+via its plugin system and API. Furthermore, Spyder can also be used as a PyQt5
+extension library, allowing you to build upon its functionality and embed its
+components, such as the interactive console, in your own software.}
%description %_description
commit 9dc7cf6d0d44eaadbd0b64557098d85d4015d2a3
Author: Sandro <devel(a)penguinpee.nl>
Date: Sun Sep 24 16:06:27 2023 +0200
Drop versioned patch
This won't work with release monitoring and other automation
tools like Packit.
diff --git a/spyder.spec b/spyder.spec
index 05acf6f..6e828d1 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -9,7 +9,7 @@ Summary: Scientific Python Development Environment
License: MIT
URL: https://www.spyder-ide.org/
Source0: %forgesource
-Patch: %{name}-%{version}_relax_versions.patch
+Patch: %{name}_relax_versions.patch
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
ExcludeArch: %{ix86}
diff --git a/spyder-5.4.5_relax_versions.patch b/spyder_relax_versions.patch
similarity index 100%
rename from spyder-5.4.5_relax_versions.patch
rename to spyder_relax_versions.patch
commit 0656feb7a9a7fbf72021f02361f7c6748c0672e1
Author: Sandro <devel(a)penguinpee.nl>
Date: Sun Sep 24 15:42:10 2023 +0200
Add ExclusiveArch
- Follow suit since we depend on `pyqtwebengine`
diff --git a/spyder.spec b/spyder.spec
index ff48b16..05acf6f 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -13,6 +13,9 @@ Patch: %{name}-%{version}_relax_versions.patch
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
ExcludeArch: %{ix86}
+# Taken from pyqtwebengine's spec file. Since we require this, we need
+# to follow suit.
+ExclusiveArch: %{qt5_qtwebengine_arches}
%global _description %{expand:
Spyder is a powerful scientific environment written in Python, for
commit d6fb669fd45cb3ae9b531a59a55df97205b1ad39
Author: Sandro <devel(a)penguinpee.nl>
Date: Sun Sep 24 13:31:23 2023 +0200
Update to 5.4.5 (RHBZ#2220598)
diff --git a/.gitignore b/.gitignore
index 25c9536..54d475a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,3 +35,5 @@
/v5.2.1.tar.gz
/v5.3.0.tar.gz
/v5.3.1.tar.gz
+/spyder-5.3.1.tar.gz
+/spyder-5.4.5.tar.gz
diff --git a/sources b/sources
index 97aed91..a8ac661 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (v5.3.1.tar.gz) = 2501ddc45df11671b1a318eb33b50788b334ab10fbafb97a9491775a0efcc6a57814672068ab84314f9cb43961d6fabd9b906fb164cd58c77b6d6ecf0d6621e6
+SHA512 (spyder-5.4.5.tar.gz) = d938e3e64c22837bd4a7ab55822e9d763842cadba3cfb3de952652164054a4b87c2495bd8ae2617dbdd8bad5120e84e9017269d4bcdd6cbf73fb3d4dddbcad8f
diff --git a/spyder-5.3.1_relax_versions.patch b/spyder-5.3.1_relax_versions.patch
deleted file mode 100644
index d8cb787..0000000
--- a/spyder-5.3.1_relax_versions.patch
+++ /dev/null
@@ -1,20 +0,0 @@
---- setup.py-orig 2022-05-28 14:38:47.486105605 -0500
-+++ setup.py 2022-05-28 14:39:47.257341635 -0500
-@@ -209,7 +209,7 @@
- 'cookiecutter>=1.6.0',
- 'diff-match-patch>=20181111',
- 'intervaltree>=3.0.2',
-- 'ipython>=7.31.1,<8.0.0',
-+ 'ipython>=7.31.1',
- 'jedi>=0.17.2,<0.19.0',
- 'jellyfish>=0.7',
- 'jsonschema>=3.2.0',
-@@ -228,7 +228,7 @@
- 'pyls-spyder>=0.4.0',
- 'pyqt5<5.16',
- 'pyqtwebengine<5.16',
-- 'python-lsp-server[all]>=1.4.1,<1.5.0',
-+ 'python-lsp-server>=1.4.1,<1.5.0',
- 'pyxdg>=0.26;platform_system=="Linux"',
- 'pyzmq>=22.1.0',
- 'qdarkstyle>=3.0.2,<3.1.0',
diff --git a/spyder-5.4.5_relax_versions.patch b/spyder-5.4.5_relax_versions.patch
new file mode 100644
index 0000000..cacc1a8
--- /dev/null
+++ b/spyder-5.4.5_relax_versions.patch
@@ -0,0 +1,22 @@
+diff --git a/setup.py b/setup.py
+index a90bbc1be..c68fbb029 100644
+--- a/setup.py
++++ b/setup.py
+@@ -210,7 +210,7 @@ def run(self):
+ 'diff-match-patch>=20181111',
+ 'intervaltree>=3.0.2',
+ 'ipython>=7.31.1,<9.0.0,!=8.8.0,!=8.9.0,!=8.10.0,!=8.11.0,!=8.12.0,!=8.12.1',
+- 'jedi>=0.17.2,<0.19.0',
++ 'jedi>=0.17.2',
+ 'jellyfish>=0.7',
+ 'jsonschema>=3.2.0',
+ 'keyring>=17.0.0',
+@@ -229,7 +229,7 @@ def run(self):
+ 'pyls-spyder>=0.4.0',
+ 'pyqt5<5.16',
+ 'pyqtwebengine<5.16',
+- 'python-lsp-server[all]>=1.7.4,<1.8.0',
++ 'python-lsp-server>=1.7.4',
+ 'pyxdg>=0.26;platform_system=="Linux"',
+ 'pyzmq>=22.1.0',
+ 'qdarkstyle>=3.0.2,<3.2.0',
diff --git a/spyder.spec b/spyder.spec
index 0d31c06..ff48b16 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -2,7 +2,7 @@
%global forgeurl https://github.com/spyder-ide/spyder/
Name: spyder
-Version: 5.3.1
+Version: 5.4.5
Release: %autorelease
Summary: Scientific Python Development Environment
%forgemeta
commit c3308d907692d513eb49d53362b570a8bbdd4dab
Author: Sandro <devel(a)penguinpee.nl>
Date: Sun Sep 24 10:01:36 2023 +0200
Spec file overhaul
- Use forge macros
- Use Python RPM macros
- Restructure spec file
- Clean up obsolete BRs
diff --git a/spyder.spec b/spyder.spec
index 71e7393..0d31c06 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -1,59 +1,50 @@
-
-%global _description %{expand:
-Spyder is a powerful scientific environment written in Python, for Python, and
-designed by and for scientists, engineers and data analysts. It features a
-unique combination of the advanced editing, analysis, debugging and profiling
-functionality of a comprehensive development tool with the data exploration,
-interactive execution, deep inspection and beautiful visualization capabilities
-of an analysis package. Furthermore, Spyder offers built-in integration with
-many popular scientific libraries, including NumPy, SciPy, Pandas, IPython,
-QtConsole, Matplotlib, SymPy, and more, and can be extended further with
-full plugin support.
-}
+# Use forge macros for pulling from GitHub
+%global forgeurl https://github.com/spyder-ide/spyder/
Name: spyder
Version: 5.3.1
Release: %autorelease
Summary: Scientific Python Development Environment
-
-Source0: https://github.com/%{name}-ide/%{name}/archive/v%{version}.tar.gz
-
-Patch0: %{name}-%{version}_relax_versions.patch
+%forgemeta
License: MIT
URL: https://www.spyder-ide.org/
+Source0: %forgesource
+Patch: %{name}-%{version}_relax_versions.patch
BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
ExcludeArch: %{ix86}
-%description
-%_description
+%global _description %{expand:
+Spyder is a powerful scientific environment written in Python, for
+Python, and designed by and for scientists, engineers and data
+analysts. It features a unique combination of the advanced editing,
+analysis, debugging and profiling functionality of a comprehensive
+development tool with the data exploration, interactive execution, deep
+inspection and beautiful visualization capabilities of an analysis
+package. Furthermore, Spyder offers built-in integration with many
+popular scientific libraries, including NumPy, SciPy, Pandas, IPython,
+QtConsole, Matplotlib, SymPy, and more, and can be extended further
+with full plugin support.}
+
+%description %_description
%package -n python3-%{name}
Summary: %{summary}
-%{?python_provide:%python_provide python3-%{name}}
-
-BuildRequires: python3-devel
-BuildRequires: python3-sphinx
-BuildRequires: python3-setuptools
BuildRequires: desktop-file-utils
BuildRequires: libappstream-glib
-Requires: hicolor-icon-theme
-Requires: mathjax
+Requires: hicolor-icon-theme
+Requires: mathjax
-%description -n python3-%{name}
-%_description
+%description -n python3-%{name} %_description
%prep
-%setup -q -n %{name}-%{version}
-sed -i 's/\xe2\x80\x8b//g' scripts/org.spyder_ide.spyder.appdata.xml
-
-%patch0
+%forgeautosetup -p1
-rm -rf PKG-INFO external-deps
+sed -i 's/\xe2\x80\x8b//g' scripts/org.spyder_ide.spyder.appdata.xml
# Remove DOS line endings
for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
@@ -65,23 +56,29 @@ done
# remove bundled mathjax
rm -rvf spyder/plugins/help/utils/js/mathjax
+# Remove shebang
+sed -i '/^#!/d' spyder/utils/external/github.py
+sed -i '/^#!/d' spyder/app/restart.py
+
+
+%generate_buildrequires
+%pyproject_buildrequires
+
%build
-%py3_build
+%pyproject_wheel
%install
-mkdir -p %{buildroot}%{_datadir}/appdata
-mkdir -p %{buildroot}%{_datadir}/icons/hicolor/scalable/apps/
+%pyproject_install
+%pyproject_save_files %{name}
-%py3_install
desktop-file-install --dir=%{buildroot}%{_datadir}/applications scripts/%{name}.desktop
# install appdata file
appstream-util validate-relax --nonet %{buildroot}/%{_datadir}/metainfo/org.spyder_ide.spyder.appdata.xml
# cleanup
-rm -rvf %{buildroot}%{python3_sitelib}/spyderlib/doc/{.buildinfo,.doctrees}
rm -rvf %{buildroot}%{_bindir}/spyder_win_post_install.py
# replace bundled mathjax with a symlink to the system mathjax
@@ -113,11 +110,10 @@ if st and st.type == "directory" then
end
-%files -n python3-%{name}
-%{python3_sitelib}/spyder-*.egg-info
-%{python3_sitelib}/spyder/
+%files -n python3-%{name} -f %{pyproject_files}
# A backed-up bundled mathjax directory from a previous upgrade may be present:
%ghost %{python3_sitelib}/spyder/plugins/help/utils/js/mathjax.rpmmoved
+%{python3_sitelib}/spyder/plugins/help/utils/js/mathjax
%{_bindir}/%{name}
%{_bindir}/%{name}3
%{_datadir}/metainfo/org.spyder_ide.spyder.appdata.xml
commit fff9a965af418ca076758ce47b2fd2034796c42e
Author: Sandro <devel(a)penguinpee.nl>
Date: Sat Sep 23 18:18:28 2023 +0200
Whitespace changes
- Use spaces NOT tabs (this is Python after all)
- Be consistent with inter-section spacing
diff --git a/spyder.spec b/spyder.spec
index 53930f9..71e7393 100644
--- a/spyder.spec
+++ b/spyder.spec
@@ -11,41 +11,42 @@ QtConsole, Matplotlib, SymPy, and more, and can be extended further with
full plugin support.
}
-Name: spyder
-Version: 5.3.1
-Release: %autorelease
-Summary: Scientific Python Development Environment
+Name: spyder
+Version: 5.3.1
+Release: %autorelease
+Summary: Scientific Python Development Environment
-Source0: https://github.com/%{name}-ide/%{name}/archive/v%{version}.tar.gz
+Source0: https://github.com/%{name}-ide/%{name}/archive/v%{version}.tar.gz
-Patch0: %{name}-%{version}_relax_versions.patch
-License: MIT
-URL: https://www.spyder-ide.org/
-BuildArch: noarch
+Patch0: %{name}-%{version}_relax_versions.patch
+License: MIT
+URL: https://www.spyder-ide.org/
+BuildArch: noarch
# https://fedoraproject.org/wiki/Changes/EncourageI686LeafRemoval
-ExcludeArch: %{ix86}
-
+ExcludeArch: %{ix86}
%description
%_description
+
%package -n python3-%{name}
-Summary: %{summary}
+Summary: %{summary}
%{?python_provide:%python_provide python3-%{name}}
-BuildRequires: python3-devel
-BuildRequires: python3-sphinx
-BuildRequires: python3-setuptools
-BuildRequires: desktop-file-utils
-BuildRequires: libappstream-glib
+BuildRequires: python3-devel
+BuildRequires: python3-sphinx
+BuildRequires: python3-setuptools
+BuildRequires: desktop-file-utils
+BuildRequires: libappstream-glib
-Requires: hicolor-icon-theme
-Requires: mathjax
+Requires: hicolor-icon-theme
+Requires: mathjax
%description -n python3-%{name}
%_description
+
%prep
%setup -q -n %{name}-%{version}
sed -i 's/\xe2\x80\x8b//g' scripts/org.spyder_ide.spyder.appdata.xml
@@ -56,9 +57,9 @@ rm -rf PKG-INFO external-deps
# Remove DOS line endings
for file in `find -name "*.rst" -o -name "*.py" -o -name "*.css"`; do
- sed "s|\r||g" $file > $file.new && \
- touch -r $file $file.new && \
- mv $file.new $file
+ sed "s|\r||g" $file > $file.new && \
+ touch -r $file $file.new && \
+ mv $file.new $file
done
# remove bundled mathjax
7 months, 1 week
[Report] Packages Restricting Arches
by root
Package no longer excluding arches (1)
==================================
- rust-cssparser-macros
List of packages currently excluding arches (1142)
===========================================
- 0ad
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 ppc64le
- 90-Second-Portraits
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- BareBonesBrowserLaunch
ExclusiveArch: %{java_arches} noarch
- CFR
ExclusiveArch: %{java_arches} noarch
- CardManager
ExclusiveArch: %{java_arches} noarch
- GAPDoc
ExclusiveArch: %{gap_arches} noarch
- GoldenCheetah
ExclusiveArch: %{qt5_qtwebengine_arches}
- GtkAda
ExclusiveArch: %{GPRbuild_arches}
- GtkAda3
ExclusiveArch: %{GPRbuild_arches}
- IPAddress
ExclusiveArch: %{java_arches} noarch
- Mars
ExclusiveArch: %{java_arches} noarch
- OpenColorIO
ExclusiveArch: x86_64 ppc64le
- OpenImageIO
ExclusiveArch: x86_64 ppc64le
- OpenMolcas
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- OpenStego
ExclusiveArch: %{java_arches} noarch
- PragmARC
ExclusiveArch: %{GPRbuild_arches}
- R-V8
ExclusiveArch: %{nodejs_arches}
- R-rJava
ExclusiveArch: %{java_arches}
- RdRand
ExclusiveArch: %{ix86} x86_64
- RediSearch
ExclusiveArch: x86_64
- SLOF
ExclusiveArch: ppc64le
- YafaRay
ExclusiveArch: %{ix86} x86_64
- aardvark-dns
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- accel-config
ExclusiveArch: %{ix86} x86_64
- acpid
ExclusiveArch: ia64 x86_64 %{ix86} %{arm} aarch64
- ahven
ExclusiveArch: %{GPRbuild_arches}
- algobox
ExclusiveArch: %{qt5_qtwebengine_arches}
- american-fuzzy-lop
ExclusiveArch: %{ix86} x86_64 s390x
- anet
ExclusiveArch: %{GPRbuild_arches}
- ant
ExclusiveArch: %{java_arches} noarch
- ant-antunit
ExclusiveArch: %{java_arches} noarch
- ant-contrib
ExclusiveArch: %{java_arches} noarch
- antlr
ExclusiveArch: %{java_arches}
- antlr-maven-plugin
ExclusiveArch: %{java_arches} noarch
- antlr3
ExclusiveArch: %{java_arches}
- antlr4-project
ExclusiveArch: %{java_arches}
- antlrworks
ExclusiveArch: %{java_arches} noarch
- aopalliance
ExclusiveArch: %{java_arches} noarch
- apache-commons-beanutils
ExclusiveArch: %{java_arches} noarch
- apache-commons-cli
ExclusiveArch: %{java_arches} noarch
- apache-commons-codec
ExclusiveArch: %{java_arches} noarch
- apache-commons-collections
ExclusiveArch: %{java_arches} noarch
- apache-commons-collections4
ExclusiveArch: %{java_arches} noarch
- apache-commons-compress
ExclusiveArch: %{java_arches} noarch
- apache-commons-configuration
ExclusiveArch: %{java_arches} noarch
- apache-commons-digester
ExclusiveArch: %{java_arches} noarch
- apache-commons-exec
ExclusiveArch: %{java_arches} noarch
- apache-commons-io
ExclusiveArch: %{java_arches} noarch
- apache-commons-jexl
ExclusiveArch: %{java_arches} noarch
- apache-commons-jxpath
ExclusiveArch: %{java_arches} noarch
- apache-commons-lang3
ExclusiveArch: %{java_arches} noarch
- apache-commons-logging
ExclusiveArch: %{java_arches} noarch
- apache-commons-math
ExclusiveArch: %{java_arches} noarch
- apache-commons-modeler
ExclusiveArch: %{java_arches} noarch
- apache-commons-net
ExclusiveArch: %{java_arches} noarch
- apache-commons-parent
ExclusiveArch: %{java_arches} noarch
- apache-commons-pool
ExclusiveArch: %{java_arches} noarch
- apache-commons-vfs
ExclusiveArch: %{java_arches} noarch
- apache-ivy
ExclusiveArch: %{java_arches} noarch
- apache-parent
ExclusiveArch: %{java_arches} noarch
- apache-resource-bundles
ExclusiveArch: %{java_arches} noarch
- apache-sshd
ExclusiveArch: %{java_arches} noarch
- apiguardian
ExclusiveArch: %{java_arches} noarch
- apmd
ExclusiveArch: %{ix86}
- appstream-generator
ExclusiveArch: %{ldc_arches}
- aqute-bnd
ExclusiveArch: %{java_arches} noarch
- args4j
ExclusiveArch: %{java_arches} noarch
- arianna
ExclusiveArch: %{qt5_qtwebengine_arches}
- arm-trusted-firmware
ExclusiveArch: aarch64
- assertj-core
ExclusiveArch: %{java_arches} noarch
- atinject
ExclusiveArch: %{java_arches} noarch
- aunit
ExclusiveArch: %GPRbuild_arches
- auto
ExclusiveArch: %{java_arches} noarch
- autolink-java
ExclusiveArch: noarch %{java_arches}
- avgtime
ExclusiveArch: %{ldc_arches}
- aws
ExclusiveArch: %GPRbuild_arches
- azure-cli
ExclusiveArch: %{java_arches} noarch
- batik
ExclusiveArch: %{java_arches} noarch
- bcal
ExclusiveArch: x86_64 aarch64 ia64 ppc64 ppc64le s390x
- bcc
ExclusiveArch: x86_64 %{power64} aarch64 s390x armv7hl
- bcel
ExclusiveArch: %{java_arches} noarch
- bcm283x-firmware
ExclusiveArch: aarch64
- beansbinding
ExclusiveArch: %{java_arches} noarch
- belle-sip
ExclusiveArch: %{java_arches}
- berusky2
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 %{mips}
- beust-jcommander
ExclusiveArch: %{java_arches} noarch
- bibletime
ExclusiveArch: %{java_arches}
- biosdevname
ExclusiveArch: %{ix86} x86_64
- bless
ExclusiveArch: %mono_arches
- bodhi-server
ExclusiveArch: %{golang_arches_future}
- bolzplatz2006
ExclusiveArch: %{java_arches}
- bouncycastle
ExclusiveArch: %{java_arches} noarch
- box64
ExclusiveArch: aarch64 ppc64le x86_64
- bpftrace
ExclusiveArch: x86_64 %{power64} aarch64 s390x
- brazil
ExclusiveArch: %{java_arches} noarch
- bsf
ExclusiveArch: %{java_arches} noarch
- bsh
ExclusiveArch: %{java_arches} noarch
- build-helper-maven-plugin
ExclusiveArch: %{java_arches} noarch
- buildah
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- byte-buddy
ExclusiveArch: %{java_arches} noarch
- byteman
ExclusiveArch: %{java_arches} noarch
- cachelib
ExclusiveArch: x86_64 aarch64 ppc64le
- caddy
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: %{golang_arches}
- caffe
ExclusiveArch: x86_64 aarch64 ppc64le
- calamares
ExclusiveArch: %{ix86} x86_64 aarch64
- calibre
ExclusiveArch: aarch64 x86_64
- cambozola
ExclusiveArch: %{java_arches} noarch
- canl-java
ExclusiveArch: %{java_arches} noarch
- catatonit
ExclusiveArch: aarch64 ppc64le s390x x86_64
ExclusiveArch: %{golang_arches_future}
- ccdciel
ExclusiveArch: %{fpc_arches}
- cdcollect
ExclusiveArch: %{mono_arches}
- cdi-api
ExclusiveArch: %{java_arches} noarch
- ceph
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- cglib
ExclusiveArch: %{java_arches} noarch
- chromium
ExclusiveArch: x86_64
ExclusiveArch: x86_64 aarch64
ExclusiveArch: x86_64 aarch64
- cjdns
ExclusiveArch: %{nodejs_arches}
- classloader-leak-test-framework
ExclusiveArch: %{java_arches} noarch
- classpathless-compiler
ExclusiveArch: %{java_arches} noarch
- clevis-pin-tpm2
ExclusiveArch: %{rust_arches}
- clojure
ExclusiveArch: %{java_arches} noarch
- clojure-core-specs-alpha
ExclusiveArch: %{java_arches} noarch
- clojure-maven-plugin
ExclusiveArch: %{java_arches} noarch
- clojure-spec-alpha
ExclusiveArch: %{java_arches} noarch
- cmospwd
ExclusiveArch: %{ix86} x86_64
- cmrt
ExclusiveArch: %{ix86} x86_64 ia64
- codehaus-parent
ExclusiveArch: %{java_arches} noarch
- colorful
ExclusiveArch: %{fpc_arches}
- colossus
ExclusiveArch: %{java_arches} noarch
- conmon
ExclusiveArch: %{golang_arches_future}
- console-image-viewer
ExclusiveArch: %{java_arches} noarch
- containernetworking-plugins
ExclusiveArch: %{golang_arches}
- coq
ExclusiveArch: %{ocaml_native_compiler}
- cortado
ExclusiveArch: %{java_arches} noarch
- cpu-x
ExclusiveArch: i686 x86_64
- cpufetch
ExclusiveArch: %{arm} aarch64 x86_64 ppc ppc64 ppc64le
- cpuid
ExclusiveArch: %{ix86} x86_64
- cpuinfo
ExclusiveArch: x86_64 aarch64
- cqrlog
ExclusiveArch: %{fpc_arches}
- crash
ExclusiveArch: %{ix86} ia64 x86_64 ppc ppc64 s390 s390x %{arm} aarch64 ppc64le
- crash-gcore-command
ExclusiveArch: aarch64 ppc64le x86_64
- crash-trace-command
ExclusiveArch: aarch64 ppc64le s390x x86_64
- credentials-fetcher
ExclusiveArch: x86_64 aarch64 s390x
- cri-o
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- cri-tools
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- criu
ExclusiveArch: x86_64 %{arm} ppc64le aarch64 s390x
- crun
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le riscv64 s390x x86_64
- cryptlib
ExclusiveArch: x86_64 aarch64 ppc64le
- crypto-policies
ExclusiveArch: %{java_arches} noarch
- cryptobone
ExclusiveArch: x86_64 ppc64le aarch64
- csslint
ExclusiveArch: %{nodejs_arches} noarch
- daq
ExclusiveArch: x86_64 aarch64
- dbus-sharp
ExclusiveArch: %mono_arches
- dbus-sharp-glib
ExclusiveArch: %mono_arches
- decentxml
ExclusiveArch: %{java_arches} noarch
- deepin-daemon
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- deepin-desktop-schemas
ExclusiveArch: %{go_arches}
- directory-maven-plugin
ExclusiveArch: %{java_arches} noarch
- dirgra
ExclusiveArch: %{java_arches} noarch
- disruptor
ExclusiveArch: %{java_arches} noarch
- ditaa
ExclusiveArch: %{java_arches} noarch
- dlm
ExclusiveArch: i686 x86_64
- dmidecode
ExclusiveArch: %{ix86} x86_64 ia64 aarch64
- dmtcp
ExclusiveArch: x86_64
- docker-distribution
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- dogtag-pki
ExclusiveArch: %{java_arches}
- dolphin-emu
ExclusiveArch: x86_64 aarch64
- dom4j
ExclusiveArch: %{java_arches} noarch
- dotnet6.0
ExclusiveArch: aarch64 x86_64 s390x
ExclusiveArch: x86_64
- dotnet7.0
ExclusiveArch: aarch64 ppc64le s390x x86_64
ExclusiveArch: x86_64
- doublecmd
ExclusiveArch: %{ix86} x86_64
- dpdk
ExclusiveArch: x86_64 i686 aarch64 ppc64le
- dub
ExclusiveArch: %{ldc_arches}
- dxvk-native
ExclusiveArch: %{ix86} x86_64
- dyninst
ExclusiveArch: %{ix86} x86_64 ppc64le aarch64
- e3
ExclusiveArch: %{ix86} x86_64
- easymock
ExclusiveArch: %{java_arches} noarch
- ecj
ExclusiveArch: %{java_arches} noarch
- eclipse-swt
ExclusiveArch: %{java_arches}
- ed25519-java
ExclusiveArch: %{java_arches} noarch
- edk2
ExclusiveArch: x86_64 aarch64 riscv64
- efibootmgr
ExclusiveArch: %{efi}
- efifs
ExclusiveArch: %{efi}
- efitools
ExclusiveArch: %{efi}
- efivar
ExclusiveArch: %{efi}
- elk
ExclusiveArch: x86_64 %{ix86}
ExclusiveArch: x86_64 %{ix86} aarch64 %{arm} %{power64}
- emacs-slime
ExclusiveArch: %{arm} %{ix86} x86_64 ppc sparcv9 aarch64
- embree
ExclusiveArch: aarch64 x86_64
- embree3
ExclusiveArch: aarch64 x86_64
- enjarify
ExclusiveArch: %{java_arches} noarch
- enki
ExclusiveArch: %{qt5_qtwebengine_arches} noarch
- envytools
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- erlang-corba
ExclusiveArch: %{java_arches}
- esmi_ib_library
ExclusiveArch: x86_64
- eth-tools
ExclusiveArch: x86_64
- exec-maven-plugin
ExclusiveArch: %{java_arches} noarch
- external-editor-revived
ExclusiveArch: %{rust_arches}
- extra-enforcer-rules
ExclusiveArch: %{java_arches} noarch
- fasterxml-oss-parent
ExclusiveArch: %{java_arches} noarch
- fb303
ExclusiveArch: x86_64 aarch64 ppc64le
- fbthrift
ExclusiveArch: x86_64 aarch64 ppc64le
- fcitx-libpinyin
ExclusiveArch: %{qt5_qtwebengine_arches}
- fedora-dockerfiles
ExclusiveArch: %{go_arches}
- felix-parent
ExclusiveArch: %{java_arches} noarch
- felix-utils
ExclusiveArch: %{java_arches} noarch
- fernflower
ExclusiveArch: %{java_arches} noarch
- fes
ExclusiveArch: %{ix86} x86_64
- filedrop
ExclusiveArch: %{java_arches} noarch
- firecracker
ExclusiveArch: aarch64 x86_64
- fishbowl
ExclusiveArch: %{java_arches} noarch
- fizz
ExclusiveArch: x86_64 aarch64 ppc64le
- flexmark-java
ExclusiveArch: noarch %{java_arches}
- flocq
ExclusiveArch: %{ocaml_native_compiler}
- florist
ExclusiveArch: %{GPRbuild_arches}
- fluent-bit
ExclusiveArch: x86_64 aarch64
- flute
ExclusiveArch: %{java_arches} noarch
- folly
ExclusiveArch: x86_64 aarch64 ppc64le
- fop
ExclusiveArch: %{java_arches} noarch
- forge-parent
ExclusiveArch: %{java_arches} noarch
- fpc
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64 ppc64le
- frama-c
ExclusiveArch: %{ocaml_native_compiler}
- freecol
ExclusiveArch: %{java_arches} noarch
- freemarker
ExclusiveArch: %{java_arches} noarch
- freerouting
ExclusiveArch: %{java_arches} noarch
- frescobaldi
ExclusiveArch: %{qt5_qtwebengine_arches}
- frysk
ExclusiveArch: x86_64 ppc64
- fuse-overlayfs
ExclusiveArch: %{arm64} ppc64le s390x x86_64
- fusesource-pom
ExclusiveArch: %{java_arches} noarch
- fwts
ExclusiveArch: x86_64 %{arm} aarch64 s390x riscv64 %{power64}
- fwupd-efi
ExclusiveArch: x86_64 aarch64
- ga
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 ppc64le
- gap
ExclusiveArch: %{gap_arches}
- gap-pkg-ace
ExclusiveArch: %{gap_arches}
- gap-pkg-aclib
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-alnuth
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-atlasrep
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-autodoc
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-automata
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-autpgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-browse
ExclusiveArch: %{gap_arches}
- gap-pkg-caratinterface
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-circle
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-cohomolo
ExclusiveArch: %{gap_arches}
- gap-pkg-congruence
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-corelg
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crime
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crisp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crypting
ExclusiveArch: %{gap_arches}
- gap-pkg-cryst
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crystcat
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-ctbllib
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-curlinterface
ExclusiveArch: %{gap_arches}
- gap-pkg-cvec
ExclusiveArch: %{gap_arches}
- gap-pkg-datastructures
ExclusiveArch: %{gap_arches}
- gap-pkg-design
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-digraphs
ExclusiveArch: %{gap_arches}
- gap-pkg-edim
ExclusiveArch: %{gap_arches}
- gap-pkg-factint
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-ferret
ExclusiveArch: %{gap_arches}
- gap-pkg-fga
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-fining
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-float
ExclusiveArch: %{gap_arches}
- gap-pkg-format
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-forms
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-fr
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-francy
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-gbnp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-genss
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-grape
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-groupoids
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-grpconst
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-guava
ExclusiveArch: %{gap_arches}
- gap-pkg-hap
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-hapcryst
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-hecke
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-images
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-io
ExclusiveArch: %{gap_arches}
- gap-pkg-irredsol
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-json
ExclusiveArch: %{gap_arches}
- gap-pkg-jupyterkernel
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-jupyterviz
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-laguna
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-liealgdb
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-liepring
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-liering
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-loops
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-lpres
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-mapclass
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-nautytracesinterface
ExclusiveArch: %{gap_arches}
- gap-pkg-nq
ExclusiveArch: %{gap_arches}
- gap-pkg-openmath
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-orb
ExclusiveArch: %{gap_arches}
- gap-pkg-polenta
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-polycyclic
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-polymaking
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-primgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-profiling
ExclusiveArch: %{gap_arches}
- gap-pkg-qpa
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-quagroup
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-radiroot
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-recog
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-repsn
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-resclasses
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-scscp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-semigroups
ExclusiveArch: %{gap_arches}
- gap-pkg-singular
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-sla
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-smallgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-smallsemi
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-sonata
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-sophus
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-spinsym
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-standardff
ExclusiveArch: %{gap_arches}
- gap-pkg-tomlib
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-toric
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-transgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-utils
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-uuid
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-xmod
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-zeromqinterface
ExclusiveArch: %{gap_arches}
- gappalib-coq
ExclusiveArch: %{ocaml_native_compiler}
- gbrainy
ExclusiveArch: %mono_arches
- gdb
ExclusiveArch: %{ix86} x86_64
- gdb-exploitable
ExclusiveArch: x86_64 i386
ExclusiveArch: x86_64 noarch
- gearhead1
ExclusiveArch: %{fpc_arches}
- gearhead2
ExclusiveArch: %{fpc_arches}
- ghdl
ExclusiveArch: %{GNAT_arches}
- ghostwriter
ExclusiveArch: %{qt5_qtwebengine_arches}
- gio-sharp
ExclusiveArch: %mono_arches
- gir-to-d
ExclusiveArch: %{ldc_arches}
- git-octopus
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- gitqlient
ExclusiveArch: %{qt5_qtwebengine_arches}
- giver
ExclusiveArch: %{mono_arches}
- gkeyfile-sharp
ExclusiveArch: %mono_arches
- glibc32
ExclusiveArch: x86_64
- glibd
ExclusiveArch: %{ldc_arches}
- gnatcoll
ExclusiveArch: %{GPRbuild_arches}
- gnatcoll-bindings
ExclusiveArch: %{GPRbuild_arches}
- gnatcoll-db
ExclusiveArch: %{GPRbuild_arches}
- gnome-boxes
ExclusiveArch: x86_64
- gnome-desktop-sharp
ExclusiveArch: %mono_arches
- gnome-do
ExclusiveArch: %mono_arches
- gnome-keyring-sharp
ExclusiveArch: %mono_arches
- gnome-rdp
ExclusiveArch: %{mono_arches}
- gnome-sharp
ExclusiveArch: %mono_arches
- gnome-subtitles
ExclusiveArch: %mono_arches
- gnu-efi
ExclusiveArch: %{efi}
- go-bindata
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- godep
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- golang
ExclusiveArch: %{golang_arches}
- google-gson
ExclusiveArch: %{java_arches} noarch
- google-guice
ExclusiveArch: %{java_arches} noarch
- gotun
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
ExclusiveArch: x86_64
- goverlay
ExclusiveArch: %{fpc_arches}
- gprbuild
ExclusiveArch: %{GPRbuild_arches}
- gprolog
ExclusiveArch: x86_64 %{ix86} ppc alpha aarch64
- grafana
ExclusiveArch: %{grafana_arches}
- grafana-pcp
ExclusiveArch: %{grafanapcp_arches}
- gtk-sharp-beans
ExclusiveArch: %mono_arches
- gtk-sharp2
ExclusiveArch: %mono_arches
- gtk-sharp3
ExclusiveArch: %{mono_arches}
- gtkd
ExclusiveArch: %{ldc_arches}
- guava
ExclusiveArch: %{java_arches} noarch
- gudev-sharp
ExclusiveArch: %mono_arches
- guestfs-tools
ExclusiveArch: %{kernel_arches}
- gvisor-tap-vsock
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- hamcrest
ExclusiveArch: %{java_arches} noarch
- harec
ExclusiveArch: x86_64 aarch64
- hawtjni
ExclusiveArch: %{java_arches} noarch
- hedgewars
ExclusiveArch: %{fpc_arches}
- hibernate-jpa-2.0-api
ExclusiveArch: %{java_arches} noarch
- hid4java
ExclusiveArch: %{java_arches} noarch
- hipcub
ExclusiveArch: x86_64
- hsakmt
ExclusiveArch: x86_64 aarch64 ppc64le
- httpcomponents-client
ExclusiveArch: %{java_arches} noarch
- httpcomponents-core
ExclusiveArch: %{java_arches} noarch
- httpcomponents-project
ExclusiveArch: %{java_arches} noarch
- hyena
ExclusiveArch: %{mono_arches}
- hyperscan
ExclusiveArch: x86_64
- hyperv-daemons
ExclusiveArch: i686 x86_64 aarch64
- icaro
ExclusiveArch: %{ix86} x86_64 noarch
- icedtea-web
ExclusiveArch: %{java_arches}
- icu4j
ExclusiveArch: %{java_arches} noarch
- imagej
ExclusiveArch: %{java_arches} noarch
- imhex
ExclusiveArch: x86_64 %{arm64}
- imvirt
ExclusiveArch: %{ix86} x86_64 ia64
- indistarter
ExclusiveArch: %{fpc_arches}
- infinipath-psm
ExclusiveArch: x86_64
- intel-cm-compiler
ExclusiveArch: i686 x86_64
- intel-cmt-cat
ExclusiveArch: x86_64
- intel-compute-runtime
ExclusiveArch: x86_64
- intel-gmmlib
ExclusiveArch: x86_64 i686
- intel-igc
ExclusiveArch: x86_64
- intel-ipp-crypto-mb
ExclusiveArch: x86_64
- intel-ipsec-mb
ExclusiveArch: x86_64
- intel-mediasdk
ExclusiveArch: x86_64
- intel-undervolt
ExclusiveArch: i386 x86_64
- ioport
ExclusiveArch: %{ix86} x86_64
- ipmctl
ExclusiveArch: x86_64
- ispc
ExclusiveArch: x86_64 aarch64
- iucode-tool
ExclusiveArch: %{ix86} x86_64
- iyfct
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- jFormatString
ExclusiveArch: %{java_arches} noarch
- jackson-annotations
ExclusiveArch: %{java_arches} noarch
- jackson-bom
ExclusiveArch: %{java_arches} noarch
- jackson-core
ExclusiveArch: %{java_arches} noarch
- jackson-databind
ExclusiveArch: %{java_arches} noarch
- jackson-dataformats-binary
ExclusiveArch: %{java_arches} noarch
- jackson-dataformats-text
ExclusiveArch: %{java_arches} noarch
- jackson-jaxrs-providers
ExclusiveArch: %{java_arches} noarch
- jackson-modules-base
ExclusiveArch: %{java_arches} noarch
- jackson-parent
ExclusiveArch: %{java_arches} noarch
- jacoco
ExclusiveArch: %{java_arches} noarch
- jacop
ExclusiveArch: %{java_arches} noarch
- jakarta-activation
ExclusiveArch: %{java_arches} noarch
- jakarta-activation1
ExclusiveArch: %{java_arches} noarch
- jakarta-annotations
ExclusiveArch: %{java_arches} noarch
- jakarta-el
ExclusiveArch: %{java_arches} noarch
- jakarta-interceptors
ExclusiveArch: %{java_arches} noarch
- jakarta-json
ExclusiveArch: %{java_arches} noarch
- jakarta-mail
ExclusiveArch: %{java_arches} noarch
- jakarta-mail1
ExclusiveArch: %{java_arches} noarch
- jakarta-oro
ExclusiveArch: %{java_arches} noarch
- jakarta-saaj
ExclusiveArch: %{java_arches} noarch
- jakarta-server-pages
ExclusiveArch: %{java_arches} noarch
- jakarta-servlet
ExclusiveArch: %{java_arches} noarch
- jakarta-xml-ws
ExclusiveArch: %{java_arches} noarch
- janino
ExclusiveArch: %{java_arches} noarch
- jansi
ExclusiveArch: %{java_arches}
- jansi-native
ExclusiveArch: %{java_arches}
- jansi1
ExclusiveArch: %{java_arches} noarch
- java-1.8.0-openjdk
ExclusiveArch: %{java_arches}
- java-1.8.0-openjdk-aarch32
ExclusiveArch: %{arm}
- java-1.8.0-openjdk-portable
ExclusiveArch: %{java_arches}
- java-11-openjdk
ExclusiveArch: %{java_arches}
- java-11-openjdk-portable
ExclusiveArch: %{java_arches}
- java-17-openjdk
ExclusiveArch: %{java_arches}
- java-17-openjdk-portable
ExclusiveArch: %{java_arches}
- java-diff-utils
ExclusiveArch: %{java_arches} noarch
- java-dirq
ExclusiveArch: %{java_arches} noarch
- java-jd-decompiler
ExclusiveArch: %{java_arches} noarch
- java-latest-openjdk
ExclusiveArch: %{java_arches}
- java-latest-openjdk-portable
ExclusiveArch: %{java_arches}
- java-runtime-decompiler
ExclusiveArch: %{java_arches} noarch
- java-scrypt
ExclusiveArch: %{java_arches} noarch
- java_cup
ExclusiveArch: %{java_arches} noarch
- javacc
ExclusiveArch: %{java_arches} noarch
- javacc-maven-plugin
ExclusiveArch: %{java_arches} noarch
- javaewah
ExclusiveArch: %{java_arches} noarch
- javapackages-bootstrap
ExclusiveArch: %{java_arches}
- javaparser
ExclusiveArch: %{java_arches} noarch
- javapoet
ExclusiveArch: %{java_arches} noarch
- javassist
ExclusiveArch: %{java_arches} noarch
- jaxb
ExclusiveArch: %{java_arches} noarch
- jaxb-api
ExclusiveArch: %{java_arches} noarch
- jaxb-api2
ExclusiveArch: %{java_arches} noarch
- jaxb-dtd-parser
ExclusiveArch: %{java_arches} noarch
- jaxb-fi
ExclusiveArch: %{java_arches} noarch
- jaxb-istack-commons
ExclusiveArch: %{java_arches} noarch
- jaxb-stax-ex
ExclusiveArch: %{java_arches} noarch
- jaxen
ExclusiveArch: %{java_arches} noarch
- jboss-jaxrs-2.0-api
ExclusiveArch: %{java_arches} noarch
- jboss-logging
ExclusiveArch: %{java_arches} noarch
- jboss-logging-tools
ExclusiveArch: %{java_arches} noarch
- jboss-parent
ExclusiveArch: %{java_arches} noarch
- jchardet
ExclusiveArch: %{java_arches} noarch
- jcip-annotations
ExclusiveArch: %{java_arches} noarch
- jctools
ExclusiveArch: %{java_arches} noarch
- jcuber
ExclusiveArch: %{java_arches} noarch
- jdeparser
ExclusiveArch: %{java_arches} noarch
- jdepend
ExclusiveArch: %{java_arches} noarch
- jdependency
ExclusiveArch: %{java_arches} noarch
- jdom
ExclusiveArch: %{java_arches} noarch
- jdom2
ExclusiveArch: %{java_arches} noarch
- jedit
ExclusiveArch: %{java_arches} noarch
- jericho-html
ExclusiveArch: %{java_arches} noarch
- jetbrains-annotations
ExclusiveArch: noarch %{java_arches}
- jetty
ExclusiveArch: %{java_arches} noarch
- jflex
ExclusiveArch: %{java_arches} noarch
- jfreechart
ExclusiveArch: %{java_arches} noarch
- jgit
ExclusiveArch: %{java_arches} noarch
- jglobus
ExclusiveArch: %{java_arches} noarch
- jgoodies-common
ExclusiveArch: %{java_arches} noarch
- jgoodies-forms
ExclusiveArch: %{java_arches} noarch
- jgoodies-looks
ExclusiveArch: %{java_arches} noarch
- jigawatts
ExclusiveArch: x86_64 %{arm} ppc64le aarch64 s390x
- jline
ExclusiveArch: %{java_arches}
- jline2
ExclusiveArch: %{java_arches} noarch
- jmock
ExclusiveArch: %{java_arches} noarch
- jmol
ExclusiveArch: %{java_arches} noarch
- jna
ExclusiveArch: %{java_arches}
- jneuroml-core
ExclusiveArch: %{java_arches} noarch
- jni-inchi
ExclusiveArch: %{java_arches}
- jol
ExclusiveArch: %{java_arches} noarch
- jolokia-jvm-agent
ExclusiveArch: %{java_arches} noarch
- jopt-simple
ExclusiveArch: %{java_arches} noarch
- jorbis
ExclusiveArch: %{java_arches} noarch
- jowl
ExclusiveArch: %{nodejs_arches} noarch
- jpanoramamaker
ExclusiveArch: %{java_arches} noarch
- jsch
ExclusiveArch: %{java_arches} noarch
- jsch-agent-proxy
ExclusiveArch: %{java_arches} noarch
- json_simple
ExclusiveArch: %{java_arches} noarch
- jsoup
ExclusiveArch: %{java_arches} noarch
- jsr-305
ExclusiveArch: %{java_arches} noarch
- jss
ExclusiveArch: %{java_arches}
- jssc
ExclusiveArch: %{java_arches}
- jtidy
ExclusiveArch: %{java_arches} noarch
- julia
ExclusiveArch: x86_64
- junit
ExclusiveArch: %{java_arches} noarch
- junit5
ExclusiveArch: %{java_arches} noarch
- juniversalchardet
ExclusiveArch: %{java_arches} noarch
- jzlib
ExclusiveArch: %{java_arches} noarch
- kaidan
ExclusiveArch: %{qt5_qtwebengine_arches}
- kchmviewer
ExclusiveArch: %{qt5_qtwebengine_arches}
- kernel
ExclusiveArch: noarch x86_64 s390x aarch64 ppc64le
ExclusiveArch: noarch i386 i686 x86_64 s390x aarch64 ppc64le
- keylime-agent-rust
ExclusiveArch: %{rust_arches}
- keyring-ima-signer
ExclusiveArch: %{rust_arches}
- kf5-akonadi-search
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-audiocd-kio
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kblog
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kcalendarcore
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kcalendarutils
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kitinerary
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-ktnef
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-libkdcraw
ExclusiveArch: x86_64 ppc64le %{arm}
- kicad
ExclusiveArch: x86_64 aarch64 ppc64le
- kiwix-desktop
ExclusiveArch: %{qt5_qtwebengine_arches}
- knot-resolver
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64
- knotes
ExclusiveArch: x86_64 %{arm}
- kubernetes
ExclusiveArch: x86_64 aarch64 ppc64le s390x %{arm}
- laf-plugin
ExclusiveArch: %{java_arches} noarch
- lazarus
ExclusiveArch: %{fpc_arches}
- lazpaint
ExclusiveArch: %{fpc_arches}
- ldapjdk
ExclusiveArch: %{java_arches} noarch
- ldc
ExclusiveArch: %{ldc_arches} ppc64le
- ldc1.30
ExclusiveArch: %{ldc_arches} ppc64le
- ldc1.32
ExclusiveArch: %{ldc_arches} ppc64le
- libbase
ExclusiveArch: %{java_arches} noarch
- libclc
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 %{power64} s390x
- libcxl
ExclusiveArch: %{power64}
- libdfp
ExclusiveArch: ppc ppc64 ppc64le s390 s390x x86_64
- libdispatch
ExclusiveArch: x86_64 aarch64 ppc64le
- libfonts
ExclusiveArch: %{java_arches} noarch
- libformula
ExclusiveArch: %{java_arches} noarch
- libguestfs
ExclusiveArch: %{kernel_arches}
- libica
ExclusiveArch: s390 s390x
- libipt
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{ix86} x86_64
- libkgapi
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- libkrun
ExclusiveArch: x86_64 aarch64
- libkrunfw
ExclusiveArch: x86_64 aarch64
- liblayout
ExclusiveArch: %{java_arches} noarch
- libloader
ExclusiveArch: %{java_arches} noarch
- libnxz
ExclusiveArch: ppc64le
- libocxl
ExclusiveArch: ppc64le
- libpsm2
ExclusiveArch: x86_64
- libquentier
ExclusiveArch: %{qt5_qtwebengine_arches}
- libreoffice-TexMaths
ExclusiveArch: %{java_arches}
- librepository
ExclusiveArch: %{java_arches} noarch
- libretro-desmume2015
ExclusiveArch: i686 x86_64
- librtas
ExclusiveArch: %{power64}
- libserializer
ExclusiveArch: %{java_arches} noarch
- libservicelog
ExclusiveArch: ppc %{power64}
- libsmbios
ExclusiveArch: x86_64 %{ix86}
- libunicode
ExclusiveArch: x86_64 aarch64
- libunwind
ExclusiveArch: %{arm} aarch64 hppa ia64 mips ppc %{power64} s390x %{ix86} x86_64
- libva-nvidia-driver
ExclusiveArch: %{x86_64} %{ix86} %{arm64} ppc64le
- libvirt-java
ExclusiveArch: %{java_arches} noarch
- libvma
ExclusiveArch: x86_64 ppc64le ppc64 aarch64
- libvmi
ExclusiveArch: x86_64
- libvpd
ExclusiveArch: %{power64}
- libxsmm
ExclusiveArch: x86_64
- libzdnn
ExclusiveArch: s390x
- libzfcphbaapi
ExclusiveArch: s390 s390x
- libzpc
ExclusiveArch: s390x
- llhttp
ExclusiveArch: %{nodejs_arches}
- log4j
ExclusiveArch: %{java_arches} noarch
- log4net
ExclusiveArch: %mono_arches
- lrmi
ExclusiveArch: %{ix86}
- lsvpd
ExclusiveArch: %{power64}
- luajit
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
- lucene
ExclusiveArch: %{java_arches} noarch
- lujavrite
ExclusiveArch: %{java_arches}
- luxcorerender
ExclusiveArch: x86_64
- mactel-boot
ExclusiveArch: x86_64
- magicmirror
ExclusiveArch: %{nodejs_arches} noarch
- magicmirror-module-airnow
ExclusiveArch: %{nodejs_arches} noarch
- magicmirror-module-onthisday
ExclusiveArch: %{nodejs_arches} noarch
- magicmirror-module-singlestock
ExclusiveArch: %{nodejs_arches} noarch
- manifest-tool
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- mariadb-java-client
ExclusiveArch: %{java_arches} noarch
- marked
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- matrix-synapse
ExclusiveArch: %{rust_arches}
- maui-mauikit
ExclusiveArch: %{ix86} s390x aarch64 x86_64
- maven
ExclusiveArch: %{java_arches} noarch
- maven-antrun-plugin
ExclusiveArch: %{java_arches} noarch
- maven-archetype
ExclusiveArch: %{java_arches} noarch
- maven-archiver
ExclusiveArch: %{java_arches} noarch
- maven-artifact-transfer
ExclusiveArch: %{java_arches} noarch
- maven-assembly-plugin
ExclusiveArch: %{java_arches} noarch
- maven-bundle-plugin
ExclusiveArch: %{java_arches} noarch
- maven-clean-plugin
ExclusiveArch: %{java_arches} noarch
- maven-common-artifact-filters
ExclusiveArch: %{java_arches} noarch
- maven-compiler-plugin
ExclusiveArch: %{java_arches} noarch
- maven-dependency-analyzer
ExclusiveArch: %{java_arches} noarch
- maven-dependency-plugin
ExclusiveArch: %{java_arches} noarch
- maven-dependency-tree
ExclusiveArch: %{java_arches} noarch
- maven-doxia
ExclusiveArch: %{java_arches} noarch
- maven-doxia-sitetools
ExclusiveArch: %{java_arches} noarch
- maven-enforcer
ExclusiveArch: %{java_arches} noarch
- maven-file-management
ExclusiveArch: %{java_arches} noarch
- maven-filtering
ExclusiveArch: %{java_arches} noarch
- maven-invoker
ExclusiveArch: %{java_arches} noarch
- maven-invoker-plugin
ExclusiveArch: %{java_arches} noarch
- maven-jar-plugin
ExclusiveArch: %{java_arches} noarch
- maven-mapping
ExclusiveArch: %{java_arches} noarch
- maven-native
ExclusiveArch: %{java_arches} noarch
- maven-parent
ExclusiveArch: %{java_arches} noarch
- maven-patch-plugin
ExclusiveArch: %{java_arches} noarch
- maven-plugin-testing
ExclusiveArch: %{java_arches} noarch
- maven-plugin-tools
ExclusiveArch: %{java_arches} noarch
- maven-remote-resources-plugin
ExclusiveArch: %{java_arches} noarch
- maven-reporting-api
ExclusiveArch: %{java_arches} noarch
- maven-reporting-impl
ExclusiveArch: %{java_arches} noarch
- maven-resolver
ExclusiveArch: %{java_arches} noarch
- maven-resources-plugin
ExclusiveArch: %{java_arches} noarch
- maven-script-interpreter
ExclusiveArch: %{java_arches} noarch
- maven-shade-plugin
ExclusiveArch: %{java_arches} noarch
- maven-shared-incremental
ExclusiveArch: %{java_arches} noarch
- maven-shared-io
ExclusiveArch: %{java_arches} noarch
- maven-shared-utils
ExclusiveArch: %{java_arches} noarch
- maven-source-plugin
ExclusiveArch: %{java_arches} noarch
- maven-surefire
ExclusiveArch: %{java_arches} noarch
- maven-verifier
ExclusiveArch: %{java_arches} noarch
- maven-verifier-plugin
ExclusiveArch: %{java_arches} noarch
- maven-wagon
ExclusiveArch: %{java_arches} noarch
- maxima
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc sparcv9
ExclusiveArch: %{ix86} x86_64 ppc sparcv9
- mbpfan
ExclusiveArch: x86_64
- mcelog
ExclusiveArch: i686 x86_64
- mcrouter
ExclusiveArch: x86_64 aarch64 ppc64le
- mecab-java
ExclusiveArch: %java_arches
- mediaconch
ExclusiveArch: %{qt5_qtwebengine_arches}
- mellowplayer
ExclusiveArch: %{qt5_qtwebengine_arches}
- memkind
ExclusiveArch: x86_64 ppc64 ppc64le s390x aarch64
- memtest86+
ExclusiveArch: x86_64 %{ix86}
- merkuro
ExclusiveArch: %{qt5_qtwebengine_arches}
- microcode_ctl
ExclusiveArch: %{ix86} x86_64
- micropython
ExclusiveArch: %{arm} x86_64 riscv64
- miglayout
ExclusiveArch: %{java_arches} noarch
- mine_detector
ExclusiveArch: %{GPRbuild_arches}
- minetest
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
- mingw-libidn2
ExclusiveArch: %{ix86} x86_64 %{arm}
- mingw-wine-gecko
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- mkbootdisk
ExclusiveArch: %{ix86} sparc sparc64 x86_64
- moby-engine
ExclusiveArch: %{golang_arches}
- mockito
ExclusiveArch: %{java_arches} noarch
- mod_mono
ExclusiveArch: %mono_arches
- modello
ExclusiveArch: %{java_arches} noarch
- moditect
ExclusiveArch: %{java_arches} noarch
- module-build-service
ExclusiveArch: %{ix86} x86_64 noarch
- modulemaker-maven-plugin
ExclusiveArch: %{java_arches} noarch
- mojo-executor
ExclusiveArch: %{java_arches} noarch
- mojo-parent
ExclusiveArch: %{java_arches} noarch
- mokutil
ExclusiveArch: %{ix86} x86_64 aarch64 %{arm}
- mono
ExclusiveArch: %mono_arches
- mono-addins
ExclusiveArch: %mono_arches
- mono-basic
ExclusiveArch: %{mono_arches}
- mono-bouncycastle
ExclusiveArch: %mono_arches
- mono-cecil
ExclusiveArch: %mono_arches
- mono-cecil-flowanalysis
ExclusiveArch: %mono_arches
- mono-reflection
ExclusiveArch: %mono_arches
- mono-tools
ExclusiveArch: %mono_arches
- mono-zeroconf
ExclusiveArch: %mono_arches
- monodevelop
ExclusiveArch: %mono_arches
- monodevelop-debugger-gdb
ExclusiveArch: %{mono_arches}
- mrrescue
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- msr-tools
ExclusiveArch: %{ix86} x86_64
- mvfst
ExclusiveArch: x86_64 aarch64 ppc64le
- mxparser
ExclusiveArch: %{java_arches} noarch
- mysql-connector-java
ExclusiveArch: %{java_arches} noarch
- mysql-connector-net
ExclusiveArch: %{mono_arches}
- naev
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
- naga
ExclusiveArch: %{java_arches} noarch
- nant
ExclusiveArch: %mono_arches
- nativejit
ExclusiveArch: x86_64
- nbc
ExclusiveArch: %{fpc_arches}
- nbdkit
ExclusiveArch: x86_64
- ndesk-dbus
ExclusiveArch: %{mono_arches}
- ndesk-dbus-glib
ExclusiveArch: %{mono_arches}
- nekohtml
ExclusiveArch: %{java_arches} noarch
- netavark
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- newtonsoft-json
ExclusiveArch: %{mono_arches}
- nodejs-acorn-object-spread
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- nodejs-backbone
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-bash-language-server
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-buble
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-colors
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-generic-pool
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-less
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-linefix
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- nodejs-nodemon
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-packaging
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-replace-require-self
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-underscore
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- nodejs18
ExclusiveArch: %{nodejs_arches}
- nodejs20
ExclusiveArch: %{nodejs_arches}
- nom-tam-fits
ExclusiveArch: %{java_arches} noarch
- notify-sharp
ExclusiveArch: %{mono_arches}
- notify-sharp3
ExclusiveArch: %{mono_arches}
- nuget
ExclusiveArch: %{mono_arches}
- numatop
ExclusiveArch: x86_64 ppc64le
- nunit
ExclusiveArch: %{mono_arches}
- nunit2
ExclusiveArch: %{mono_arches}
- nvml
ExclusiveArch: x86_64 ppc64le
- objectweb-asm
ExclusiveArch: %{java_arches} noarch
- objenesis
ExclusiveArch: %{java_arches} noarch
- obs-service-rust2rpm
ExclusiveArch: %{rust_arches} noarch
- oci-seccomp-bpf-hook
ExclusiveArch: x86_64 %{power64} aarch64 s390x armv7hl
ExclusiveArch: %{golang_arches_future}
- oidn
ExclusiveArch: x86_64
- olpc-utils
ExclusiveArch: %{ix86} %{arm}
- oneVPL
ExclusiveArch: x86_64
- oneVPL-intel-gpu
ExclusiveArch: x86_64
- oneapi-level-zero
ExclusiveArch: x86_64
- onednn
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- onedrive
ExclusiveArch: %{ldc_arches}
- ongres-scram
ExclusiveArch: %{java_arches} noarch
- ongres-stringprep
ExclusiveArch: %{java_arches} noarch
- opae
ExclusiveArch: x86_64
- opal-prd
ExclusiveArch: ppc64le
- open-vm-tools
ExclusiveArch: x86_64 aarch64
ExclusiveArch: %{ix86} x86_64 aarch64
ExclusiveArch: x86_64
- openblas
ExclusiveArch: %{openblas_arches}
- openjdk-asmtools
ExclusiveArch: %{java_arches} noarch
- openjdk-asmtools7
ExclusiveArch: %{java_arches} noarch
- openjfx
ExclusiveArch: %{java_arches}
- openjfx8
ExclusiveArch: x86_64
- openlibm
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 %{power64}
- openms
ExclusiveArch: %{qt5_qtwebengine_arches}
- openni
ExclusiveArch: x86_64 %{arm}
- openni-primesense
ExclusiveArch: %{ix86} x86_64 %{arm}
- openoffice.org-diafilter
ExclusiveArch: %{java_arches}
- openpgl
ExclusiveArch: aarch64 x86_64
- openssl-ibmca
ExclusiveArch: s390 s390x
- openstack-java-sdk
ExclusiveArch: %{java_arches} noarch
- opentest4j
ExclusiveArch: %{java_arches} noarch
- openvkl
ExclusiveArch: aarch64 x86_64
- optee_client
ExclusiveArch: aarch64
- optee_os
ExclusiveArch: aarch64
- options
ExclusiveArch: %{java_arches} noarch
- orthorobot
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- osgi-annotation
ExclusiveArch: %{java_arches} noarch
- osgi-compendium
ExclusiveArch: %{java_arches} noarch
- osgi-core
ExclusiveArch: %{java_arches} noarch
- pacemaker
ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 %{arm}
ExclusiveArch: aarch64 i686 ppc64le s390x x86_64
- pageedit
ExclusiveArch: %{qt5_qtwebengine_arches}
- parserng
ExclusiveArch: %{java_arches} noarch
- pasdoc
ExclusiveArch: %{fpc_arches}
- pcc
ExclusiveArch: %{ix86} x86_64
- pcfi
ExclusiveArch: %{java_arches} noarch
- pcm
ExclusiveArch: %{ix86} x86_64
- pcmciautils
ExclusiveArch: %{ix86} x86_64 ia64 ppc ppc64 %{arm}
- pdbg
ExclusiveArch: ppc64le
- pdfbox
ExclusiveArch: %{java_arches} noarch
- pdfmod
ExclusiveArch: %mono_arches
- pdftk-java
ExclusiveArch: %{java_arches} noarch
- pentaho-libxml
ExclusiveArch: %{java_arches} noarch
- pentaho-reporting-flow-engine
ExclusiveArch: %{java_arches} noarch
- perl-Dumbbench
ExclusiveArch: %{ix86} x86_64 noarch
- perl-Parse-DMIDecode
ExclusiveArch: %{ix86} x86_64 ia64 aarch64
- pesign
ExclusiveArch: %{ix86} x86_64 ia64 aarch64 %{arm}
- pesign-test-app
ExclusiveArch: x86_64
- picocli
ExclusiveArch: %{java_arches} noarch
- pinta
ExclusiveArch: %mono_arches
- pioneer
ExclusiveArch: %{ix86} x86_64
- plantuml
ExclusiveArch: %{java_arches} noarch
- plasma-dialer
ExclusiveArch: %{java_arches}
- playonlinux
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64
- plexus-archiver
ExclusiveArch: %{java_arches} noarch
- plexus-build-api
ExclusiveArch: %{java_arches} noarch
- plexus-build-api0
ExclusiveArch: %{java_arches} noarch
- plexus-cipher
ExclusiveArch: %{java_arches} noarch
- plexus-classworlds
ExclusiveArch: %{java_arches} noarch
- plexus-compiler
ExclusiveArch: %{java_arches} noarch
- plexus-component-api
ExclusiveArch: %{java_arches} noarch
- plexus-components-pom
ExclusiveArch: %{java_arches} noarch
- plexus-containers
ExclusiveArch: %{java_arches} noarch
- plexus-i18n
ExclusiveArch: %{java_arches} noarch
- plexus-interpolation
ExclusiveArch: %{java_arches} noarch
- plexus-io
ExclusiveArch: %{java_arches} noarch
- plexus-languages
ExclusiveArch: %{java_arches} noarch
- plexus-pom
ExclusiveArch: %{java_arches} noarch
- plexus-resources
ExclusiveArch: %{java_arches} noarch
- plexus-sec-dispatcher
ExclusiveArch: %{java_arches} noarch
- plexus-utils
ExclusiveArch: %{java_arches} noarch
- plexus-velocity
ExclusiveArch: %{java_arches} noarch
- podman
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- poppler-sharp
ExclusiveArch: %mono_arches
- popub
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- postgresql-jdbc
ExclusiveArch: %{java_arches} noarch
- powerpc-utils
ExclusiveArch: ppc %{power64}
- powerstat
ExclusiveArch: %{ix86} x86_64
- ppc64-diag
ExclusiveArch: ppc %{power64}
- procyon
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
- proguard
ExclusiveArch: %{java_arches} noarch
- prometheus-jmx-exporter
ExclusiveArch: %{java_arches} noarch
- prometheus-simpleclient-java
ExclusiveArch: %{java_arches} noarch
- proxygen
ExclusiveArch: x86_64 aarch64 ppc64le
- pveclib
ExclusiveArch: ppc %{power64}
- pyqtwebengine
ExclusiveArch: %{qt5_qtwebengine_arches}
- python-ast-monitor
ExclusiveArch: %{qt6_qtwebengine_arches} noarch
- python-cryptography
ExclusiveArch: %{rust_arches}
- python-damo
ExclusiveArch: x86_64 aarch64 ppc64le s390x noarch
- python-etcd
ExclusiveArch: noarch %{ix86} x86_64 %{arm} aarch64 ppc64le s390x
- python-javaobj
ExclusiveArch: %{java_arches} noarch
- python-jnius
ExclusiveArch: %{java_arches}
- python-jupyter-polymake
ExclusiveArch: noarch aarch64 ppc64le s390x x86_64
- python-openoffice
ExclusiveArch: noarch x86_64
- python-pymoc
ExclusiveArch: aarch64 ppc64 ppc64le x86_64 s390x
- python-pyqt6-webengine
ExclusiveArch: aarch64 x86_64
- q4wine
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- qatengine
ExclusiveArch: x86_64
- qatzip
ExclusiveArch: x86_64
- qbe
ExclusiveArch: x86_64 aarch64
- qcint
ExclusiveArch: x86_64
- qclib
ExclusiveArch: s390 s390x
- qdox
ExclusiveArch: %{java_arches} noarch
- qemu-sanity-check
ExclusiveArch: %{kernel_arches}
- qevercloud
ExclusiveArch: %{qt5_qtwebengine_arches}
- qmapshack
ExclusiveArch: %{qt5_qtwebengine_arches}
- qt4pas
ExclusiveArch: %{fpc_arches}
- qt5-qtwebengine
ExclusiveArch: %{qt5_qtwebengine_arches}
- qt6-qtwebengine
ExclusiveArch: aarch64 x86_64
- qt6-qtwebview
ExclusiveArch: aarch64 x86_64
- quantum-espresso
ExclusiveArch: x86_64 %{ix86} aarch64 %{power64}
- quentier
ExclusiveArch: %{qt5_qtwebengine_arches}
- rEFInd
ExclusiveArch: %{efi}
- rachota
ExclusiveArch: %{java_arches} noarch
- rear
ExclusiveArch: %ix86 x86_64 ppc ppc64 ppc64le ia64
- reflections
ExclusiveArch: %{java_arches} noarch
- reg
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- regexp
ExclusiveArch: %{java_arches} noarch
- relaxng-datatype-java
ExclusiveArch: %{java_arches} noarch
- replacer
ExclusiveArch: %{java_arches} noarch
- reptyr
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- rescene
ExclusiveArch: %{mono_arches}
- resteasy
ExclusiveArch: %{java_arches} noarch
- restool
ExclusiveArch: aarch64
- restsharp
ExclusiveArch: %{mono_arches}
- retsnoop
ExclusiveArch: %{rust_arches}
- rhino
ExclusiveArch: %{java_arches} noarch
- river
ExclusiveArch: %{zig_arches}
- rkcommon
ExclusiveArch: x86_64 aarch64
- rocclr
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-compilersupport
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-device-libs
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-runtime
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-smi
ExclusiveArch: x86_64 aarch64 ppc64le
- rocminfo
ExclusiveArch: x86_64 aarch64 ppc64le
- rocprim
ExclusiveArch: x86_64
- rocrand
ExclusiveArch: x86_64
- rocthrust
ExclusiveArch: x86_64
- rpm-ostree
ExclusiveArch: %{rust_arches}
- rr
ExclusiveArch: %{ix86} x86_64 aarch64
- rssguard
ExclusiveArch: %{qt5_qtwebengine_arches}
- rstudio
ExclusiveArch: %{java_arches}
- rsyntaxtextarea
ExclusiveArch: %{java_arches} noarch
- rubygem-childprocess
ExclusiveArch: %{ix86} x86_64 noarch
- runc
ExclusiveArch: %{golang_arches_future}
- rundoc
ExclusiveArch: %{java_arches} noarch
- rust
ExclusiveArch: %{rust_arches}
- rust-actix-router
ExclusiveArch: %{rust_arches}
- rust-argmax
ExclusiveArch: %{rust_arches}
- rust-askalono-cli
ExclusiveArch: %{rust_arches}
- rust-base-x
ExclusiveArch: %{rust_arches}
- rust-bit-set
ExclusiveArch: %{rust_arches}
- rust-bootupd
ExclusiveArch: x86_64 aarch64
- rust-brotli
ExclusiveArch: %{rust_arches}
- rust-capnp-futures
ExclusiveArch: %{rust_arches}
- rust-cascade
ExclusiveArch: %{rust_arches}
- rust-cast
ExclusiveArch: %{rust_arches}
- rust-combine
ExclusiveArch: %{rust_arches}
- rust-compress-tools
ExclusiveArch: %{rust_arches}
- rust-const-oid0.6
ExclusiveArch: %{rust_arches}
- rust-coreos-installer
ExclusiveArch: %{rust_arches}
- rust-crossbeam
ExclusiveArch: %{rust_arches}
- rust-ctr
ExclusiveArch: %{rust_arches}
- rust-dashmap4
ExclusiveArch: %{rust_arches}
- rust-deflate
ExclusiveArch: %{rust_arches}
- rust-deflate0.8
ExclusiveArch: %{rust_arches}
- rust-drg
ExclusiveArch: %{rust_arches}
- rust-event-listener
ExclusiveArch: %{rust_arches}
- rust-fail
ExclusiveArch: %{rust_arches}
- rust-gag
ExclusiveArch: %{rust_arches}
- rust-getch
ExclusiveArch: %{rust_arches}
- rust-gzip-header
ExclusiveArch: %{rust_arches}
- rust-hamming
ExclusiveArch: %{rust_arches}
- rust-hidapi
ExclusiveArch: %{rust_arches}
- rust-httparse
ExclusiveArch: %{rust_arches}
- rust-humantime-serde
ExclusiveArch: %{rust_arches}
- rust-hyperlocal
ExclusiveArch: %{rust_arches}
- rust-image0.23
ExclusiveArch: %{rust_arches}
- rust-inflate
ExclusiveArch: %{rust_arches}
- rust-jpeg-decoder0.1
ExclusiveArch: %{rust_arches}
- rust-k9
ExclusiveArch: %{rust_arches}
- rust-krunvm
ExclusiveArch: x86_64 aarch64
- rust-kstring
ExclusiveArch: %{rust_arches}
- rust-kvm-ioctls
ExclusiveArch: x86_64 aarch64
- rust-lebe
ExclusiveArch: %{rust_arches}
- rust-libslirp-sys
ExclusiveArch: %{rust_arches}
- rust-msru
ExclusiveArch: x86_64
- rust-nanorand
ExclusiveArch: %{rust_arches}
- rust-netstat2
ExclusiveArch: %{rust_arches}
- rust-onig
ExclusiveArch: %{rust_arches}
- rust-openat-ext
ExclusiveArch: %{rust_arches}
- rust-ordered-float2
ExclusiveArch: %{rust_arches}
- rust-os_type
ExclusiveArch: %{rust_arches}
- rust-pager
ExclusiveArch: %{rust_arches}
- rust-pathsearch
ExclusiveArch: %{rust_arches}
- rust-pretty-bytes
ExclusiveArch: %{rust_arches}
- rust-pretty-hex
ExclusiveArch: %{rust_arches}
- rust-primal-bit
ExclusiveArch: %{rust_arches}
- rust-primal-check
ExclusiveArch: %{rust_arches}
- rust-primal-estimate
ExclusiveArch: %{rust_arches}
- rust-procfs0.9
ExclusiveArch: %{rust_arches}
- rust-prost
ExclusiveArch: %{rust_arches}
- rust-prost-derive
ExclusiveArch: %{rust_arches}
- rust-prost-derive0.8
ExclusiveArch: %{rust_arches}
- rust-prost0.8
ExclusiveArch: %{rust_arches}
- rust-psl-types
ExclusiveArch: %{rust_arches}
- rust-psm
ExclusiveArch: %{rust_arches}
- rust-pwd
ExclusiveArch: %{rust_arches}
- rust-rand_core
ExclusiveArch: %{rust_arches}
- rust-schemafy_core
ExclusiveArch: %{rust_arches}
- rust-schemafy_lib
ExclusiveArch: %{rust_arches}
- rust-seahash
ExclusiveArch: %{rust_arches}
- rust-serde-big-array
ExclusiveArch: %{rust_arches}
- rust-serde_qs
ExclusiveArch: %{rust_arches}
- rust-serde_yaml0.8
ExclusiveArch: %{rust_arches}
- rust-sev
ExclusiveArch: x86_64
- rust-sevctl
ExclusiveArch: x86_64
- rust-silver
ExclusiveArch: %{rust_arches}
- rust-sinit
ExclusiveArch: %{rust_arches}
- rust-snphost
ExclusiveArch: x86_64
- rust-ssh-key-dir
ExclusiveArch: %{rust_arches}
- rust-stacker
ExclusiveArch: %{rust_arches}
- rust-structopt
ExclusiveArch: %{rust_arches}
- rust-tiff0.6
ExclusiveArch: %{rust_arches}
- rust-totp-lite
ExclusiveArch: %{rust_arches}
- rust-tower-layer
ExclusiveArch: %{rust_arches}
- rust-tpm2-policy
ExclusiveArch: %{rust_arches}
- rust-tree-sitter
ExclusiveArch: %{rust_arches}
- rust-tree-sitter-cli
ExclusiveArch: %{rust_arches}
- rust-ubyte
ExclusiveArch: %{rust_arches}
- rust-unicode-normalization
ExclusiveArch: %{rust_arches}
- rust-unicode-xid
ExclusiveArch: %{rust_arches}
- rust-uriparse
ExclusiveArch: %{rust_arches}
- rust-uucore
ExclusiveArch: %{rust_arches}
- rust-varlink_generator
ExclusiveArch: %{rust_arches}
- rust-varlink_parser
ExclusiveArch: %{rust_arches}
- rust-virtio-bindings
ExclusiveArch: x86_64 aarch64 ppc64le
- rust-vmm-sys-util
ExclusiveArch: x86_64 aarch64 ppc64le
- rust-vtparse
ExclusiveArch: %{rust_arches}
- rust-web-ext-native-messaging
ExclusiveArch: %{rust_arches}
- rust-wezterm-color-types
ExclusiveArch: %{rust_arches}
- rust-wezterm-dynamic
ExclusiveArch: %{rust_arches}
- rust-wezterm-dynamic-derive
ExclusiveArch: %{rust_arches}
- rust-zbus1
ExclusiveArch: %{rust_arches}
- rust-zbus_macros1
ExclusiveArch: %{rust_arches}
- rust-zincati
ExclusiveArch: %{rust_arches}
- rust-zvariant2
ExclusiveArch: %{rust_arches}
- rust-zvariant_derive2
ExclusiveArch: %{rust_arches}
- s390utils
ExclusiveArch: s390 s390x
- sac
ExclusiveArch: %{java_arches} noarch
- safetyblanket
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- sat4j
ExclusiveArch: %{java_arches} noarch
- sbcl
ExclusiveArch: %{ix86} x86_64 aarch64 %{power64}
- sbd
ExclusiveArch: i686 x86_64 s390x aarch64 ppc64le
- sblim-cim-client
ExclusiveArch: %{java_arches} noarch
- sblim-cim-client2
ExclusiveArch: %{java_arches} noarch
- sbsigntools
ExclusiveArch: x86_64 aarch64 %{arm} %{ix86}
- scala
ExclusiveArch: %{java_arches} noarch
- scalacheck
ExclusiveArch: %{java_arches} noarch
- scannotation
ExclusiveArch: %{java_arches} noarch
- sdljava
ExclusiveArch: %{java_arches}
- sdrangel
ExclusiveArch: %{qt5_qtwebengine_arches}
- sdsl-lite
ExclusiveArch: %{power64} x86_64 aarch64
- sdubby
ExclusiveArch: %{efi}
- seabios
ExclusiveArch: x86_64
- seadrive-gui
ExclusiveArch: %{qt5_qtwebengine_arches}
- seafile-client
ExclusiveArch: %{qt5_qtwebengine_arches}
- seamonkey
ExclusiveArch: x86_64
- seqan3
ExclusiveArch: %{power64} x86_64 aarch64
- sequence-library
ExclusiveArch: %{java_arches} noarch
- servicelog
ExclusiveArch: ppc %{power64}
- shaman
ExclusiveArch: %{java_arches} noarch
- sharpfont
ExclusiveArch: %mono_arches
- sharpziplib
ExclusiveArch: %{mono_arches}
- shim
ExclusiveArch: %{efi}
- shim-unsigned-aarch64
ExclusiveArch: aarch64
- shim-unsigned-x64
ExclusiveArch: x86_64
- sigul
ExclusiveArch: x86_64
- singularity-ce
ExclusiveArch: %{go_arches}
- sisu
ExclusiveArch: %{java_arches} noarch
- sisu-mojos
ExclusiveArch: %{java_arches} noarch
- skopeo
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- skychart
ExclusiveArch: %{fpc_arches}
- slf4j
ExclusiveArch: %{java_arches} noarch
- slirp4netns
ExclusiveArch: %{golang_arches_future}
- snakeyaml
ExclusiveArch: %{java_arches} noarch
- snapd
ExclusiveArch: %{?golang_arches}%{!?golang_arches:%{ix86} x86_64 %{arm} aarch64 ppc64le s390x}
- snip
ExclusiveArch: %{java_arches} noarch
- softnet-stat
ExclusiveArch: %{rust_arches}
- soup-sharp
ExclusiveArch: %{mono_arches}
- spacebar
ExclusiveArch: %{java_arches}
- sparkleshare
ExclusiveArch: %{mono_arches}
- spec-version-maven-plugin
ExclusiveArch: %{java_arches} noarch
- spicctrl
ExclusiveArch: %{ix86} x86_64
- spice
ExclusiveArch: x86_64
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- spice-parent
ExclusiveArch: %{java_arches} noarch
- springlobby
ExclusiveArch: %{ix86} x86_64
- sqljet
ExclusiveArch: %{java_arches} noarch
- squeekboard
ExclusiveArch: %{rust_arches}
- startdde
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- stats-collect
ExclusiveArch: %{ix86} x86_64 noarch
- statsd
ExclusiveArch: %{nodejs_arches} noarch
- stratis-cli
ExclusiveArch: %{rust_arches} noarch
- stratisd
ExclusiveArch: %{rust_arches}
ExclusiveArch: %{rust_arches}
ExclusiveArch: %{rust_arches}
- string-template-maven-plugin
ExclusiveArch: %{java_arches} noarch
- stringtemplate
ExclusiveArch: %{java_arches} noarch
- stringtemplate4
ExclusiveArch: %{java_arches} noarch
- stripesnoop
ExclusiveArch: %{ix86} x86_64
- subscription-manager-cockpit
ExclusiveArch: %{nodejs_arches} noarch
- supercollider
ExclusiveArch: %{qt5_qtwebengine_arches}
- supermin
ExclusiveArch: %{kernel_arches}
- svnkit
ExclusiveArch: %{java_arches} noarch
- svt-vp9
ExclusiveArch: x86_64
- swift-lang
ExclusiveArch: x86_64 aarch64
- swing-layout
ExclusiveArch: %{java_arches} noarch
- sysbench
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips}
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64 ppc64le s390x
- syslinux
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{ix86} x86_64
- system-rules
ExclusiveArch: %{java_arches} noarch
- systemd-boot
ExclusiveArch: %efi
- t-digest
ExclusiveArch: %{java_arches} noarch
- taglib-sharp
ExclusiveArch: %{mono_arches}
- tagsoup
ExclusiveArch: %{java_arches} noarch
- tarantool
ExclusiveArch: %{ix86} x86_64
- tboot
ExclusiveArch: %{ix86} x86_64
- tdlib
ExclusiveArch: x86_64 aarch64
- templates_parser
ExclusiveArch: %GPRbuild_arches
- ternimal
ExclusiveArch: %{rust_arches}
- testcloud
ExclusiveArch: %{kernel_arches} noarch
- testng
ExclusiveArch: %{java_arches} noarch
- texlive
ExclusiveArch: %{java_arches} noarch
- thermald
ExclusiveArch: %{ix86} x86_64
- tilix
ExclusiveArch: %{ldc_arches}
- tomboy
ExclusiveArch: %{mono_arches}
- tomcat
ExclusiveArch: %{java_arches} noarch
- tomcat-native
ExclusiveArch: %{java_arches}
- tomcat-taglibs-parent
ExclusiveArch: %{java_arches} noarch
- tomcatjss
ExclusiveArch: %{java_arches} noarch
- torbrowser-launcher
ExclusiveArch: %{ix86} x86_64
- treelayout
ExclusiveArch: %{java_arches} noarch
- trilead-ssh2
ExclusiveArch: %{java_arches} noarch
- truth
ExclusiveArch: %{java_arches} noarch
- tuned-profiles-nfv-host-bin
ExclusiveArch: %{ix86} x86_64
- typescript
ExclusiveArch: %{nodejs_arches} noarch
- uClibc
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips}
- ucx
ExclusiveArch: aarch64 ppc64le x86_64
- uglify-js
ExclusiveArch: %{nodejs_arches} noarch
- umr
ExclusiveArch: x86_64 aarch64 ppc64le
- unetbootin
ExclusiveArch: %{ix86} x86_64
- univocity-parsers
ExclusiveArch: %{java_arches} noarch
- ursa-major
ExclusiveArch: noarch aarch64 ppc64le s390x x86_64
- usd
ExclusiveArch: aarch64 x86_64
- v8-314
ExclusiveArch: %{ix86} x86_64 %{arm} mips mipsel ppc ppc64
- vakzination
ExclusiveArch: %{java_arches}
- valgrind
ExclusiveArch: %{ix86} x86_64 ppc ppc64 ppc64le s390x armv7hl aarch64
- vboot-utils
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64
- vecmath1.2
ExclusiveArch: %{java_arches} noarch
- velocity
ExclusiveArch: %{java_arches} noarch
- vim-go
ExclusiveArch: %{?golang_arches}%{!?golang_arches:%{ix86} x86_64 %{arm}}
- vim-syntastic
ExclusiveArch: %{java_arches} noarch
- virt-p2v
ExclusiveArch: x86_64
- virt-v2v
ExclusiveArch: x86_64
- virtiofsd
ExclusiveArch: %{rust_arches}
- virtualbox-guest-additions
ExclusiveArch: i686 x86_64
- vkd3d
ExclusiveArch: %{ix86} x86_64 %{arm}
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- vmaf
ExclusiveArch: x86_64
- voms-api-java
ExclusiveArch: %{java_arches} noarch
- voms-clients-java
ExclusiveArch: %{java_arches} noarch
- vrq
ExclusiveArch: %{ix86} x86_64
- wangle
ExclusiveArch: x86_64 aarch64 ppc64le
- warsow
ExclusiveArch: %{ix86} x86_64 %{arm}
- warsow-data
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
ExclusiveArch: %{ix86} x86_64 %{arm}
- wasmedge
ExclusiveArch: x86_64 aarch64
- watchman
ExclusiveArch: x86_64 aarch64 ppc64le
- wdt
ExclusiveArch: x86_64 aarch64 ppc64le
- webkit2-sharp
ExclusiveArch: %mono_arches
- weld-parent
ExclusiveArch: %{java_arches} noarch
- why3
ExclusiveArch: %{ocaml_native_compiler}
- wine
ExclusiveArch: %{ix86} x86_64 aarch64
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{ix86}
- wine-dxvk
ExclusiveArch: %{ix86} x86_64
- winetricks
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- ws-commons-util
ExclusiveArch: %{java_arches} noarch
- wsdl4j
ExclusiveArch: %{java_arches} noarch
- wult
ExclusiveArch: x86_64
- wxMaxima
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc sparcv9
- x2goclient
ExclusiveArch: x86_64
- x86-simd-sort
ExclusiveArch: x86_64
- xalan-j2
ExclusiveArch: %{java_arches} noarch
- xbean
ExclusiveArch: %{java_arches} noarch
- xbyak
ExclusiveArch: x86_64
- xbyak_aarch64
ExclusiveArch: aarch64
- xe-guest-utilities-latest
ExclusiveArch: %{ix86} x86_64
- xen
ExclusiveArch: x86_64 aarch64
- xerces-j2
ExclusiveArch: %{java_arches} noarch
- xgap
ExclusiveArch: %{gap_arches}
- xml-commons-apis
ExclusiveArch: %{java_arches} noarch
- xml-commons-resolver
ExclusiveArch: %{java_arches} noarch
- xml-maven-plugin
ExclusiveArch: %{java_arches} noarch
- xmlada
ExclusiveArch: %{GPRbuild_arches}
- xmlgraphics-commons
ExclusiveArch: %{java_arches} noarch
- xmlpull
ExclusiveArch: %{java_arches} noarch
- xmlstreambuffer
ExclusiveArch: %{java_arches} noarch
- xmlunit
ExclusiveArch: %{java_arches} noarch
- xmvn
ExclusiveArch: %{java_arches} noarch
- xmvn-connector-ivy
ExclusiveArch: %{java_arches} noarch
- xmvn-generator
ExclusiveArch: %{java_arches}
- xorg-x11-drv-armsoc
ExclusiveArch: %{arm} aarch64
- xorg-x11-drv-intel
ExclusiveArch: %{ix86} x86_64
- xorg-x11-drv-openchrome
ExclusiveArch: %{ix86} x86_64
- xorg-x11-drv-vesa
ExclusiveArch: %{ix86} x86_64
- xorg-x11-drv-vmware
ExclusiveArch: %{ix86} x86_64 ia64
- xsp
ExclusiveArch: %mono_arches
- xstream
ExclusiveArch: %{java_arches} noarch
- xz-java
ExclusiveArch: %{java_arches} noarch
- yarnpkg
ExclusiveArch: %{nodejs_arches} noarch
- zcfan
ExclusiveArch: x86_64
- zeal
ExclusiveArch: aarch64 x86_64
- zenon
ExclusiveArch: %{ocaml_native_compiler}
- zeromq-ada
ExclusiveArch: %{GPRbuild_arches}
- zig
ExclusiveArch: %{zig_arches}
- zlib-ada
ExclusiveArch: %{GPRbuild_arches}
- zlib-ng
ExclusiveArch: aarch64 i686 ppc64le s390x x86_64
- zola
ExclusiveArch: %{rust_arches}
7 months, 1 week
Architecture specific change in rpms/rust-cssparser-macros.git
by githook-noreply@fedoraproject.org
The package rpms/rust-cssparser-macros.git has added or updated architecture specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/rust-cssparser-macros.git/commit/....
Change:
-ExclusiveArch: %{rust_arches}
Thanks.
Full change:
============
commit fcb589c2f5e60407aa4af7a3288f94dc090181a6
Author: Kalev Lember <klember(a)redhat.com>
Date: Thu Sep 28 17:33:08 2023 +0200
Update to 0.6.1
- Disable failing doctests
diff --git a/.gitignore b/.gitignore
index 184eb4d..fa4f22e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,3 +5,4 @@
/cssparser-macros-0.3.5.crate
/cssparser-macros-0.3.6.crate
/cssparser-macros-0.6.0.crate
+/cssparser-macros-0.6.1.crate
diff --git a/rust-cssparser-macros.spec b/rust-cssparser-macros.spec
index 38f74f7..ea03930 100644
--- a/rust-cssparser-macros.spec
+++ b/rust-cssparser-macros.spec
@@ -5,7 +5,7 @@
%global crate cssparser-macros
Name: rust-cssparser-macros
-Version: 0.6.0
+Version: 0.6.1
Release: %autorelease
Summary: Procedural macros for cssparser
@@ -60,7 +60,9 @@ use the "default" feature of the "%{crate}" crate.
%if %{with check}
%check
-%cargo_test
+# doctests are failing
+# https://github.com/servo/rust-cssparser/issues/364
+%cargo_test -- --lib
%endif
%changelog
diff --git a/sources b/sources
index e9556c7..5e61126 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (cssparser-macros-0.6.0.crate) = 9b0aa19d46efe600fbb8b4763639affa5491727ae05312f5c7fb9c5c4d29971c33cb9afcee5001c7d0aec8677d36b5763986783ce8c897de892411f1bdb9a4ea
+SHA512 (cssparser-macros-0.6.1.crate) = 21d2f8bcaad0a8fff865b23bb2b59dcc5165f8861c4b9d7d70e9f302b58e2cc5ef8555bcab97889bfe81889a2b27863c755494baa64a52c65cd74687fe58b124
commit e05c8195ae19a98825a328f69ca0865a1ee38cf9
Author: Kalev Lember <klember(a)redhat.com>
Date: Thu Sep 28 17:32:38 2023 +0200
Convert to %autorelease and %autochangelog
[skip changelog]
diff --git a/changelog b/changelog
new file mode 100644
index 0000000..0ff45a2
--- /dev/null
+++ b/changelog
@@ -0,0 +1,67 @@
+* Fri Jul 21 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-9
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild
+
+* Fri Jan 20 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-8
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
+
+* Mon Sep 05 2022 Kalev Lember <klember(a)redhat.com> - 0.6.0-7
+- Re-generate spec file with rust2rpm 22
+
+* Fri Jan 21 2022 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-6
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild
+
+* Fri Jul 23 2021 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild
+
+* Wed Jan 27 2021 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
+
+* Sat Aug 01 2020 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-3
+- Second attempt - Rebuilt for
+ https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
+
+* Wed Jul 29 2020 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
+
+* Sat Feb 15 14:58:24 CET 2020 Igor Raits <ignatenkobrain(a)fedoraproject.org> - 0.6.0-1
+- Update to 0.6.0
+
+* Thu Jan 30 2020 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.6-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
+
+* Mon Sep 16 15:34:15 CEST 2019 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.6-1
+- Update to 0.3.6
+
+* Fri Jul 26 2019 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.5-4
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
+
+* Sun Feb 10 2019 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.5-3
+- Run tests in infrastructure
+
+* Sat Feb 02 2019 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.5-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
+
+* Tue Jan 08 2019 Josh Stone <jistone(a)redhat.com> - 0.3.5-1
+- Update to 0.3.5
+- Adapt to new packaging
+
+* Sat Jul 14 2018 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.4-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
+
+* Sat Jun 30 2018 Josh Stone <jistone(a)redhat.com> - 0.3.4-1
+- Update to 0.3.4
+
+* Tue Apr 17 2018 Josh Stone <jistone(a)redhat.com> - 0.3.3-1
+- Update to 0.3.3
+
+* Fri Mar 09 2018 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.2-1
+- Update to 0.3.2
+
+* Fri Mar 09 2018 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.0-3
+- Bump syn to 0.12, quote to 0.4
+
+* Fri Feb 09 2018 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
+
+* Wed Jan 24 2018 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.0-1
+- Initial package
diff --git a/rust-cssparser-macros.spec b/rust-cssparser-macros.spec
index 33e3470..38f74f7 100644
--- a/rust-cssparser-macros.spec
+++ b/rust-cssparser-macros.spec
@@ -6,7 +6,7 @@
Name: rust-cssparser-macros
Version: 0.6.0
-Release: 9%{?dist}
+Release: %autorelease
Summary: Procedural macros for cssparser
License: MPL-2.0
@@ -64,70 +64,4 @@ use the "default" feature of the "%{crate}" crate.
%endif
%changelog
-* Fri Jul 21 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-9
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild
-
-* Fri Jan 20 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-8
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
-
-* Mon Sep 05 2022 Kalev Lember <klember(a)redhat.com> - 0.6.0-7
-- Re-generate spec file with rust2rpm 22
-
-* Fri Jan 21 2022 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-6
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild
-
-* Fri Jul 23 2021 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-5
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild
-
-* Wed Jan 27 2021 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-4
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild
-
-* Sat Aug 01 2020 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-3
-- Second attempt - Rebuilt for
- https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
-
-* Wed Jul 29 2020 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.6.0-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild
-
-* Sat Feb 15 14:58:24 CET 2020 Igor Raits <ignatenkobrain(a)fedoraproject.org> - 0.6.0-1
-- Update to 0.6.0
-
-* Thu Jan 30 2020 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.6-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild
-
-* Mon Sep 16 15:34:15 CEST 2019 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.6-1
-- Update to 0.3.6
-
-* Fri Jul 26 2019 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.5-4
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild
-
-* Sun Feb 10 2019 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.5-3
-- Run tests in infrastructure
-
-* Sat Feb 02 2019 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.5-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild
-
-* Tue Jan 08 2019 Josh Stone <jistone(a)redhat.com> - 0.3.5-1
-- Update to 0.3.5
-- Adapt to new packaging
-
-* Sat Jul 14 2018 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.4-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild
-
-* Sat Jun 30 2018 Josh Stone <jistone(a)redhat.com> - 0.3.4-1
-- Update to 0.3.4
-
-* Tue Apr 17 2018 Josh Stone <jistone(a)redhat.com> - 0.3.3-1
-- Update to 0.3.3
-
-* Fri Mar 09 2018 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.2-1
-- Update to 0.3.2
-
-* Fri Mar 09 2018 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.0-3
-- Bump syn to 0.12, quote to 0.4
-
-* Fri Feb 09 2018 Fedora Release Engineering <releng(a)fedoraproject.org> - 0.3.0-2
-- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild
-
-* Wed Jan 24 2018 Igor Gnatenko <ignatenkobrain(a)fedoraproject.org> - 0.3.0-1
-- Initial package
+%autochangelog
commit d5d17ec45d4e38d56d8cf7751a18e141dc93a14b
Author: Kalev Lember <klember(a)redhat.com>
Date: Thu Sep 28 17:32:05 2023 +0200
Re-generate spec file with rust2rpm 24
diff --git a/rust-cssparser-macros.spec b/rust-cssparser-macros.spec
index 9b90d26..33e3470 100644
--- a/rust-cssparser-macros.spec
+++ b/rust-cssparser-macros.spec
@@ -1,4 +1,4 @@
-# Generated by rust2rpm 22
+# Generated by rust2rpm 24
%bcond_without check
%global debug_package %{nil}
@@ -13,8 +13,6 @@ License: MPL-2.0
URL: https://crates.io/crates/cssparser-macros
Source: %{crates_source}
-ExclusiveArch: %{rust_arches}
-
BuildRequires: rust-packaging >= 21
%global _description %{expand:
7 months, 1 week
Architecture specific change in rpms/libreoffice.git
by githook-noreply@fedoraproject.org
The package rpms/libreoffice.git has added or updated architecture specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/libreoffice.git/commit/?id=04a7ee....
Change:
+%ifarch aarch64 s390x
Thanks.
Full change:
============
commit 04a7ee138b78c0807c4e80bba27b62c96766e49c
Author: Mattia Verga <mattia.verga(a)proton.me>
Date: Sun Oct 1 15:52:59 2023 +0200
Add patch 501 for failing tests on aarc64 and s390x
diff --git a/kahansum_test_fix_for_aarc64_s390x.patch b/kahansum_test_fix_for_aarc64_s390x.patch
new file mode 100644
index 0000000..e0110c0
--- /dev/null
+++ b/kahansum_test_fix_for_aarc64_s390x.patch
@@ -0,0 +1,16 @@
+diff -U 3 -dHrN a/sc/inc/arraysumfunctor.hxx b/sc/inc/arraysumfunctor.hxx
+--- a/sc/inc/arraysumfunctor.hxx 2023-09-22 21:07:01.000000000 +0200
++++ b/sc/inc/arraysumfunctor.hxx 2023-10-01 09:13:03.523493665 +0200
+@@ -85,8 +85,12 @@
+ inline KahanSum sumArray(const double* pArray, size_t nSize)
+ {
+ size_t i = 0;
++#if 0
+ const double* pCurrent = pArray;
+ KahanSum fSum = executeFast(i, nSize, pCurrent);
++#else
++ KahanSum fSum = 0.0;
++#endif
+
+ // sum rest of the array
+ for (; i < nSize; ++i)
diff --git a/libreoffice.spec b/libreoffice.spec
index d6c45dd..6e040b1 100644
--- a/libreoffice.spec
+++ b/libreoffice.spec
@@ -58,7 +58,7 @@ Summary: Free Software Productivity Suite
Name: libreoffice
Epoch: 1
Version: %{libo_version}.1
-Release: 1%{?libo_prerelease}%{?dist}
+Release: 2%{?libo_prerelease}%{?dist}
# default new files are: MPLv2
# older files are typically: MPLv2 incorporating work under ASLv2
# nlpsolver is: LGPLv3
@@ -279,6 +279,8 @@ Patch6: limit-tests-giving-dubious-results-to-x86_64.patch
# https://lists.freedesktop.org/archives/libreoffice/2023-August/090870.html
Patch11: lo-7.6-ppc64le-tests.patch
Patch500: 0001-disable-libe-book-support.patch
+# https://lists.freedesktop.org/archives/libreoffice/2023-September/090948....
+Patch501: kahansum_test_fix_for_aarc64_s390x.patch
%global instdir %{_libdir}
%global baseinstdir %{instdir}/libreoffice
@@ -1016,6 +1018,10 @@ mv -f redhat.soc extras/source/palettes/standard.soc
%patch500 -p1
%endif
+%ifarch aarch64 s390x
+%patch501 -p1
+%endif
+
# Temporarily disable failing tests
%ifarch ppc64le
sed -i -e /CppunitTest_sc_array_functions_test/d sc/Module_sc.mk
@@ -2258,6 +2264,9 @@ gtk-update-icon-cache -q %{_datadir}/icons/hicolor &>/dev/null || :
%{_includedir}/LibreOfficeKit
%changelog
+* Sun Oct 01 2023 Mattia Verga <mattia.verga(a)proton.me> - 1:7.6.2.1-2
+- Add patch 501 for failing tests on aarc64 and s390x
+
* Sat Sep 30 2023 Mattia Verga <mattia.verga(a)proton.me> - 1:7.6.2.1-1
- 7.6.2.1
commit e25c329969108c1b1e019a741f8aae87bdd69a7d
Author: Mattia Verga <mattia.verga(a)proton.me>
Date: Sat Sep 30 11:25:33 2023 +0200
7.6.2.1
diff --git a/.gitignore b/.gitignore
index b8793d5..b923ebb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,3 +41,9 @@
/libreoffice-help-7.6.1.2.tar.xz.asc
/libreoffice-translations-7.6.1.2.tar.xz
/libreoffice-translations-7.6.1.2.tar.xz.asc
+/libreoffice-7.6.2.1.tar.xz
+/libreoffice-7.6.2.1.tar.xz.asc
+/libreoffice-help-7.6.2.1.tar.xz
+/libreoffice-help-7.6.2.1.tar.xz.asc
+/libreoffice-translations-7.6.2.1.tar.xz
+/libreoffice-translations-7.6.2.1.tar.xz.asc
diff --git a/libreoffice.spec b/libreoffice.spec
index ba34747..d6c45dd 100644
--- a/libreoffice.spec
+++ b/libreoffice.spec
@@ -1,5 +1,5 @@
# download path contains version without the last (fourth) digit
-%global libo_version 7.6.1
+%global libo_version 7.6.2
# Should contain .alphaX / .betaX, if this is pre-release (actually
# pre-RC) version. The pre-release string is part of tarball file names,
# so we need a way to define it easily at one place.
@@ -57,7 +57,7 @@ ExcludeArch: %{ix86}
Summary: Free Software Productivity Suite
Name: libreoffice
Epoch: 1
-Version: %{libo_version}.2
+Version: %{libo_version}.1
Release: 1%{?libo_prerelease}%{?dist}
# default new files are: MPLv2
# older files are typically: MPLv2 incorporating work under ASLv2
@@ -273,6 +273,7 @@ Patch3: 0001-Revert-tdf-101630-gdrive-support-w-oAuth-and-Drive-A.patch
Patch4: 0001-default-to-sifr-for-gnome-light-mode.patch
# backported
Patch5: 0001-Only-pass-I.-arguments-to-g-ir-scanner-by-using-pkg-.patch
+Patch6: limit-tests-giving-dubious-results-to-x86_64.patch
# not upstreamed
# fix FTB in ppc64le from sharkcz
# https://lists.freedesktop.org/archives/libreoffice/2023-August/090870.html
@@ -2257,6 +2258,9 @@ gtk-update-icon-cache -q %{_datadir}/icons/hicolor &>/dev/null || :
%{_includedir}/LibreOfficeKit
%changelog
+* Sat Sep 30 2023 Mattia Verga <mattia.verga(a)proton.me> - 1:7.6.2.1-1
+- 7.6.2.1
+
* Fri Sep 08 2023 Gwyn Ciesla <gwync(a)protonmail.com> - 1:7.6.1.2-1
- 7.6.1.2
diff --git a/limit-tests-giving-dubious-results-to-x86_64.patch b/limit-tests-giving-dubious-results-to-x86_64.patch
new file mode 100644
index 0000000..f68ecad
--- /dev/null
+++ b/limit-tests-giving-dubious-results-to-x86_64.patch
@@ -0,0 +1,32 @@
+From 3426c96d0e32e0c83e4ce5fdb3405787a0c81e6c Mon Sep 17 00:00:00 2001
+From: Dan Horák <dan(a)danny.cz>
+Date: Fri, 01 Sep 2023 18:45:42 +0200
+Subject: [PATCH] limit tests giving dubious results to x86_64
+
+We have confirmed that ArrayFunctionsTest::testDubiousArrayFormulasFODS
+gives different results depending on the -ffp-contract setting on
+ppc64le and likely also on s390x in addition to aarch64. Thus limit the
+check only to x86_64 where it's known to give consistent results.
+
+Change-Id: Iedb63fb4340cfe9a88e374c7498d97574bcdfcc7
+Reviewed-on: https://gerrit.libreoffice.org/c/core/+/156453
+Tested-by: Jenkins
+Reviewed-by: Stephan Bergmann <sbergman(a)redhat.com>
+---
+
+diff --git a/sc/qa/unit/functions_array.cxx b/sc/qa/unit/functions_array.cxx
+index 7584c4e..2053c44 100644
+--- a/sc/qa/unit/functions_array.cxx
++++ b/sc/qa/unit/functions_array.cxx
+@@ -27,8 +27,9 @@
+ {
+ //TODO: sc/qa/unit/data/functions/array/dubious/fods/linest.fods produces widely different
+ // values when built with -ffp-contract enabled (-ffp-contract=on default on Clang 14,
+- // -ffp-contract=fast default when building with optimizations on GCC) on at least aarch64:
+-#if !((defined __clang__ || defined __GNUC__) && defined __aarch64__)
++ // -ffp-contract=fast default when building with optimizations on GCC) on at least aarch64
++ // and ppc64le. Thus limit the check only to platforms with consistent results.
++#if defined X86_64
+ OUString aDirectoryURL
+ = m_directories.getURLFromSrc(u"/sc/qa/unit/data/functions/array/dubious/fods/");
+ recursiveScan(test::pass, "OpenDocument Spreadsheet Flat XML", aDirectoryURL,
diff --git a/sources b/sources
index 05db5fa..9a74a16 100644
--- a/sources
+++ b/sources
@@ -1,12 +1,12 @@
-SHA512 (libreoffice-7.6.1.2.tar.xz) = e7bd1702ec4a893b91981d02bf813609be8f4a2991114a2c550dbedb11e249c23a753c369bba182691e3985253b6b1c9073102d806dd2d7f90a6ab6ad28aa17a
-SHA512 (libreoffice-7.6.1.2.tar.xz.asc) = 35ad56be21fe508fa246fdbea97338f32ace483b4d172959f2d48c7091bc1504076c25a43ec88fca81383ff38e739683f03073b622bc41ff74872760efdb0b4b
-SHA512 (libreoffice-help-7.6.1.2.tar.xz) = f0ce03b98385cf012a4f4e57651e6bb8fbe306c39108bab2c72c34e80b77f5d76c61cbf329658a6b82034c0315d8c67d27a8fc76d883574dd134a515b7695492
-SHA512 (libreoffice-help-7.6.1.2.tar.xz.asc) = beea8538e1ea3c5fc41165eec472c6be58a127be0ce1fd8a5e24ead86f3ce85a786b09fb67edb10237b632731d6697796c2ecfb733e58bba03f39e76d3a57914
-SHA512 (libreoffice-translations-7.6.1.2.tar.xz) = 7fb49db5c31d79d7b51ee8e5f7270006a02036df6c9586001c9466a3eb9634e60804ed61703bca93bf0095b5637957572f72ea4d9f4dde0c7d5f5015f939a865
-SHA512 (libreoffice-translations-7.6.1.2.tar.xz.asc) = 48bb16b56a7f114d543fafed6df4904d67459add8284a88e3b76ec8d28d55d78730a9c972b1a9d3a03f3f453a807fe25067288be00686098359cade602a6d1f2
SHA512 (17410483b5b5f267aa18b7e00b65e6e0-hsqldb_1_8_0.zip) = a231eba4a1baca11766ef292ab45e302081115477fe23018652882923308856835cf8c9ecba61a5cf22543474ccef3136965d794a90c9e4e9e6dcc21f9af6e1a
SHA512 (185d60944ea767075d27247c3162b3bc-unowinreg.dll) = 854b8ae29b57b40ba6bb6ff66e723a0e8dad053fcc2849f0ad763cd8a31352f4aeba9636fd4e3f0f2a0cd985a6f49b4261b9ace68d6be821ed42cfa7a73eb13c
SHA512 (a7983f859eafb2677d7ff386a023bc40-xsltml_2.1.2.zip) = 2d3835f7ac356805025cafedcad97faa48d0f5da386e6ac7b7451030059df8e2fdb0861ade07a576ebf9fb5b88a973585ab0437944b06aac9289d6898ba8586a
SHA512 (f543e6e2d7275557a839a164941c0a86e5f2c3f2a0042bfc434c88c6dde9e140-opens___.ttf) = 6a6d131dad5191614950a49323ae6d9385afe331983c1c85fde82ce6ee816051d95dde9ef90658b8f0a8a0a21754e72ff724bf41f6b96c046b7b4c2660f7095b
SHA512 (libreoffice-multiliblauncher.sh) = db532afdf5000bc66f9e02c7d0ab586468466f63f8f0bdb204832581e8277c5c59f688fa096548d642411cb8c46e8de4a744676b4e624c075262cfd6945138cd
SHA512 (dtoa-20180411.tgz) = 722aa814c33a34bfffe6c0201b0035cc3b65854a0ba9ae2f51620a89d68019353e2c306651b35bca337186b22b2e9865ef3c5e3df8e9328006f882e4577f8c85
+SHA512 (libreoffice-7.6.2.1.tar.xz) = 4b209c444437c067bbc08c5b7d0273fa32675a19afc15751a403e1ee024795fe87aecf64de193203ee4f84f5b7ae8d547cde4ed9aced4efd3d6743af65ac6896
+SHA512 (libreoffice-7.6.2.1.tar.xz.asc) = 0278e6e92971ce9d68f21056f0539c38a6a3def6269daf9263d3ed54bddd4761dea0cc2646b3dfff237a92d2bdb958339aee641651d67aa27d179f94ad26c480
+SHA512 (libreoffice-help-7.6.2.1.tar.xz) = 8fda9424a5ff5fbe0317e08d91504b75e3075d13d3f9b65aa21cdbc53578b1dda40b14168becc577b972eefb09a8255a5ce00322277714274c8adf6975f08a61
+SHA512 (libreoffice-help-7.6.2.1.tar.xz.asc) = 46a4546cbee7a7c66df3ebeda99674e5369aaae169c3ab8691d71a9ef5d59189df88084832991afd9d3928584ad83de8f060f452a1d78b2ef229e537eff5cd1d
+SHA512 (libreoffice-translations-7.6.2.1.tar.xz) = 4cecbc09d648ee751ca7bbcfa61b8ec174ac71aae7255c6359af5f6cbff019cc4ad5b6b4ca757329907175b13ce57e5afaa70355a9bfc48e86f6ccf8d0b7e80d
+SHA512 (libreoffice-translations-7.6.2.1.tar.xz.asc) = 936f34adf6648526db69efb968e66095430e8f5f7fbff241a64f44844dfb4eab2571e2813c39da52103054f72ebf08662b0f1c5d02a0876180dc138090ef6dfe
7 months, 1 week
Architecture specific change in rpms/condor.git
by githook-noreply@fedoraproject.org
The package rpms/condor.git has added or updated architecture specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/condor.git/commit/?id=98d727a48da...
https://src.fedoraproject.org/cgit/rpms/condor.git/commit/?id=ad0b762dbb3....
Change:
+%ifarch s390x
-%ifarch %{arm} %{ix86} x86_64
Thanks.
Full change:
============
commit f41e8b0a4bf691d4b3f663996dc82dbea64d899b
Merge: a17ee9b c79ff5e
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sun Oct 1 08:44:59 2023 -0500
Merge branch 'f38' into f39
diff --cc condor.spec
index f41c1e0,0b6e012..578346e
--- a/condor.spec
+++ b/condor.spec
@@@ -1059,12 -858,11 +858,17 @@@ don
/sbin/ldconfig
%changelog
+ * Sat Sep 30 2023 Tim Theisen <ttheisen(a)fedoraproject.org> - 23.0.0-1
+ - Update to latest upstream 23.0.0 - rhbz#1959462
+ - Fix build issues - rhbz#2114520, rhbz#2172630, rhbz#2172684
+ - Update to PCRE2 - rhbz#2128284
+
+* Wed Jul 19 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 8.8.15-12
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild
+
+* Thu Jun 15 2023 Python Maint <python-maint(a)redhat.com> - 8.8.15-11
+- Rebuilt for Python 3.12
+
* Thu Jan 19 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 8.8.15-10
- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
commit c79ff5e9b21c1ad806fbe25e804dd3cf483559bc
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sun Oct 1 07:41:18 2023 -0500
Get s390x to build
diff --git a/condor.spec b/condor.spec
index a8777ca..0b6e012 100644
--- a/condor.spec
+++ b/condor.spec
@@ -287,8 +287,7 @@ rm -rf %{buildroot}
# TODO: Fix up cmake and remove this hack
%ifarch s390x
-ls -l %{buildroot}/usr/lib %{buildroot}/usr/%{_lib}
-mv %{buildroot}/usr/lib %{buildroot}/usr/%{_lib}
+mv %{buildroot}/usr/lib/* %{buildroot}/usr/%{_lib}
%endif
# Drop in a symbolic link for backward compatibility
commit 39de83929d2d69b67fe3a96245253cff23511f9d
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sun Oct 1 00:43:05 2023 -0500
Lets find out what's wrong on s390x
diff --git a/condor.spec b/condor.spec
index 62dbe95..a8777ca 100644
--- a/condor.spec
+++ b/condor.spec
@@ -287,6 +287,7 @@ rm -rf %{buildroot}
# TODO: Fix up cmake and remove this hack
%ifarch s390x
+ls -l %{buildroot}/usr/lib %{buildroot}/usr/%{_lib}
mv %{buildroot}/usr/lib %{buildroot}/usr/%{_lib}
%endif
commit 98d727a48da241bc1a2798d5e050cd9ee8c586dd
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sun Oct 1 00:01:12 2023 -0500
Get s390x to build
diff --git a/condor.spec b/condor.spec
index 1928f1e..62dbe95 100644
--- a/condor.spec
+++ b/condor.spec
@@ -285,6 +285,11 @@ function populate {
rm -rf %{buildroot}
%cmake_install
+# TODO: Fix up cmake and remove this hack
+%ifarch s390x
+mv %{buildroot}/usr/lib %{buildroot}/usr/%{_lib}
+%endif
+
# Drop in a symbolic link for backward compatibility
ln -s ../..%{_libdir}/condor/condor_ssh_to_job_sshd_config_template %{buildroot}/%_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
commit 7d25ac872a25b2a2c4ac0ce0000adea5603000fb
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sat Sep 30 23:07:56 2023 -0500
Make 32-bit builds work again
diff --git a/condor.spec b/condor.spec
index 7f57bbf..1928f1e 100644
--- a/condor.spec
+++ b/condor.spec
@@ -363,12 +363,12 @@ cp %{SOURCE8} %{buildroot}%{_datadir}/condor/
#Fixups for packaged build, should have been done by cmake
mkdir -p %{buildroot}/usr/share/condor
-mv %{buildroot}/usr/lib64/condor/Chirp.jar %{buildroot}/usr/share/condor
-mv %{buildroot}/usr/lib64/condor/CondorJava*.class %{buildroot}/usr/share/condor
-mv %{buildroot}/usr/lib64/condor/libchirp_client.so %{buildroot}/usr/lib64
-mv %{buildroot}/usr/lib64/condor/libcondorapi.so %{buildroot}/usr/lib64
-mv %{buildroot}/usr/lib64/condor/libcondor_utils_*.so %{buildroot}/usr/lib64
-mv %{buildroot}/usr/lib64/condor/libpyclassad3*.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/%{_lib}/condor/Chirp.jar %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/%{_lib}/condor/CondorJava*.class %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/%{_lib}/condor/libchirp_client.so %{buildroot}/usr/%{_lib}
+mv %{buildroot}/usr/%{_lib}/condor/libcondorapi.so %{buildroot}/usr/%{_lib}
+mv %{buildroot}/usr/%{_lib}/condor/libcondor_utils_*.so %{buildroot}/usr/%{_lib}
+mv %{buildroot}/usr/%{_lib}/condor/libpyclassad3*.so %{buildroot}/usr/%{_lib}
rm -rf %{buildroot}/usr/share/doc/condor/LICENSE
rm -rf %{buildroot}/usr/share/doc/condor/NOTICE.txt
@@ -810,10 +810,10 @@ done
%_libdir/libpyclassad3*.so
%_libexecdir/condor/libclassad_python_user.cpython-3*.so
%_libexecdir/condor/libclassad_python3_user.so
-/usr/lib64/python%{python3_version}/site-packages/classad/
-/usr/lib64/python%{python3_version}/site-packages/htcondor/
-/usr/lib64/python%{python3_version}/site-packages/htcondor-*.egg-info/
-/usr/lib64/python%{python3_version}/site-packages/htcondor_cli/
+/usr/%{_lib}/python%{python3_version}/site-packages/classad/
+/usr/%{_lib}/python%{python3_version}/site-packages/htcondor/
+/usr/%{_lib}/python%{python3_version}/site-packages/htcondor-*.egg-info/
+/usr/%{_lib}/python%{python3_version}/site-packages/htcondor_cli/
%files credmon-oauth
%doc /usr/share/doc/condor/examples/condor_credmon_oauth
commit ad0b762dbb3fe163d3385a7dff4a563c84f742af
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sat Sep 30 21:25:56 2023 -0500
Update to latest upstream 23.0.0 - rhbz#1959462
Fix build issues - rhbz#2114520, rhbz#2172630, rhbz#2172684
Update to PCRE2 - rhbz#2128284
diff --git a/.gitignore b/.gitignore
index 62ba8ec..1f2a152 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,3 +40,4 @@ _build
/htcondor-8.8.8.tar.gz
/htcondor-8.8.10.tar.gz
/htcondor-8.8.15.tar.gz
+/htcondor-23.0.0.tar.gz
diff --git a/0001-Apply-the-user-s-condor_config-last-rather-than-firs.patch b/0001-Apply-the-user-s-condor_config-last-rather-than-firs.patch
deleted file mode 100644
index 82906b5..0000000
--- a/0001-Apply-the-user-s-condor_config-last-rather-than-firs.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 47a7bb8fb64885d46c995a18d2c4601fbf9609f9 Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Tue, 24 Jul 2012 09:40:06 -0500
-Subject: [PATCH] Apply the user's condor_config last, rather than first.
-
----
- src/condor_utils/condor_config.cpp | 55 +++++++++++++++++++++++++++++------
- 1 files changed, 45 insertions(+), 10 deletions(-)
-
-diff --git a/src/condor_utils/condor_config.cpp b/src/condor_utils/condor_config.cpp
-index ef35572..455bdfa 100644
---- a/src/condor_utils/condor_config.cpp
-+++ b/src/condor_utils/condor_config.cpp
-@@ -110,6 +110,7 @@ void check_params();
- // External variables
- extern int ConfigLineNo;
- } /* End extern "C" */
-+bool find_user_file(std::string &);
-
- // Global variables
- BUCKET *ConfigTab[TABLESIZE];
-@@ -654,6 +655,14 @@ real_config(char* host, int wantsQuiet, bool wantExtraInfo)
- if(dirlist) { free(dirlist); dirlist = NULL; }
- if(newdirlist) { free(newdirlist); newdirlist = NULL; }
-
-+ // Now, insert overrides from the user config file
-+ std::string file_location;
-+ if (find_user_file(file_location))
-+ {
-+ process_config_source( file_location.c_str(), "user local source", host, false );
-+ local_config_sources.append(file_location.c_str());
-+ }
-+
- // Now, insert any macros defined in the environment.
- char **my_environ = GetEnviron();
- for( int i = 0; my_environ[i]; i++ ) {
-@@ -996,6 +1005,38 @@ find_global()
- }
-
-
-+// Find user-specific location of a file
-+// Returns true if found, and puts the location in the file_location argument.
-+// If not found, returns false. The contents of file_location are undefined.
-+bool
-+find_user_file(std::string &file_location)
-+{
-+#ifdef UNIX
-+ // $HOME/.condor/condor_config
-+ struct passwd *pw = getpwuid( geteuid() );
-+ std::stringstream ss;
-+ if ( can_switch_ids() || !pw || !pw->pw_dir ) {
-+ return false;
-+ }
-+ ss << pw->pw_dir << "/." << myDistro->Get() << "/" << myDistro->Get() << "_config";
-+ file_location = ss.str();
-+
-+ int fd;
-+ if ((fd = safe_open_wrapper_follow(file_location.c_str(), O_RDONLY)) < 0) {
-+ return false;
-+ } else {
-+ close(fd);
-+ dprintf(D_FULLDEBUG, "Reading condor configuration from '%s'\n", file_location.c_str());
-+ }
-+
-+ return true;
-+#else
-+ // To get rid of warnings...
-+ file_location = "";
-+ return false;
-+#endif
-+}
-+
- // Find location of specified file
- char*
- find_file(const char *env_name, const char *file_name)
-@@ -1052,21 +1093,15 @@ find_file(const char *env_name, const char *file_name)
- if (!config_source) {
- // List of condor_config file locations we'll try to open.
- // As soon as we find one, we'll stop looking.
-- const int locations_length = 4;
-+ const int locations_length = 3;
- MyString locations[locations_length];
-- // 1) $HOME/.condor/condor_config
-- struct passwd *pw = getpwuid( geteuid() );
-- if ( !can_switch_ids() && pw && pw->pw_dir ) {
-- formatstr( locations[0], "%s/.%s/%s", pw->pw_dir, myDistro->Get(),
-- file_name );
-- }
- // 2) /etc/condor/condor_config
-- locations[1].formatstr( "/etc/%s/%s", myDistro->Get(), file_name );
-+ locations[0].formatstr( "/etc/%s/%s", myDistro->Get(), file_name );
- // 3) /usr/local/etc/condor_config (FreeBSD)
-- locations[2].formatstr( "/usr/local/etc/%s", file_name );
-+ locations[1].formatstr( "/usr/local/etc/%s", file_name );
- if (tilde) {
- // 4) ~condor/condor_config
-- locations[3].formatstr( "%s/%s", tilde, file_name );
-+ locations[2].formatstr( "%s/%s", tilde, file_name );
- }
-
- int ctr;
---
-1.7.4.1
-
diff --git a/00personal_condor.config b/00personal_condor.config
deleted file mode 100644
index 57604b9..0000000
--- a/00personal_condor.config
+++ /dev/null
@@ -1,34 +0,0 @@
-## What machine is your central manager?
-
-CONDOR_HOST = $(FULL_HOSTNAME)
-
-## Pool's short description
-
-COLLECTOR_NAME = Personal Condor at $(FULL_HOSTNAME)
-
-## When is this machine willing to start a job?
-
-START = TRUE
-
-
-## When to suspend a job?
-
-SUSPEND = FALSE
-
-
-## When to nicely stop a job?
-## (as opposed to killing it instantaneously)
-
-PREEMPT = FALSE
-
-
-## When to instantaneously kill a preempting job
-## (e.g. if a job is in the pre-empting stage for too long)
-
-KILL = FALSE
-
-## This macro determines what daemons the condor_master will start and keep its watchful eyes on.
-## The list is a comma or space separated list of subsystem names
-
-DAEMON_LIST = COLLECTOR, MASTER, NEGOTIATOR, SCHEDD, STARTD
-
diff --git a/BZ1000106.patch b/BZ1000106.patch
deleted file mode 100644
index 004d9c2..0000000
--- a/BZ1000106.patch
+++ /dev/null
@@ -1,9 +0,0 @@
-diff --git a/src/condor_examples/condor_config.generic.redhat b/src/condor_examples/condor_config.generic.redhat
-index 6f3caaa..a3ac4e7 100644
---- a/src/condor_examples/condor_config.generic.redhat
-+++ b/src/condor_examples/condor_config.generic.redhat
-@@ -2315,3 +2315,4 @@ UNHIBERNATE = CurrentTime - MachineLastMatchTime < 1200
- ##--------------------------------------------------------------------
- FILETRANSFER_PLUGINS = $(LIBEXEC)/curl_plugin, $(LIBEXEC)/data_plugin
-
-+USE_CLONE_TO_CREATE_PROCESSES = False
diff --git a/NOTICE.txt b/NOTICE.txt
deleted file mode 100644
index cc4365b..0000000
--- a/NOTICE.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Condor
-
-Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
-University of Wisconsin-Madison, WI.
-
-This source code is covered by the Apache License, Version 2.0, which
-can be found in the accompanying LICENSE-2.0.txt file, or online at
-http://www.apache.org/licenses/ .
-
-This product includes software developed by and/or derived from the
-Globus Project (http://www.globus.org/) to which the U.S. Government
-retains certain rights. Copyright (c) 1999 University of Chicago and
-The University of Southern California. All Rights Reserved.
-
-This product includes software developed by the OpenSSL Project for
-use in the OpenSSL Toolkit (http://www.openssl.org/). Complete
-conditions and disclaimers for OpenSSL can be found at
-http://www.openssl.org/source/license.html
-
-Some distributions of Condor include software developed by the
-Info-ZIP Project (http://www.info-zip.org/). Complete conditions
-and disclaimers for Info-ZIP can be found at
-http://www.info-zip.org/doc/LICENSE
-
-Some distributions of Condor include MAKEMSI software developed by
-Dennis Bareis (http://dennisbareis.com/makemsi.htm). Complete
-conditions and disclaimers for MAKEMSI can be found at
-http://makemsi-manual.dennisbareis.com/disclaimer.htm
-
-Some distributions of Condor include a compiled, unmodified version
-of the GNU C library. The complete source code to GNU glibc can be
-found at http://www.gnu.org/software/libc/.
-
-Part of the software embedded in this product is gSOAP software.
-Portions created by gSOAP are Copyright (C) 2001-2004 Robert A. van
-Engelen, Genivia inc. All Rights Reserved.
-THE SOFTWARE IN THIS PRODUCT WAS IN PART PROVIDED BY GENIVIA INC AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
-IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Werror_replace.patch b/Werror_replace.patch
deleted file mode 100644
index 4c6f6a3..0000000
--- a/Werror_replace.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/src/condor_contrib/CMakeLists.txt b/src/condor_contrib/CMakeLists.txt
-index 627334660f..5f02bcc581 100644
---- a/src/condor_contrib/CMakeLists.txt
-+++ b/src/condor_contrib/CMakeLists.txt
-@@ -21,7 +21,7 @@
-
- if (WANT_CONTRIB)
- dprint("building contrib modules")
-- string(REGEX REPLACE "-Werror" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
-+ #string(REGEX REPLACE "-Werror" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- #dprint( "CMAKE_CXX_FLAGS = ${CMAKE_CXX_FLAGS}")
-
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/utils")
diff --git a/boost-python38.patch b/boost-python38.patch
deleted file mode 100644
index d62a797..0000000
--- a/boost-python38.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/externals/bundles/boost/1.66.0/CMakeLists.txt b/externals/bundles/boost/1.66.0/CMakeLists.txt
-index 4a9a20b..5a972ca 100644
---- a/externals/bundles/boost/1.66.0/CMakeLists.txt
-+++ b/externals/bundles/boost/1.66.0/CMakeLists.txt
-@@ -52,7 +52,7 @@ else (WINDOWS)
- set (BOOST_COMPONENTS unit_test_framework ${BOOST_COMPONENTS})
- endif()
- if (WITH_PYTHON_BINDINGS)
-- set (BOOST_COMPONENTS python ${BOOST_COMPONENTS})
-+ set (BOOST_COMPONENTS python38 ${BOOST_COMPONENTS})
- endif()
-
- endif()
diff --git a/cgroup_reset_stats.patch b/cgroup_reset_stats.patch
deleted file mode 100644
index 458e7ad..0000000
--- a/cgroup_reset_stats.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-diff --git a/src/condor_procd/proc_family.cpp b/src/condor_procd/proc_family.cpp
-index d35ffcc..29d9471 100644
---- a/src/condor_procd/proc_family.cpp
-+++ b/src/condor_procd/proc_family.cpp
-@@ -54,7 +54,9 @@ ProcFamily::ProcFamily(ProcFamilyMonitor* monitor,
- m_member_list(NULL)
- #if defined(HAVE_EXT_LIBCGROUP)
- , m_cgroup_string(""),
-- m_cm(CgroupManager::getInstance())
-+ m_cm(CgroupManager::getInstance()),
-+ m_initial_user_cpu(0),
-+ m_initial_sys_cpu(0)
- #endif
- {
- #if !defined(WIN32)
-@@ -188,6 +190,7 @@ after_migrate:
- cgroup_free(&orig_cgroup);
- }
-
-+
- after_restore:
- if (orig_cgroup_string != NULL) {
- free(orig_cgroup_string);
-@@ -231,6 +234,27 @@ ProcFamily::set_cgroup(const std::string &cgroup_string)
- member = member->m_next;
- }
-
-+ // Record the amount of pre-existing CPU usage here.
-+ m_initial_user_cpu = 0;
-+ m_initial_sys_cpu = 0;
-+ get_cpu_usage_cgroup(m_initial_user_cpu, m_initial_sys_cpu);
-+
-+ // Reset block IO controller
-+ if (m_cm.isMounted(CgroupManager::BLOCK_CONTROLLER)) {
-+ struct cgroup *tmp_cgroup = cgroup_new_cgroup(m_cgroup_string.c_str());
-+ struct cgroup_controller *blkio_controller = cgroup_add_controller(tmp_cgroup, BLOCK_CONTROLLER_STR);
-+ ASSERT (blkio_controller != NULL); // Block IO controller should already exist.
-+ cgroup_add_value_uint64(blkio_controller, "blkio.reset_stats", 0);
-+ int err;
-+ if ((err = cgroup_modify_cgroup(tmp_cgroup))) {
-+ // Not allowed to reset stats?
-+ dprintf(D_ALWAYS,
-+ "Unable to reset cgroup %s block IO statistics. "
-+ "Some block IO accounting will be inaccurate (ProcFamily %u): %u %s\n",
-+ m_cgroup_string.c_str(), m_root_pid, err, cgroup_strerror(err));
-+ }
-+ }
-+
- return 0;
- }
-
-@@ -486,6 +510,40 @@ ProcFamily::aggregate_usage_cgroup_blockio(ProcFamilyUsage* usage)
- return 0;
- }
-
-+int ProcFamily::get_cpu_usage_cgroup(long &user_time, long &sys_time) {
-+
-+ if (!m_cm.isMounted(CgroupManager::CPUACCT_CONTROLLER)) {
-+ return 1;
-+ }
-+
-+ void * handle = NULL;
-+ u_int64_t tmp = 0;
-+ struct cgroup_stat stats;
-+ int err = cgroup_read_stats_begin(CPUACCT_CONTROLLER_STR, m_cgroup_string.c_str(), &handle, &stats);
-+ while (err != ECGEOF) {
-+ if (err > 0) {
-+ dprintf(D_PROCFAMILY,
-+ "Unable to read cgroup %s cpuacct stats (ProcFamily %u): %s.\n",
-+ m_cgroup_string.c_str(), m_root_pid, cgroup_strerror(err));
-+ break;
-+ }
-+ if (_check_stat_uint64(stats, "user", &tmp)) {
-+ user_time = tmp/clock_tick-m_initial_user_cpu;
-+ } else if (_check_stat_uint64(stats, "system", &tmp)) {
-+ sys_time = tmp/clock_tick-m_initial_sys_cpu;
-+ }
-+ err = cgroup_read_stats_next(&handle, &stats);
-+ }
-+ if (handle != NULL) {
-+ cgroup_read_stats_end(&handle);
-+ }
-+ if (err != ECGEOF) {
-+ dprintf(D_ALWAYS, "Internal cgroup error when retrieving CPU statistics: %s\n", cgroup_strerror(err));
-+ return 1;
-+ }
-+ return 0;
-+}
-+
- int
- ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
- {
-@@ -496,16 +554,13 @@ ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
-
- int err;
- struct cgroup_stat stats;
-- void **handle;
-+ void *handle = NULL;
- u_int64_t tmp = 0, image = 0;
- bool found_rss = false;
-
- // Update memory
-- handle = (void **)malloc(sizeof(void*));
-- ASSERT (handle != NULL);
-- *handle = NULL;
-
-- err = cgroup_read_stats_begin(MEMORY_CONTROLLER_STR, m_cgroup_string.c_str(), handle, &stats);
-+ err = cgroup_read_stats_begin(MEMORY_CONTROLLER_STR, m_cgroup_string.c_str(), &handle, &stats);
- while (err != ECGEOF) {
- if (err > 0) {
- dprintf(D_PROCFAMILY,
-@@ -522,10 +577,10 @@ ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
- } else if (_check_stat_uint64(stats, "total_swap", &tmp)) {
- image += tmp;
- }
-- err = cgroup_read_stats_next(handle, &stats);
-+ err = cgroup_read_stats_next(&handle, &stats);
- }
-- if (*handle != NULL) {
-- cgroup_read_stats_end(handle);
-+ if (handle != NULL) {
-+ cgroup_read_stats_end(&handle);
- }
- if (found_rss) {
- usage->total_image_size = image/1024;
-@@ -540,29 +595,12 @@ ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
- m_max_image_size = image/1024;
- }
- // Try updating the max size using cgroups
-- update_max_image_size_cgroup();
-+ // XXX: This is taken out for now - kernel calculates max INCLUDING
-+ // the filesystem cache. Not what you want.
-+ //update_max_image_size_cgroup();
-
- // Update CPU
-- *handle = NULL;
-- err = cgroup_read_stats_begin(CPUACCT_CONTROLLER_STR, m_cgroup_string.c_str(), handle, &stats);
-- while (err != ECGEOF) {
-- if (err > 0) {
-- dprintf(D_PROCFAMILY,
-- "Unable to read cgroup %s cpuacct stats (ProcFamily %u): %s.\n",
-- m_cgroup_string.c_str(), m_root_pid, cgroup_strerror(err));
-- break;
-- }
-- if (_check_stat_uint64(stats, "user", &tmp)) {
-- usage->user_cpu_time = tmp/clock_tick;
-- } else if (_check_stat_uint64(stats, "system", &tmp)) {
-- usage->sys_cpu_time = tmp/clock_tick;
-- }
-- err = cgroup_read_stats_next(handle, &stats);
-- }
-- if (*handle != NULL) {
-- cgroup_read_stats_end(handle);
-- }
-- free(handle);
-+ get_cpu_usage_cgroup(usage->user_cpu_time, usage->sys_cpu_time);
-
- aggregate_usage_cgroup_blockio(usage);
-
---- a/src/condor_procd/proc_family.h
-+++ b/src/condor_procd/proc_family.h
-@@ -181,6 +181,11 @@ private:
- std::string m_cgroup_string;
- CgroupManager &m_cm;
- static long clock_tick;
-+ // Sometimes Condor doesn't successfully clear out the cgroup from the
-+ // previous run. Hence, we subtract off any CPU usage found at the
-+ // start of the job.
-+ long m_initial_user_cpu;
-+ long m_initial_sys_cpu;
- static bool have_warned_about_memsw;
-
- int count_tasks_cgroup();
-@@ -190,6 +195,7 @@ private:
- int spree_cgroup(int);
- int migrate_to_cgroup(pid_t);
- void update_max_image_size_cgroup();
-+ int get_cpu_usage_cgroup(long &user_cpu, long &sys_cpu);
- #endif
- };
-
diff --git a/cgroups_noswap.patch b/cgroups_noswap.patch
deleted file mode 100644
index b94e7df..0000000
--- a/cgroups_noswap.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-diff --git a/src/condor_procd/proc_family.cpp b/src/condor_procd/proc_family.cpp
-index d35ffcc..2a5839f 100644
---- a/src/condor_procd/proc_family.cpp
-+++ b/src/condor_procd/proc_family.cpp
-@@ -36,6 +36,10 @@
-
- #include <unistd.h>
- long ProcFamily::clock_tick = sysconf( _SC_CLK_TCK );
-+
-+// Swap accounting is sometimes turned off. We use this variable so we
-+// warn about that situation only once.
-+bool ProcFamily::have_warned_about_memsw = false;
- #endif
-
- ProcFamily::ProcFamily(ProcFamilyMonitor* monitor,
-@@ -425,10 +429,19 @@ ProcFamily::update_max_image_size_cgroup()
- return;
- }
- if ((err = cgroup_get_value_uint64(memct, "memory.memsw.max_usage_in_bytes", &max_image))) {
-- dprintf(D_PROCFAMILY,
-- "Unable to load max memory usage for cgroup %s (ProcFamily %u): %u %s\n",
-- m_cgroup_string.c_str(), m_root_pid, err, cgroup_strerror(err));
-- return;
-+ // On newer nodes, swap accounting is disabled by default.
-+ // In some cases, swap accounting causes a kernel oops at the time of writing.
-+ // So, we check memory.max_usage_in_bytes instead.
-+ int err2 = cgroup_get_value_uint64(memct, "memory.max_usage_in_bytes", &max_image);
-+ if (err2) {
-+ dprintf(D_PROCFAMILY,
-+ "Unable to load max memory usage for cgroup %s (ProcFamily %u): %u %s\n",
-+ m_cgroup_string.c_str(), m_root_pid, err, cgroup_strerror(err));
-+ return;
-+ } else if (!have_warned_about_memsw) {
-+ have_warned_about_memsw = true;
-+ dprintf(D_ALWAYS, "Swap acounting is not available; only doing RAM accounting.\n");
-+ }
- }
- m_max_image_size = max_image/1024;
- }
-diff --git a/src/condor_procd/proc_family.h b/src/condor_procd/proc_family.h
-index 28a854c..d831d8e 100644
---- a/src/condor_procd/proc_family.h
-+++ b/src/condor_procd/proc_family.h
-@@ -181,6 +181,7 @@ private:
- std::string m_cgroup_string;
- CgroupManager &m_cm;
- static long clock_tick;
-+ static bool have_warned_about_memsw;
-
- int count_tasks_cgroup();
- int aggregate_usage_cgroup_blockio(ProcFamilyUsage*);
-diff --git a/src/condor_starter.V6.1/cgroup.linux.cpp b/src/condor_starter.V6.1/cgroup.linux.cpp
-index 97407b3..4fbd00d 100644
---- a/src/condor_starter.V6.1/cgroup.linux.cpp
-+++ b/src/condor_starter.V6.1/cgroup.linux.cpp
-@@ -185,13 +185,6 @@ int CgroupManager::create(const std::string &cgroup_string, Cgroup &cgroup,
- has_cgroup, changed_cgroup)) {
- return -1;
- }
-- if ((preferred_controllers & CPUACCT_CONTROLLER) &&
-- initialize_controller(*cgroupp, CPUACCT_CONTROLLER,
-- CPUACCT_CONTROLLER_STR,
-- required_controllers & CPUACCT_CONTROLLER,
-- has_cgroup, changed_cgroup)) {
-- return -1;
-- }
- if ((preferred_controllers & BLOCK_CONTROLLER) &&
- initialize_controller(*cgroupp, BLOCK_CONTROLLER,
- BLOCK_CONTROLLER_STR,
-diff --git a/src/condor_starter.V6.1/cgroup_limits.cpp b/src/condor_starter.V6.1/cgroup_limits.cpp
-index 71830a5..93e311c 100644
---- a/src/condor_starter.V6.1/cgroup_limits.cpp
-+++ b/src/condor_starter.V6.1/cgroup_limits.cpp
-@@ -20,7 +20,7 @@ CgroupLimits::CgroupLimits(std::string &cgroup) : m_cgroup_string(cgroup)
- int CgroupLimits::set_memory_limit_bytes(uint64_t mem_bytes, bool soft)
- {
- if (!m_cgroup.isValid() || !CgroupManager::getInstance().isMounted(CgroupManager::MEMORY_CONTROLLER)) {
-- dprintf(D_ALWAYS, "Unable to set memory limit because cgroup is invalid.");
-+ dprintf(D_ALWAYS, "Unable to set memory limit because cgroup is invalid.\n");
- return 1;
- }
-
-@@ -55,7 +55,7 @@ int CgroupLimits::set_memory_limit_bytes(uint64_t mem_bytes, bool soft)
- int CgroupLimits::set_cpu_shares(uint64_t shares)
- {
- if (!m_cgroup.isValid() || !CgroupManager::getInstance().isMounted(CgroupManager::CPU_CONTROLLER)) {
-- dprintf(D_ALWAYS, "Unable to set CPU shares because cgroup is invalid.");
-+ dprintf(D_ALWAYS, "Unable to set CPU shares because cgroup is invalid.\n");
- return 1;
- }
-
-@@ -89,7 +89,7 @@ int CgroupLimits::set_cpu_shares(uint64_t shares)
- int CgroupLimits::set_blockio_weight(uint64_t weight)
- {
- if (!m_cgroup.isValid() || !CgroupManager::getInstance().isMounted(CgroupManager::BLOCK_CONTROLLER)) {
-- dprintf(D_ALWAYS, "Unable to set blockio weight because cgroup is invalid.");
-+ dprintf(D_ALWAYS, "Unable to set blockio weight because cgroup is invalid.\n");
- return 1;
- }
-
diff --git a/condor-1605-v2.patch b/condor-1605-v2.patch
deleted file mode 100644
index 1702b1d..0000000
--- a/condor-1605-v2.patch
+++ /dev/null
@@ -1,1457 +0,0 @@
-diff --git a/src/condor_c-gahp/schedd_client.cpp b/src/condor_c-gahp/schedd_client.cpp
-index 32f0059..b8fda84 100644
---- a/src/condor_c-gahp/schedd_client.cpp
-+++ b/src/condor_c-gahp/schedd_client.cpp
-@@ -562,7 +562,7 @@ doContactSchedd()
- // Try connecting to the queue
- Qmgr_connection * qmgr_connection;
-
-- if ((qmgr_connection = ConnectQ(dc_schedd.addr(), QMGMT_TIMEOUT, false, NULL, NULL, dc_schedd.version() )) == NULL) {
-+ if ((qmgr_connection = ConnectQ(dc_schedd, QMGMT_TIMEOUT, false, NULL, NULL, dc_schedd.version() )) == NULL) {
- error = TRUE;
- sprintf( error_msg, "Error connecting to schedd %s", ScheddAddr );
- dprintf( D_ALWAYS, "%s\n", error_msg.c_str() );
-diff --git a/src/condor_contrib/triggerd/src/Triggerd.cpp b/src/condor_contrib/triggerd/src/Triggerd.cpp
-index 40e1197..59f2e55 100644
---- a/src/condor_contrib/triggerd/src/Triggerd.cpp
-+++ b/src/condor_contrib/triggerd/src/Triggerd.cpp
-@@ -819,7 +819,7 @@ Triggerd::PerformQueries()
- // Perform the query and check the result
- if (NULL != query_collector)
- {
-- status = query->fetchAds(result, query_collector->addr(), &errstack);
-+ status = query->fetchAds(result, *query_collector, &errstack);
- }
- else
- {
-diff --git a/src/condor_credd/credd.cpp b/src/condor_credd/credd.cpp
-index 7f0973f..c3b7b1f 100644
---- a/src/condor_credd/credd.cpp
-+++ b/src/condor_credd/credd.cpp
-@@ -80,7 +80,7 @@ store_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
-
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, WRITE, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-@@ -236,7 +236,7 @@ get_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
- // Authenticate
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, READ, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, READ, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-@@ -351,7 +351,7 @@ query_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
-
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, READ, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, READ, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-@@ -426,7 +426,7 @@ rm_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
-
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, READ, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, READ, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-diff --git a/src/condor_daemon_client/daemon.cpp b/src/condor_daemon_client/daemon.cpp
-index e2afded..5ccb2aa 100644
---- a/src/condor_daemon_client/daemon.cpp
-+++ b/src/condor_daemon_client/daemon.cpp
-@@ -162,7 +162,7 @@ Daemon::Daemon( const ClassAd* tAd, daemon_t tType, const char* tPool )
-
- getInfoFromAd( tAd );
-
-- dprintf( D_HOSTNAME, "New Daemon obj (%s) name: \"%s\", pool: "
-+ dprintf( D_HOSTNAME, "From ClassAd, new Daemon obj (%s) name: \"%s\", pool: "
- "\"%s\", addr: \"%s\"\n", daemonString(_type),
- _name ? _name : "NULL", _pool ? _pool : "NULL",
- _addr ? _addr : "NULL" );
-@@ -533,7 +533,7 @@ Daemon::connectSock(Sock *sock, int sec, CondorError* errstack, bool non_blockin
-
-
- StartCommandResult
--Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *, SecMan *sec_man, bool raw_protocol, char const *sec_session_id )
-+Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *, SecMan *sec_man, bool raw_protocol, char const *sec_session_id, const char * hostname )
- {
- // This function may be either blocking or non-blocking, depending
- // on the flag that is passed in. All versions of Daemon::startCommand()
-@@ -555,7 +555,7 @@ Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, S
- sock->timeout( timeout );
- }
-
-- start_command_result = sec_man->startCommand(cmd, sock, raw_protocol, errstack, 0, callback_fn, misc_data, nonblocking, cmd_description, sec_session_id);
-+ start_command_result = sec_man->startCommand(cmd, sock, raw_protocol, errstack, 0, callback_fn, misc_data, nonblocking, cmd_description, sec_session_id, hostname);
-
- if(callback_fn) {
- // SecMan::startCommand() called the callback function, so we just return here
-@@ -619,7 +619,8 @@ Daemon::startCommand( int cmd, Stream::stream_type st,Sock **sock,int timeout, C
- _version,
- &_sec_man,
- raw_protocol,
-- sec_session_id);
-+ sec_session_id,
-+ _full_hostname);
- }
-
- Sock*
-@@ -662,7 +663,7 @@ Daemon::startCommand_nonblocking( int cmd, Sock* sock, int timeout, CondorError
- {
- // This is the nonblocking version of startCommand().
- const bool nonblocking = true;
-- return startCommand(cmd,sock,timeout,errstack,callback_fn,misc_data,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id);
-+ return startCommand(cmd,sock,timeout,errstack,callback_fn,misc_data,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id, _full_hostname);
- }
-
- bool
-@@ -670,7 +671,7 @@ Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, c
- {
- // This is a blocking version of startCommand().
- const bool nonblocking = false;
-- StartCommandResult rc = startCommand(cmd,sock,timeout,errstack,NULL,NULL,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id);
-+ StartCommandResult rc = startCommand(cmd,sock,timeout,errstack,NULL,NULL,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id, _full_hostname);
- switch(rc) {
- case StartCommandSucceeded:
- return true;
-@@ -2097,7 +2098,7 @@ Daemon::forceAuthentication( ReliSock* rsock, CondorError* errstack )
- return true;
- }
-
-- return SecMan::authenticate_sock(rsock, CLIENT_PERM, errstack );
-+ return SecMan::authenticate_sock(rsock, CLIENT_PERM, errstack, _full_hostname);
- }
-
-
-diff --git a/src/condor_daemon_client/daemon.h b/src/condor_daemon_client/daemon.h
-index 57fcd8a..9aa3b9f 100644
---- a/src/condor_daemon_client/daemon.h
-+++ b/src/condor_daemon_client/daemon.h
-@@ -761,7 +761,7 @@ protected:
- It may be either blocking or nonblocking, depending on the
- nonblocking flag. This version uses an existing socket.
- */
-- static StartCommandResult startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *version, SecMan *sec_man, bool raw_protocol, char const *sec_session_id );
-+ static StartCommandResult startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *version, SecMan *sec_man, bool raw_protocol, char const *sec_session_id, char const *hostname );
-
- /**
- Internal function used by public versions of startCommand().
-@@ -769,7 +769,7 @@ protected:
- nonblocking flag. This version creates a socket of the
- specified type and connects it.
- */
-- StartCommandResult startCommand( int cmd, Stream::stream_type st,Sock **sock,int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description=NULL, bool raw_protocol=false, char const *sec_session_id=NULL );
-+ StartCommandResult startCommand( int cmd, Stream::stream_type st,Sock **sock,int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, bool raw_protocol, char const *sec_session_id );
-
- /**
- Class used internally to handle non-blocking connects for
-diff --git a/src/condor_daemon_client/daemon_list.cpp b/src/condor_daemon_client/daemon_list.cpp
-index 244d58a..597d981 100644
---- a/src/condor_daemon_client/daemon_list.cpp
-+++ b/src/condor_daemon_client/daemon_list.cpp
-@@ -330,7 +330,7 @@ CollectorList::query(CondorQuery & cQuery, ClassAdList & adList, CondorError *er
- }
-
- result =
-- cQuery.fetchAds (adList, daemon->addr(), errstack);
-+ cQuery.fetchAds (adList, *daemon, errstack);
-
- if( num_collectors > 1 ) {
- daemon->blacklistMonitorQueryFinished( result == Q_OK );
-diff --git a/src/condor_daemon_client/dc_schedd.cpp b/src/condor_daemon_client/dc_schedd.cpp
-index af1049e..769e12c 100644
---- a/src/condor_daemon_client/dc_schedd.cpp
-+++ b/src/condor_daemon_client/dc_schedd.cpp
-@@ -41,6 +41,9 @@ DCSchedd::DCSchedd( const char* the_name, const char* the_pool )
- {
- }
-
-+DCSchedd::DCSchedd( const ClassAd* ad, const char* pool )
-+ : Daemon( ad, DT_SCHEDD ,pool ) // Surprise! DT_SCHEDD is the second argument.
-+{}
-
- DCSchedd::~DCSchedd( void )
- {
-diff --git a/src/condor_daemon_client/dc_schedd.h b/src/condor_daemon_client/dc_schedd.h
-index 201c9b4..6389e09 100644
---- a/src/condor_daemon_client/dc_schedd.h
-+++ b/src/condor_daemon_client/dc_schedd.h
-@@ -62,6 +62,13 @@ public:
- */
- DCSchedd( const char* const name = NULL, const char* pool = NULL );
-
-+ /** Constructor. Same as a Daemon object.
-+ @param ad The classad of the schedd object; saves a query
-+ to the collector.
-+ @param pool The name of the pool, NULL if you want local
-+ */
-+ DCSchedd( const ClassAd* ad, const char* pool = NULL );
-+
- /// Destructor
- ~DCSchedd();
-
-diff --git a/src/condor_daemon_client/dc_startd.cpp b/src/condor_daemon_client/dc_startd.cpp
-index ec3ab14..9a4b44c 100644
---- a/src/condor_daemon_client/dc_startd.cpp
-+++ b/src/condor_daemon_client/dc_startd.cpp
-@@ -914,7 +914,6 @@ DCStartd::getAds( ClassAdList &adsList )
- // fetch the query
- QueryResult q;
- CondorQuery* query;
-- char* ad_addr;
-
- // instantiate query object
- if (!(query = new CondorQuery (STARTD_AD))) {
-@@ -923,8 +922,7 @@ DCStartd::getAds( ClassAdList &adsList )
- }
-
- if( this->locate() ){
-- ad_addr = this->addr();
-- q = query->fetchAds(adsList, ad_addr, &errstack);
-+ q = query->fetchAds(adsList, *this, &errstack);
- if (q != Q_OK) {
- if (q == Q_COMMUNICATION_ERROR) {
- dprintf( D_ALWAYS, "%s\n", errstack.getFullText(true) );
-diff --git a/src/condor_daemon_core.V6/daemon_command.cpp b/src/condor_daemon_core.V6/daemon_command.cpp
-index e6da114..3a96315 100644
---- a/src/condor_daemon_core.V6/daemon_command.cpp
-+++ b/src/condor_daemon_core.V6/daemon_command.cpp
-@@ -991,7 +991,7 @@ DaemonCommandProtocol::CommandProtocolResult DaemonCommandProtocol::Authenticate
- int auth_timeout = daemonCore->getSecMan()->getSecTimeout( m_comTable[cmd_index].perm );
-
- char *method_used = NULL;
-- bool auth_success = m_sock->authenticate(m_key, auth_methods, &errstack, auth_timeout, &method_used);
-+ bool auth_success = m_sock->authenticate(m_key, auth_methods, &errstack, auth_timeout, &method_used, NULL);
-
- if ( method_used ) {
- m_policy->Assign(ATTR_SEC_AUTHENTICATION_METHODS, method_used);
-@@ -1279,7 +1279,7 @@ DaemonCommandProtocol::CommandProtocolResult DaemonCommandProtocol::ExecCommand(
- m_comTable[cmd_index].force_authentication &&
- !m_sock->triedAuthentication() )
- {
-- SecMan::authenticate_sock(m_sock, WRITE, &errstack);
-+ SecMan::authenticate_sock(m_sock, WRITE, &errstack, NULL);
- // we don't check the return value, because the code below
- // handles what to do with unauthenticated connections
- }
-diff --git a/src/condor_eventd.V2/admin_event.cpp b/src/condor_eventd.V2/admin_event.cpp
-index 3fa1c46..83abc49 100644
---- a/src/condor_eventd.V2/admin_event.cpp
-+++ b/src/condor_eventd.V2/admin_event.cpp
-@@ -1089,7 +1089,7 @@ AdminEvent::FetchAds_ByConstraint( const char *constraint )
-
- query->addORConstraint( constraint );
-
-- q = query->fetchAds( m_collector_query_ads, pool->addr(), &errstack);
-+ q = query->fetchAds( m_collector_query_ads, pool, &errstack);
-
- if( q != Q_OK ){
- dprintf(D_ALWAYS, "Trouble fetching Ads with<<%s>><<%d>>\n",
-diff --git a/src/condor_gridmanager/gridmanager.cpp b/src/condor_gridmanager/gridmanager.cpp
-index 1f97ef7..03db810 100644
---- a/src/condor_gridmanager/gridmanager.cpp
-+++ b/src/condor_gridmanager/gridmanager.cpp
-@@ -644,7 +644,7 @@ doContactSchedd()
- }
-
-
-- schedd = ConnectQ( ScheddAddr, QMGMT_TIMEOUT, false, NULL, myUserName, CondorVersion() );
-+ schedd = ConnectQ( *ScheddObj, QMGMT_TIMEOUT, false, NULL, myUserName, CondorVersion() );
- if ( !schedd ) {
- error_str = "Failed to connect to schedd!";
- goto contact_schedd_failure;
-diff --git a/src/condor_includes/authentication.h b/src/condor_includes/authentication.h
-index d2b976a..6167e84 100644
---- a/src/condor_includes/authentication.h
-+++ b/src/condor_includes/authentication.h
-@@ -40,7 +40,7 @@ class Authentication {
-
- ~Authentication();
-
-- int authenticate( char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
-+ int authenticate( const char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
- //------------------------------------------
- // PURPOSE: authenticate with the other side
- // REQUIRE: hostAddr -- host to authenticate
-@@ -50,7 +50,7 @@ class Authentication {
- // RETURNS: -1 -- failure
- //------------------------------------------
-
-- int authenticate( char *hostAddr, KeyInfo *& key, const char* auth_methods, CondorError* errstack, int timeout);
-+ int authenticate( const char *hostAddr, KeyInfo *& key, const char* auth_methods, CondorError* errstack, int timeout);
- //------------------------------------------
- // PURPOSE: To send the secret key over. this method
- // is written to keep compatibility issues
-@@ -161,7 +161,7 @@ class Authentication {
-
- #endif /* !SKIP_AUTHENTICATION */
-
-- int authenticate_inner( char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
-+ int authenticate_inner( const char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
-
- //------------------------------------------
- // Data (private)
-diff --git a/src/condor_includes/condor_auth_x509.h b/src/condor_includes/condor_auth_x509.h
-index bdbc545..dac8aa5 100644
---- a/src/condor_includes/condor_auth_x509.h
-+++ b/src/condor_includes/condor_auth_x509.h
-@@ -103,7 +103,7 @@ class Condor_Auth_X509 : public Condor_Auth_Base {
-
- int authenticate_self_gss(CondorError* errstack);
-
-- int authenticate_client_gss(CondorError* errstack);
-+ int authenticate_client_gss(const char *remoteHost, CondorError* errstack);
-
- int authenticate_server_gss(CondorError* errstack);
-
-diff --git a/src/condor_includes/condor_qmgr.h b/src/condor_includes/condor_qmgr.h
-index 5e5012e..642b602 100644
---- a/src/condor_includes/condor_qmgr.h
-+++ b/src/condor_includes/condor_qmgr.h
-@@ -25,7 +25,7 @@
- #include "proc.h"
- #include "../condor_utils/CondorError.h"
- #include "condor_classad.h"
--
-+#include "daemon.h"
-
- typedef struct {
- bool dummy;
-@@ -54,8 +54,7 @@ int InitializeConnection(const char *, const char *);
- int InitializeReadOnlyConnection(const char * );
-
- /** Initiate connection to schedd job queue and begin transaction.
-- @param qmgr_location can be the name or sinful string of a schedd or
-- NULL to connect to the local schedd
-+ @param daemon a daemon object of type DT_SCHEDD
- @param timeout specifies the maximum time (in seconds) to wait for TCP
- connection establishment
- @param read_only can be set to true to skip the potentially slow
-@@ -64,7 +63,7 @@ int InitializeReadOnlyConnection(const char * );
- @param schedd_version_str Version of schedd if known (o.w. NULL).
- @return opaque Qmgr_connection structure
- */
--Qmgr_connection *ConnectQ(const char *qmgr_location, int timeout=0,
-+Qmgr_connection *ConnectQ(Daemon &daemon, int timeout=0,
- bool read_only=false, CondorError* errstack=NULL,
- const char *effective_owner=NULL,
- char const *schedd_version_str=NULL);
-diff --git a/src/condor_includes/condor_secman.h b/src/condor_includes/condor_secman.h
-index b59519e..9891497 100644
---- a/src/condor_includes/condor_secman.h
-+++ b/src/condor_includes/condor_secman.h
-@@ -106,12 +106,12 @@ public:
- // spawn off a non-blocking attempt to create a security
- // session so that in the future, a UDP command could succeed
- // without StartCommandWouldBlock.
-- StartCommandResult startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id);
-+ StartCommandResult startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id, const char *hostname);
-
- // Authenticate a socket using whatever authentication methods
- // have been configured for the specified perm level.
-- static int authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack);
-- static int authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack);
-+ static int authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack, const char * hostname);
-+ static int authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack, const char * hostname);
-
-
- //------------------------------------------
-diff --git a/src/condor_includes/reli_sock.h b/src/condor_includes/reli_sock.h
-index b48ce8d..f5cdca8 100644
---- a/src/condor_includes/reli_sock.h
-+++ b/src/condor_includes/reli_sock.h
-@@ -218,9 +218,9 @@ public:
- virtual int peek(char &);
-
- ///
-- int authenticate( const char* methods, CondorError* errstack, int auth_timeout );
-+ int authenticate( const char* methods, CondorError* errstack, int auth_timeout, const char * hostname );
- ///
-- int authenticate( KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used=NULL );
-+ int authenticate( KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used, const char * hostname );
- ///
- int isClient() { return is_client; };
-
-@@ -254,7 +254,8 @@ protected:
- int prepare_for_nobuffering( stream_coding = stream_unknown);
- int perform_authenticate( bool with_key, KeyInfo *& key,
- const char* methods, CondorError* errstack,
-- int auth_timeout, char **method_used );
-+ int auth_timeout, char **method_used,
-+ const char* hostname );
-
- // This is used internally to recover sanity on the stream after
- // failing to open a file in put_file().
-diff --git a/src/condor_includes/sock.h b/src/condor_includes/sock.h
-index ca1b1c8..53f8b23 100644
---- a/src/condor_includes/sock.h
-+++ b/src/condor_includes/sock.h
-@@ -347,10 +347,10 @@ public:
- bool isAuthenticated() const;
-
- ///
-- virtual int authenticate(const char * auth_methods, CondorError* errstack, int timeout);
-+ virtual int authenticate(const char * auth_methods, CondorError* errstack, int timeout, const char *hostname);
- ///
- // method_used should be freed by the caller when finished with it
-- virtual int authenticate(KeyInfo *&ki, const char * auth_methods, CondorError* errstack, int timeout, char **method_used=NULL);
-+ virtual int authenticate(KeyInfo *&ki, const char * auth_methods, CondorError* errstack, int timeout, char **method_used, const char *hostname);
-
- /// if we are connecting, merges together Stream::get_deadline
- /// and connect_timeout_time()
-diff --git a/src/condor_io/authentication.cpp b/src/condor_io/authentication.cpp
-index 4a11db0..0374857 100644
---- a/src/condor_io/authentication.cpp
-+++ b/src/condor_io/authentication.cpp
-@@ -85,7 +85,7 @@ Authentication::~Authentication()
- #endif
- }
-
--int Authentication::authenticate( char *hostAddr, KeyInfo *& key,
-+int Authentication::authenticate( const char *hostAddr, KeyInfo *& key,
- const char* auth_methods, CondorError* errstack, int timeout)
- {
- int retval = authenticate(hostAddr, auth_methods, errstack, timeout);
-@@ -106,7 +106,7 @@ int Authentication::authenticate( char *hostAddr, KeyInfo *& key,
- return retval;
- }
-
--int Authentication::authenticate( char *hostAddr, const char* auth_methods,
-+int Authentication::authenticate( const char *hostAddr, const char* auth_methods,
- CondorError* errstack, int timeout)
- {
- int retval;
-@@ -124,7 +124,7 @@ int Authentication::authenticate( char *hostAddr, const char* auth_methods,
- return retval;
- }
-
--int Authentication::authenticate_inner( char *hostAddr, const char* auth_methods,
-+int Authentication::authenticate_inner( const char *hostAddr, const char* auth_methods,
- CondorError* errstack, int timeout)
- {
- #if defined(SKIP_AUTHENTICATION)
-diff --git a/src/condor_io/condor_auth_x509.cpp b/src/condor_io/condor_auth_x509.cpp
-index ee80b9d..7c81cea 100644
---- a/src/condor_io/condor_auth_x509.cpp
-+++ b/src/condor_io/condor_auth_x509.cpp
-@@ -92,7 +92,7 @@ Condor_Auth_X509 :: ~Condor_Auth_X509()
- }
- }
-
--int Condor_Auth_X509 :: authenticate(const char * /* remoteHost */, CondorError* errstack)
-+int Condor_Auth_X509 :: authenticate(const char * remoteHost, CondorError* errstack)
- {
- int status = 1;
- int reply = 0;
-@@ -171,7 +171,7 @@ int Condor_Auth_X509 :: authenticate(const char * /* remoteHost */, CondorError*
-
- switch ( mySock_->isClient() ) {
- case 1:
-- status = authenticate_client_gss(errstack);
-+ status = authenticate_client_gss(remoteHost, errstack);
- break;
- default:
- status = authenticate_server_gss(errstack);
-@@ -655,7 +655,7 @@ int Condor_Auth_X509::authenticate_self_gss(CondorError* errstack)
- return TRUE;
- }
-
--int Condor_Auth_X509::authenticate_client_gss(CondorError* errstack)
-+int Condor_Auth_X509::authenticate_client_gss(const char * remoteHost, CondorError* errstack)
- {
- OM_uint32 major_status = 0;
- OM_uint32 minor_status = 0;
-@@ -775,31 +775,48 @@ int Condor_Auth_X509::authenticate_client_gss(CondorError* errstack)
- }
- }
-
-- std::string fqh = get_full_hostname(mySock_->peer_addr());
-- StringList * daemonNames = getDaemonList("GSI_DAEMON_NAME",fqh.c_str());
--
-- // Now, let's see if the name is in the list, I am not using
-- // anycase here, so if the host name and what we are looking for
-- // are in different cases, then we will run into problems.
-- if( daemonNames ) {
-- status = daemonNames->contains_withwildcard(server) == TRUE? 1 : 0;
--
-- if( !status ) {
-- errstack->pushf("GSI", GSI_ERR_UNAUTHORIZED_SERVER,
-- "Failed to authenticate because the subject '%s' is not currently trusted by you. "
-- "If it should be, add it to GSI_DAEMON_NAME or undefine GSI_DAEMON_NAME.", server);
-- dprintf(D_SECURITY,
-- "GSI_DAEMON_NAME is defined and the server %s is not specified in the GSI_DAEMON_NAME parameter\n",
-- server);
-- }
-+ std::vector<MyString> fqhs;
-+ if (remoteHost)
-+ {
-+ std::vector<MyString> fqhs_copy = get_hostname_with_alias(mySock_->peer_addr());
-+ fqhs.push_back(remoteHost);
-+ fqhs.insert(fqhs.begin()+1, fqhs_copy.begin(), fqhs_copy.end());
- }
-- else {
-- status = CheckServerName(fqh.c_str(),mySock_->peer_ip_str(),mySock_,errstack);
-+ else
-+ {
-+ fqhs = get_hostname_with_alias(mySock_->peer_addr());
- }
-+ dprintf(D_FULLDEBUG, "Number of aliases: %zu\n", fqhs.size());
-+ for(std::vector<MyString>::const_iterator it = fqhs.begin(); it != fqhs.end(); ++it) {
-+ dprintf(D_FULLDEBUG, "Checking validity of alias %s\n", it->Value());
-+ std::string fqh = it->Value();
-+ StringList * daemonNames = getDaemonList("GSI_DAEMON_NAME",fqh.c_str());
-+
-+ // Now, let's see if the name is in the list, I am not using
-+ // anycase here, so if the host name and what we are looking for
-+ // are in different cases, then we will run into problems.
-+ if( daemonNames ) {
-+ status = daemonNames->contains_withwildcard(server) == TRUE? 1 : 0;
-+
-+ if( !status ) {
-+ errstack->pushf("GSI", GSI_ERR_UNAUTHORIZED_SERVER,
-+ "Failed to authenticate because the subject '%s' is not currently trusted by you. "
-+ "If it should be, add it to GSI_DAEMON_NAME or undefine GSI_DAEMON_NAME.", server);
-+ dprintf(D_SECURITY,
-+ "GSI_DAEMON_NAME is defined and the server %s is not specified in the GSI_DAEMON_NAME parameter\n",
-+ server);
-+ }
-+ }
-+ else {
-+ status = CheckServerName(fqh.c_str(),mySock_->peer_ip_str(),mySock_,errstack);
-+ }
-+ delete daemonNames;
-
-- if (status) {
-- dprintf(D_SECURITY, "valid GSS connection established to %s\n", server);
-- }
-+ if (status) {
-+ dprintf(D_SECURITY, "valid GSS connection established to %s\n", server);
-+ break;
-+ }
-+ }
-
- mySock_->encode();
- if (!mySock_->code(status) || !mySock_->end_of_message()) {
-@@ -810,7 +827,6 @@ int Condor_Auth_X509::authenticate_client_gss(CondorError* errstack)
- }
-
- delete [] server;
-- delete daemonNames;
- }
- clear:
- return (status == 0) ? FALSE : TRUE;
-diff --git a/src/condor_io/condor_secman.cpp b/src/condor_io/condor_secman.cpp
-index 21607fe..ea768bf 100644
---- a/src/condor_io/condor_secman.cpp
-+++ b/src/condor_io/condor_secman.cpp
-@@ -855,7 +855,7 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- SecManStartCommand (
- int cmd,Sock *sock,bool raw_protocol,
- CondorError *errstack,int subcmd,StartCommandCallbackType *callback_fn,
-- void *misc_data,bool nonblocking,char const *cmd_description,char const *sec_session_id_hint,SecMan *sec_man):
-+ void *misc_data,bool nonblocking,char const *cmd_description,char const *sec_session_id_hint,SecMan *sec_man, const std::string &hostname):
-
- m_cmd(cmd),
- m_subcmd(subcmd),
-@@ -867,7 +867,8 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- m_nonblocking(nonblocking),
- m_pending_socket_registered(false),
- m_sec_man(*sec_man),
-- m_use_tmp_sec_session(false)
-+ m_use_tmp_sec_session(false),
-+ m_hostname(hostname)
- {
- m_sec_session_id_hint = sec_session_id_hint ? sec_session_id_hint : "";
- if( m_sec_session_id_hint == USE_TMP_SEC_SESSION ) {
-@@ -972,6 +973,7 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- KeyCacheEntry *m_enc_key;
- KeyInfo* m_private_key;
- MyString m_sec_session_id_hint;
-+ std::string m_hostname;
-
- enum StartCommandState {
- SendAuthInfo,
-@@ -1023,7 +1025,7 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- };
-
- StartCommandResult
--SecMan::startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id_hint)
-+SecMan::startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id_hint, const char *hostname)
- {
- // This function is simply a convenient wrapper around the
- // SecManStartCommand class, which does the actual work.
-@@ -1032,7 +1034,8 @@ SecMan::startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errst
- // The blocking case could avoid use of the heap, but for simplicity,
- // we just do the same in both cases.
-
-- classy_counted_ptr<SecManStartCommand> sc = new SecManStartCommand(cmd,sock,raw_protocol,errstack,subcmd,callback_fn,misc_data,nonblocking,cmd_description,sec_session_id_hint,this);
-+ std::string hostname_str = hostname ? hostname : "";
-+ classy_counted_ptr<SecManStartCommand> sc = new SecManStartCommand(cmd,sock,raw_protocol,errstack,subcmd,callback_fn,misc_data,nonblocking,cmd_description,sec_session_id_hint,this, hostname_str);
-
- ASSERT(sc.get());
-
-@@ -1829,7 +1832,7 @@ SecManStartCommand::authenticate_inner()
- }
-
- int auth_timeout = m_sec_man.getSecTimeout( CLIENT_PERM );
-- bool auth_success = m_sock->authenticate(m_private_key, auth_methods, m_errstack,auth_timeout);
-+ bool auth_success = m_sock->authenticate(m_private_key, auth_methods, m_errstack,auth_timeout, NULL, m_hostname.c_str());
-
- if (auth_methods) {
- free(auth_methods);
-@@ -2159,7 +2162,8 @@ SecManStartCommand::DoTCPAuth_inner()
- m_nonblocking,
- m_cmd_description.Value(),
- m_sec_session_id_hint.Value(),
-- &m_sec_man);
-+ &m_sec_man,
-+ m_hostname);
-
- StartCommandResult auth_result = m_tcp_auth_command->startCommand();
-
-@@ -2796,23 +2800,23 @@ char* SecMan::my_parent_unique_id() {
- }
-
- int
--SecMan::authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack)
-+SecMan::authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack, const char * hostname)
- {
- MyString methods;
- getAuthenticationMethods( perm, &methods );
- ASSERT(s);
- int auth_timeout = getSecTimeout(perm);
-- return s->authenticate(methods.Value(),errstack,auth_timeout);
-+ return s->authenticate(methods.Value(),errstack,auth_timeout, hostname);
- }
-
- int
--SecMan::authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack)
-+SecMan::authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack, const char * hostname)
- {
- MyString methods;
- getAuthenticationMethods( perm, &methods );
- ASSERT(s);
- int auth_timeout = getSecTimeout(perm);
-- return s->authenticate(ki,methods.Value(),errstack,auth_timeout);
-+ return s->authenticate(ki,methods.Value(),errstack,auth_timeout, NULL, hostname);
- }
-
- int
-diff --git a/src/condor_io/reli_sock.cpp b/src/condor_io/reli_sock.cpp
-index d80bab4..00a6d10 100644
---- a/src/condor_io/reli_sock.cpp
-+++ b/src/condor_io/reli_sock.cpp
-@@ -967,11 +967,11 @@ ReliSock::prepare_for_nobuffering(stream_coding direction)
-
- int ReliSock::perform_authenticate(bool with_key, KeyInfo *& key,
- const char* methods, CondorError* errstack,
-- int auth_timeout, char **method_used)
-+ int auth_timeout, char **method_used,
-+ const char * hostname )
- {
- int in_encode_mode;
- int result;
--
- if( method_used ) {
- *method_used = NULL;
- }
-@@ -984,9 +984,9 @@ int ReliSock::perform_authenticate(bool with_key, KeyInfo *& key,
-
- // actually perform the authentication
- if ( with_key ) {
-- result = authob.authenticate( hostAddr, key, methods, errstack, auth_timeout );
-+ result = authob.authenticate( hostname, key, methods, errstack, auth_timeout );
- } else {
-- result = authob.authenticate( hostAddr, methods, errstack, auth_timeout );
-+ result = authob.authenticate( hostname, methods, errstack, auth_timeout );
- }
- // restore stream mode (either encode or decode)
- if ( in_encode_mode && is_decode() ) {
-@@ -1010,16 +1010,16 @@ int ReliSock::perform_authenticate(bool with_key, KeyInfo *& key,
- }
- }
-
--int ReliSock::authenticate(KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used)
-+int ReliSock::authenticate(KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used, const char *hostname)
- {
-- return perform_authenticate(true,key,methods,errstack,auth_timeout,method_used);
-+ return perform_authenticate(true,key,methods,errstack,auth_timeout,method_used, hostname);
- }
-
- int
--ReliSock::authenticate(const char* methods, CondorError* errstack,int auth_timeout )
-+ReliSock::authenticate(const char* methods, CondorError* errstack,int auth_timeout, const char * hostname)
- {
- KeyInfo *key = NULL;
-- return perform_authenticate(false,key,methods,errstack,auth_timeout,NULL);
-+ return perform_authenticate(false,key,methods,errstack,auth_timeout,NULL, hostname);
- }
-
- bool
-diff --git a/src/condor_io/sock.cpp b/src/condor_io/sock.cpp
-index c4dcb0b..e743139 100644
---- a/src/condor_io/sock.cpp
-+++ b/src/condor_io/sock.cpp
-@@ -2235,12 +2235,12 @@ bool Sock :: is_hdr_encrypt(){
- return FALSE;
- }
-
--int Sock :: authenticate(KeyInfo *&, const char * /* methods */, CondorError* /* errstack */, int /*timeout*/, char ** /*method_used*/)
-+int Sock :: authenticate(KeyInfo *&, const char * /* methods */, CondorError* /* errstack */, int /*timeout*/, char ** /*method_used*/, const char * /*hostname*/)
- {
- return -1;
- }
-
--int Sock :: authenticate(const char * /* methods */, CondorError* /* errstack */, int /*timeout*/)
-+int Sock :: authenticate(const char * /* methods */, CondorError* /* errstack */, int /*timeout*/, const char * /*hostname*/)
- {
- /*
- errstack->push("AUTHENTICATE", AUTHENTICATE_ERR_NOT_BUILT,
-diff --git a/src/condor_job_router/submit_job.cpp b/src/condor_job_router/submit_job.cpp
-index ccad171..393bafc 100644
---- a/src/condor_job_router/submit_job.cpp
-+++ b/src/condor_job_router/submit_job.cpp
-@@ -175,7 +175,7 @@ ClaimJobResult claim_job(int cluster, int proc, MyString * error_details, const
- static Qmgr_connection *open_q_as_owner(char const *effective_owner,DCSchedd &schedd,FailObj &failobj)
- {
- CondorError errstack;
-- Qmgr_connection * qmgr = ConnectQ(schedd.addr(), 0 /*timeout==default*/, false /*read-only*/, & errstack, effective_owner, schedd.version());
-+ Qmgr_connection * qmgr = ConnectQ(schedd, 0 /*timeout==default*/, false /*read-only*/, & errstack, effective_owner, schedd.version());
- if( ! qmgr ) {
- failobj.fail("Unable to connect\n%s\n", errstack.getFullText(true));
- return NULL;
-diff --git a/src/condor_prio/prio.cpp b/src/condor_prio/prio.cpp
-index deec9b1..c27aa94 100644
---- a/src/condor_prio/prio.cpp
-+++ b/src/condor_prio/prio.cpp
-@@ -157,8 +157,7 @@ main( int argc, char *argv[] )
- }
-
- // Open job queue
-- DaemonName = schedd.addr();
-- q = ConnectQ(DaemonName.Value());
-+ q = ConnectQ(schedd);
- if( !q ) {
- fprintf( stderr, "Failed to connect to queue manager %s\n",
- DaemonName.Value() );
-diff --git a/src/condor_q.V6/queue.cpp b/src/condor_q.V6/queue.cpp
-index 58ff6ed..85ad9cf 100644
---- a/src/condor_q.V6/queue.cpp
-+++ b/src/condor_q.V6/queue.cpp
-@@ -119,13 +119,13 @@ static char * bufferJobShort (ClassAd *);
- /* if useDB is false, then v1 =scheddAddress, v2=scheddName, v3=scheddMachine, v4=scheddVersion;
- if useDB is true, then v1 =quill_name, v2=db_ipAddr, v3=db_name, v4=db_password
- */
--static bool show_queue (const char* v1, const char* v2, const char* v3, const char* v4, bool useDB);
--static bool show_queue_buffered (const char* v1, const char* v2, const char* v3, const char* v4, bool useDB);
-+static bool show_queue (const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd *ad, bool useDB);
-+static bool show_queue_buffered (const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd *ad, bool useDB);
- static void init_output_mask();
-
-
- /* a type used to point to one of the above two functions */
--typedef bool (*show_queue_fp)(const char* v1, const char* v2, const char* v3, const char* v4, bool useDB);
-+typedef bool (*show_queue_fp)(const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd *ad, bool useDB);
-
- static bool read_classad_file(const char *filename, ClassAdList &classads);
-
-@@ -506,7 +506,7 @@ int main (int argc, char **argv)
-
- /* ask the database for the queue */
-
-- if ( (retval = sqfp( NULL, NULL, NULL, NULL, TRUE) ) ) {
-+ if ( (retval = sqfp( NULL, NULL, NULL, NULL, NULL, TRUE) ) ) {
- /* if the queue was retrieved, then I am done */
- freeConnectionStrings();
- exit(retval?EXIT_SUCCESS:EXIT_FAILURE);
-@@ -557,7 +557,7 @@ int main (int argc, char **argv)
- (quill.name()):tmp_char,
- (quill.fullHostname())?
- (quill.fullHostname()):tmp_char,
-- NULL, FALSE) ) ) )
-+ NULL, NULL, FALSE) ) ) )
- {
- /* if the queue was retrieved, then I am done */
- freeConnectionStrings();
-@@ -600,7 +600,7 @@ int main (int argc, char **argv)
- #endif /* HAVE_EXT_POSTGRESQL */
- case DIRECT_SCHEDD:
- retval = sqfp(scheddAddr, scheddName, scheddMachine,
-- scheddVersion.Value(), FALSE);
-+ scheddVersion.Value(), NULL, FALSE);
-
- /* Hopefully I got the queue from the schedd... */
- freeConnectionStrings();
-@@ -793,7 +793,7 @@ int main (int argc, char **argv)
- case DIRECT_RDBMS:
- if (useDB) {
- if ( (retval = sqfp(quillName, dbIpAddr, dbName,
-- queryPassword, TRUE) ) )
-+ queryPassword, ad, TRUE) ) )
- {
- /* processed correctly, so do the next ad */
- continue;
-@@ -840,7 +840,7 @@ int main (int argc, char **argv)
-
- if((result2 == Q_OK) && quillAddr &&
- (retval = sqfp(quillAddr, quillName, quillMachine,
-- NULL, FALSE) ) )
-+ NULL, ad, FALSE) ) )
- {
- /* processed correctly, so do the next ad */
- continue;
-@@ -896,7 +896,7 @@ int main (int argc, char **argv)
- case DIRECT_SCHEDD:
- /* database not configured or could not be reached,
- query the schedd daemon directly */
-- retval = sqfp(scheddAddr, scheddName, scheddMachine, scheddVersion.Value(), FALSE);
-+ retval = sqfp(scheddAddr, scheddName, scheddMachine, scheddVersion.Value(), ad, FALSE);
-
- break;
-
-@@ -2536,7 +2536,7 @@ static void init_output_mask()
- */
-
- static bool
--show_queue_buffered( const char* v1, const char* v2, const char* v3, const char* v4, bool useDB )
-+show_queue_buffered( const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd* ad, bool useDB )
- {
- const char *scheddAddress = 0;
- const char *scheddName = 0;
-@@ -2709,7 +2709,13 @@ show_queue_buffered( const char* v1, const char* v2, const char* v3, const char*
- #endif /* HAVE_EXT_POSTGRESQL */
- } else {
- // fetch queue from schedd and stash it in output_buffer.
-- Daemon schedd(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ Daemon *schedd_ptr = NULL;
-+ if (ad)
-+ schedd_ptr = new Daemon(ad, DT_SCHEDD, pool ? pool->addr() : NULL );
-+ else
-+ schedd_ptr = new Daemon(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ ASSERT(schedd_ptr);
-+ Daemon & schedd = *schedd_ptr;
- const char *version = schedd.version();
- bool useFastPath = false;
- if (version) {
-@@ -2719,9 +2725,12 @@ show_queue_buffered( const char* v1, const char* v2, const char* v3, const char*
-
- // stash the schedd daemon object for use by process_buffer_line
- g_cur_schedd_for_process_buffer_line = new Daemon( schedd );
-+ ASSERT( g_cur_schedd_for_process_buffer_line );
-+
-+ delete schedd_ptr;
-
- int fetchResult;
-- if( (fetchResult = Q.fetchQueueFromHostAndProcess( scheddAddress, attrs,
-+ if( (fetchResult = Q.fetchQueueFromDaemonAndProcess( *g_cur_schedd_for_process_buffer_line, attrs,
- process_buffer_line,
- useFastPath,
- &errstack)) != Q_OK) {
-@@ -2967,7 +2976,7 @@ process_buffer_line( ClassAd *job )
- refer to the prototype of this function on the top of this file
- */
- static bool
--show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool useDB )
-+show_queue( const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd * ad, bool useDB )
- {
- const char *scheddAddress;
- const char *scheddName;
-@@ -3046,7 +3055,13 @@ show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool
- } else {
- // fetch queue from schedd
- int fetchResult;
-- if( (fetchResult = Q.fetchQueueFromHost(jobs, attrs,scheddAddress, scheddVersion, &errstack) != Q_OK)) {
-+ Daemon *schedd_daemon_ptr;
-+ if (ad)
-+ schedd_daemon_ptr = new Daemon(ad, DT_SCHEDD, pool ? pool->addr() : NULL );
-+ else
-+ schedd_daemon_ptr = new Daemon(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ Daemon &schedd_daemon = *schedd_daemon_ptr;
-+ if( (fetchResult = Q.fetchQueueFromDaemon(jobs, attrs, schedd_daemon, scheddVersion, &errstack) != Q_OK)) {
- // The parse + fetch failed, print out why
- switch(fetchResult) {
- case Q_PARSE_ERROR:
-@@ -3081,7 +3096,12 @@ show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool
- scheddAddress, scheddMachine);
- }
-
-- Daemon schedd_daemon(DT_SCHEDD,scheddName,pool ? pool->addr() : NULL);
-+ Daemon *schedd_daemon_ptr;
-+ if (ad)
-+ schedd_daemon_ptr = new Daemon(ad, DT_SCHEDD, pool ? pool->addr() : NULL );
-+ else
-+ schedd_daemon_ptr = new Daemon(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ Daemon &schedd_daemon = *schedd_daemon_ptr;
- schedd_daemon.locate();
-
- jobs.Open();
-@@ -3089,6 +3109,7 @@ show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool
- doRunAnalysis( job, &schedd_daemon );
- }
- jobs.Close();
-+ delete schedd_daemon_ptr;
-
- if(lastUpdate) {
- free(lastUpdate);
-diff --git a/src/condor_schedd.V6/qmgmt_receivers.cpp b/src/condor_schedd.V6/qmgmt_receivers.cpp
-index 98e1960..db6314f 100644
---- a/src/condor_schedd.V6/qmgmt_receivers.cpp
-+++ b/src/condor_schedd.V6/qmgmt_receivers.cpp
-@@ -71,7 +71,7 @@ do_Q_request(ReliSock *syscall_sock,bool &may_fork)
- dprintf(D_SECURITY,"Calling authenticate(%s) in qmgmt_receivers\n", methods.Value());
- }
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(syscall_sock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(syscall_sock, WRITE, &errstack, NULL) ) {
- // Failed to authenticate
- dprintf( D_ALWAYS, "SCHEDD: authentication failed: %s\n",
- errstack.getFullText() );
-diff --git a/src/condor_schedd.V6/qmgr_job_updater.cpp b/src/condor_schedd.V6/qmgr_job_updater.cpp
-index f9c100a..25126dd 100644
---- a/src/condor_schedd.V6/qmgr_job_updater.cpp
-+++ b/src/condor_schedd.V6/qmgr_job_updater.cpp
-@@ -47,8 +47,10 @@ QmgrJobUpdater::QmgrJobUpdater( ClassAd* job, const char* schedd_address,
- schedd_addr(schedd_address?strdup(schedd_address):0),
- schedd_ver(schedd_version?strdup(schedd_version):0),
- cluster(-1), proc(-1),
-- q_update_tid(-1)
-+ q_update_tid(-1),
-+ m_daemon(DT_SCHEDD, schedd_address)
- {
-+
- if( ! is_valid_sinful(schedd_address) ) {
- EXCEPT( "schedd_addr not specified with valid address (%s)",
- schedd_address );
-@@ -252,7 +254,7 @@ QmgrJobUpdater::updateAttr( const char *name, const char *expr, bool updateMaste
- if (log) {
- flags = SHOULDLOG;
- }
-- if( ConnectQ(schedd_addr,SHADOW_QMGMT_TIMEOUT,false,NULL,m_owner.Value(),schedd_ver) ) {
-+ if( ConnectQ(m_daemon,SHADOW_QMGMT_TIMEOUT,false,NULL,m_owner.Value(),schedd_ver) ) {
- if( SetAttribute(cluster,p,name,expr,flags) < 0 ) {
- err_msg = "SetAttribute() failed";
- result = FALSE;
-@@ -338,7 +340,7 @@ QmgrJobUpdater::updateJob( update_t type, SetAttributeFlags_t commit_flags )
- job_queue_attrs->contains_anycase(name)) ) {
-
- if( ! is_connected ) {
-- if( ! ConnectQ(schedd_addr, SHADOW_QMGMT_TIMEOUT, false, NULL, m_owner.Value(),schedd_ver) ) {
-+ if( ! ConnectQ(m_daemon, SHADOW_QMGMT_TIMEOUT, false, NULL, m_owner.Value(),schedd_ver) ) {
- return false;
- }
- is_connected = true;
-@@ -351,7 +353,7 @@ QmgrJobUpdater::updateJob( update_t type, SetAttributeFlags_t commit_flags )
- m_pull_attrs->rewind();
- while ( (name = m_pull_attrs->next()) ) {
- if ( !is_connected ) {
-- if ( !ConnectQ( schedd_addr, SHADOW_QMGMT_TIMEOUT, true, NULL, NULL, schedd_ver ) ) {
-+ if ( !ConnectQ( m_daemon, SHADOW_QMGMT_TIMEOUT, true, NULL, NULL, schedd_ver ) ) {
- return false;
- }
- is_connected = true;
-@@ -392,7 +394,7 @@ QmgrJobUpdater::retrieveJobUpdates( void )
- ProcIdToStr(cluster, proc, id_str);
- job_ids.insert(id_str);
-
-- if ( !ConnectQ( schedd_addr, SHADOW_QMGMT_TIMEOUT, false ) ) {
-+ if ( !ConnectQ( m_daemon, SHADOW_QMGMT_TIMEOUT, false ) ) {
- return false;
- }
- if ( GetDirtyAttributes( cluster, proc, &updates ) < 0 ) {
-diff --git a/src/condor_schedd.V6/qmgr_job_updater.h b/src/condor_schedd.V6/qmgr_job_updater.h
-index e487688..ad86fe2 100644
---- a/src/condor_schedd.V6/qmgr_job_updater.h
-+++ b/src/condor_schedd.V6/qmgr_job_updater.h
-@@ -47,7 +47,7 @@ class QmgrJobUpdater : public Service
- {
- public:
- QmgrJobUpdater( ClassAd* job_a, const char*schedd_address, char const *schedd_version);
-- QmgrJobUpdater( ) : common_job_queue_attrs(0), hold_job_queue_attrs(0), evict_job_queue_attrs(0), remove_job_queue_attrs(0), requeue_job_queue_attrs(0), terminate_job_queue_attrs(0), checkpoint_job_queue_attrs(0), x509_job_queue_attrs(0), m_pull_attrs(0), job_ad(0), schedd_addr(0), schedd_ver(0), cluster(-1), proc(-1), q_update_tid(-1) {}
-+ QmgrJobUpdater( ) : common_job_queue_attrs(0), hold_job_queue_attrs(0), evict_job_queue_attrs(0), remove_job_queue_attrs(0), requeue_job_queue_attrs(0), terminate_job_queue_attrs(0), checkpoint_job_queue_attrs(0), x509_job_queue_attrs(0), m_pull_attrs(0), job_ad(0), schedd_addr(0), schedd_ver(0), cluster(-1), proc(-1), q_update_tid(-1), m_daemon(DT_SCHEDD, NULL) {}
- virtual ~QmgrJobUpdater();
-
- virtual void startUpdateTimer( void );
-@@ -148,6 +148,8 @@ private:
- int proc;
-
- int q_update_tid;
-+
-+ Daemon m_daemon;
- };
-
- // usefull if you don't want to update the job queue
-diff --git a/src/condor_schedd.V6/qmgr_lib_support.cpp b/src/condor_schedd.V6/qmgr_lib_support.cpp
-index 64bfffd..4afd1a6 100644
---- a/src/condor_schedd.V6/qmgr_lib_support.cpp
-+++ b/src/condor_schedd.V6/qmgr_lib_support.cpp
-@@ -34,8 +34,9 @@ ReliSock *qmgmt_sock = NULL;
- static Qmgr_connection connection;
-
- Qmgr_connection *
--ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* errstack, const char *effective_owner, const char* schedd_version_str )
-+ConnectQ(Daemon &d, int timeout, bool read_only, CondorError* errstack, const char *effective_owner, const char* schedd_version_str )
- {
-+
- int rval, ok;
- int cmd = read_only ? QMGMT_READ_CMD : QMGMT_WRITE_CMD;
-
-@@ -54,15 +55,10 @@ ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* er
- }
-
- // no connection active as of now; create a new one
-- Daemon d( DT_SCHEDD, qmgr_location );
-+ dprintf(D_HOSTNAME, "Hostname of new connection: %s\n", d.fullHostname());
- if( ! d.locate() ) {
- ok = FALSE;
-- if( qmgr_location ) {
-- dprintf( D_ALWAYS, "Can't find address of queue manager %s\n",
-- qmgr_location );
-- } else {
-- dprintf( D_ALWAYS, "Can't find address of local queue manager\n" );
-- }
-+ dprintf( D_ALWAYS, "Can't find address of queue manager\n" );
- } else {
- // QMGMT_WRITE_CMD didn't exist before 7.5.0, so use QMGMT_READ_CMD
- // when talking to older schedds
-@@ -104,7 +100,7 @@ ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* er
- // the connection, because this command is registered with
- // force_authentication=true on the server side.
- if( cmd == QMGMT_WRITE_CMD && !qmgmt_sock->triedAuthentication()) {
-- if( !SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select ) )
-+ if( !SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select, d.fullHostname()) )
- {
- delete qmgmt_sock;
- qmgmt_sock = NULL;
-@@ -155,7 +151,7 @@ ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* er
- }
-
- if ( !read_only ) {
-- if (!SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select)) {
-+ if (!SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select, d.fullHostname())) {
- delete qmgmt_sock;
- qmgmt_sock = NULL;
- if (!errstack) {
-diff --git a/src/condor_schedd.V6/schedd.cpp b/src/condor_schedd.V6/schedd.cpp
-index b855407..6731e6c 100644
---- a/src/condor_schedd.V6/schedd.cpp
-+++ b/src/condor_schedd.V6/schedd.cpp
-@@ -3434,7 +3434,7 @@ Scheduler::spoolJobFiles(int mode, Stream* s)
-
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-@@ -3690,7 +3690,7 @@ Scheduler::updateGSICred(int cmd, Stream* s)
-
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-@@ -3901,7 +3901,7 @@ Scheduler::actOnJobs(int, Stream* s)
- rsock->decode();
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-@@ -12104,7 +12104,7 @@ Scheduler::get_job_connect_info_handler_implementation(int, Stream* s) {
- // force authentication
- if( !sock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(sock, WRITE, &errstack) ||
-+ if( ! SecMan::authenticate_sock(sock, WRITE, &errstack, NULL) ||
- ! sock->getFullyQualifiedUser() )
- {
- dprintf( D_ALWAYS,
-@@ -13145,7 +13145,7 @@ Scheduler::claimLocalStartd()
- CondorQuery query(STARTD_AD);
- QueryResult q;
- ClassAdList result;
-- q = query.fetchAds(result, startd_addr, &errstack);
-+ q = query.fetchAds(result, startd, &errstack);
- if ( q != Q_OK ) {
- dprintf(D_FULLDEBUG,
- "ERROR: could not fetch ads from local startd : %s (%s)\n",
-@@ -13654,7 +13654,7 @@ Scheduler::RecycleShadow(int /*cmd*/, Stream *stream)
- sock->decode();
- if( !sock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(sock, WRITE, &errstack) ||
-+ if( ! SecMan::authenticate_sock(sock, WRITE, &errstack, NULL) ||
- ! sock->getFullyQualifiedUser() )
- {
- dprintf( D_ALWAYS,
-@@ -13864,7 +13864,7 @@ Scheduler::clear_dirty_job_attrs_handler(int /*cmd*/, Stream *stream)
- sock->decode();
- if( !sock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(sock, WRITE, &errstack) ||
-+ if( ! SecMan::authenticate_sock(sock, WRITE, &errstack, NULL) ||
- ! sock->getFullyQualifiedUser() )
- {
- dprintf( D_ALWAYS,
-diff --git a/src/condor_schedd.V6/schedd_td.cpp b/src/condor_schedd.V6/schedd_td.cpp
-index 3e30f27..a9b0e4b 100644
---- a/src/condor_schedd.V6/schedd_td.cpp
-+++ b/src/condor_schedd.V6/schedd_td.cpp
-@@ -76,7 +76,7 @@ Scheduler::requestSandboxLocation(int mode, Stream* s)
-
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_schedd.V6/tdman.cpp b/src/condor_schedd.V6/tdman.cpp
-index 53c6000..eb1e205 100644
---- a/src/condor_schedd.V6/tdman.cpp
-+++ b/src/condor_schedd.V6/tdman.cpp
-@@ -869,7 +869,7 @@ TDMan::transferd_registration(int cmd, Stream *sock)
- ///////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_status.V6/status.cpp b/src/condor_status.V6/status.cpp
-index dea2c92..187e843 100644
---- a/src/condor_status.V6/status.cpp
-+++ b/src/condor_status.V6/status.cpp
-@@ -442,14 +442,19 @@ main (int argc, char *argv[])
- // subsystem that corresponds to a daemon (above).
- // Here 'addr' represents either the host:port of requested pool, or
- // alternatively the host:port of daemon associated with requested subsystem (direct mode)
-- q = query->fetchAds (result, addr, &errstack);
-+
-+ // If we are querying the same collector as before, reuse that object.
-+ // This is important for hostname resolution.
-+ if (!direct)
-+ q = query->fetchAds (result, *pool, &errstack);
-+ else
-+ q = query->fetchAds (result, addr, &errstack);
- } else {
- // otherwise obtain list of collectors and submit query that way
- CollectorList * collectors = CollectorList::create();
- q = collectors->query (*query, result, &errstack);
- delete collectors;
- }
--
-
- // if any error was encountered during the query, report it and exit
- if (Q_OK != q) {
-diff --git a/src/condor_submit.V6/submit.cpp b/src/condor_submit.V6/submit.cpp
-index 76bbabf..f5e118b 100644
---- a/src/condor_submit.V6/submit.cpp
-+++ b/src/condor_submit.V6/submit.cpp
-@@ -6237,7 +6237,8 @@ connect_to_the_schedd()
- setupAuthentication();
-
- CondorError errstack;
-- if( ConnectQ(MySchedd->addr(), 0 /* default */, false /* default */, &errstack, NULL, MySchedd->version() ) == 0 ) {
-+ ASSERT(MySchedd);
-+ if( ConnectQ(*MySchedd, 0 /* default */, false /* default */, &errstack, NULL, MySchedd->version() ) == 0 ) {
- if( ScheddName ) {
- fprintf( stderr,
- "\nERROR: Failed to connect to queue manager %s\n%s\n",
-@@ -7202,7 +7203,8 @@ DoCleanup(int,int,const char*)
- // DoCleanup(). This lead to infinite recursion which is bad.
- ClusterCreated = 0;
- if (!ActiveQueueConnection) {
-- ActiveQueueConnection = (ConnectQ(MySchedd->addr()) != 0);
-+ ASSERT( MySchedd );
-+ ActiveQueueConnection = (ConnectQ(*MySchedd) != 0);
- }
- if (ActiveQueueConnection) {
- // Call DestroyCluster() now in an attempt to get the schedd
-diff --git a/src/condor_tools/preen.cpp b/src/condor_tools/preen.cpp
-index 57fcd04..e2f9774 100644
---- a/src/condor_tools/preen.cpp
-+++ b/src/condor_tools/preen.cpp
-@@ -356,7 +356,8 @@ check_spool_dir()
- well_known_list.append( ".pgpass" );
-
- // connect to the Q manager
-- if (!(qmgr = ConnectQ (0))) {
-+ Daemon d(DT_SCHEDD, 0);
-+ if (!(qmgr = ConnectQ (d))) {
- dprintf( D_ALWAYS, "Not cleaning spool directory: Can't contact schedd\n" );
- return;
- }
-diff --git a/src/condor_tools/qedit.cpp b/src/condor_tools/qedit.cpp
-index e36d844..61d1b63 100644
---- a/src/condor_tools/qedit.cpp
-+++ b/src/condor_tools/qedit.cpp
-@@ -131,7 +131,7 @@ main(int argc, char *argv[])
- }
-
- // Open job queue
-- q = ConnectQ( schedd.addr(), 0, false, NULL, NULL, schedd.version() );
-+ q = ConnectQ( schedd, 0, false, NULL, NULL, schedd.version() );
- if( !q ) {
- fprintf( stderr, "Failed to connect to queue manager %s\n",
- schedd.addr() );
-diff --git a/src/condor_tools/tool.cpp b/src/condor_tools/tool.cpp
-index 5e63dc7..8b3ba37 100644
---- a/src/condor_tools/tool.cpp
-+++ b/src/condor_tools/tool.cpp
-@@ -1167,8 +1167,8 @@ resolveNames( DaemonList* daemon_list, StringList* name_list )
- }
-
-
-- if (pool_addr) {
-- q_result = query.fetchAds(ads, pool_addr, &errstack);
-+ if (pool) {
-+ q_result = query.fetchAds(ads, *pool, &errstack);
- } else {
- CollectorList * collectors = CollectorList::create();
- q_result = collectors->query (query, ads);
-diff --git a/src/condor_transferd/td_init.cpp b/src/condor_transferd/td_init.cpp
-index 1fccebd..f2330e1 100644
---- a/src/condor_transferd/td_init.cpp
-+++ b/src/condor_transferd/td_init.cpp
-@@ -277,7 +277,7 @@ TransferD::setup_transfer_request_handler(int /*cmd*/, Stream *sock)
- ///////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_transferd/td_read_files.cpp b/src/condor_transferd/td_read_files.cpp
-index a6c7f87..4febc43 100644
---- a/src/condor_transferd/td_read_files.cpp
-+++ b/src/condor_transferd/td_read_files.cpp
-@@ -67,7 +67,7 @@ TransferD::read_files_handler(int cmd, Stream *sock)
- /////////////////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_transferd/td_write_files.cpp b/src/condor_transferd/td_write_files.cpp
-index 412a552..572cc79 100644
---- a/src/condor_transferd/td_write_files.cpp
-+++ b/src/condor_transferd/td_write_files.cpp
-@@ -67,7 +67,7 @@ TransferD::write_files_handler(int cmd, Stream *sock)
- /////////////////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_utils/classad_command_util.cpp b/src/condor_utils/classad_command_util.cpp
-index 56d7ddb..1ae11a8 100644
---- a/src/condor_utils/classad_command_util.cpp
-+++ b/src/condor_utils/classad_command_util.cpp
-@@ -92,7 +92,7 @@ getCmdFromReliSock( ReliSock* s, ClassAd* ad, bool force_auth )
- s->decode();
- if( force_auth && ! s->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(s, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(s, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_utils/condor_q.cpp b/src/condor_utils/condor_q.cpp
-index 886b664..c540bbd 100644
---- a/src/condor_utils/condor_q.cpp
-+++ b/src/condor_utils/condor_q.cpp
-@@ -28,6 +28,7 @@
- #include "CondorError.h"
- #include "condor_classad.h"
- #include "quill_enums.h"
-+#include "daemon.h"
-
- #ifdef HAVE_EXT_POSTGRESQL
- #include "pgsqldatabase.h"
-@@ -230,7 +231,8 @@ fetchQueue (ClassAdList &list, StringList &attrs, ClassAd *ad, CondorError* errs
- if (ad == 0)
- {
- // local case
-- if( !(qmgr = ConnectQ( 0, connect_timeout, true, errstack)) ) {
-+ Daemon d(DT_SCHEDD, 0, 0);
-+ if( !(qmgr = ConnectQ( d, connect_timeout, true, errstack)) ) {
- errstack->push("TEST", 0, "FOO");
- return Q_SCHEDD_COMMUNICATION_ERROR;
- }
-@@ -241,8 +243,9 @@ fetchQueue (ClassAdList &list, StringList &attrs, ClassAd *ad, CondorError* errs
- // remote case to handle condor_globalq
- if (!ad->LookupString (ATTR_SCHEDD_IP_ADDR, scheddString))
- return Q_NO_SCHEDD_IP_ADDR;
-+ Daemon d(ad, DT_SCHEDD, NULL);
-
-- if( !(qmgr = ConnectQ( scheddString, connect_timeout, true, errstack)) )
-+ if( !(qmgr = ConnectQ( d, connect_timeout, true, errstack)) )
- return Q_SCHEDD_COMMUNICATION_ERROR;
-
- }
-@@ -255,7 +258,7 @@ fetchQueue (ClassAdList &list, StringList &attrs, ClassAd *ad, CondorError* errs
- }
-
- int CondorQ::
--fetchQueueFromHost (ClassAdList &list, StringList &attrs, const char *host, char const *schedd_version, CondorError* errstack)
-+fetchQueueFromDaemon (ClassAdList &list, StringList &attrs, Daemon &daemon, char const *schedd_version, CondorError* errstack)
- {
- Qmgr_connection *qmgr;
- ExprTree *tree;
-@@ -276,7 +279,7 @@ fetchQueueFromHost (ClassAdList &list, StringList &attrs, const char *host, char
- optimal. :^).
- */
- init(); // needed to get default connect_timeout
-- if( !(qmgr = ConnectQ( host, connect_timeout, true, errstack)) )
-+ if( !(qmgr = ConnectQ( daemon, connect_timeout, true, errstack)) )
- return Q_SCHEDD_COMMUNICATION_ERROR;
-
- bool useFastPath = false;
-@@ -353,7 +356,7 @@ CondorQ::fetchQueueFromDB (ClassAdList &list,
- }
-
- int
--CondorQ::fetchQueueFromHostAndProcess ( const char *host,
-+CondorQ::fetchQueueFromDaemonAndProcess ( Daemon &daemon,
- StringList &attrs,
- process_function process_func,
- bool useFastPath,
-@@ -378,7 +381,7 @@ CondorQ::fetchQueueFromHostAndProcess ( const char *host,
- optimal. :^).
- */
- init(); // needed to get default connect_timeout
-- if( !(qmgr = ConnectQ( host, connect_timeout, true, errstack)) )
-+ if( !(qmgr = ConnectQ( daemon, connect_timeout, true, errstack)) )
- return Q_SCHEDD_COMMUNICATION_ERROR;
-
- // get the ads and filter them
-diff --git a/src/condor_utils/condor_q.h b/src/condor_utils/condor_q.h
-index 7f6a620..ccd9196 100644
---- a/src/condor_utils/condor_q.h
-+++ b/src/condor_utils/condor_q.h
-@@ -23,6 +23,7 @@
- #include "condor_common.h"
- #include "generic_query.h"
- #include "CondorError.h"
-+#include "daemon.h"
-
- #define MAXOWNERLEN 20
- #define MAXSCHEDDLEN 255
-@@ -90,8 +91,8 @@ class CondorQ
- // which pass the criterion specified by the constraints; default is
- // from the local schedd
- int fetchQueue (ClassAdList &, StringList &attrs, ClassAd * = 0, CondorError* errstack = 0);
-- int fetchQueueFromHost (ClassAdList &, StringList &attrs, const char * = 0, char const *schedd_version = 0,CondorError* errstack = 0);
-- int fetchQueueFromHostAndProcess ( const char *, StringList &attrs, process_function process_func, bool useFastPath, CondorError* errstack = 0);
-+ int fetchQueueFromDaemon (ClassAdList &, StringList &attrs, Daemon &, char const *schedd_version = 0,CondorError* errstack = 0);
-+ int fetchQueueFromDaemonAndProcess ( Daemon &, StringList &attrs, process_function process_func, bool useFastPath, CondorError* errstack = 0);
-
- // fetch the job ads from database
- int fetchQueueFromDB (ClassAdList &, char *&lastUpdate, const char * = 0, CondorError* errstack = 0);
-diff --git a/src/condor_utils/condor_query.cpp b/src/condor_utils/condor_query.cpp
-index 95bc78a..acc6201 100644
---- a/src/condor_utils/condor_query.cpp
-+++ b/src/condor_utils/condor_query.cpp
-@@ -386,10 +386,6 @@ addORConstraint (const char *value)
- QueryResult CondorQuery::
- fetchAds (ClassAdList &adList, const char *poolName, CondorError* errstack)
- {
-- Sock* sock;
-- int more;
-- QueryResult result;
-- ClassAd queryAd(extraAttrs), *ad;
-
- if ( !poolName ) {
- return Q_NO_COLLECTOR_HOST;
-@@ -402,7 +398,16 @@ fetchAds (ClassAdList &adList, const char *poolName, CondorError* errstack)
- return Q_NO_COLLECTOR_HOST;
- }
-
-+ return fetchAds(adList, my_collector, errstack);
-+}
-
-+QueryResult CondorQuery::
-+fetchAds (ClassAdList &adList, Daemon &my_collector, CondorError* errstack)
-+{
-+ Sock* sock;
-+ int more;
-+ QueryResult result;
-+ ClassAd queryAd(extraAttrs), *ad;
- // make the query ad
- result = getQueryAd (queryAd);
- if (result != Q_OK) return result;
-diff --git a/src/condor_utils/condor_query.h b/src/condor_utils/condor_query.h
-index 7e58eef..9fedcad 100644
---- a/src/condor_utils/condor_query.h
-+++ b/src/condor_utils/condor_query.h
-@@ -156,6 +156,7 @@ class CondorQuery
-
- // fetch from collector
- QueryResult fetchAds (ClassAdList &adList, const char * pool, CondorError* errstack = NULL);
-+ QueryResult fetchAds (ClassAdList &adList, Daemon &daemon, CondorError* errstack = NULL);
-
-
- // filter list of ads; arg1 is 'in', arg2 is 'out'
-diff --git a/src/condor_utils/ipv6_hostname.cpp b/src/condor_utils/ipv6_hostname.cpp
-index cfefb4b..3666bd4 100644
---- a/src/condor_utils/ipv6_hostname.cpp
-+++ b/src/condor_utils/ipv6_hostname.cpp
-@@ -197,10 +197,13 @@ int get_fqdn_and_ip_from_hostname(const MyString& hostname,
- MyString ret;
- condor_sockaddr ret_addr;
- bool found_ip = false;
-+ bool use_given_name = false;
-
- // if the hostname contains dot, hostname is assumed to be full hostname
- if (hostname.FindChar('.') != -1) {
- ret = hostname;
-+ fqdn = hostname;
-+ use_given_name = true;
- }
-
- if (nodns_enabled()) {
-@@ -219,7 +222,9 @@ int get_fqdn_and_ip_from_hostname(const MyString& hostname,
-
- while (addrinfo* info = ai.next()) {
- if (info->ai_canonname) {
-- fqdn = info->ai_canonname;
-+ dprintf(D_HOSTNAME, "Found canon addr: %s\n", info->ai_canonname);
-+ if (!use_given_name)
-+ fqdn = info->ai_canonname;
- addr = condor_sockaddr(info->ai_addr);
- return 1;
- }
-diff --git a/src/condor_who/who.cpp b/src/condor_who/who.cpp
-index 08f19b6..870668a 100644
---- a/src/condor_who/who.cpp
-+++ b/src/condor_who/who.cpp
-@@ -681,7 +681,7 @@ main( int argc, char *argv[] )
- ClassAdList result;
- if (addr || App.diagnostic) {
- CondorError errstack;
-- QueryResult qr = query->fetchAds (result, addr, &errstack);
-+ QueryResult qr = dae->locate() ? query->fetchAds (result, *dae, &errstack) : query->fetchAds (result, addr, &errstack);
- if (Q_OK != qr) {
- fprintf( stderr, "Error: %s\n", getStrQueryResult(qr) );
- fprintf( stderr, "%s\n", errstack.getFullText(true) );
diff --git a/condor-gahp.patch b/condor-gahp.patch
deleted file mode 100644
index 4d45bf2..0000000
--- a/condor-gahp.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-diff --git a/src/condor_gridmanager/gahp-client.cpp b/src/condor_gridmanager/gahp-client.cpp
-index d2c66ce629..a2a694a6b9 100644
---- a/src/condor_gridmanager/gahp-client.cpp
-+++ b/src/condor_gridmanager/gahp-client.cpp
-@@ -820,6 +820,16 @@ GahpServer::Startup()
- free( tmp_char );
- }
-
-+ // GLOBUS_LOCATION needs to be set for the blahp; otherwise, it defaults to /opt/globus,
-+ // which is likely never correct
-+ tmp_char = param("GLOBUS_LOCATION");
-+ if ( tmp_char ) {
-+ newenv.SetEnv( "GLOBUS_LOCATION", tmp_char );
-+ free( tmp_char );
-+ } else if (getenv("GLOBUS_LOCATION") == NULL) {
-+ newenv.SetEnv( "GLOBUS_LOCATION", "/usr" );
-+ }
-+
- // For amazon ec2 ca authentication
- tmp_char = param("GAHP_SSL_CAFILE");
- if( tmp_char ) {
diff --git a/condor-tmpfiles.conf b/condor-tmpfiles.conf
deleted file mode 100644
index a0095ba..0000000
--- a/condor-tmpfiles.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-d /var/run/condor 0775 condor condor -
-d /var/lock/condor 0775 condor condor -
-d /var/lock/condor/local 0775 condor condor -
\ No newline at end of file
diff --git a/condor.service b/condor.service
deleted file mode 100644
index f39c3e7..0000000
--- a/condor.service
+++ /dev/null
@@ -1,37 +0,0 @@
-
-[Unit]
-Description=Condor Distributed High-Throughput-Computing
-After=syslog.target network.target
-Wants=network.target
-
-[Service]
-EnvironmentFile=-/etc/sysconfig/condor
-ExecStart=/usr/sbin/condor_master -f
-ExecStop=/usr/sbin/condor_off -master
-ExecReload=/bin/kill -HUP $MAINPID
-Restart=always
-RestartSec=5
-StandardOutput=syslog
-LimitNOFILE=16384
-
-#######################################
-# Note: Below are cgroup options
-#######################################
-#Slice=condor
-#CPUAccounting=true
-#CPUShares=1024
-
-#MemoryAccounting=true
-#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
-
-#BlockIOAccounting=true
-#BlockIOWeight=??
-#BlockIODeviceWeight=??
-#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
-
-#DeviceAllow=
-#DevicePolicy=auto|closed|strict
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/condor.spec b/condor.spec
index f537a33..7f57bbf 100644
--- a/condor.spec
+++ b/condor.spec
@@ -1,136 +1,106 @@
%global newname htcondor
-%global srcver 8_8_15
-
-%ifarch %{arm} %{ix86} x86_64
-%global with_mongodb 1
-%endif
-
-# disable plumage (and need for mongodb)
-%global with_mongodb 0
-
-# enable aviary
-%global with_aviary 0
-
-# enable BOSCO
-%global with_bosco 0
-
-# enable CREAM gahp
-%global with_cream_gahp 0
+%global version 23.0.0
+%global version_ %(tr . _ <<< %{version})
#######################
Name: condor
-Version: 8.8.15
-Release: 10%{?dist}
+Version: 23.0.0
+Release: 1%{?dist}
Summary: HTCondor: High Throughput Computing
License: ASL 2.0
-URL: http://research.cs.wisc.edu/htcondor/
+URL: http://htcondor.org
##############################################################
# NOTE: If you wish to setup a debug build either add a patch
# or adjust the URL to a private github location
##############################################################
-Source0: https://github.com/htcondor/htcondor/archive/V%{srcver}/%{newname}-%{vers...
-Source1: %{name}-tmpfiles.conf
-Source2: %{name}.service
-Source3: 00personal_condor.config
-
-Patch1: condor-gahp.patch
-# turn off the cmake regex-replace hack that removes "-Werror", as it
-# breaks the new cflag "-Werror=format-security" passed in from build system:
-Patch2: Werror_replace.patch
-Patch5: python-scripts.patch
-Patch6: boost-python38.patch
-Patch7: doc-conf.patch
+Source0: https://github.com/htcondor/htcondor/archive/V%{version}/%{newname}-%{ver...
#######################
BuildRequires: gcc gcc-c++
-BuildRequires: cmake
-BuildRequires: flex
-BuildRequires: byacc
-BuildRequires: pcre-devel
+BuildRequires: cmake >= 3.16
+BuildRequires: pcre2-devel
BuildRequires: openssl-devel
BuildRequires: krb5-devel
BuildRequires: libvirt-devel
BuildRequires: bind-utils
-BuildRequires: m4
BuildRequires: libX11-devel
+BuildRequires: libXScrnSaver-devel
BuildRequires: libcurl-devel
BuildRequires: expat-devel
-BuildRequires: openldap-devel
BuildRequires: python3-setuptools
BuildRequires: python3-sphinx
BuildRequires: python3-sphinx_rtd_theme
BuildRequires: boost-devel
-BuildRequires: boost-python3
+BuildRequires: boost-python3-devel
+BuildRequires: boost-static
+BuildRequires: glibc-static
BuildRequires: libuuid-devel
BuildRequires: sqlite-devel
+BuildRequires: patch
# needed for param table generator
BuildRequires: perl-generators
+BuildRequires: perl(Archive::Tar)
BuildRequires: perl(Data::Dumper)
+BuildRequires: perl(Digest::MD5)
+BuildRequires: perl(XML::Parser)
-# Globus GSI build requirements
-BuildRequires: globus-gssapi-gsi-devel
-BuildRequires: globus-gass-server-ez-devel
-BuildRequires: globus-gass-transfer-devel
-BuildRequires: globus-gram-client-devel
-BuildRequires: globus-rsl-devel
-BuildRequires: globus-gram-protocol
-BuildRequires: globus-io-devel
-BuildRequires: globus-xio-devel
-BuildRequires: globus-gssapi-error-devel
-BuildRequires: globus-gss-assist-devel
-BuildRequires: globus-gsi-proxy-core-devel
-BuildRequires: globus-gsi-credential-devel
-BuildRequires: globus-gsi-callback-devel
-BuildRequires: globus-gsi-sysconfig-devel
-BuildRequires: globus-gsi-cert-utils-devel
-BuildRequires: globus-openssl-module-devel
-BuildRequires: globus-gsi-openssl-error-devel
-BuildRequires: globus-gsi-proxy-ssl-devel
-BuildRequires: globus-callout-devel
-BuildRequires: globus-common-devel
-BuildRequires: globus-ftp-client-devel
-BuildRequires: globus-ftp-control-devel
-BuildRequires: libtool-ltdl-devel
BuildRequires: munge-devel
BuildRequires: voms-devel
-# support for aviary
-%if 0%{?with_aviary}
-BuildRequires: wso2-wsf-cpp-devel
-BuildRequires: wso2-axis2-devel
-%endif
-# support for plumage
-%if 0%{?with_mongodb}
-BuildRequires: mongodb-devel
-%endif
-# support for cream (glite-ce-cream-client-devel doesn't exist in Fedora)
-#BuildRequires: glite-ce-cream-client-devel
-#BuildRequires: glite-lbjp-common-gsoap-plugin-devel
-#BuildRequires: glite-ce-cream-utils
-#BuildRequires: log4cpp-devel
-#BuildRequires: gridsite-devel
+BuildRequires: nss-devel
+BuildRequires: openldap-devel
+BuildRequires: scitokens-cpp-devel
# we now need to request the python libs and includes explicitly:
BuildRequires: python3-devel
-BuildRequires: python3-libs
# Added by B.DeKnuydt (Jan 2020)
-BuildRequires: zlib zlib-devel
BuildRequires: libxml2 libxml2-devel
-#BuildRequires: libcgroup libcgroup-devel
BuildRequires: pam-devel
BuildRequires: make
+BuildRequires: systemd-devel
+BuildRequires: systemd-units
+
#######################
# Installation requirements.
-Requires: mailx
+Requires: /usr/sbin/sendmail
Requires: python3
-Requires: condor-classads = %{version}-%{release}
-Requires: condor-procd = %{version}-%{release}
+
+# Require libraries that we dlopen
+# Ganglia is optional as well as nVidia and cuda libraries
Requires: voms
+Requires: krb5-libs
+Requires: libcom_err
+Requires: munge-libs
+Requires: openssl-libs
+Requires: scitokens-cpp >= 0.6.2
+Requires: systemd-libs
-# doesn't exist in fedora
-#Requires: blahp
-#Requires: glexec
+# openssh-server needed for condor_ssh_to_job
+Requires: openssh-server
+
+# net-tools needed to provide netstat for condor_who
+Requires: net-tools
+
+# Useful tools are using the Python bindings
+Requires: python3-condor = %{version}-%{release}
+Requires: python3-requests
+
+# procd package discontinued as of 10.8.0
+Obsoletes: %{name}-procd < 10.8.0
+Provides: %{name}-procd = %{version}-%{release}
+
+# all package discontinued as of 10.8.0
+Obsoletes: %{name}-openstack-gahp < 10.8.0
+Provides: %{name}-openstack-gahp = %{version}-%{release}
+
+# classads package discontinued as of 10.8.0
+Obsoletes: %{name}-classads < 10.8.0
+Provides: %{name}-classads = %{version}-%{release}
+
+# classads-devel package discontinued as of 10.8.0
+Obsoletes: %{name}-classads-devel < 10.8.0
+Provides: %{name}-classads-devel = %{version}-%{release}
%description
HTCondor is a workload management system for high-throughput and
@@ -143,64 +113,12 @@ monitors their progress, and ultimately informs the user upon
completion.
#######################
-%package procd
-Summary: HTCondor Process tracking Daemon
-%description procd
-A daemon for tracking child processes started by a parent.
-Part of HTCondor, but able to be stand-alone
+%package devel
+Summary: Development files for HTCondor
+Group: Applications/System
-#######################
-%if 0%{?with_aviary}
-%package aviary-common
-Summary: HTCondor Aviary development components
-Requires: %name = %version-%release
-Requires: python2-suds
-
-%description aviary-common
-Components to develop against simplified WS interface to HTCondor.
-
-%package aviary
-Summary: HTCondor Aviary components
-Requires: %name = %version-%release
-Requires: condor = %{version}-%{release}
-Requires: condor-aviary-common = %{version}-%{release}
-
-%description aviary
-Components to provide simplified WS interface to HTCondor.
-
-%package aviary-hadoop-common
-Summary: HTCondor Aviary Hadoop development components
-Requires: %name = %version-%release
-Requires: python2-suds
-Requires: condor-aviary-common = %{version}-%{release}
-Requires: tar
-
-%description aviary-hadoop-common
-Components to develop against simplified WS interface to HTCondor.
-
-%package aviary-hadoop
-Summary: HTCondor Aviary Hadoop components
-Requires: %name = %version-%release
-Requires: condor-aviary = %{version}-%{release}
-Requires: condor-aviary-hadoop-common = %{version}-%{release}
-
-%description aviary-hadoop
-Aviary Hadoop plugin and components.
-%endif
-
-#######################
-%if 0%{?with_mongodb}
-%package plumage
-Summary: HTCondor Plumage components
-Requires: %name = %version-%release
-Requires: condor-classads = %{version}-%{release}
-Requires: mongodb
-Requires: pymongo
-Requires: python2-dateutil
-
-%description plumage
-Components to provide a NoSQL operational data store for HTCondor.
-%endif
+%description devel
+Development files for HTCondor
#######################
%package kbdd
@@ -213,6 +131,15 @@ The condor_kbdd monitors logged in X users for activity. It is only
useful on systems where no device (e.g. /dev/*) can be used to
determine console idle time.
+#######################
+%package test
+Summary: HTCondor Self Tests
+Group: Applications/System
+Requires: %name = %version-%release
+
+%description test
+A collection of tests to verify that HTCondor is operating properly.
+
#######################
%package vm-gahp
Summary: HTCondor's VM Gahp
@@ -225,67 +152,9 @@ The condor_vm-gahp enables the Virtual Machine Universe feature of
HTCondor. The VM Universe uses libvirt to start and control VMs under
HTCondor's Startd.
-#######################
-%package openstack-gahp
-Summary: HTCondor's OpenStack Gahp
-Requires: %name = %version-%release
-Requires: condor = %{version}-%{release}
-
-%description openstack-gahp
-The openstack_gahp enables HTCondor's ability to manage jobs run on
-resources exposed by the OpenStack API.
-
-#######################
-%package classads
-Summary: HTCondor's classified advertisement language
-Obsoletes: classads <= 1.0.8
-Obsoletes: classads-static <= 1.0.8
-
-%description classads
-Classified Advertisements (classads) are the lingua franca of
-HTCondor. They are used for describing jobs, workstations, and other
-resources. They are exchanged by HTCondor processes to schedule
-jobs. They are logged to files for statistical and debugging
-purposes. They are used to enquire about current state of the system.
-
-A classad is a mapping from attribute names to expressions. In the
-simplest cases, the expressions are simple constants (integer,
-floating point, or string). A classad is thus a form of property
-list. Attribute expressions can also be more complicated. There is a
-protocol for evaluating an attribute expression of a classad vis a vis
-another ad. For example, the expression "other.size > 3" in one ad
-evaluates to true if the other ad has an attribute named size and the
-value of that attribute is (or evaluates to) an integer greater than
-three. Two classads match if each ad has an attribute requirements
-that evaluates to true in the context of the other ad. Classad
-matching is used by the HTCondor central manager to determine the
-compatibility of jobs and workstations where they may be run.
-
-#######################
-%package classads-devel
-Summary: Headers for HTCondor's classified advertisement language
-Requires: %name-classads = %version-%release
-Requires: pcre-devel
-Obsoletes: classads-devel <= 1.0.8
-
-%description classads-devel
-Header files for HTCondor's ClassAd Library, a powerful and flexible,
-semi-structured representation of data.
-
-#######################
-%if 0%{?with_cream_gahp}
-%%package cream-gahp
-Summary: Allows Condor to act as a client to CREAM.
-Requires: %%name = %%version-%%release
-
-%%description cream-gahp
-The cream_gahp enables the Condor grid universe to communicate with a remote
-CREAM server.
-%endif
-
#######################
%package -n python3-condor
-Summary: Python bindings for Condor.
+Summary: Python bindings for HTCondor
Requires: %name = %version-%release
%{?python_provide:%python_provide python3-condor}
@@ -293,6 +162,42 @@ Requires: %name = %version-%release
The python bindings allow one to directly invoke the C++ implementations of
the ClassAd library and HTCondor from python
+#######################
+%package credmon-oauth
+Summary: OAuth2 credmon for HTCondor
+Group: Applications/System
+Requires: %name = %version-%release
+Requires: python3-condor = %{version}-%{release}
+Requires: python3-requests-oauthlib
+Requires: python3-six
+Requires: python3-flask
+Requires: python3-cryptography
+Requires: python3-scitokens
+Requires: python3-mod_wsgi
+Requires: httpd
+
+%description credmon-oauth
+The OAuth2 credmon allows users to obtain credentials from configured
+OAuth2 endpoints and to use those credentials securely inside running jobs.
+
+#######################
+%package credmon-vault
+Summary: Vault credmon for HTCondor
+Group: Applications/System
+Requires: %name = %version-%release
+Requires: python3-condor = %{version}-%{release}
+Requires: python3-six
+Requires: python3-cryptography
+# Although htgettoken is only needed on the submit machine and
+# condor-credmon-vault is needed on both the submit and credd machines,
+# htgettoken is small so it doesn't hurt to require it in both places.
+Requires: htgettoken >= 1.1
+Conflicts: %name-credmon-oauth
+
+%description credmon-vault
+The Vault credmon allows users to obtain credentials from Vault using
+htgettoken and to use those credentials securely inside running jobs.
+
#######################
%package -n minicondor
Summary: Configuration for a single-node HTCondor
@@ -304,34 +209,10 @@ This example configuration is good for trying out HTCondor for the first time.
It only configures the IPv4 loopback address, turns on basic security, and
shortens many timers to be more responsive.
-#######################
-# The bosco subpkg is currently dropping file that breaks the out-of-box condor
-# configuration (60-campus_factory.config). The file looks somewhat site-
-# specific. I'm going to disable bosco until it can be made more generic for
-# fedora, and/or not break default condor config out of box.
-%if 0%{?with_bosco}
-%package bosco
-Summary: BOSCO, a Condor overlay system for managing jobs at remote clusters
-Url: http://bosco.opensciencegrid.org
-Requires: %name = %version-%release
-
-%description bosco
-BOSCO allows a locally-installed Condor to submit jobs to remote clusters,
-using SSH as a transit mechanism. It is designed for cases where the remote
-cluster is using a different batch system such as PBS, SGE, LSF, or another
-Condor system.
-
-BOSCO provides an overlay system so the remote clusters appear to be a Condor
-cluster. This allows the user to run their workflows using Condor tools across
-multiple clusters.
-%endif
-
#######################
%package annex-ec2
Summary: Configuration and scripts to make an EC2 image annex-compatible.
Requires: %name = %version-%release
-Requires(post): /sbin/chkconfig
-Requires(preun): /sbin/chkconfig
%description annex-ec2
Configures HTCondor to make an EC2 image annex-compatible. Do NOT install
@@ -344,13 +225,29 @@ on a non-EC2 image.
%config(noreplace) %_sysconfdir/condor/master_shutdown_script.sh
%post annex-ec2
-/bin/systemctl enable condor-annex-ec2
+#/bin/systemctl enable condor-annex-ec2
%preun annex-ec2
if [ $1 == 0 ]; then
/bin/systemctl disable condor-annex-ec2
fi
+#######################
+%package upgrade-checks
+Summary: Script to check for manual interventions needed to upgrade
+Group: Applications/System
+Requires: python3-condor
+Requires: pcre2-tools
+
+%description upgrade-checks
+HTCondor V9 to V10 check for known breaking changes:
+1. IDToken TRUST_DOMAIN default value change
+2. Upgrade to PCRE2 breaking map file regex sequences
+3. The way to request GPU resources for a job
+
+%files upgrade-checks
+%_bindir/condor_upgrade_check
+
%pre
getent group %{name} >/dev/null || groupadd -r %{name}
getent passwd %{name} >/dev/null || \
@@ -359,54 +256,20 @@ getent passwd %{name} >/dev/null || \
exit 0
%prep
-%setup -q -n %{newname}-%{srcver}
-%patch1 -p1
-%patch2 -p1
-%patch5 -p1
-%patch7 -p1
-cp %{SOURCE1} %{name}-tmpfiles.conf
-cp %{SOURCE2} %{name}.service
-cp %{SOURCE3} .
+%setup -q -n %{newname}-%{version}
+
+# fix errant execute permissions
+find src -perm /a+x -type f -name "*.[Cch]" -exec chmod a-x {} \;
%build
make -C docs man
-%cmake -DNO_PHONE_HOME:BOOL=TRUE \
+%cmake -DBUILDID:STRING=RH-%{version}-%{release} \
-DBUILD_TESTING:BOOL=FALSE \
- -DBUILDID:STRING=RH-%{version}-%{release} \
- -D_VERBOSE:BOOL=TRUE \
-DCMAKE_SKIP_RPATH:BOOL=TRUE \
- -DHAVE_BACKFILL:BOOL=FALSE \
- -DHAVE_BOINC:BOOL=FALSE \
- -DWITH_GSOAP:BOOL=FALSE \
- -DWITH_POSTGRESQL:BOOL=FALSE \
- -DHAVE_KBDD:BOOL=TRUE \
- -DHAVE_HIBERNATION:BOOL=TRUE \
- -DWANT_LEASE_MANAGER:BOOL=FALSE \
- -DWANT_HDFS:BOOL=FALSE \
- -DWANT_QUILL:BOOL=FALSE \
- -DWITH_QPID:BOOL=FALSE \
- -DWITH_ZLIB:BOOL=FALSE \
- -DWITH_POSTGRESQL:BOOL=FALSE \
- -DWANT_CONTRIB:BOOL=ON \
- -DWITH_BOSCO:BOOL=FALSE \
- -DWITH_PIGEON:BOOL=FALSE \
- -DWITH_MANAGEMENT:BOOL=FALSE \
-%if 0%{?with_mongodb}
- -DWITH_PLUMAGE:BOOL=TRUE \
-%endif
-%if 0%{?with_aviary}
- -DWITH_AVIARY:BOOL=TRUE \
-%endif
- -DWANT_FULL_DEPLOYMENT:BOOL=TRUE \
- -DBLAHP_FOUND=/usr/libexec/BLClient \
- -DWITH_BLAHP:BOOL=TRUE \
- -DWITH_CREAM:BOOL=FALSE \
- -DWANT_GLEXEC:BOOL=TRUE \
- -DWANT_MAN_PAGES:BOOL=TRUE \
- -DWITH_LIBDELTACLOUD:BOOL=TRUE \
- -DWITH_GLOBUS:BOOL=TRUE \
- -DWITH_PYTHON_BINDINGS:BOOL=TRUE \
- -DWITH_LIBCGROUP:BOOL=FALSE
+ -DPACKAGEID:STRING=%{version}-%{release} \
+ -DCONDOR_PACKAGE_BUILD:BOOL=TRUE \
+ -DCONDOR_RPMBUILD:BOOL=TRUE \
+ -DCMAKE_INSTALL_PREFIX:PATH=/
%cmake_build
@@ -422,32 +285,11 @@ function populate {
rm -rf %{buildroot}
%cmake_install
-# The install target puts etc/ under usr/, let's fix that.
-mv %{buildroot}/usr/etc %{buildroot}/%{_sysconfdir}
-
-populate %_sysconfdir/condor %{buildroot}/%{_usr}/lib/condor_ssh_to_job_sshd_config_template
-
-# Things in /usr/lib really belong in /usr/share/condor
-populate %{_datadir}/condor %{buildroot}/%{_usr}/lib/*
-# Except for the shared libs
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libclassad.s*
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libcondor_utils*.so
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libpy3classad%{python3_version}_%{srcver}.so
-# and python site-packages
-if [ -d %{buildroot}/%{_datadir}/condor/python3.* ]; then
- mv %{buildroot}/%{_datadir}/condor/python3.* %{buildroot}/%{_libdir}/
-fi
-rm -f %{buildroot}/%{_datadir}/condor/libclassad.a
-
-# Remove the small shadow if built
-rm -f %{buildroot}/%{_sbindir}/condor_shadow_s
-
-# It is proper to put HTCondor specific libexec binaries under libexec/condor/
-populate %_libexecdir/condor %{buildroot}/usr/libexec/*
+# Drop in a symbolic link for backward compatibility
+ln -s ../..%{_libdir}/condor/condor_ssh_to_job_sshd_config_template %{buildroot}/%_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
-# man pages
-mkdir -p %{buildroot}/%{_mandir}
-mv %{buildroot}/usr/man %{buildroot}/%{_mandir}/man1
+mv %{buildroot}/usr/share/doc/condor-%{version} %{buildroot}/usr/share/doc/condor
+populate /usr/share/doc/condor/examples %{buildroot}/usr/share/doc/condor/etc/examples/*
mkdir -p %{buildroot}/%{_sysconfdir}/condor
# the default condor_config file is not architecture aware and thus
@@ -460,236 +302,254 @@ if [ "$LIB" = "%_libdir" ]; then
echo "_libdir does not contain /usr, sed expression needs attention"
exit 1
fi
-sed -e "s:^LIB\s*=.*:LIB = \$(RELEASE_DIR)/$LIB/condor:" \
- %{buildroot}/etc/examples/condor_config.generic.redhat \
- > %{buildroot}/%{_sysconfdir}/condor/condor_config
# Install the basic configuration, a Personal HTCondor config. Allows for
# yum install condor + service condor start and go.
-#mkdir -m0755 %{buildroot}/%{_sysconfdir}/condor/config.d
-install -m 0644 00personal_condor.config %{buildroot}/%{_sysconfdir}/condor/config.d/00personal_condor.config
-
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/00-minicondor
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/50ec2.config
+mkdir -p -m0755 %{buildroot}/%{_sysconfdir}/condor/config.d
+mkdir -p -m0700 %{buildroot}/%{_sysconfdir}/condor/passwords.d
+mkdir -p -m0700 %{buildroot}/%{_sysconfdir}/condor/tokens.d
-%if 0%{?with_aviary}
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/61aviary.config
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/63aviary-hadoop.config
-
-mkdir -p %{buildroot}/%{_var}/lib/condor/aviary
-populate %{_var}/lib/condor/aviary %{buildroot}/usr/axis2.xml
-populate %{_var}/lib/condor/aviary %{buildroot}/usr/services/
-
-populate %{_libdir}/condor/plugins %{buildroot}/%{_usr}/libexec/condor/*-plugin.so
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libaviary_*
-%endif
+populate %_sysconfdir/condor/config.d %{buildroot}/usr/share/doc/condor/examples/00-htcondor-9.0.config
+populate %_sysconfdir/condor/config.d %{buildroot}/usr/share/doc/condor/examples/00-minicondor
+populate %_sysconfdir/condor/config.d %{buildroot}/usr/share/doc/condor/examples/50ec2.config
+# Install a second config.d directory under /usr/share, used for the
+# convenience of software built on top of Condor such as GlideinWMS.
+mkdir -p -m0755 %{buildroot}/usr/share/condor/config.d
-%if 0%{?with_mongodb}
-# Install condor-plumage's base plugin configuration
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/62plumage.config
-%endif
-rm -f %{buildroot}/%{_bindir}/ods_job_etl_tool
-rm -f %{buildroot}/%{_sbindir}/ods_job_etl_server
-mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/ViewHist
-
-mkdir -p -m0755 %{buildroot}/%{_var}/run/condor
mkdir -p -m0755 %{buildroot}/%{_var}/log/condor
-mkdir -p -m0755 %{buildroot}/%{_var}/lock/condor
-mkdir -p -m1777 %{buildroot}/%{_var}/lock/condor/local
+# Note we use %{_var}/lib instead of %{_sharedstatedir} for RHEL5 compatibility
mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/spool
-mkdir -p -m1777 %{buildroot}/%{_var}/lib/condor/execute
+mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/execute
+mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/krb_credentials
+mkdir -p -m2770 %{buildroot}/%{_var}/lib/condor/oauth_credentials
-# not packaging standard universe
-rm %{buildroot}/%{_mandir}/man1/condor_compile.1
-rm %{buildroot}/%{_mandir}/man1/condor_checkpoint.1
# not packaging configure/install scripts
-rm %{buildroot}/%{_mandir}/man1/condor_configure.1
+rm -f %{buildroot}%{_bindir}/make-personal-from-tarball
+rm -f %{buildroot}%{_sbindir}/condor_configure
+rm -f %{buildroot}%{_sbindir}/condor_install
+rm -f %{buildroot}/%{_mandir}/man1/condor_configure.1
+rm -f %{buildroot}/%{_mandir}/man1/condor_install.1
-# Remove junk
-rm -r %{buildroot}/%{_sysconfdir}/sysconfig
-rm -r %{buildroot}/%{_sysconfdir}/init.d
+mkdir -p %{buildroot}/%{_var}/www/wsgi-scripts/condor_credmon_oauth
+mv %{buildroot}/%{_libexecdir}/condor/condor_credmon_oauth.wsgi %{buildroot}/%{_var}/www/wsgi-scripts/condor_credmon_oauth/condor_credmon_oauth.wsgi
-# install tmpfiles.d/condor.conf
-mkdir -p %{buildroot}%{_tmpfilesdir}/tmpfiles.d
-install -m 0644 %{name}-tmpfiles.conf %{buildroot}%{_tmpfilesdir}/%{name}.conf
+# Move oauth credmon config files out of examples and into config.d
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/config/condor/40-oauth-credmon.conf %{buildroot}/%{_sysconfdir}/condor/config.d/40-oauth-credmon.conf
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/config/condor/40-oauth-tokens.conf %{buildroot}/%{_sysconfdir}/condor/config.d/40-oauth-tokens.conf
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/README.credentials %{buildroot}/%{_var}/lib/condor/oauth_credentials/README.credentials
-install -Dp -m0755 %{buildroot}/etc/examples/condor-annex-ec2 %{buildroot}%{_libexecdir}/condor/condor-annex-ec2
+# Move vault credmon config file out of examples and into config.d
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/config/condor/40-vault-credmon.conf %{buildroot}/%{_sysconfdir}/condor/config.d/40-vault-credmon.conf
-mkdir -p %{buildroot}%{_unitdir}
-install -m 0644 %{buildroot}/etc/examples/condor-annex-ec2.service %{buildroot}%{_unitdir}/condor-annex-ec2.service
+# install tmpfiles.d/condor.conf
+mkdir -p %{buildroot}%{_tmpfilesdir}
+install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor-tmpfiles.conf %{buildroot}%{_tmpfilesdir}/%{name}.conf
-mkdir -p %{buildroot}%{_localstatedir}/run/
-install -d -m 0710 %{buildroot}%{_localstatedir}/run/%{name}/
+install -Dp -m0755 %{buildroot}/usr/share/doc/condor/examples/condor-annex-ec2 %{buildroot}%{_libexecdir}/condor/condor-annex-ec2
mkdir -p %{buildroot}%{_unitdir}
-install -m 0644 %{name}.service %{buildroot}%{_unitdir}/condor.service
-
-mv %{buildroot}%{python3_sitearch}/py3htcondor.so %{buildroot}%{python3_sitearch}/htcondor.so
-mv %{buildroot}%{python3_sitearch}/py3classad.so %{buildroot}%{python3_sitearch}/classad.so
-
-# Remove stuff that comes from the full-deploy
-rm -rf %{buildroot}%{_sbindir}/cleanup_release
-rm -rf %{buildroot}%{_sbindir}/condor_cleanup_local
-rm -rf %{buildroot}%{_sbindir}/condor_cold_start
-rm -rf %{buildroot}%{_sbindir}/condor_cold_stop
-rm -rf %{buildroot}%{_sbindir}/condor_config_bind
-rm -rf %{buildroot}%{_sbindir}/condor_configure
-rm -rf %{buildroot}%{_sbindir}/condor_credd
-rm -rf %{buildroot}%{_sbindir}/condor_install
-rm -rf %{buildroot}%{_sbindir}/condor_install_local
-rm -rf %{buildroot}%{_sbindir}/condor_local_start
-rm -rf %{buildroot}%{_sbindir}/condor_local_stop
-rm -rf %{buildroot}%{_sbindir}/condor_startd_factory
-rm -rf %{buildroot}%{_sbindir}/condor_vm-gahp-vmware
-rm -rf %{buildroot}%{_sbindir}/condor_vm_vmwar*
-rm -rf %{buildroot}%{_sbindir}/filelock_midwife
-rm -rf %{buildroot}%{_sbindir}/filelock_undertaker
-rm -rf %{buildroot}%{_sbindir}/install_release
-rm -rf %{buildroot}%{_sbindir}/uniq_pid_command
-rm -rf %{buildroot}%{_sbindir}/uniq_pid_midwife
-rm -rf %{buildroot}%{_sbindir}/uniq_pid_undertaker
-rm -rf %{buildroot}%{_datadir}/condor/*.pm
-rm -rf %{buildroot}%{_datadir}/condor/Chirp.jar
-rm -rf %{buildroot}%{_usrsrc}/chirp/chirp_*
-rm -rf %{buildroot}%{_usrsrc}/startd_factory
-rm -rf %{buildroot}/usr/DOC
-rm -rf %{buildroot}/usr/INSTALL
-rm -rf %{buildroot}/usr/LICENSE-2.0.txt
-rm -rf %{buildroot}/usr/NOTICE.txt
-rm -rf %{buildroot}/usr/README
-rm -rf %{buildroot}/usr/examples/
-rm -rf %{buildroot}%{_includedir}/MyString.h
-rm -rf %{buildroot}%{_includedir}/chirp_client.h
-rm -rf %{buildroot}%{_includedir}/compat_classad*
-rm -rf %{buildroot}%{_includedir}/condor_classad.h
-rm -rf %{buildroot}%{_includedir}/condor_constants.h
-rm -rf %{buildroot}%{_includedir}/condor_event.h
-rm -rf %{buildroot}%{_includedir}/condor_header_features.h
-rm -rf %{buildroot}%{_includedir}/condor_holdcodes.h
-rm -rf %{buildroot}%{_includedir}/file_lock.h
-rm -rf %{buildroot}%{_includedir}/iso_dates.h
-rm -rf %{buildroot}%{_includedir}/read_user_log.h
-rm -rf %{buildroot}%{_includedir}/stl_string_utils.h
-rm -rf %{buildroot}%{_includedir}/user_log.README
-rm -rf %{buildroot}%{_includedir}/user_log.c++.h
-rm -rf %{buildroot}%{_includedir}/write_user_log.h
-rm -rf %{buildroot}%{_libexecdir}/condor/bgp_*
-rm -rf %{buildroot}%{_datadir}/condor/libchirp_client.*
-rm -rf %{buildroot}%{_datadir}/condor/libcondorapi.a
-rm -rf %{buildroot}%{_datadir}/condor/python/{htcondor,classad}.so
-rm -rf %{buildroot}%{_datadir}/condor/{libpy*classad_*,htcondor,classad}.so
-rm %{buildroot}%{_libexecdir}/condor/condor_schedd.init
-rm -rf %{buildroot}%{_libexecdir}/condor/pandad
-rm -rf %{buildroot}%{_libexecdir}/condor/libclassad_python*_user.so
-
-# Install BOSCO
-%if 0%{?with_bosco}
-mv %{buildroot}%{_libexecdir}/condor/campus_factory/share/condor/condor_config.factory %{buildroot}%{_sysconfdir}/condor/config.d/60-campus_factory.config
-mv %{buildroot}%{_libexecdir}/condor/campus_factory/etc/campus_factory.conf %{buildroot}%{_sysconfdir}/condor/
-mv %{buildroot}%{_libexecdir}/condor/campus_factory/share %{buildroot}%{_datadir}/condor/campus_factory
+install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor-annex-ec2.service %{buildroot}%{_unitdir}/condor-annex-ec2.service
+install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor.service %{buildroot}%{_unitdir}/condor.service
+# Disabled until HTCondor security fixed.
+# install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor.socket %{buildroot}%{_unitdir}/condor.socket
+
+%if 0%{?rhel} >= 7
+mkdir -p %{buildroot}%{_datadir}/condor/
+cp %{SOURCE8} %{buildroot}%{_datadir}/condor/
%endif
-rm -rf %{buildroot}%{_libexecdir}/condor/campus_factory
-rm -rf %{buildroot}/%{_sbindir}/bosco_install
-rm -rf %{buildroot}/%{_sbindir}/campus_factory
-rm -rf %{buildroot}/%{_sbindir}/condor_ft-gahp
-rm -rf %{buildroot}/%{_sbindir}/glidein_creation
-rm -rf %{buildroot}/%{_sbindir}/runfactory
-rm -rf %{buildroot}/%{_mandir}/man1/bosco*
-
-# we must place the config examples in builddir
-cp -rf %{buildroot}/etc/examples %{_builddir}/%{name}-%{tarball_version}
-rm -rf %{buildroot}/etc/examples
+#Fixups for packaged build, should have been done by cmake
+
+mkdir -p %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/lib64/condor/Chirp.jar %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/lib64/condor/CondorJava*.class %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/lib64/condor/libchirp_client.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/lib64/condor/libcondorapi.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/lib64/condor/libcondor_utils_*.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/lib64/condor/libpyclassad3*.so %{buildroot}/usr/lib64
+
+rm -rf %{buildroot}/usr/share/doc/condor/LICENSE
+rm -rf %{buildroot}/usr/share/doc/condor/NOTICE.txt
+rm -rf %{buildroot}/usr/share/doc/condor/README
+
+# Move batch system customization files to /etc, with symlinks in the
+# original location. Admins will need to edit these.
+install -m 0755 -d -p %{buildroot}%{_sysconfdir}/blahp
+for batch_system in condor kubernetes lsf nqs pbs sge slurm; do
+ mv %{buildroot}%{_libexecdir}/blahp/${batch_system}_local_submit_attributes.sh %{buildroot}%{_sysconfdir}/blahp
+ ln -s %{_sysconfdir}/blahp/${batch_system}_local_submit_attributes.sh \
+ %{buildroot}%{_libexecdir}/blahp/${batch_system}_local_submit_attributes.sh
+done
+
+#################
%files
-%doc LICENSE-2.0.txt NOTICE.txt
-%config(noreplace) %_sysconfdir/bash_completion.d/condor
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
+%doc /usr/share/doc/condor/examples
%dir %_sysconfdir/condor/
-%config(noreplace) %_sysconfdir/condor/condor_config
-%config(noreplace) %{_tmpfilesdir}/%{name}.conf
+%config %_sysconfdir/condor/condor_config
+%{_tmpfilesdir}/%{name}.conf
%{_unitdir}/condor.service
+# Disabled until HTCondor security fixed.
+# % {_unitdir}/condor.socket
%dir %_datadir/condor/
+%_datadir/condor/Chirp.jar
%_datadir/condor/CondorJavaInfo.class
%_datadir/condor/CondorJavaWrapper.class
-%_datadir/condor/scimark2lib.jar
+%if 0%{?rhel} >= 7
+%_datadir/condor/htcondor.pp
+%endif
+%dir %_sysconfdir/condor/passwords.d/
+%dir %_sysconfdir/condor/tokens.d/
%dir %_sysconfdir/condor/config.d/
-%dir %_sysconfdir/condor/ganglia.d
-%config(noreplace) %_sysconfdir/condor/ganglia.d/00_default_metrics
-%config(noreplace) %_sysconfdir/condor/config.d/00personal_condor.config
-%config(noreplace) %_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
+%config(noreplace) %{_sysconfdir}/condor/config.d/00-htcondor-9.0.config
+%dir /usr/share/condor/config.d/
+%_libdir/condor/condor_ssh_to_job_sshd_config_template
+%_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
+%_sysconfdir/bash_completion.d/condor
+%_libdir/libchirp_client.so
+%_libdir/libcondor_utils_%{version_}.so
+%_libdir/libcondorapi.so
+%_libdir/condor/libgetpwnam.so
%dir %_libexecdir/condor/
+%_libexecdir/condor/linux_kernel_tuning
+%_libexecdir/condor/accountant_log_fixer
%_libexecdir/condor/condor_chirp
%_libexecdir/condor/condor_ssh
%_libexecdir/condor/sshd.sh
%_libexecdir/condor/get_orted_cmd.sh
%_libexecdir/condor/orted_launcher.sh
+%_libexecdir/condor/set_batchtok_cmd
+%_libexecdir/condor/cred_producer_krb
%_libexecdir/condor/condor_job_router
-%_libexecdir/condor/condor_gangliad
-%_libexecdir/condor/condor_glexec_setup
-%_libexecdir/condor/condor_glexec_run
-%_libexecdir/condor/condor_glexec_job_wrapper
-%_libexecdir/condor/condor_glexec_update_proxy
-%_libexecdir/condor/condor_glexec_cleanup
-%_libexecdir/condor/condor_glexec_kill
-%_libexecdir/condor/glite/bin/*
+%_libexecdir/condor/condor_pid_ns_init
+%_libexecdir/condor/condor_urlfetch
+%_libexecdir/condor/htcondor_docker_test
+%_libexecdir/condor/exit_37.sif
+%dir %_libexecdir/condor/singularity_test_sandbox/
+%dir %_libexecdir/condor/singularity_test_sandbox/dev/
+%dir %_libexecdir/condor/singularity_test_sandbox/proc/
+%_libexecdir/condor/singularity_test_sandbox/exit_37
%_libexecdir/condor/condor_limits_wrapper.sh
%_libexecdir/condor/condor_rooster
+%_libexecdir/condor/condor_schedd.init
%_libexecdir/condor/condor_ssh_to_job_shell_setup
%_libexecdir/condor/condor_ssh_to_job_sshd_setup
%_libexecdir/condor/condor_power_state
%_libexecdir/condor/condor_kflops
%_libexecdir/condor/condor_mips
%_libexecdir/condor/data_plugin
+%_libexecdir/condor/box_plugin.py
+%_libexecdir/condor/gdrive_plugin.py
+%_libexecdir/condor/common-cloud-attributes-google.py
+%_libexecdir/condor/common-cloud-attributes-aws.py
+%_libexecdir/condor/common-cloud-attributes-aws.sh
+%_libexecdir/condor/onedrive_plugin.py
+# TODO: get rid of these
+# Not sure where these are getting built
+%if 0%{?rhel} <= 7 && ! 0%{?fedora}
+%_libexecdir/condor/box_plugin.pyc
+%_libexecdir/condor/box_plugin.pyo
+%_libexecdir/condor/gdrive_plugin.pyc
+%_libexecdir/condor/gdrive_plugin.pyo
+%_libexecdir/condor/onedrive_plugin.pyc
+%_libexecdir/condor/onedrive_plugin.pyo
+%_libexecdir/condor/adstash/__init__.pyc
+%_libexecdir/condor/adstash/__init__.pyo
+%_libexecdir/condor/adstash/ad_sources/__init__.pyc
+%_libexecdir/condor/adstash/ad_sources/__init__.pyo
+%_libexecdir/condor/adstash/ad_sources/registry.pyc
+%_libexecdir/condor/adstash/ad_sources/registry.pyo
+%_libexecdir/condor/adstash/interfaces/__init__.pyc
+%_libexecdir/condor/adstash/interfaces/__init__.pyo
+%_libexecdir/condor/adstash/interfaces/generic.pyc
+%_libexecdir/condor/adstash/interfaces/generic.pyo
+%_libexecdir/condor/adstash/interfaces/null.pyc
+%_libexecdir/condor/adstash/interfaces/null.pyo
+%_libexecdir/condor/adstash/interfaces/registry.pyc
+%_libexecdir/condor/adstash/interfaces/registry.pyo
+%_libexecdir/condor/adstash/interfaces/opensearch.pyc
+%_libexecdir/condor/adstash/interfaces/opensearch.pyo
+%endif
%_libexecdir/condor/curl_plugin
-%_libexecdir/condor/multifile_curl_plugin
%_libexecdir/condor/condor_shared_port
-%_libexecdir/condor/condor_sinful
-%_libexecdir/condor/condor_testingd
-%_libexecdir/condor/test_user_mapping
-%_libexecdir/condor/condor_glexec_wrapper
-%_libexecdir/condor/glexec_starter_setup.sh
%_libexecdir/condor/condor_defrag
%_libexecdir/condor/interactive.sub
-%_libexecdir/condor/linux_kernel_tuning
-%_libexecdir/condor/condor_dagman_metrics_reporter
-%_libexecdir/condor/condor_pid_ns_init
-%_libexecdir/condor/condor_urlfetch
-%_libexecdir/condor/test_user_mapping
+%_libexecdir/condor/condor_gangliad
+%_libexecdir/condor/ce-audit.so
+%_libexecdir/condor/adstash/__init__.py
+%_libexecdir/condor/adstash/adstash.py
+%_libexecdir/condor/adstash/config.py
+%_libexecdir/condor/adstash/convert.py
+%_libexecdir/condor/adstash/utils.py
+%_libexecdir/condor/adstash/ad_sources/__init__.py
+%_libexecdir/condor/adstash/ad_sources/ad_file.py
+%_libexecdir/condor/adstash/ad_sources/generic.py
+%_libexecdir/condor/adstash/ad_sources/registry.py
+%_libexecdir/condor/adstash/ad_sources/schedd_history.py
+%_libexecdir/condor/adstash/ad_sources/startd_history.py
+%_libexecdir/condor/adstash/interfaces/__init__.py
+%_libexecdir/condor/adstash/interfaces/elasticsearch.py
+%_libexecdir/condor/adstash/interfaces/opensearch.py
+%_libexecdir/condor/adstash/interfaces/generic.py
+%_libexecdir/condor/adstash/interfaces/json_file.py
+%_libexecdir/condor/adstash/interfaces/null.py
+%_libexecdir/condor/adstash/interfaces/registry.py
+%_libexecdir/condor/annex
%_mandir/man1/condor_advertise.1.gz
%_mandir/man1/condor_annex.1.gz
+%_mandir/man1/condor_check_password.1.gz
%_mandir/man1/condor_check_userlogs.1.gz
%_mandir/man1/condor_chirp.1.gz
-%_mandir/man1/condor_convert_history.1*
-%_mandir/man1/condor_cod.1.gz
%_mandir/man1/condor_config_val.1.gz
%_mandir/man1/condor_dagman.1.gz
%_mandir/man1/condor_fetchlog.1.gz
%_mandir/man1/condor_findhost.1.gz
+%_mandir/man1/condor_gpu_discovery.1.gz
%_mandir/man1/condor_history.1.gz
%_mandir/man1/condor_hold.1.gz
+%_mandir/man1/condor_job_router_info.1.gz
%_mandir/man1/condor_master.1.gz
%_mandir/man1/condor_off.1.gz
%_mandir/man1/condor_on.1.gz
+%_mandir/man1/condor_pool_job_report.1.gz
%_mandir/man1/condor_preen.1.gz
%_mandir/man1/condor_prio.1.gz
%_mandir/man1/condor_q.1.gz
+%_mandir/man1/condor_qsub.1.gz
%_mandir/man1/condor_qedit.1.gz
%_mandir/man1/condor_reconfig.1.gz
%_mandir/man1/condor_release.1.gz
+%_mandir/man1/condor_remote_cluster.1.gz
%_mandir/man1/condor_reschedule.1.gz
%_mandir/man1/condor_restart.1.gz
%_mandir/man1/condor_rm.1.gz
%_mandir/man1/condor_run.1.gz
%_mandir/man1/condor_set_shutdown.1.gz
+%_mandir/man1/condor_ssh_start.1.gz
+%_mandir/man1/condor_sos.1.gz
+%_mandir/man1/condor_ssl_fingerprint.1.gz
%_mandir/man1/condor_stats.1.gz
%_mandir/man1/condor_status.1.gz
%_mandir/man1/condor_store_cred.1.gz
%_mandir/man1/condor_submit.1.gz
%_mandir/man1/condor_submit_dag.1.gz
+%_mandir/man1/condor_test_token.1.gz
+%_mandir/man1/condor_token_create.1.gz
+%_mandir/man1/condor_token_fetch.1.gz
+%_mandir/man1/condor_token_list.1.gz
+%_mandir/man1/condor_token_request.1.gz
+%_mandir/man1/condor_token_request_approve.1.gz
+%_mandir/man1/condor_token_request_auto_approve.1.gz
+%_mandir/man1/condor_token_request_list.1.gz
%_mandir/man1/condor_top.1.gz
%_mandir/man1/condor_transfer_data.1.gz
+%_mandir/man1/condor_transform_ads.1.gz
+%_mandir/man1/condor_update_machine_ad.1.gz
%_mandir/man1/condor_updates_stats.1.gz
+%_mandir/man1/condor_urlfetch.1.gz
%_mandir/man1/condor_userlog.1.gz
%_mandir/man1/condor_userprio.1.gz
%_mandir/man1/condor_vacate.1.gz
@@ -704,26 +564,20 @@ rm -rf %{buildroot}/etc/examples
%_mandir/man1/condor_power.1.gz
%_mandir/man1/condor_gather_info.1.gz
%_mandir/man1/condor_router_rm.1.gz
-%_mandir/man1/condor_qsub.1.gz
%_mandir/man1/condor_drain.1.gz
-%_mandir/man1/condor_install.1.gz
%_mandir/man1/condor_ping.1.gz
%_mandir/man1/condor_rmdir.1.gz
%_mandir/man1/condor_tail.1.gz
%_mandir/man1/condor_who.1.gz
%_mandir/man1/condor_now.1.gz
-%_mandir/man1/condor_dagman_metrics_reporter.1.gz
-%_mandir/man1/condor_gpu_discovery.1.gz
-%_mandir/man1/condor_pool_job_report.1.gz
-%_mandir/man1/condor_sos.1.gz
-%_mandir/man1/condor_urlfetch.1.gz
-%_mandir/man1/condor_job_router_info.1.gz
-%_mandir/man1/condor_update_machine_ad.1.gz
-%_mandir/man1/condor_transform_ads.1.gz
+%_mandir/man1/classad_eval.1.gz
+%_mandir/man1/classads.1.gz
+%_mandir/man1/condor_adstash.1.gz
+%_mandir/man1/condor_evicted_files.1.gz
+%_mandir/man1/condor_watch_q.1.gz
+%_mandir/man1/get_htcondor.1.gz
+%_mandir/man1/htcondor.1.gz
# bin/condor is a link for checkpoint, reschedule, vacate
-%_libdir/libcondor_utils*.so
-%_libexecdir/condor/panda-plugin.so
-%_libexecdir/condor/libcollector_python3_plugin.so
%_bindir/condor_submit_dag
%_bindir/condor_who
%_bindir/condor_now
@@ -732,15 +586,17 @@ rm -rf %{buildroot}/etc/examples
%_bindir/condor_check_userlogs
%_bindir/condor_q
%_libexecdir/condor/condor_transferer
-%_bindir/condor_cod
%_bindir/condor_docker_enter
%_bindir/condor_qedit
+%_bindir/condor_qusers
%_bindir/condor_userlog
%_bindir/condor_release
%_bindir/condor_userlog_job_counter
%_bindir/condor_config_val
%_bindir/condor_reschedule
%_bindir/condor_userprio
+%_bindir/condor_check_password
+%_bindir/condor_check_config
%_bindir/condor_dagman
%_bindir/condor_rm
%_bindir/condor_vacate
@@ -751,7 +607,6 @@ rm -rf %{buildroot}/etc/examples
%_bindir/condor_vacate_job
%_bindir/condor_findhost
%_bindir/condor_stats
-%_bindir/condor_transform_ads
%_bindir/condor_version
%_bindir/condor_history
%_bindir/condor_status
@@ -762,24 +617,44 @@ rm -rf %{buildroot}/etc/examples
%_bindir/condor_power
%_bindir/condor_gather_info
%_bindir/condor_continue
+%_bindir/condor_ssl_fingerprint
%_bindir/condor_suspend
%_bindir/condor_test_match
+%_bindir/condor_token_create
+%_bindir/condor_token_fetch
+%_bindir/condor_token_request
+%_bindir/condor_token_request_approve
+%_bindir/condor_token_request_auto_approve
+%_bindir/condor_token_request_list
+%_bindir/condor_token_list
+%_bindir/condor_scitoken_exchange
%_bindir/condor_drain
%_bindir/condor_ping
-%_bindir/condor_qsub
%_bindir/condor_tail
+%_bindir/condor_qsub
%_bindir/condor_pool_job_report
%_bindir/condor_job_router_info
+%_bindir/condor_transform_ads
%_bindir/condor_update_machine_ad
%_bindir/condor_annex
%_bindir/condor_nsenter
+%_bindir/condor_evicted_files
+%_bindir/condor_adstash
+%_bindir/condor_remote_cluster
+%_bindir/bosco_cluster
+%_bindir/condor_ssh_start
+%_bindir/condor_test_token
+# sbin/condor is a link for master_off, off, on, reconfig,
+# reconfig_schedd, restart
%_sbindir/condor_advertise
%_sbindir/condor_aklog
+%_sbindir/condor_credmon_krb
%_sbindir/condor_c-gahp
%_sbindir/condor_c-gahp_worker_thread
%_sbindir/condor_collector
-%_sbindir/condor_convert_history
+%_sbindir/condor_credd
%_sbindir/condor_fetchlog
+%_sbindir/condor_ft-gahp
%_sbindir/condor_had
%_sbindir/condor_master
%_sbindir/condor_negotiator
@@ -792,38 +667,52 @@ rm -rf %{buildroot}/etc/examples
%_sbindir/condor_schedd
%_sbindir/condor_set_shutdown
%_sbindir/condor_shadow
+%_sbindir/condor_sos
%_sbindir/condor_startd
%_sbindir/condor_starter
%_sbindir/condor_store_cred
-%_sbindir/condor_transferd
+%_sbindir/condor_testwritelog
%_sbindir/condor_updates_stats
%_sbindir/ec2_gahp
%_sbindir/condor_gridmanager
-%_sbindir/condor_gridshell
-%_sbindir/gahp_server
-%_sbindir/grid_monitor
-%_sbindir/grid_monitor.sh
%_sbindir/remote_gahp
-%_sbindir/nordugrid_gahp
+%_sbindir/rvgahp_client
+%_sbindir/rvgahp_proxy
+%_sbindir/rvgahp_server
%_sbindir/AzureGAHPServer
-%_sbindir/condor_sos
-%_sbindir/condor_testwritelog
%_sbindir/gce_gahp
-#%%_bindir/condor_ping
+%_sbindir/arc_gahp
%_libexecdir/condor/condor_gpu_discovery
%_libexecdir/condor/condor_gpu_utilization
+%config(noreplace) %_sysconfdir/condor/ganglia.d/00_default_metrics
%defattr(-,condor,condor,-)
%dir %_var/lib/condor/
%dir %_var/lib/condor/execute/
-%dir %_var/log/condor/
%dir %_var/lib/condor/spool/
-%ghost %dir %_var/lock/condor/
-%dir %_var/run/condor/
-%_libexecdir/condor/accountant_log_fixer
-%_datadir/condor/libcondorapi.so
-
-#################
-%files procd
+%dir %_var/log/condor/
+%defattr(-,root,condor,-)
+%dir %_var/lib/condor/oauth_credentials
+%defattr(-,root,root,-)
+%dir %_var/lib/condor/krb_credentials
+
+###### blahp files #######
+%config %_sysconfdir/blah.config
+%config %_sysconfdir/blparser.conf
+%dir %_sysconfdir/blahp/
+%config %_sysconfdir/blahp/condor_local_submit_attributes.sh
+%config %_sysconfdir/blahp/kubernetes_local_submit_attributes.sh
+%config %_sysconfdir/blahp/lsf_local_submit_attributes.sh
+%config %_sysconfdir/blahp/nqs_local_submit_attributes.sh
+%config %_sysconfdir/blahp/pbs_local_submit_attributes.sh
+%config %_sysconfdir/blahp/sge_local_submit_attributes.sh
+%config %_sysconfdir/blahp/slurm_local_submit_attributes.sh
+%_bindir/blahpd
+%_sbindir/blah_check_config
+%_sbindir/blahpd_daemon
+%dir %_libexecdir/blahp
+%_libexecdir/blahp/*
+
+####### procd files #######
%_sbindir/condor_procd
%_sbindir/gidd_alloc
%_sbindir/procd_ctl
@@ -831,135 +720,24 @@ rm -rf %{buildroot}/etc/examples
%_mandir/man1/gidd_alloc.1.gz
%_mandir/man1/condor_procd.1.gz
-#################
-%if 0%{?with_aviary}
-%files aviary-common
-%doc LICENSE-2.0.txt NOTICE.txt
-%dir %_datadir/condor/aviary
-%_datadir/condor/aviary/jobcontrol.py*
-%_datadir/condor/aviary/jobquery.py*
-%_datadir/condor/aviary/submissions.py*
-%_datadir/condor/aviary/submission_ids.py*
-%_datadir/condor/aviary/subinventory.py*
-%_datadir/condor/aviary/submit.py*
-%_datadir/condor/aviary/setattr.py*
-%_datadir/condor/aviary/jobinventory.py*
-%_datadir/condor/aviary/locator.py*
-%_datadir/condor/aviary/collector_tool.py*
-%dir %_datadir/condor/aviary/dag
-%_datadir/condor/aviary/dag/diamond.dag
-%_datadir/condor/aviary/dag/dag-submit.py*
-%_datadir/condor/aviary/dag/job.sub
-%dir %_datadir/condor/aviary/module
-%_datadir/condor/aviary/module/aviary/util.py*
-%_datadir/condor/aviary/module/aviary/https.py*
-%_datadir/condor/aviary/module/aviary/__init__.py*
-%_datadir/condor/aviary/README
-%dir %_var/lib/condor/aviary
-%_var/lib/condor/aviary/axis2.xml
-%dir %_var/lib/condor/aviary/services
-%dir %_var/lib/condor/aviary/services/job
-%_var/lib/condor/aviary/services/job/services.xml
-%_var/lib/condor/aviary/services/job/aviary-common.xsd
-%_var/lib/condor/aviary/services/job/aviary-job.xsd
-%_var/lib/condor/aviary/services/job/aviary-job.wsdl
-%dir %_var/lib/condor/aviary/services/query
-%_var/lib/condor/aviary/services/query/services.xml
-%_var/lib/condor/aviary/services/query/aviary-common.xsd
-%_var/lib/condor/aviary/services/query/aviary-query.xsd
-%_var/lib/condor/aviary/services/query/aviary-query.wsdl
-%dir %_var/lib/condor/aviary/services/locator
-%_var/lib/condor/aviary/services/locator/services.xml
-%_var/lib/condor/aviary/services/locator/aviary-common.xsd
-%_var/lib/condor/aviary/services/locator/aviary-locator.xsd
-%_var/lib/condor/aviary/services/locator/aviary-locator.wsdl
-%dir %_var/lib/condor/aviary/services/collector
-%_var/lib/condor/aviary/services/collector/services.xml
-%_var/lib/condor/aviary/services/collector/aviary-common.xsd
-%_var/lib/condor/aviary/services/collector/aviary-collector.xsd
-%_var/lib/condor/aviary/services/collector/aviary-collector.wsdl
-
-#################
-%files aviary
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sysconfdir/condor/config.d/61aviary.config
-%_libdir/libaviary_axis_provider.so
-%_libdir/libaviary_wso2_common.so
-%dir %_libdir/condor/plugins
-%_libdir/condor/plugins/AviaryScheddPlugin-plugin.so
-%_libdir/condor/plugins/AviaryLocatorPlugin-plugin.so
-%_libdir/condor/plugins/AviaryCollectorPlugin-plugin.so
-%_sbindir/aviary_query_server
-%_var/lib/condor/aviary/services/job/libaviary_job_axis.so
-%_var/lib/condor/aviary/services/query/libaviary_query_axis.so
-%_var/lib/condor/aviary/services/locator/libaviary_locator_axis.so
-%_var/lib/condor/aviary/services/collector/libaviary_collector_axis.so
-
-#################
-%files aviary-hadoop-common
-%doc LICENSE-2.0.txt NOTICE.txt
-%_var/lib/condor/aviary/services/hadoop/services.xml
-%_var/lib/condor/aviary/services/hadoop/aviary-common.xsd
-%_var/lib/condor/aviary/services/hadoop/aviary-hadoop.xsd
-%_var/lib/condor/aviary/services/hadoop/aviary-hadoop.wsdl
-%_datadir/condor/aviary/hadoop_tool.py*
-
-#################
-%files aviary-hadoop
-%doc LICENSE-2.0.txt NOTICE.txt
-%_var/lib/condor/aviary/services/hadoop/libaviary_hadoop_axis.so
-%_libdir/condor/plugins/AviaryHadoopPlugin-plugin.so
-%_sysconfdir/condor/config.d/63aviary-hadoop.config
-%_datadir/condor/aviary/hdfs_datanode.sh
-%_datadir/condor/aviary/hdfs_namenode.sh
-%_datadir/condor/aviary/mapred_jobtracker.sh
-%_datadir/condor/aviary/mapred_tasktracker.sh
-%endif
-
-#################
-%if 0%{?with_mongodb}
-%files plumage
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sysconfdir/condor/config.d/62plumage.config
-%dir %_libdir/condor/plugins
-%_libdir/condor/plugins/PlumageCollectorPlugin-plugin.so
-%dir %_datadir/condor/plumage
-%_sbindir/plumage_job_etl_server
-%_bindir/plumage_history_load
-%_bindir/plumage_stats
-%_bindir/plumage_history
-%_datadir/condor/plumage/README
-%_datadir/condor/plumage/SCHEMA
-%_datadir/condor/plumage/plumage_accounting
-%_datadir/condor/plumage/plumage_scheduler
-%_datadir/condor/plumage/plumage_utilization
-%defattr(-,condor,condor,-)
-%endif
-
-#################
-%files kbdd
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/condor_kbdd
-
-#################
-%files vm-gahp
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/condor_vm-gahp
-%_libexecdir/condor/libvirt_simple_script.awk
-
-#################
-%files openstack-gahp
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/openstack_gahp
-
-#################
-%files classads
-%doc LICENSE-2.0.txt NOTICE.txt
+####### classads files #######
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
%_libdir/libclassad.so.*
#################
-%files classads-devel
-%doc LICENSE-2.0.txt NOTICE.txt
+%files devel
+%{_includedir}/condor/chirp_client.h
+%{_includedir}/condor/condor_event.h
+%{_includedir}/condor/file_lock.h
+%{_includedir}/condor/read_user_log.h
+%{_libdir}/condor/libchirp_client.a
+%{_libdir}/condor/libcondorapi.a
+%{_libdir}/libclassad.a
+
+####### classads-devel files #######
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
%_bindir/classad_functional_tester
%_bindir/classad_version
%_libdir/libclassad.so
@@ -971,7 +749,7 @@ rm -rf %{buildroot}/etc/examples
%_includedir/classad/classad.h
%_includedir/classad/classadItor.h
%_includedir/classad/classadCache.h
-%_includedir/classad/classad_stl.h
+%_includedir/classad/classad_containers.h
%_includedir/classad/collectionBase.h
%_includedir/classad/collection.h
%_includedir/classad/common.h
@@ -979,14 +757,14 @@ rm -rf %{buildroot}/etc/examples
%_includedir/classad/exprList.h
%_includedir/classad/exprTree.h
%_includedir/classad/fnCall.h
+%_includedir/classad/indexfile.h
%_includedir/classad/jsonSink.h
%_includedir/classad/jsonSource.h
-%_includedir/classad/indexfile.h
%_includedir/classad/lexer.h
%_includedir/classad/lexerSource.h
%_includedir/classad/literals.h
-
%_includedir/classad/matchClassad.h
+%_includedir/classad/natural_cmp.h
%_includedir/classad/operators.h
%_includedir/classad/query.h
%_includedir/classad/sink.h
@@ -1000,55 +778,71 @@ rm -rf %{buildroot}/etc/examples
%_includedir/classad/xmlSource.h
#################
-%if 0%{?with_cream_gahp}
-%files cream-gahp
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/cream_gahp
+%files kbdd
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
+%_sbindir/condor_kbdd
+
+#################
+%if ! 0%{?amzn}
+%files vm-gahp
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
+%_sbindir/condor_vm-gahp
+%_libexecdir/condor/libvirt_simple_script.awk
%endif
+#################
+%files test
+%defattr(-,root,root,-)
+%_libexecdir/condor/condor_sinful
+%_libexecdir/condor/condor_testingd
+%_libexecdir/condor/test_user_mapping
+%_bindir/condor_manifest
+
#################
%files -n python3-condor
+%defattr(-,root,root,-)
%_bindir/condor_top
-%{python3_sitearch}/classad.so
-%{python3_sitearch}/htcondor.so
-%{_libdir}/libpy3classad%{python3_version}_%{srcver}.so
+%_bindir/classad_eval
+%_bindir/condor_watch_q
+%_bindir/htcondor
+%_libdir/libpyclassad3*.so
+%_libexecdir/condor/libclassad_python_user.cpython-3*.so
+%_libexecdir/condor/libclassad_python3_user.so
+/usr/lib64/python%{python3_version}/site-packages/classad/
+/usr/lib64/python%{python3_version}/site-packages/htcondor/
+/usr/lib64/python%{python3_version}/site-packages/htcondor-*.egg-info/
+/usr/lib64/python%{python3_version}/site-packages/htcondor_cli/
+
+%files credmon-oauth
+%doc /usr/share/doc/condor/examples/condor_credmon_oauth
+%_sbindir/condor_credmon_oauth
+%_sbindir/scitokens_credential_producer
+%_var/www/wsgi-scripts/condor_credmon_oauth
+%_libexecdir/condor/credmon
+%_var/lib/condor/oauth_credentials/README.credentials
+%config(noreplace) %_sysconfdir/condor/config.d/40-oauth-credmon.conf
+%config(noreplace) %_sysconfdir/condor/config.d/40-oauth-tokens.conf
+%ghost %_var/lib/condor/oauth_credentials/wsgi_session_key
+%ghost %_var/lib/condor/oauth_credentials/CREDMON_COMPLETE
+%ghost %_var/lib/condor/oauth_credentials/pid
+
+%files credmon-vault
+%doc /usr/share/doc/condor/examples/condor_credmon_oauth
+%_sbindir/condor_credmon_vault
+%_bindir/condor_vault_storer
+%_libexecdir/condor/credmon
+%config(noreplace) %_sysconfdir/condor/config.d/40-vault-credmon.conf
+%ghost %_var/lib/condor/oauth_credentials/CREDMON_COMPLETE
+%ghost %_var/lib/condor/oauth_credentials/pid
-#################
%files -n minicondor
%config(noreplace) %_sysconfdir/condor/config.d/00-minicondor
-#################
-%if 0%{?with_bosco}
-%files bosco
-%config(noreplace) %_sysconfdir/condor/campus_factory.conf
-%config(noreplace) %_sysconfdir/condor/config.d/60-campus_factory.config
-%_libexecdir/condor/shellselector
-%_libexecdir/condor/campus_factory
-%_sbindir/bosco_install
-%_sbindir/campus_factory
-%_sbindir/condor_ft-gahp
-%_sbindir/runfactory
-%_bindir/bosco_cluster
-%_bindir/bosco_ssh_start
-%_bindir/bosco_start
-%_bindir/bosco_stop
-%_bindir/bosco_findplatform
-%_bindir/bosco_uninstall
-%_bindir/bosco_quickstart
-%_bindir/htsub
-%_sbindir/glidein_creation
-%_datadir/condor/campus_factory
-%_mandir/man1/bosco_cluster.1.gz
-%_mandir/man1/bosco_findplatform.1.gz
-%_mandir/man1/bosco_install.1.gz
-%_mandir/man1/bosco_ssh_start.1.gz
-%_mandir/man1/bosco_start.1.gz
-%_mandir/man1/bosco_stop.1.gz
-%_mandir/man1/bosco_uninstall.1.gz
-%endif
-
#################
%post
+/sbin/ldconfig
%systemd_post %{name}.service
%preun
@@ -1059,6 +853,11 @@ rm -rf %{buildroot}/etc/examples
/sbin/ldconfig
%changelog
+* Sat Sep 30 2023 Tim Theisen <ttheisen(a)fedoraproject.org> - 23.0.0-1
+- Update to latest upstream 23.0.0 - rhbz#1959462
+- Fix build issues - rhbz#2114520, rhbz#2172630, rhbz#2172684
+- Update to PCRE2 - rhbz#2128284
+
* Thu Jan 19 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 8.8.15-10
- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
diff --git a/condor_oom_v2.patch b/condor_oom_v2.patch
deleted file mode 100644
index 0521be6..0000000
--- a/condor_oom_v2.patch
+++ /dev/null
@@ -1,340 +0,0 @@
-diff --git a/build/cmake/CondorConfigure.cmake b/build/cmake/CondorConfigure.cmake
-index e61fb4f..1094cb3 100644
---- a/build/cmake/CondorConfigure.cmake
-+++ b/build/cmake/CondorConfigure.cmake
-@@ -164,6 +164,7 @@ if( NOT WINDOWS)
- check_function_exists("setlinebuf" HAVE_SETLINEBUF)
- check_function_exists("snprintf" HAVE_SNPRINTF)
- check_function_exists("snprintf" HAVE_WORKING_SNPRINTF)
-+ check_function_exists("eventfd" HAVE_EVENTFD)
-
- check_function_exists("stat64" HAVE_STAT64)
- check_function_exists("_stati64" HAVE__STATI64)
-diff --git a/src/condor_includes/config.h.cmake b/src/condor_includes/config.h.cmake
-index b083945..3bd92b0 100644
---- a/src/condor_includes/config.h.cmake
-+++ b/src/condor_includes/config.h.cmake
-@@ -438,6 +438,9 @@
- /* Define to 1 if you have the 'snprintf' function. (USED)*/
- #cmakedefine HAVE_SNPRINTF 1
-
-+/* Define to 1 if you have the 'eventfd' function. (USED)*/
-+#cmakedefine HAVE_EVENTFD 1
-+
- /* Define to 1 if you have the 'stat64' function. (USED)*/
- #cmakedefine HAVE_STAT64 1
-
-diff --git a/src/condor_starter.V6.1/vanilla_proc.cpp b/src/condor_starter.V6.1/vanilla_proc.cpp
-index 2e5538f..fe02dd3 100644
---- a/src/condor_starter.V6.1/vanilla_proc.cpp
-+++ b/src/condor_starter.V6.1/vanilla_proc.cpp
-@@ -42,9 +42,16 @@
- extern dynuser* myDynuser;
- #endif
-
-+#if defined(HAVE_EVENTFD)
-+#include <sys/eventfd.h>
-+#endif
-+
- extern CStarter *Starter;
-
--VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd)
-+VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd),
-+ m_memory_limit(-1),
-+ m_oom_fd(-1),
-+ m_oom_efd(-1)
- {
- #if !defined(WIN32)
- m_escalation_tid = -1;
-@@ -215,6 +222,12 @@ VanillaProc::StartJob()
- }
- fi.group_ptr = &tracking_gid;
- }
-+
-+ // Increase the OOM score of this process; the child will inherit it.
-+ // This way, the job will be heavily preferred to be killed over a normal process.
-+ // OOM score is currently exponential - a score of 4 is a factor-16 increase in
-+ // the OOM score.
-+ setupOOMScore(4);
- #endif
-
- #if defined(HAVE_EXT_LIBCGROUP)
-@@ -406,6 +419,7 @@ VanillaProc::StartJob()
- int MemMb;
- if (MachineAd->LookupInteger(ATTR_MEMORY, MemMb)) {
- uint64_t MemMb_big = MemMb;
-+ m_memory_limit = MemMb_big;
- climits.set_memory_limit_bytes(1024*1024*MemMb_big, mem_is_soft);
- } else {
- dprintf(D_ALWAYS, "Not setting memory soft limit in cgroup because "
-@@ -425,6 +439,14 @@ VanillaProc::StartJob()
- } else {
- dprintf(D_FULLDEBUG, "Invalid value of SlotWeight in machine ClassAd; ignoring.\n");
- }
-+ setupOOMEvent(cgroup);
-+ }
-+
-+ // Now that the job is started, decrease the likelihood that the starter
-+ // is killed instead of the job itself.
-+ if (retval)
-+ {
-+ setupOOMScore(-4);
- }
-
- #endif
-@@ -611,5 +633,224 @@ VanillaProc::finishShutdownFast()
- // -gquinn, 2007-11-14
- daemonCore->Kill_Family(JobPid);
-
-+ if (m_oom_efd >= 0) {
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe in shutdown %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ m_oom_efd = -1;
-+ }
-+ if (m_oom_fd >= 0) {
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ }
-+
- return false; // shutdown is pending, so return false
- }
-+
-+/*
-+ * This will be called when the event fd fires, indicating an OOM event.
-+ */
-+int
-+VanillaProc::outOfMemoryEvent(int /* fd */)
-+{
-+ std::stringstream ss;
-+ if (m_memory_limit >= 0) {
-+ ss << "Job has gone over memory limit of " << m_memory_limit << " megabytes.";
-+ } else {
-+ ss << "Job has encountered an out-of-memory event.";
-+ }
-+ Starter->jic->holdJob(ss.str().c_str(), CONDOR_HOLD_CODE_JobOutOfResources, 0);
-+
-+ // this will actually clean up the job
-+ if ( Starter->Hold( ) ) {
-+ dprintf( D_FULLDEBUG, "All jobs were removed due to OOM event.\n" );
-+ Starter->allJobsDone();
-+ }
-+
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ close(m_oom_fd);
-+ m_oom_efd = -1;
-+ m_oom_fd = -1;
-+
-+ Starter->ShutdownFast();
-+
-+ return 0;
-+}
-+
-+int
-+VanillaProc::setupOOMScore(int new_score)
-+{
-+#if !defined(LINUX)
-+ if (new_score) // Done to suppress compiler warnings.
-+ return 0;
-+ return 0;
-+#endif
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ // oom_adj is deprecated on modern kernels and causes a deprecation warning when used.
-+ int oom_score_fd = open("/proc/self/oom_score_adj", O_WRONLY | O_CLOEXEC);
-+ if (oom_score_fd == -1) {
-+ if (errno != ENOENT) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_score_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ } else {
-+ int oom_score_fd = open("/proc/self/oom_adj", O_WRONLY | O_CLOEXEC);
-+ if (oom_score_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ }
-+ } else {
-+ // oom_score_adj is linear; oom_adj was exponential.
-+ if (new_score > 0)
-+ new_score = 1 << new_score;
-+ else
-+ new_score = -(1 << -new_score);
-+ }
-+
-+ std::stringstream ss;
-+ ss << new_score;
-+ std::string new_score_str = ss.str();
-+ ssize_t nwritten = full_write(oom_score_fd, new_score_str.c_str(), new_score_str.length());
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into oom_adj file for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ close(oom_score_fd);
-+ return 1;
-+ }
-+ close(oom_score_fd);
-+ return 0;
-+}
-+
-+int
-+VanillaProc::setupOOMEvent(const std::string &cgroup_string)
-+{
-+#if !(defined(HAVE_EVENTFD) && defined(HAVE_EXT_LIBCGROUP))
-+ return 0;
-+#endif
-+ // Initialize the event descriptor
-+ m_oom_efd = eventfd(0, EFD_CLOEXEC);
-+ if (m_oom_efd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to create new event FD for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Find the memcg location on disk
-+ void * handle = NULL;
-+ struct cgroup_mount_point mount_info;
-+ int ret = cgroup_get_controller_begin(&handle, &mount_info);
-+ std::stringstream oom_control;
-+ std::stringstream event_control;
-+ bool found_memcg = false;
-+ while (ret == 0) {
-+ if (strcmp(mount_info.name, MEMORY_CONTROLLER_STR) == 0) {
-+ found_memcg = true;
-+ oom_control << mount_info.path << "/";
-+ event_control << mount_info.path << "/";
-+ break;
-+ }
-+ cgroup_get_controller_next(&handle, &mount_info);
-+ }
-+ if (!found_memcg && (ret != ECGEOF)) {
-+ dprintf(D_ALWAYS,
-+ "Error while locating memcg controller for starter: %u %s\n",
-+ ret, cgroup_strerror(ret));
-+ return 1;
-+ }
-+ cgroup_get_controller_end(&handle);
-+ if (found_memcg == false) {
-+ dprintf(D_ALWAYS,
-+ "Memcg is not available; OOM notification disabled for starter.\n");
-+ return 1;
-+ }
-+
-+ // Finish constructing the location of the control files
-+ oom_control << cgroup_string << "/memory.oom_control";
-+ std::string oom_control_str = oom_control.str();
-+ event_control << cgroup_string << "/cgroup.event_control";
-+ std::string event_control_str = event_control.str();
-+
-+ // Open the oom_control and event control files
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ m_oom_fd = open(oom_control_str.c_str(), O_RDONLY | O_CLOEXEC);
-+ if (m_oom_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ int event_ctrl_fd = open(event_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (event_ctrl_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open event control for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Inform Linux we will be handling the OOM events for this container.
-+ int oom_fd2 = open(oom_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (oom_fd2 == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for writing for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ const char limits [] = "1";
-+ ssize_t nwritten = full_write(oom_fd2, &limits, 1);
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to set OOM control to %s for starter: %u %s\n",
-+ limits, errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ close(oom_fd2);
-+ return 1;
-+ }
-+ close(oom_fd2);
-+
-+ // Create the subscription string:
-+ std::stringstream sub_ss;
-+ sub_ss << m_oom_efd << " " << m_oom_fd;
-+ std::string sub_str = sub_ss.str();
-+
-+ if ((nwritten = full_write(event_ctrl_fd, sub_str.c_str(), sub_str.size())) < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into event control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ return 1;
-+ }
-+ close(event_ctrl_fd);
-+
-+ // Fool DC into talking to the eventfd
-+ int pipes[2]; pipes[0] = -1; pipes[1] = -1;
-+ int fd_to_replace = -1;
-+ if (daemonCore->Create_Pipe(pipes, true) == -1 || pipes[0] == -1) {
-+ dprintf(D_ALWAYS, "Unable to create a DC pipe\n");
-+ close(m_oom_efd);
-+ m_oom_efd = -1;
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ return 1;
-+ }
-+ if ( daemonCore->Get_Pipe_FD(pipes[0], &fd_to_replace) == -1 || fd_to_replace == -1) {
-+ dprintf(D_ALWAYS, "Unable to lookup pipe's FD\n");
-+ close(m_oom_efd); m_oom_efd = -1;
-+ close(m_oom_fd); m_oom_fd = -1;
-+ daemonCore->Close_Pipe(pipes[0]);
-+ daemonCore->Close_Pipe(pipes[1]);
-+ }
-+ dup3(m_oom_efd, fd_to_replace, O_CLOEXEC);
-+ close(m_oom_efd);
-+ m_oom_efd = pipes[0];
-+
-+ // Inform DC we want to recieve notifications from this FD.
-+ daemonCore->Register_Pipe(pipes[0],"OOM event fd", static_cast<PipeHandlercpp>(&VanillaProc::outOfMemoryEvent),"OOM Event Handler",this,HANDLE_READ);
-+ return 0;
-+}
-+
-diff --git a/src/condor_starter.V6.1/vanilla_proc.h b/src/condor_starter.V6.1/vanilla_proc.h
-index d524cf5..90b4741 100644
---- a/src/condor_starter.V6.1/vanilla_proc.h
-+++ b/src/condor_starter.V6.1/vanilla_proc.h
-@@ -74,6 +74,15 @@ private:
- #if !defined(WIN32)
- int m_escalation_tid;
- #endif
-+
-+ // Configure OOM killer for this job
-+ int m_memory_limit; // Memory limit, in MB.
-+ int m_oom_fd; // The file descriptor which recieves events
-+ int m_oom_efd; // The event FD to watch
-+ int setupOOMScore(int new_score);
-+ int outOfMemoryEvent(int fd);
-+ int setupOOMEvent(const std::string & cgroup_string);
-+
- };
-
- #endif
-diff --git a/src/condor_utils/condor_holdcodes.h b/src/condor_utils/condor_holdcodes.h
-index d788d6e..3083db3 100644
---- a/src/condor_utils/condor_holdcodes.h
-+++ b/src/condor_utils/condor_holdcodes.h
-@@ -128,4 +128,6 @@ const int CONDOR_HOLD_CODE_GlexecChownSandboxToCondor = 30;
-
- const int CONDOR_HOLD_CODE_PrivsepChownSandboxToCondor = 31;
-
-+const int CONDOR_HOLD_CODE_JobOutOfResources = 32;
-+
- #endif
diff --git a/condor_oom_v3.patch b/condor_oom_v3.patch
deleted file mode 100644
index 5f6db89..0000000
--- a/condor_oom_v3.patch
+++ /dev/null
@@ -1,342 +0,0 @@
-diff --git a/build/cmake/CondorConfigure.cmake b/build/cmake/CondorConfigure.cmake
-index e61fb4f..1094cb3 100644
---- a/build/cmake/CondorConfigure.cmake
-+++ b/build/cmake/CondorConfigure.cmake
-@@ -164,6 +164,7 @@ if( NOT WINDOWS)
- check_function_exists("setlinebuf" HAVE_SETLINEBUF)
- check_function_exists("snprintf" HAVE_SNPRINTF)
- check_function_exists("snprintf" HAVE_WORKING_SNPRINTF)
-+ check_function_exists("eventfd" HAVE_EVENTFD)
-
- check_function_exists("stat64" HAVE_STAT64)
- check_function_exists("_stati64" HAVE__STATI64)
-diff --git a/src/condor_includes/config.h.cmake b/src/condor_includes/config.h.cmake
-index b083945..3bd92b0 100644
---- a/src/condor_includes/config.h.cmake
-+++ b/src/condor_includes/config.h.cmake
-@@ -438,6 +438,9 @@
- /* Define to 1 if you have the 'snprintf' function. (USED)*/
- #cmakedefine HAVE_SNPRINTF 1
-
-+/* Define to 1 if you have the 'eventfd' function. (USED)*/
-+#cmakedefine HAVE_EVENTFD 1
-+
- /* Define to 1 if you have the 'stat64' function. (USED)*/
- #cmakedefine HAVE_STAT64 1
-
-diff --git a/src/condor_starter.V6.1/vanilla_proc.cpp b/src/condor_starter.V6.1/vanilla_proc.cpp
-index 2e5538f..0246e5e 100644
---- a/src/condor_starter.V6.1/vanilla_proc.cpp
-+++ b/src/condor_starter.V6.1/vanilla_proc.cpp
-@@ -42,9 +42,16 @@
- extern dynuser* myDynuser;
- #endif
-
-+#if defined(HAVE_EVENTFD)
-+#include <sys/eventfd.h>
-+#endif
-+
- extern CStarter *Starter;
-
--VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd)
-+VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd),
-+ m_memory_limit(-1),
-+ m_oom_fd(-1),
-+ m_oom_efd(-1)
- {
- #if !defined(WIN32)
- m_escalation_tid = -1;
-@@ -215,6 +222,12 @@ VanillaProc::StartJob()
- }
- fi.group_ptr = &tracking_gid;
- }
-+
-+ // Increase the OOM score of this process; the child will inherit it.
-+ // This way, the job will be heavily preferred to be killed over a normal process.
-+ // OOM score is currently exponential - a score of 4 is a factor-16 increase in
-+ // the OOM score.
-+ setupOOMScore(4);
- #endif
-
- #if defined(HAVE_EXT_LIBCGROUP)
-@@ -406,6 +419,7 @@ VanillaProc::StartJob()
- int MemMb;
- if (MachineAd->LookupInteger(ATTR_MEMORY, MemMb)) {
- uint64_t MemMb_big = MemMb;
-+ m_memory_limit = MemMb_big;
- climits.set_memory_limit_bytes(1024*1024*MemMb_big, mem_is_soft);
- } else {
- dprintf(D_ALWAYS, "Not setting memory soft limit in cgroup because "
-@@ -425,6 +439,14 @@ VanillaProc::StartJob()
- } else {
- dprintf(D_FULLDEBUG, "Invalid value of SlotWeight in machine ClassAd; ignoring.\n");
- }
-+ setupOOMEvent(cgroup);
-+ }
-+
-+ // Now that the job is started, decrease the likelihood that the starter
-+ // is killed instead of the job itself.
-+ if (retval)
-+ {
-+ setupOOMScore(-4);
- }
-
- #endif
-@@ -611,5 +633,226 @@ VanillaProc::finishShutdownFast()
- // -gquinn, 2007-11-14
- daemonCore->Kill_Family(JobPid);
-
-+ if (m_oom_efd >= 0) {
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe in shutdown %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ m_oom_efd = -1;
-+ }
-+ if (m_oom_fd >= 0) {
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ }
-+
- return false; // shutdown is pending, so return false
- }
-+
-+/*
-+ * This will be called when the event fd fires, indicating an OOM event.
-+ */
-+int
-+VanillaProc::outOfMemoryEvent(int /* fd */)
-+{
-+ std::stringstream ss;
-+ if (m_memory_limit >= 0) {
-+ ss << "Job has gone over memory limit of " << m_memory_limit << " megabytes.";
-+ } else {
-+ ss << "Job has encountered an out-of-memory event.";
-+ }
-+ Starter->jic->holdJob(ss.str().c_str(), CONDOR_HOLD_CODE_JobOutOfResources, 0);
-+
-+ // this will actually clean up the job
-+ if ( Starter->Hold( ) ) {
-+ dprintf( D_FULLDEBUG, "All jobs were removed due to OOM event.\n" );
-+ Starter->allJobsDone();
-+ }
-+
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ close(m_oom_fd);
-+ m_oom_efd = -1;
-+ m_oom_fd = -1;
-+
-+ Starter->ShutdownFast();
-+
-+ return 0;
-+}
-+
-+int
-+VanillaProc::setupOOMScore(int new_score)
-+{
-+#if !defined(LINUX)
-+ if (new_score) // Done to suppress compiler warnings.
-+ return 0;
-+ return 0;
-+#else
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ // oom_adj is deprecated on modern kernels and causes a deprecation warning when used.
-+ int oom_score_fd = open("/proc/self/oom_score_adj", O_WRONLY);
-+ if (oom_score_fd == -1) {
-+ if (errno != ENOENT) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_score_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ } else {
-+ int oom_score_fd = open("/proc/self/oom_adj", O_WRONLY);
-+ if (oom_score_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ }
-+ } else {
-+ // oom_score_adj is linear; oom_adj was exponential.
-+ if (new_score > 0)
-+ new_score = 1 << new_score;
-+ else
-+ new_score = -(1 << -new_score);
-+ }
-+
-+ std::stringstream ss;
-+ ss << new_score;
-+ std::string new_score_str = ss.str();
-+ ssize_t nwritten = full_write(oom_score_fd, new_score_str.c_str(), new_score_str.length());
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into oom_adj file for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ close(oom_score_fd);
-+ return 1;
-+ }
-+ close(oom_score_fd);
-+ return 0;
-+#endif
-+}
-+
-+int
-+VanillaProc::setupOOMEvent(const std::string &cgroup_string)
-+{
-+#if !(defined(HAVE_EVENTFD) && defined(HAVE_EXT_LIBCGROUP))
-+ return 0;
-+#else
-+ // Initialize the event descriptor
-+ m_oom_efd = eventfd(0, EFD_CLOEXEC);
-+ if (m_oom_efd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to create new event FD for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Find the memcg location on disk
-+ void * handle = NULL;
-+ struct cgroup_mount_point mount_info;
-+ int ret = cgroup_get_controller_begin(&handle, &mount_info);
-+ std::stringstream oom_control;
-+ std::stringstream event_control;
-+ bool found_memcg = false;
-+ while (ret == 0) {
-+ if (strcmp(mount_info.name, MEMORY_CONTROLLER_STR) == 0) {
-+ found_memcg = true;
-+ oom_control << mount_info.path << "/";
-+ event_control << mount_info.path << "/";
-+ break;
-+ }
-+ cgroup_get_controller_next(&handle, &mount_info);
-+ }
-+ if (!found_memcg && (ret != ECGEOF)) {
-+ dprintf(D_ALWAYS,
-+ "Error while locating memcg controller for starter: %u %s\n",
-+ ret, cgroup_strerror(ret));
-+ return 1;
-+ }
-+ cgroup_get_controller_end(&handle);
-+ if (found_memcg == false) {
-+ dprintf(D_ALWAYS,
-+ "Memcg is not available; OOM notification disabled for starter.\n");
-+ return 1;
-+ }
-+
-+ // Finish constructing the location of the control files
-+ oom_control << cgroup_string << "/memory.oom_control";
-+ std::string oom_control_str = oom_control.str();
-+ event_control << cgroup_string << "/cgroup.event_control";
-+ std::string event_control_str = event_control.str();
-+
-+ // Open the oom_control and event control files
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ m_oom_fd = open(oom_control_str.c_str(), O_RDONLY | O_CLOEXEC);
-+ if (m_oom_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ int event_ctrl_fd = open(event_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (event_ctrl_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open event control for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Inform Linux we will be handling the OOM events for this container.
-+ int oom_fd2 = open(oom_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (oom_fd2 == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for writing for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ const char limits [] = "1";
-+ ssize_t nwritten = full_write(oom_fd2, &limits, 1);
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to set OOM control to %s for starter: %u %s\n",
-+ limits, errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ close(oom_fd2);
-+ return 1;
-+ }
-+ close(oom_fd2);
-+
-+ // Create the subscription string:
-+ std::stringstream sub_ss;
-+ sub_ss << m_oom_efd << " " << m_oom_fd;
-+ std::string sub_str = sub_ss.str();
-+
-+ if ((nwritten = full_write(event_ctrl_fd, sub_str.c_str(), sub_str.size())) < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into event control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ return 1;
-+ }
-+ close(event_ctrl_fd);
-+
-+ // Fool DC into talking to the eventfd
-+ int pipes[2]; pipes[0] = -1; pipes[1] = -1;
-+ int fd_to_replace = -1;
-+ if (daemonCore->Create_Pipe(pipes, true) == -1 || pipes[0] == -1) {
-+ dprintf(D_ALWAYS, "Unable to create a DC pipe\n");
-+ close(m_oom_efd);
-+ m_oom_efd = -1;
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ return 1;
-+ }
-+ if ( daemonCore->Get_Pipe_FD(pipes[0], &fd_to_replace) == -1 || fd_to_replace == -1) {
-+ dprintf(D_ALWAYS, "Unable to lookup pipe's FD\n");
-+ close(m_oom_efd); m_oom_efd = -1;
-+ close(m_oom_fd); m_oom_fd = -1;
-+ daemonCore->Close_Pipe(pipes[0]);
-+ daemonCore->Close_Pipe(pipes[1]);
-+ }
-+ dup3(m_oom_efd, fd_to_replace, O_CLOEXEC);
-+ close(m_oom_efd);
-+ m_oom_efd = pipes[0];
-+
-+ // Inform DC we want to recieve notifications from this FD.
-+ daemonCore->Register_Pipe(pipes[0],"OOM event fd", static_cast<PipeHandlercpp>(&VanillaProc::outOfMemoryEvent),"OOM Event Handler",this,HANDLE_READ);
-+ return 0;
-+#endif
-+}
-+
-diff --git a/src/condor_starter.V6.1/vanilla_proc.h b/src/condor_starter.V6.1/vanilla_proc.h
-index d524cf5..90b4741 100644
---- a/src/condor_starter.V6.1/vanilla_proc.h
-+++ b/src/condor_starter.V6.1/vanilla_proc.h
-@@ -74,6 +74,15 @@ private:
- #if !defined(WIN32)
- int m_escalation_tid;
- #endif
-+
-+ // Configure OOM killer for this job
-+ int m_memory_limit; // Memory limit, in MB.
-+ int m_oom_fd; // The file descriptor which recieves events
-+ int m_oom_efd; // The event FD to watch
-+ int setupOOMScore(int new_score);
-+ int outOfMemoryEvent(int fd);
-+ int setupOOMEvent(const std::string & cgroup_string);
-+
- };
-
- #endif
-diff --git a/src/condor_utils/condor_holdcodes.h b/src/condor_utils/condor_holdcodes.h
-index d788d6e..3083db3 100644
---- a/src/condor_utils/condor_holdcodes.h
-+++ b/src/condor_utils/condor_holdcodes.h
-@@ -128,4 +128,6 @@ const int CONDOR_HOLD_CODE_GlexecChownSandboxToCondor = 30;
-
- const int CONDOR_HOLD_CODE_PrivsepChownSandboxToCondor = 31;
-
-+const int CONDOR_HOLD_CODE_JobOutOfResources = 32;
-+
- #endif
diff --git a/condor_partial_defrag_v2.patch b/condor_partial_defrag_v2.patch
deleted file mode 100644
index d2b0016..0000000
--- a/condor_partial_defrag_v2.patch
+++ /dev/null
@@ -1,208 +0,0 @@
-diff --git a/src/condor_daemon_client/dc_startd.cpp b/src/condor_daemon_client/dc_startd.cpp
-index 7261c4a..09a2689 100644
---- a/src/condor_daemon_client/dc_startd.cpp
-+++ b/src/condor_daemon_client/dc_startd.cpp
-@@ -51,7 +51,7 @@ DCStartd::DCStartd( const char* tName, const char* tPool, const char* tAddr,
- }
- }
-
--DCStartd::DCStartd( ClassAd *ad, const char *tPool )
-+DCStartd::DCStartd( const ClassAd *ad, const char *tPool )
- : Daemon(ad,DT_STARTD,tPool),
- claim_id(NULL)
- {
-diff --git a/src/condor_daemon_client/dc_startd.h b/src/condor_daemon_client/dc_startd.h
-index c5f3e89..ff20892 100644
---- a/src/condor_daemon_client/dc_startd.h
-+++ b/src/condor_daemon_client/dc_startd.h
-@@ -49,7 +49,7 @@ public:
- DCStartd( const char* const name, const char* const pool,
- const char* const addr, const char* const id );
-
-- DCStartd( ClassAd *ad, const char *pool = NULL );
-+ DCStartd( const ClassAd *ad, const char *pool = NULL );
-
- /// Destructor.
- ~DCStartd();
-diff --git a/src/defrag/defrag.cpp b/src/defrag/defrag.cpp
-index 26aec0a..8710b5d 100644
---- a/src/defrag/defrag.cpp
-+++ b/src/defrag/defrag.cpp
-@@ -185,6 +185,8 @@ void Defrag::config()
- }
- }
-
-+ m_can_cancel = param_boolean("DEFRAG_CAN_CANCEL", true);
-+
- param(m_defrag_name,"DEFRAG_NAME");
-
- int stats_quantum = m_polling_interval;
-@@ -487,8 +489,17 @@ void Defrag::poll()
- int num_whole_machines = countMachines(m_whole_machine_expr.c_str(),"DEFRAG_WHOLE_MACHINE_EXPR",&whole_machines);
- m_stats.WholeMachines = num_whole_machines;
-
-+ MachineSet draining_whole_machines;
-+ std::stringstream draining_whole_machines_ss;
-+ draining_whole_machines_ss << m_whole_machine_expr << " && Draining && Offline=!=True";
-+ int num_draining_whole_machines = countMachines(draining_whole_machines_ss.str().c_str(),
-+ "<DEFRAG_WHOLE_MACHINE_EXPR Draining>", &draining_whole_machines);
-+
- dprintf(D_ALWAYS,"There are currently %d draining and %d whole machines.\n",
- num_draining,num_whole_machines);
-+ if (num_draining_whole_machines)
-+ dprintf(D_ALWAYS, "Of the %d whole machines, %d are in the draining state.\n",
-+ num_whole_machines, num_draining_whole_machines);
-
- queryDrainingCost();
-
-@@ -548,8 +559,7 @@ void Defrag::poll()
-
- ClassAdList startdAds;
- std::string requirements;
-- sprintf(requirements,"(%s) && Draining =!= true",m_defrag_requirements.c_str());
-- if( !queryMachines(requirements.c_str(),"DEFRAG_REQUIREMENTS",startdAds) ) {
-+ if( !queryMachines(m_defrag_requirements.c_str(),"DEFRAG_REQUIREMENTS",startdAds) ) {
- dprintf(D_ALWAYS,"Doing nothing, because the query to select machines matching DEFRAG_REQUIREMENTS failed.\n");
- return;
- }
-@@ -561,12 +571,26 @@ void Defrag::poll()
- int num_drained = 0;
- ClassAd *startd_ad;
- MachineSet machines_done;
-+ MachineSet draining_machines_done;
- while( (startd_ad=startdAds.Next()) ) {
- std::string machine;
- std::string name;
- startd_ad->LookupString(ATTR_NAME,name);
- slotNameToDaemonName(name,machine);
-
-+ if( !draining_machines_done.count(machine) && draining_whole_machines.count(machine) ) {
-+ cancel_drain(*startd_ad);
-+ draining_machines_done.insert(machine);
-+ continue;
-+ }
-+
-+ // Do not consider slots which are already draining.
-+ bool startd_currently_draining = false;
-+ startd_ad->LookupBool("Draining", startd_currently_draining);
-+ if( startd_currently_draining ) {
-+ continue;
-+ }
-+
- if( machines_done.count(machine) ) {
- dprintf(D_FULLDEBUG,
- "Skipping %s: already attempted to drain %s in this cycle.\n",
-@@ -581,14 +605,13 @@ void Defrag::poll()
- continue;
- }
-
-- if( drain(startd_ad) ) {
-+ if( (num_drained++ < num_to_drain) && drain(*startd_ad) ) {
- machines_done.insert(machine);
-
-- if( ++num_drained >= num_to_drain ) {
-+ if( num_drained >= num_to_drain ) {
- dprintf(D_ALWAYS,
- "Drained maximum number of machines allowed in this cycle (%d).\n",
- num_to_drain);
-- break;
- }
- }
- }
-@@ -601,26 +624,24 @@ void Defrag::poll()
- }
-
- bool
--Defrag::drain(ClassAd *startd_ad)
-+Defrag::drain(const ClassAd &startd_ad)
- {
-- ASSERT( startd_ad );
--
- std::string name;
-- startd_ad->LookupString(ATTR_NAME,name);
-+ startd_ad.LookupString(ATTR_NAME,name);
-
- dprintf(D_ALWAYS,"Initiating %s draining of %s.\n",
- m_draining_schedule_str.c_str(),name.c_str());
-
-- DCStartd startd( startd_ad );
-+ DCStartd startd( &startd_ad );
-
- int graceful_completion = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_COMPLETION,graceful_completion);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_COMPLETION,graceful_completion);
- int quick_completion = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_COMPLETION,quick_completion);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_COMPLETION,quick_completion);
- int graceful_badput = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_BADPUT,graceful_badput);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_BADPUT,graceful_badput);
- int quick_badput = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_BADPUT,quick_badput);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_BADPUT,quick_badput);
-
- time_t now = time(NULL);
- std::string draining_check_expr;
-@@ -659,6 +680,27 @@ Defrag::drain(ClassAd *startd_ad)
- return true;
- }
-
-+bool
-+Defrag::cancel_drain(const ClassAd &startd_ad)
-+{
-+
-+ std::string name;
-+ startd_ad.LookupString(ATTR_NAME,name);
-+
-+ dprintf(D_ALWAYS,"Initiating %s draining of %s.\n",
-+ m_draining_schedule_str.c_str(),name.c_str());
-+
-+ DCStartd startd( &startd_ad );
-+
-+ bool rval = startd.cancelDrainJobs( NULL );
-+ if ( rval ) {
-+ dprintf(D_FULLDEBUG, "Sent request to cancel draining on %s\n", startd.name());
-+ } else {
-+ dprintf(D_ALWAYS, "Unable to cancel draining on %s: %s\n", startd.name(), startd.error());
-+ }
-+ return rval;
-+}
-+
- void
- Defrag::publish(ClassAd *ad)
- {
-diff --git a/src/defrag/defrag.h b/src/defrag/defrag.h
-index 8c7fd51..909b569 100644
---- a/src/defrag/defrag.h
-+++ b/src/defrag/defrag.h
-@@ -40,11 +40,11 @@ class Defrag: public Service {
- void stop();
-
- void poll(); // do the periodic policy evaluation
-- bool drain(ClassAd *startd_ad);
-
- typedef std::set< std::string > MachineSet;
-
- private:
-+
- int m_polling_interval; // delay between evaluations of the policy
- int m_polling_timer;
- double m_draining_per_hour;
-@@ -58,6 +58,7 @@ class Defrag: public Service {
- ClassAd m_rank_ad;
- int m_draining_schedule;
- std::string m_draining_schedule_str;
-+ bool m_can_cancel; // Whether condor_defrag can also cancel draining early.
-
- time_t m_last_poll;
-
-@@ -70,6 +71,9 @@ class Defrag: public Service {
- ClassAd m_public_ad;
- DefragStats m_stats;
-
-+ bool drain(const ClassAd &startd_ad);
-+ bool cancel_drain(const ClassAd &startd_ad);
-+
- void validateExpr(char const *constraint,char const *constraint_source);
- bool queryMachines(char const *constraint,char const *constraint_source,ClassAdList &startdAds);
-
diff --git a/condor_pid_namespaces_v7.patch b/condor_pid_namespaces_v7.patch
deleted file mode 100644
index 810a4b4..0000000
--- a/condor_pid_namespaces_v7.patch
+++ /dev/null
@@ -1,305 +0,0 @@
-diff --git a/src/condor_daemon_core.V6/condor_daemon_core.h b/src/condor_daemon_core.V6/condor_daemon_core.h
-index 3562577..d9d1736 100644
---- a/src/condor_daemon_core.V6/condor_daemon_core.h
-+++ b/src/condor_daemon_core.V6/condor_daemon_core.h
-@@ -192,6 +192,7 @@ struct FamilyInfo {
- gid_t* group_ptr;
- #endif
- const char* glexec_proxy;
-+ bool want_pid_namespace;
- const char* cgroup;
-
- FamilyInfo() {
-@@ -201,6 +202,7 @@ struct FamilyInfo {
- group_ptr = NULL;
- #endif
- glexec_proxy = NULL;
-+ want_pid_namespace = false;
- cgroup = NULL;
- }
- };
-diff --git a/src/condor_daemon_core.V6/daemon_core.cpp b/src/condor_daemon_core.V6/daemon_core.cpp
-index e058fd3..74fe8a0 100644
---- a/src/condor_daemon_core.V6/daemon_core.cpp
-+++ b/src/condor_daemon_core.V6/daemon_core.cpp
-@@ -34,6 +34,7 @@
- #if HAVE_CLONE
- #include <sched.h>
- #include <sys/syscall.h>
-+#include <sys/mount.h>
- #endif
-
- #if HAVE_RESOLV_H && HAVE_DECL_RES_INIT
-@@ -112,6 +113,10 @@ CRITICAL_SECTION Big_fat_mutex; // coarse grained mutex for debugging purposes
- #include <sched.h>
- #endif
-
-+#if !defined(CLONE_NEWPID)
-+#define CLONE_NEWPID 0x20000000
-+#endif
-+
- static const char* EMPTY_DESCRIP = "<NULL>";
-
- // special errno values that may be returned from Create_Process
-@@ -6566,7 +6571,9 @@ public:
- m_affinity_mask(affinity_mask),
- m_fs_remap(fs_remap),
- m_wrote_tracking_gid(false),
-- m_no_dprintf_allowed(false)
-+ m_no_dprintf_allowed(false),
-+ m_clone_newpid_pid(-1),
-+ m_clone_newpid_ppid(-1)
- {
- }
-
-@@ -6627,6 +6634,10 @@ private:
- bool m_wrote_tracking_gid;
- bool m_no_dprintf_allowed;
- priv_state m_priv_state;
-+ pid_t m_clone_newpid_pid;
-+ pid_t m_clone_newpid_ppid;
-+
-+ pid_t fork(int);
- };
-
- enum {
-@@ -6650,7 +6661,19 @@ pid_t CreateProcessForkit::clone_safe_getpid() {
- // the pid of the parent process (presumably due to internal
- // caching in libc). Therefore, use the syscall to get
- // the answer directly.
-- return syscall(SYS_getpid);
-+
-+ int retval = syscall(SYS_getpid);
-+
-+ // If we were fork'd with CLONE_NEWPID, we think our PID is 1.
-+ // In this case, ask the parent!
-+ if (retval == 1) {
-+ if (m_clone_newpid_pid == -1) {
-+ EXCEPT("getpid is 1!");
-+ }
-+ retval = m_clone_newpid_pid;
-+ }
-+
-+ return retval;
- #else
- return ::getpid();
- #endif
-@@ -6659,12 +6682,115 @@ pid_t CreateProcessForkit::clone_safe_getppid() {
- #if HAVE_CLONE
- // See above comment for clone_safe_getpid() for explanation of
- // why we need to do this.
-- return syscall(SYS_getppid);
-+
-+ int retval = syscall(SYS_getppid);
-+
-+ // If ppid is 0, then either Condor is init (DEAR GOD) or we
-+ // were created with CLONE_NEWPID; ask the parent!
-+ if (retval == 0) {
-+ if (m_clone_newpid_ppid == -1) {
-+ EXCEPT("getppid is 0!");
-+ }
-+ retval = m_clone_newpid_ppid;
-+ }
-+
-+ return retval;
- #else
- return ::getppid();
- #endif
- }
-
-+/**
-+ * fork allows one to use certain clone syscall flags, but provides more
-+ * familiar POSIX fork semantics.
-+ * NOTES:
-+ * - We whitelist the flags you are allowed to pass. Currently supported:
-+ * - CLONE_NEWPID. Implies CLONE_NEWNS.
-+ * If the clone succeeds but the remount fails, the child calls _exit(1),
-+ * but the parent will return successfully.
-+ * It would be a simple fix to have the parent return the failure, if
-+ * someone desired.
-+ * Flags are whitelisted to help us adhere to the fork-like semantics (no
-+ * shared memory between parent and child, for example). If you give other
-+ * flags, they are silently ignored.
-+ * - man pages indicate that clone on i386 is only fully functional when used
-+ * via ASM, not the vsyscall interface. This doesn't appear to be relevant
-+ * to this particular use case.
-+ * - To avoid linking with pthreads (or copy/pasting lots of glibc code), I
-+ * don't include integration with threads. This means various threading
-+ * calls in the child may not function correctly (pre-exec; post-exec
-+ * should be fine), and pthreads might not notice when the child exits.
-+ * Traditional POSIX calls like wait will still function because the
-+ * parent will receive the SIGCHLD.
-+ * This is simple to fix if someone desired, but I'd mostly rather not link
-+ * with pthreads.
-+ */
-+
-+#define ALLOWED_FLAGS (SIGCHLD | CLONE_NEWPID | CLONE_NEWNS )
-+
-+pid_t CreateProcessForkit::fork(int flags) {
-+
-+ // If you don't need any fancy flags, just do the old boring POSIX call
-+ if (flags == 0) {
-+ return ::fork();
-+ }
-+
-+#if HAVE_CLONE
-+
-+ int rw[2]; // Communication pipes for the CLONE_NEWPID case.
-+
-+ flags |= SIGCHLD; // The only necessary flag.
-+ if (flags & CLONE_NEWPID) {
-+ flags |= CLONE_NEWNS;
-+ if (pipe(rw)) {
-+ EXCEPT("UNABLE TO CREATE PIPE.");
-+ }
-+ }
-+
-+ // fork as root if we have our fancy flags.
-+ priv_state orig_state = set_priv(PRIV_ROOT);
-+ int retval = syscall(SYS_clone, ALLOWED_FLAGS & flags, 0, NULL, NULL);
-+
-+ // Child
-+ if ((retval == 0) && (flags & CLONE_NEWPID)) {
-+
-+ // If we should have forked as non-root, make things in life final.
-+ set_priv(orig_state);
-+
-+ if (full_read(rw[0], &m_clone_newpid_ppid, sizeof(pid_t)) != sizeof(pid_t)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+ if (full_read(rw[0], &m_clone_newpid_pid, sizeof(pid_t)) != sizeof(pid_t)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+
-+ // Parent
-+ } else if (retval > 0) {
-+ set_priv(orig_state);
-+ pid_t ppid = getpid(); // We are parent, so don't need clone_safe_pid.
-+ if (full_write(rw[1], &ppid, sizeof(ppid)) != sizeof(ppid)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+ if (full_write(rw[1], &retval, sizeof(ppid)) != sizeof(ppid)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+ }
-+ // retval=-1 falls through here.
-+ if (flags & CLONE_NEWPID) {
-+ close(rw[0]);
-+ close(rw[1]);
-+ }
-+ return retval;
-+
-+#else
-+
-+ // Note we silently ignore flags if there's no clone on the platform.
-+ return ::fork();
-+
-+#endif
-+
-+}
-+
- pid_t CreateProcessForkit::fork_exec() {
- pid_t newpid;
-
-@@ -6736,7 +6862,11 @@ pid_t CreateProcessForkit::fork_exec() {
- }
- #endif /* HAVE_CLONE */
-
-- newpid = fork();
-+ int fork_flags = 0;
-+ if (m_family_info) {
-+ fork_flags |= m_family_info->want_pid_namespace ? CLONE_NEWPID : 0;
-+ }
-+ newpid = this->fork(fork_flags);
- if( newpid == 0 ) {
- // in child
- enterCreateProcessChild(this);
-diff --git a/src/condor_starter.V6.1/vanilla_proc.cpp b/src/condor_starter.V6.1/vanilla_proc.cpp
-index 044cb10..8528ca7 100644
---- a/src/condor_starter.V6.1/vanilla_proc.cpp
-+++ b/src/condor_starter.V6.1/vanilla_proc.cpp
-@@ -360,6 +360,24 @@ VanillaProc::StartJob()
- }
- }
-
-+#if defined(LINUX)
-+ // On Linux kernel 2.6.24 and later, we can give each
-+ // job its own PID namespace
-+ if (param_boolean("USE_PID_NAMESPACES", false)) {
-+ if (!can_switch_ids()) {
-+ EXCEPT("USE_PID_NAMESPACES enabled, but can't perform this "
-+ "call in Linux unless running as root.");
-+ }
-+ fi.want_pid_namespace = true;
-+ if (!fs_remap) {
-+ fs_remap = new FilesystemRemap();
-+ }
-+ fs_remap->RemapProc();
-+ }
-+ dprintf(D_FULLDEBUG, "PID namespace option: %s\n", fi.want_pid_namespace ? "true" : "false");
-+#endif
-+
-+
- // have OsProc start the job
- //
- int retval = OsProc::StartJob(&fi, fs_remap);
-diff --git a/src/condor_utils/filesystem_remap.cpp b/src/condor_utils/filesystem_remap.cpp
-index e0f2e61..735c744 100644
---- a/src/condor_utils/filesystem_remap.cpp
-+++ b/src/condor_utils/filesystem_remap.cpp
-@@ -29,7 +29,8 @@
-
- FilesystemRemap::FilesystemRemap() :
- m_mappings(),
-- m_mounts_shared()
-+ m_mounts_shared(),
-+ m_remap_proc(false)
- {
- ParseMountinfo();
- }
-@@ -120,6 +121,9 @@ int FilesystemRemap::PerformMappings() {
- break;
- }
- }
-+ if ((!retval) && m_remap_proc) {
-+ retval = mount("proc", "/proc", "proc", 0, NULL);
-+ }
- #endif
- return retval;
- }
-@@ -148,6 +152,10 @@ std::string FilesystemRemap::RemapDir(std::string target) {
- return target;
- }
-
-+void FilesystemRemap::RemapProc() {
-+ m_remap_proc = true;
-+}
-+
- /*
- Sample mountinfo contents (from http://www.kernel.org/doc/Documentation/filesystems/proc.txt):
- 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
-diff --git a/src/condor_utils/filesystem_remap.h b/src/condor_utils/filesystem_remap.h
-index 5e9362d..2e17476 100644
---- a/src/condor_utils/filesystem_remap.h
-+++ b/src/condor_utils/filesystem_remap.h
-@@ -74,6 +74,12 @@ public:
- */
- std::string RemapFile(std::string);
-
-+ /**
-+ * Indicate that we should remount /proc in the child process.
-+ * Necessary for PID namespaces.
-+ */
-+ void RemapProc();
-+
- private:
-
- /**
-@@ -89,6 +95,7 @@ private:
- std::list<pair_strings> m_mappings;
- std::list<pair_str_bool> m_mounts_shared;
- std::list<pair_strings> m_mounts_autofs;
-+ bool m_remap_proc;
-
- };
- #endif
diff --git a/cream_sl6_build.patch b/cream_sl6_build.patch
deleted file mode 100644
index debc220..0000000
--- a/cream_sl6_build.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/externals/bundles/cream/1.12.1_14/CMakeLists.txt
-+++ b/externals/bundles/cream/1.12.1_14/CMakeLists.txt
-@@ -250,7 +250,7 @@ if ( NOT PROPER )
-
- else( NOT PROPER )
-
-- if ( ${SYSTEM_NAME} MATCHES "rhel6" OR ${SYSTEM_NAME} MATCHES "centos6")
-+ if ( ${SYSTEM_NAME} MATCHES "rhel6" OR ${SYSTEM_NAME} MATCHES "centos6" OR ${SYSTEM_NAME} MATCHES "sl6")
- find_multiple("glite_ce_cream_client_soap;glite_ce_cream_client_util;glite_security_gsoap_plugin_2716_cxx;glite_security_gss;gridsite" CREAM_FOUND )
- else()
- find_multiple("glite_ce_cream_client_soap;glite_ce_cream_client_util;glite_security_gsoap_plugin_2713_cxx;glite_security_gss;gridsite" CREAM_FOUND )
diff --git a/d10e85eada71599caebb56fde50dd42bbbf6b65d.patch b/d10e85eada71599caebb56fde50dd42bbbf6b65d.patch
deleted file mode 100644
index f0ab9a6..0000000
--- a/d10e85eada71599caebb56fde50dd42bbbf6b65d.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From d10e85eada71599caebb56fde50dd42bbbf6b65d Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Tue, 28 Jul 2015 21:24:36 -0500
-Subject: [PATCH] Allow compilation with both old and new Globus version. #5180
-
-The signature of globus_gsi_cred_write_proxy changed from
-
-globus_result_t
-globus_gsi_cred_write_proxy(globus_l_gsi_cred_handle_s*, char*)
-
-to
-
-globus_result_t
-globus_gsi_cred_write_proxy(globus_l_gsi_cred_handle_s*, const char*)
-
-This causes a function pointer assignment to fail. Since we want to support
-both the old and new interface, simply reinterpret_cast the pointer to the
-correct type.
-
-Tested compilation against both globus-gsi-credential 7.7 and 7.9.
----
- src/condor_utils/globus_utils.cpp | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/condor_utils/globus_utils.cpp b/src/condor_utils/globus_utils.cpp
-index 2027e3e..1810b74 100644
---- a/src/condor_utils/globus_utils.cpp
-+++ b/src/condor_utils/globus_utils.cpp
-@@ -354,7 +354,7 @@ activate_globus_gsi( void )
- globus_gsi_cred_handle_destroy_ptr = globus_gsi_cred_handle_destroy;
- globus_gsi_cred_handle_init_ptr = globus_gsi_cred_handle_init;
- globus_gsi_cred_read_proxy_ptr = globus_gsi_cred_read_proxy;
-- globus_gsi_cred_write_proxy_ptr = globus_gsi_cred_write_proxy;
-+ globus_gsi_cred_write_proxy_ptr = reinterpret_cast<globus_result_t (*)(globus_l_gsi_cred_handle_s*, char*)>(globus_gsi_cred_write_proxy);
- globus_gsi_proxy_assemble_cred_ptr = globus_gsi_proxy_assemble_cred;
- globus_gsi_proxy_create_req_ptr = globus_gsi_proxy_create_req;
- globus_gsi_proxy_handle_attrs_destroy_ptr = globus_gsi_proxy_handle_attrs_destroy;
diff --git a/doc-conf.patch b/doc-conf.patch
deleted file mode 100644
index 9358225..0000000
--- a/doc-conf.patch
+++ /dev/null
@@ -1,11 +0,0 @@
-diff --git a/docs/conf.py b/docs/conf.py
-index c1e082031a..8a044532dc 100644
---- a/docs/conf.py
-+++ b/docs/conf.py
-@@ -450,6 +450,5 @@ def modify_signature(app, what, name, obj, options, signature, return_annotation
- return signature, return_annotation
-
- def setup(app):
-- app.add_stylesheet('css/htcondor-manual.css')
- app.connect('autodoc-process-docstring', modify_docstring)
- app.connect('autodoc-process-signature', modify_signature)
diff --git a/dprintf_syslog.patch b/dprintf_syslog.patch
deleted file mode 100644
index c567266..0000000
--- a/dprintf_syslog.patch
+++ /dev/null
@@ -1,324 +0,0 @@
-From cf86dbaf75f4c81e406036b6695c717cf4fd1331 Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Wed, 24 Oct 2012 20:28:09 -0500
-Subject: [PATCH 1/3] First attempt at syslog code for dprintf.
-
----
- src/condor_includes/dprintf_internal.h | 11 ++++-
- src/condor_utils/dprintf_setup.cpp | 16 ++++++
- src/condor_utils/dprintf_syslog.cpp | 19 +++++++
- src/condor_utils/dprintf_syslog.h | 82 ++++++++++++++++++++++++++++++++
- 4 files changed, 127 insertions(+), 1 deletions(-)
- create mode 100644 src/condor_utils/dprintf_syslog.cpp
- create mode 100644 src/condor_utils/dprintf_syslog.h
-
-diff --git a/src/condor_includes/dprintf_internal.h b/src/condor_includes/dprintf_internal.h
-index c26a886..b0ecf48 100644
---- a/src/condor_includes/dprintf_internal.h
-+++ b/src/condor_includes/dprintf_internal.h
-@@ -17,6 +17,9 @@
- *
- ***************************************************************/
-
-+#ifndef __dprintf_internal_h_
-+#define __dprintf_internal_h_
-+
- // This #define doesn't actually do anything. This value needs to be
- // defined before any system header files are included in the source file
- // to have any effect.
-@@ -27,6 +30,7 @@ typedef _Longlong int64_t;
- #else
- #include <stdint.h>
- #endif
-+#include <ctime>
-
- struct DebugFileInfo;
-
-@@ -37,7 +41,8 @@ enum DebugOutput
- FILE_OUT,
- STD_OUT,
- STD_ERR,
-- OUTPUT_DEBUG_STR
-+ OUTPUT_DEBUG_STR,
-+ SYSLOG
- };
-
- /* future
-@@ -70,6 +75,7 @@ struct DebugFileInfo
- bool want_truncate;
- bool accepts_all;
- bool dont_panic;
-+ void *userData;
- DebugFileInfo() :
- outputTarget(FILE_OUT),
- debugFP(0),
-@@ -79,6 +85,7 @@ struct DebugFileInfo
- want_truncate(false),
- accepts_all(false),
- dont_panic(false),
-+ userData(NULL),
- dprintfFunc(NULL)
- {}
- DebugFileInfo(const DebugFileInfo &dfi) : outputTarget(dfi.outputTarget), debugFP(NULL), choice(dfi.choice),
-@@ -115,3 +122,5 @@ void _dprintf_global_func(int cat_and_flags, int hdr_flags, time_t clock_now, st
- void dprintf_to_outdbgstr(int cat_and_flags, int hdr_flags, time_t clock_now, struct tm *tm, const char* message, DebugFileInfo* dbgInfo);
- #endif
-
-+#endif
-+
-diff --git a/src/condor_utils/dprintf_setup.cpp b/src/condor_utils/dprintf_setup.cpp
-index 440ef98..b1ccd3a 100644
---- a/src/condor_utils/dprintf_setup.cpp
-+++ b/src/condor_utils/dprintf_setup.cpp
-@@ -24,6 +24,7 @@
- #include "condor_sys_types.h"
- #include "condor_debug.h"
- #include "dprintf_internal.h"
-+#include "dprintf_syslog.h"
- #include "condor_constants.h"
-
- #if HAVE_BACKTRACE
-@@ -134,6 +135,13 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- it->dprintfFunc = dprintf_to_outdbgstr;
- }
- #endif
-+ else if (logPath == "SYSLOG")
-+ {
-+ // Intention is to eventually user-selected
-+ it->dprintfFunc = DprintfSyslog::Log;
-+ it->outputTarget = SYSLOG;
-+ it->userData = static_cast<void*>(DprintfSyslogFactory::NewLog(LOG_DAEMON));
-+ }
- else
- {
- it->outputTarget = FILE_OUT;
-@@ -211,6 +219,14 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
-
- if(debugLogsOld)
- {
-+
-+ for (it = debugLogsOld->begin(); it != debugLogsOld->end(); it++)
-+ {
-+ if ((it->outputTarget == SYSLOG) && (it->userData))
-+ {
-+ delete static_cast<DprintfSyslog*>(it->userData);
-+ }
-+ }
- delete debugLogsOld;
- }
-
-diff --git a/src/condor_utils/dprintf_syslog.cpp b/src/condor_utils/dprintf_syslog.cpp
-new file mode 100644
-index 0000000..d0189f8
---- /dev/null
-+++ b/src/condor_utils/dprintf_syslog.cpp
-@@ -0,0 +1,19 @@
-+
-+#include "condor_common.h"
-+#include "condor_debug.h"
-+#include "dprintf_syslog.h"
-+
-+DprintfSyslogFactory * DprintfSyslogFactory::m_singleton = NULL;
-+
-+void
-+DprintfSyslog::Log(const char * message)
-+{
-+ syslog(LOG_INFO, "%s", message);
-+}
-+
-+DprintfSyslog::~DprintfSyslog()
-+{
-+ DprintfSyslogFactory &factory = DprintfSyslogFactory::getInstance();
-+ factory.DecCount();
-+}
-+
-diff --git a/src/condor_utils/dprintf_syslog.h b/src/condor_utils/dprintf_syslog.h
-new file mode 100644
-index 0000000..a10d42d
---- /dev/null
-+++ b/src/condor_utils/dprintf_syslog.h
-@@ -0,0 +1,82 @@
-+
-+#include "dprintf_internal.h"
-+#include <syslog.h>
-+
-+class DprintfSyslogFactory;
-+
-+class DprintfSyslog
-+{
-+ friend class DprintfSyslogFactory;
-+
-+public:
-+ static void Log(int, int, time_t, struct tm*, const char * message, DebugFileInfo* info)
-+ {
-+ if (!info || !info->userData)
-+ {
-+ return;
-+ }
-+ DprintfSyslog * logger = static_cast<DprintfSyslog*>(info->userData);
-+ logger->Log(message);
-+ }
-+
-+ ~DprintfSyslog();
-+
-+protected:
-+ DprintfSyslog() {}
-+
-+private:
-+ void Log(const char *);
-+};
-+
-+class DprintfSyslogFactory
-+{
-+ friend class DprintfSyslog;
-+
-+public:
-+ static DprintfSyslog *NewLog(int facility)
-+ {
-+ DprintfSyslogFactory & factory = getInstance();
-+ return factory.NewDprintfSyslog(facility);
-+ }
-+
-+protected:
-+ void DecCount()
-+ {
-+ m_count--;
-+ if (m_count == 0)
-+ {
-+ closelog();
-+ }
-+ }
-+
-+ static DprintfSyslogFactory & getInstance()
-+ {
-+ if (!m_singleton)
-+ {
-+ m_singleton = new DprintfSyslogFactory();
-+ }
-+ return *m_singleton;
-+ }
-+
-+private:
-+ DprintfSyslog * NewDprintfSyslog(int facility)
-+ {
-+ DprintfSyslog * logger = new DprintfSyslog();
-+ if (!logger) return NULL;
-+ if (m_count == 0)
-+ {
-+ openlog("condor", LOG_PID|LOG_NDELAY, facility);
-+ }
-+ m_count++;
-+ return logger;
-+ }
-+
-+ DprintfSyslogFactory() :
-+ m_count(0)
-+ {
-+ }
-+
-+ static DprintfSyslogFactory *m_singleton;
-+
-+ unsigned int m_count;
-+};
---
-1.7.4.1
-
-
-From 5b17f58b41722735bf1a7da34c728bfe3114479b Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Wed, 24 Oct 2012 20:46:52 -0500
-Subject: [PATCH 2/3] Don't provide an ident - it defaults to the binary name, which is more useful anyway.
-
----
- src/condor_utils/dprintf_syslog.h | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-
-diff --git a/src/condor_utils/dprintf_syslog.h b/src/condor_utils/dprintf_syslog.h
-index a10d42d..364a228 100644
---- a/src/condor_utils/dprintf_syslog.h
-+++ b/src/condor_utils/dprintf_syslog.h
-@@ -65,7 +65,7 @@ private:
- if (!logger) return NULL;
- if (m_count == 0)
- {
-- openlog("condor", LOG_PID|LOG_NDELAY, facility);
-+ openlog(NULL, LOG_PID|LOG_NDELAY, facility);
- }
- m_count++;
- return logger;
---
-1.7.4.1
-
-
-From d082fcc410b3729241dbe82912f526d51a96a2f5 Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Tue, 30 Oct 2012 18:15:21 -0500
-Subject: [PATCH 3/3] Prevent dprintf_syslog from compiling on Windows.
-
----
- src/condor_utils/CMakeLists.txt | 4 +++-
- src/condor_utils/dprintf_setup.cpp | 7 ++++++-
- 2 files changed, 9 insertions(+), 2 deletions(-)
-
-diff --git a/src/condor_utils/CMakeLists.txt b/src/condor_utils/CMakeLists.txt
-index 7ce1fd6..7de76fb 100644
---- a/src/condor_utils/CMakeLists.txt
-+++ b/src/condor_utils/CMakeLists.txt
-@@ -84,10 +84,12 @@ endif()
- ##################################################
- # condorapi & tests
-
--condor_selective_glob("my_username.*;condor_event.*;file_sql.*;misc_utils.*;user_log_header.*;write_user_log*;read_user_log*;iso_dates.*;file_lock.*;format_time.*;utc_time.*;stat_wrapper*;log_rotate.*;dprintf*;sig_install.*;basename.*;mkargv.*;except.*;strupr.*;lock_file.*;rotate_file.*;strcasestr.*;strnewp.*;condor_environ.*;setsyscalls.*;passwd_cache.*;uids.c*;chomp.*;subsystem_info.*;my_subsystem.*;distribution.*;my_distribution.*;get_random_num.*;libcondorapi_stubs.*;seteuid.*;setegid.*;condor_open.*;classad_merge.*;condor_attributes.*;simple_arg.*;compat_classad.*;compat_classad_util.*;classad_oldnew.*;condor_snutils.*;stringSpace.*;string_list.*;stl_string_utils.*;MyString.*;condor_xml_classads.*;directory*;param_functions.*;filename_tools_cpp.*;filename_tools.*;stat_info.*;${SAFE_OPEN_SRC}" ApiSrcs)
-+condor_selective_glob("my_username.*;condor_event.*;file_sql.*;misc_utils.*;user_log_header.*;write_user_log*;read_user_log*;iso_dates.*;file_lock.*;format_time.*;utc_time.*;stat_wrapper*;log_rotate.*;dprintf.cpp;dprintf_c*;dprintf_setup.cpp;sig_install.*;basename.*;mkargv.*;except.*;strupr.*;lock_file.*;rotate_file.*;strcasestr.*;strnewp.*;condor_environ.*;setsyscalls.*;passwd_cache.*;uids.c*;chomp.*;subsystem_info.*;my_subsystem.*;distribution.*;my_distribution.*;get_random_num.*;libcondorapi_stubs.*;seteuid.*;setegid.*;condor_open.*;classad_merge.*;condor_attributes.*;simple_arg.*;compat_classad.*;compat_classad_util.*;classad_oldnew.*;condor_snutils.*;stringSpace.*;string_list.*;stl_string_utils.*;MyString.*;condor_xml_classads.*;directory*;param_functions.*;filename_tools_cpp.*;filename_tools.*;stat_info.*;${SAFE_OPEN_SRC}" ApiSrcs)
- if(WINDOWS)
- condor_selective_glob("directory.WINDOWS.*;directory_util.*;dynuser.WINDOWS.*;lock_file.WINDOWS.*;lsa_mgr.*;my_dynuser.*;ntsysinfo.WINDOWS.*;posix.WINDOWS.*;stat.WINDOWS.*;store_cred.*;token_cache.WINDOWS.*;truncate.WINDOWS.*" ApiSrcs)
- set_property( TARGET utils_genparams PROPERTY FOLDER "libraries" )
-+else()
-+ condor_selective_glob("dprintf_syslog*" ApiSrcs)
- endif()
-
- condor_static_lib( condorapi "${ApiSrcs}" )
-diff --git a/src/condor_utils/dprintf_setup.cpp b/src/condor_utils/dprintf_setup.cpp
-index b1ccd3a..b5938e2 100644
---- a/src/condor_utils/dprintf_setup.cpp
-+++ b/src/condor_utils/dprintf_setup.cpp
-@@ -24,7 +24,9 @@
- #include "condor_sys_types.h"
- #include "condor_debug.h"
- #include "dprintf_internal.h"
-+#if !defined(WIN32)
- #include "dprintf_syslog.h"
-+#endif
- #include "condor_constants.h"
-
- #if HAVE_BACKTRACE
-@@ -134,7 +136,7 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- it->outputTarget = OUTPUT_DEBUG_STR;
- it->dprintfFunc = dprintf_to_outdbgstr;
- }
--#endif
-+#else
- else if (logPath == "SYSLOG")
- {
- // Intention is to eventually user-selected
-@@ -142,6 +144,7 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- it->outputTarget = SYSLOG;
- it->userData = static_cast<void*>(DprintfSyslogFactory::NewLog(LOG_DAEMON));
- }
-+#endif
- else
- {
- it->outputTarget = FILE_OUT;
-@@ -224,7 +227,9 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- {
- if ((it->outputTarget == SYSLOG) && (it->userData))
- {
-+#if !defined(WIN32)
- delete static_cast<DprintfSyslog*>(it->userData);
-+#endif
- }
- }
- delete debugLogsOld;
---
-1.7.4.1
-
diff --git a/glexec_privsep_helper.patch b/glexec_privsep_helper.patch
deleted file mode 100644
index c2b664e..0000000
--- a/glexec_privsep_helper.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-commit 3baf81cbdd3c86594382027cd5d075ca036da78b
-Author: Ben Cotton <bcotton(a)fedoraproject.org>
-Date: Thu Mar 23 20:34:00 2017 -0400
-
- Fix a build issue for Fedora
-
-diff --git a/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp b/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp
-index d4bb589..1093273 100644
---- a/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp
-+++ b/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp
-@@ -418,7 +418,7 @@ GLExecPrivSepHelper::create_process(const char* path,
- if( !retry ) {
- // return the most recent glexec error output
- if( error_msg ) {
-- error_msg->formatstr_cat(glexec_error_msg.Value());
-+ error_msg->formatstr_cat("%s", glexec_error_msg.Value());
- }
- return 0;
- }
diff --git a/hcc-condor-build b/hcc-condor-build
deleted file mode 100755
index 4100113..0000000
--- a/hcc-condor-build
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/python
-
-import os
-import sys
-import shutil
-import optparse
-
-def prep_dirs(base_dir):
- for i in ["BUILD", "INSTALL", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
- new_dir = os.path.join(base_dir, i)
- if not os.path.exists(new_dir):
- os.makedirs(new_dir)
-
-def prep_source(base_dir):
- source_dir = os.path.join(base_dir, "_build", "SOURCES")
- for file in os.listdir(base_dir):
- if file == "_build":
- continue
- full_name = os.path.join(base_dir, file)
- if not os.path.isfile(full_name):
- continue
- shutil.copy(full_name, os.path.join(source_dir, file))
-
-def prepare_condor_tarball(build_dir, source_dir, branch):
- cur_dir = os.getcwd()
- tarball_dir = os.path.join(build_dir, "SOURCES")
- fd = open(os.path.join(tarball_dir, "condor.tar.gz"), "w")
- fdnum = fd.fileno()
- try:
- os.chdir(source_dir)
- pid = os.fork()
- if not pid:
- try:
- os.dup2(fdnum, 1)
- os.execvp("/bin/sh", ["sh", "-c", "git archive %s | gzip -7" % branch])
- finally:
- os._exit(1)
- else:
- (pid, status) = os.waitpid(pid, 0)
- if status:
- raise Exception("git archive failed")
- finally:
- os.chdir(cur_dir)
-
-def get_rpmbuild_defines(results_dir):
- results_dir = os.path.abspath(results_dir)
- defines = []
- defines += ["--define=_topdir %s" % results_dir]
- return defines
-
-def parse_opts():
- parser = optparse.OptionParser()
- parser.add_option("-s", "--source-dir", help="Location of the Condor git repo clone.", dest="source_dir", default="~/projects/condor")
- parser.add_option("-b", "--branch", help="Name of the git branch to use for the condor build.", dest="branch", default="master")
-
- opts, args = parser.parse_args()
-
- opts.source_dir = os.path.expanduser(opts.source_dir)
-
- return args, opts
-
-def main():
-
- args, opts = parse_opts()
-
- if len(args) != 2:
- print "Usage: hcc_make_condor <action> <directory>"
- print "Valid commands are 'build', 'prep', and 'srpm'"
- print "<directory> should point at the fedpkg-condor-hcc clone."
- return 1
-
- build_dir = os.path.join(args[1], "_build")
-
- prep_dirs(build_dir)
- defines = get_rpmbuild_defines(build_dir)
- prep_source(args[1])
-
- prepare_condor_tarball(build_dir, opts.source_dir, opts.branch)
-
- if args[0] == 'build':
- os.execvp("rpmbuild", ["rpmbuild"] + defines + ["-ba", "condor.spec"])
- elif args[0] == 'srpm':
- os.execvp("rpmbuild", ["rpmbuild"] + defines + ["-bs", "condor.spec"])
- elif args[0] == "prep":
- os.execvp("rpmbuild", ["rpmbuild"] + defines + ["-bp", "condor.spec"])
- else:
- print "Unknown action: %s" % args[0]
-
- return 1
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/libdl.patch b/libdl.patch
deleted file mode 100644
index 6278707..0000000
--- a/libdl.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-diff --git a/src/gt2_gahp/CMakeLists.txt b/src/gt2_gahp/CMakeLists.txt
-index 1e3c2f2090..41c517dc3e 100644
---- a/src/gt2_gahp/CMakeLists.txt
-+++ b/src/gt2_gahp/CMakeLists.txt
-@@ -20,7 +20,7 @@
- if (HAVE_EXT_GLOBUS)
-
- condor_exe( gahp_server "gahp_server.cpp;my_ez.cpp" ${C_SBIN}
-- "${GLOBUS_GRID_UNIVERSE_GT2};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${KRB5_FOUND};${OPENSSL_FOUND}" OFF )
-+ "${GLOBUS_GRID_UNIVERSE_GT2};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${KRB5_FOUND};${OPENSSL_FOUND};${HAVE_LIBDL}" OFF )
-
- else()
-
-diff --git a/src/nordugrid_gahp/CMakeLists.txt b/src/nordugrid_gahp/CMakeLists.txt
-index 1fda9cd601..e91491f4ff 100644
---- a/src/nordugrid_gahp/CMakeLists.txt
-+++ b/src/nordugrid_gahp/CMakeLists.txt
-@@ -33,7 +33,7 @@ if (HAVE_EXT_GLOBUS)
- condor_exe( nordugrid_gahp
- "${HeaderFiles};${SourceFiles}"
- ${C_SBIN}
-- "${GLOBUS_GRID_UNIVERSE_NORDUGRID};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${OPENSSL_FOUND};${LDAP_FOUND}"
-+ "${GLOBUS_GRID_UNIVERSE_NORDUGRID};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${OPENSSL_FOUND};${LDAP_FOUND};${HAVE_LIBDL}"
- OFF )
-
- # Check nordugrid_gahp's shared library dependencies and copy a
diff --git a/python-bindings-v1.patch b/python-bindings-v1.patch
deleted file mode 100644
index 7eed28c..0000000
--- a/python-bindings-v1.patch
+++ /dev/null
@@ -1,2058 +0,0 @@
-diff --git a/.gitignore b/.gitignore
-index 8fe6157..5a569ca 100644
---- a/.gitignore
-+++ b/.gitignore
-@@ -209,3 +209,4 @@ src/safefile/stamp-h1
- src/safefile/stamp-h2
- src/safefile/safe_id_range_list.h.in.tmp
- src/safefile/safe_id_range_list.h.tmp_out
-+src/condor_contrib/python-bindings/tests_tmp
-diff --git a/externals/bundles/boost/1.49.0/CMakeLists.txt b/externals/bundles/boost/1.49.0/CMakeLists.txt
-index 8608ee6..dcba24b 100644
---- a/externals/bundles/boost/1.49.0/CMakeLists.txt
-+++ b/externals/bundles/boost/1.49.0/CMakeLists.txt
-@@ -28,6 +28,9 @@ if (NOT WINDOWS)
- if (BUILD_TESTING)
- set (BOOST_COMPONENTS unit_test_framework ${BOOST_COMPONENTS})
- endif()
-+ if (WITH_PYTHON_BINDINGS)
-+ set (BOOST_COMPONENTS python ${BOOST_COMPONENTS})
-+ endif()
-
- endif()
-
-@@ -104,6 +107,9 @@ if (NOT PROPER) # AND (NOT Boost_FOUND OR SYSTEM_NOT_UP_TO_SNUFF) )
- condor_pre_external( BOOST ${BOOST_FILENAME}-p2 "lib;${INCLUDE_LOC}" "done")
-
- set(BOOST_MIN_BUILD_DEP --with-thread --with-test)
-+ if (WITH_PYTHON_BINDINGS)
-+ set(BOOST_MIN_BUILD_DEP --with-python)
-+ endif()
- set(BOOST_PATCH echo "nothing")
- set(BOOST_INSTALL echo "nothing")
- unset(BOOST_INCLUDE)
-diff --git a/src/condor_contrib/CMakeLists.txt b/src/condor_contrib/CMakeLists.txt
-index 52f14c0..41b9002 100644
---- a/src/condor_contrib/CMakeLists.txt
-+++ b/src/condor_contrib/CMakeLists.txt
-@@ -32,4 +32,5 @@ else(WANT_CONTRIB)
- add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/campus_factory")
- add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/bosco")
- add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/lark")
-+ add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/python-bindings")
- endif(WANT_CONTRIB)
-diff --git a/src/condor_contrib/python-bindings/CMakeLists.txt b/src/condor_contrib/python-bindings/CMakeLists.txt
-new file mode 100644
-index 0000000..50d8a29
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/CMakeLists.txt
-@@ -0,0 +1,26 @@
-+
-+option(WITH_PYTHON_BINDINGS "Support for HTCondor python bindings" OFF)
-+
-+if ( WITH_PYTHON_BINDINGS )
-+
-+ set ( CMAKE_LIBRARY_PATH_ORIG ${CMAKE_LIBRARY_PATH} )
-+ set ( CMAKE_LIBRARY_PATH ${CMAKE_LIBRARY_PATH} /usr/lib64 )
-+ find_package(PythonLibs REQUIRED)
-+ set ( CMAKE_LIBRARY_PATH CMAKE_LIBRARY_PATH_ORIG)
-+
-+ include_directories(${PYTHON_INCLUDE_DIRS})
-+
-+ condor_shared_lib( pyclassad classad.cpp classad_wrapper.h exprtree_wrapper.h )
-+ target_link_libraries( pyclassad classad ${PYTHON_LIBRARIES} -lboost_python )
-+
-+ condor_shared_lib( classad_module classad_module.cpp )
-+ target_link_libraries( classad_module pyclassad -lboost_python ${PYTHON_LIBRARIES} )
-+ set_target_properties(classad_module PROPERTIES PREFIX "" OUTPUT_NAME classad )
-+
-+ set_source_files_properties(dc_tool.cpp schedd.cpp PROPERTIES COMPILE_FLAGS -Wno-strict-aliasing)
-+ condor_shared_lib( condor condor.cpp collector.cpp config.cpp daemon_and_ad_types.cpp dc_tool.cpp export_headers.h old_boost.h schedd.cpp secman.cpp )
-+ target_link_libraries( condor pyclassad condor_utils -lboost_python ${PYTHON_LIBRARIES} )
-+ set_target_properties( condor PROPERTIES PREFIX "" )
-+
-+endif ( WITH_PYTHON_BINDINGS )
-+
-diff --git a/src/condor_contrib/python-bindings/classad.cpp b/src/condor_contrib/python-bindings/classad.cpp
-new file mode 100644
-index 0000000..4c2db18
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/classad.cpp
-@@ -0,0 +1,341 @@
-+
-+#include <string>
-+
-+#include <classad/source.h>
-+#include <classad/sink.h>
-+
-+#include "classad_wrapper.h"
-+#include "exprtree_wrapper.h"
-+
-+
-+ExprTreeHolder::ExprTreeHolder(const std::string &str)
-+ : m_expr(NULL), m_owns(true)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ExprTree *expr = NULL;
-+ if (!parser.ParseExpression(str, expr))
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse string into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ m_expr = expr;
-+}
-+
-+
-+ExprTreeHolder::ExprTreeHolder(classad::ExprTree *expr)
-+ : m_expr(expr), m_owns(false)
-+{}
-+
-+
-+ExprTreeHolder::~ExprTreeHolder()
-+{
-+ if (m_owns && m_expr) delete m_expr;
-+}
-+
-+
-+boost::python::object ExprTreeHolder::Evaluate() const
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ classad::Value value;
-+ if (!m_expr->Evaluate(value)) {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to evaluate expression");
-+ boost::python::throw_error_already_set();
-+ }
-+ boost::python::object result;
-+ std::string strvalue;
-+ long long intvalue;
-+ bool boolvalue;
-+ double realvalue;
-+ PyObject* obj;
-+ switch (value.GetType())
-+ {
-+ case classad::Value::BOOLEAN_VALUE:
-+ value.IsBooleanValue(boolvalue);
-+ obj = boolvalue ? Py_True : Py_False;
-+ result = boost::python::object(boost::python::handle<>(boost::python::borrowed(obj)));
-+ break;
-+ case classad::Value::STRING_VALUE:
-+ value.IsStringValue(strvalue);
-+ result = boost::python::str(strvalue);
-+ break;
-+ case classad::Value::ABSOLUTE_TIME_VALUE:
-+ case classad::Value::INTEGER_VALUE:
-+ value.IsIntegerValue(intvalue);
-+ result = boost::python::long_(intvalue);
-+ break;
-+ case classad::Value::RELATIVE_TIME_VALUE:
-+ case classad::Value::REAL_VALUE:
-+ value.IsRealValue(realvalue);
-+ result = boost::python::object(realvalue);
-+ break;
-+ case classad::Value::ERROR_VALUE:
-+ result = boost::python::object(classad::Value::ERROR_VALUE);
-+ break;
-+ case classad::Value::UNDEFINED_VALUE:
-+ result = boost::python::object(classad::Value::UNDEFINED_VALUE);
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_TypeError, "Unknown ClassAd value type.");
-+ boost::python::throw_error_already_set();
-+ }
-+ return result;
-+}
-+
-+
-+std::string ExprTreeHolder::toRepr()
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ classad::ClassAdUnParser up;
-+ std::string ad_str;
-+ up.Unparse(ad_str, m_expr);
-+ return ad_str;
-+}
-+
-+
-+std::string ExprTreeHolder::toString()
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ classad::PrettyPrint pp;
-+ std::string ad_str;
-+ pp.Unparse(ad_str, m_expr);
-+ return ad_str;
-+}
-+
-+
-+classad::ExprTree *ExprTreeHolder::get()
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ return m_expr->Copy();
-+}
-+
-+AttrPairToSecond::result_type AttrPairToSecond::operator()(AttrPairToSecond::argument_type p) const
-+{
-+ ExprTreeHolder holder(p.second);
-+ if (p.second->GetKind() == classad::ExprTree::LITERAL_NODE)
-+ {
-+ return holder.Evaluate();
-+ }
-+ boost::python::object result(holder);
-+ return result;
-+}
-+
-+
-+AttrPair::result_type AttrPair::operator()(AttrPair::argument_type p) const
-+{
-+ ExprTreeHolder holder(p.second);
-+ boost::python::object result(holder);
-+ if (p.second->GetKind() == classad::ExprTree::LITERAL_NODE)
-+ {
-+ result = holder.Evaluate();
-+ }
-+ return boost::python::make_tuple<std::string, boost::python::object>(p.first, result);
-+}
-+
-+
-+boost::python::object ClassAdWrapper::LookupWrap(const std::string &attr) const
-+{
-+ classad::ExprTree * expr = Lookup(attr);
-+ if (!expr)
-+ {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ if (expr->GetKind() == classad::ExprTree::LITERAL_NODE) return EvaluateAttrObject(attr);
-+ ExprTreeHolder holder(expr);
-+ boost::python::object result(holder);
-+ return result;
-+}
-+
-+boost::python::object ClassAdWrapper::LookupExpr(const std::string &attr) const
-+{
-+ classad::ExprTree * expr = Lookup(attr);
-+ if (!expr)
-+ {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ ExprTreeHolder holder(expr);
-+ boost::python::object result(holder);
-+ return result;
-+}
-+
-+boost::python::object ClassAdWrapper::EvaluateAttrObject(const std::string &attr) const
-+{
-+ classad::ExprTree *expr;
-+ if (!(expr = Lookup(attr))) {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ ExprTreeHolder holder(expr);
-+ return holder.Evaluate();
-+}
-+
-+
-+void ClassAdWrapper::InsertAttrObject( const std::string &attr, boost::python::object value)
-+{
-+ boost::python::extract<ExprTreeHolder&> expr_obj(value);
-+ if (expr_obj.check())
-+ {
-+ classad::ExprTree *expr = expr_obj().get();
-+ Insert(attr, expr);
-+ return;
-+ }
-+ boost::python::extract<classad::Value::ValueType> value_enum_obj(value);
-+ if (value_enum_obj.check())
-+ {
-+ classad::Value::ValueType value_enum = value_enum_obj();
-+ classad::Value classad_value;
-+ if (value_enum == classad::Value::ERROR_VALUE)
-+ {
-+ classad_value.SetErrorValue();
-+ classad::ExprTree *lit = classad::Literal::MakeLiteral(classad_value);
-+ Insert(attr, lit);
-+ }
-+ else if (value_enum == classad::Value::UNDEFINED_VALUE)
-+ {
-+ classad_value.SetUndefinedValue();
-+ classad::ExprTree *lit = classad::Literal::MakeLiteral(classad_value);
-+ if (!Insert(attr, lit))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ }
-+ return;
-+ }
-+ if (PyString_Check(value.ptr()))
-+ {
-+ std::string cppvalue = boost::python::extract<std::string>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ if (PyLong_Check(value.ptr()))
-+ {
-+ long long cppvalue = boost::python::extract<long long>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ if (PyInt_Check(value.ptr()))
-+ {
-+ long int cppvalue = boost::python::extract<long int>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ if (PyFloat_Check(value.ptr()))
-+ {
-+ double cppvalue = boost::python::extract<double>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ PyErr_SetString(PyExc_TypeError, "Unknown ClassAd value type.");
-+ boost::python::throw_error_already_set();
-+}
-+
-+
-+std::string ClassAdWrapper::toRepr()
-+{
-+ classad::ClassAdUnParser up;
-+ std::string ad_str;
-+ up.Unparse(ad_str, this);
-+ return ad_str;
-+}
-+
-+
-+std::string ClassAdWrapper::toString()
-+{
-+ classad::PrettyPrint pp;
-+ std::string ad_str;
-+ pp.Unparse(ad_str, this);
-+ return ad_str;
-+}
-+
-+std::string ClassAdWrapper::toOldString()
-+{
-+ classad::ClassAdUnParser pp;
-+ std::string ad_str;
-+ pp.SetOldClassAd(true);
-+ pp.Unparse(ad_str, this);
-+ return ad_str;
-+}
-+
-+AttrKeyIter ClassAdWrapper::beginKeys()
-+{
-+ return AttrKeyIter(begin());
-+}
-+
-+
-+AttrKeyIter ClassAdWrapper::endKeys()
-+{
-+ return AttrKeyIter(end());
-+}
-+
-+AttrValueIter ClassAdWrapper::beginValues()
-+{
-+ return AttrValueIter(begin());
-+}
-+
-+AttrValueIter ClassAdWrapper::endValues()
-+{
-+ return AttrValueIter(end());
-+}
-+
-+AttrItemIter ClassAdWrapper::beginItems()
-+{
-+ return AttrItemIter(begin());
-+}
-+
-+
-+AttrItemIter ClassAdWrapper::endItems()
-+{
-+ return AttrItemIter(end());
-+}
-+
-+
-+ClassAdWrapper::ClassAdWrapper() : classad::ClassAd() {}
-+
-+
-+ClassAdWrapper::ClassAdWrapper(const std::string &str)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ClassAd *result = parser.ParseClassAd(str);
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse string into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ CopyFrom(*result);
-+ result;
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/classad_module.cpp b/src/condor_contrib/python-bindings/classad_module.cpp
-new file mode 100644
-index 0000000..b3f1970
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/classad_module.cpp
-@@ -0,0 +1,145 @@
-+
-+#include <boost/python.hpp>
-+#include <classad/source.h>
-+
-+#include "classad_wrapper.h"
-+#include "exprtree_wrapper.h"
-+
-+using namespace boost::python;
-+
-+
-+Py_ssize_t py_len(boost::python::object const& obj)
-+{
-+ Py_ssize_t result = PyObject_Length(obj.ptr());
-+ if (PyErr_Occurred()) boost::python::throw_error_already_set();
-+ return result;
-+}
-+
-+
-+std::string ClassadLibraryVersion()
-+{
-+ std::string val;
-+ classad::ClassAdLibraryVersion(val);
-+ return val;
-+}
-+
-+
-+ClassAdWrapper *parseString(const std::string &str)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ClassAd *result = parser.ParseClassAd(str);
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse string into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ ClassAdWrapper * wrapper_result = new ClassAdWrapper();
-+ wrapper_result->CopyFrom(*result);
-+ delete result;
-+ return wrapper_result;
-+}
-+
-+
-+ClassAdWrapper *parseFile(FILE *stream)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ClassAd *result = parser.ParseClassAd(stream);
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse input stream into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ ClassAdWrapper * wrapper_result = new ClassAdWrapper();
-+ wrapper_result->CopyFrom(*result);
-+ delete result;
-+ return wrapper_result;
-+}
-+
-+ClassAdWrapper *parseOld(object input)
-+{
-+ ClassAdWrapper * wrapper = new ClassAdWrapper();
-+ object input_list;
-+ extract<std::string> input_extract(input);
-+ if (input_extract.check())
-+ {
-+ input_list = input.attr("splitlines")();
-+ }
-+ else
-+ {
-+ input_list = input.attr("readlines")();
-+ }
-+ unsigned input_len = py_len(input_list);
-+ for (unsigned idx=0; idx<input_len; idx++)
-+ {
-+ object line = input_list[idx].attr("strip")();
-+ if (line.attr("startswith")("#"))
-+ {
-+ continue;
-+ }
-+ std::string line_str = extract<std::string>(line);
-+ if (!wrapper->Insert(line_str))
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, line_str.c_str());
-+ throw_error_already_set();
-+ }
-+ }
-+ return wrapper;
-+}
-+
-+void *convert_to_FILEptr(PyObject* obj) {
-+ return PyFile_Check(obj) ? PyFile_AsFile(obj) : 0;
-+}
-+
-+BOOST_PYTHON_MODULE(classad)
-+{
-+ using namespace boost::python;
-+
-+ def("version", ClassadLibraryVersion, "Return the version of the linked ClassAd library.");
-+
-+ def("parse", parseString, return_value_policy<manage_new_object>());
-+ def("parse", parseFile, return_value_policy<manage_new_object>(),
-+ "Parse input into a ClassAd.\n"
-+ ":param input: A string or a file pointer.\n"
-+ ":return: A ClassAd object.");
-+ def("parseOld", parseOld, return_value_policy<manage_new_object>(),
-+ "Parse old ClassAd format input into a ClassAd.\n"
-+ ":param input: A string or a file pointer.\n"
-+ ":return: A ClassAd object.");
-+
-+ class_<ClassAdWrapper, boost::noncopyable>("ClassAd", "A classified advertisement.")
-+ .def(init<std::string>())
-+ .def("__delitem__", &ClassAdWrapper::Delete)
-+ .def("__getitem__", &ClassAdWrapper::LookupWrap)
-+ .def("eval", &ClassAdWrapper::EvaluateAttrObject, "Evaluate the ClassAd attribute to a python object.")
-+ .def("__setitem__", &ClassAdWrapper::InsertAttrObject)
-+ .def("__str__", &ClassAdWrapper::toString)
-+ .def("__repr__", &ClassAdWrapper::toRepr)
-+ // I see no way to use the SetParentScope interface safely.
-+ // Delay exposing it to python until we absolutely have to!
-+ //.def("setParentScope", &ClassAdWrapper::SetParentScope)
-+ .def("__iter__", boost::python::range(&ClassAdWrapper::beginKeys, &ClassAdWrapper::endKeys))
-+ .def("keys", boost::python::range(&ClassAdWrapper::beginKeys, &ClassAdWrapper::endKeys))
-+ .def("values", boost::python::range(&ClassAdWrapper::beginValues, &ClassAdWrapper::endValues))
-+ .def("items", boost::python::range(&ClassAdWrapper::beginItems, &ClassAdWrapper::endItems))
-+ .def("__len__", &ClassAdWrapper::size)
-+ .def("lookup", &ClassAdWrapper::LookupExpr, "Lookup an attribute and return a ClassAd expression. This method will not attempt to evaluate it to a python object.")
-+ .def("printOld", &ClassAdWrapper::toOldString, "Represent this ClassAd as a string in the \"old ClassAd\" format.")
-+ ;
-+
-+ class_<ExprTreeHolder>("ExprTree", "An expression in the ClassAd language", init<std::string>())
-+ .def("__str__", &ExprTreeHolder::toString)
-+ .def("__repr__", &ExprTreeHolder::toRepr)
-+ .def("eval", &ExprTreeHolder::Evaluate)
-+ ;
-+
-+ register_ptr_to_python< boost::shared_ptr<ClassAdWrapper> >();
-+
-+ boost::python::enum_<classad::Value::ValueType>("Value")
-+ .value("Error", classad::Value::ERROR_VALUE)
-+ .value("Undefined", classad::Value::UNDEFINED_VALUE)
-+ ;
-+
-+ boost::python::converter::registry::insert(convert_to_FILEptr,
-+ boost::python::type_id<FILE>());
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/classad_wrapper.h b/src/condor_contrib/python-bindings/classad_wrapper.h
-new file mode 100644
-index 0000000..96600c3
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/classad_wrapper.h
-@@ -0,0 +1,72 @@
-+
-+#ifndef __CLASSAD_WRAPPER_H_
-+#define __CLASSAD_WRAPPER_H_
-+
-+#include <classad/classad.h>
-+#include <boost/python.hpp>
-+#include <boost/iterator/transform_iterator.hpp>
-+
-+struct AttrPairToFirst :
-+ public std::unary_function<std::pair<std::string, classad::ExprTree*> const&, std::string>
-+{
-+ AttrPairToFirst::result_type operator()(AttrPairToFirst::argument_type p) const
-+ {
-+ return p.first;
-+ }
-+};
-+
-+typedef boost::transform_iterator<AttrPairToFirst, classad::AttrList::iterator> AttrKeyIter;
-+
-+class ExprTreeHolder;
-+
-+struct AttrPairToSecond :
-+ public std::unary_function<std::pair<std::string, classad::ExprTree*> const&, boost::python::object>
-+{
-+ AttrPairToSecond::result_type operator()(AttrPairToSecond::argument_type p) const;
-+};
-+
-+typedef boost::transform_iterator<AttrPairToSecond, classad::AttrList::iterator> AttrValueIter;
-+
-+struct AttrPair :
-+ public std::unary_function<std::pair<std::string, classad::ExprTree*> const&, boost::python::object>
-+{
-+ AttrPair::result_type operator()(AttrPair::argument_type p) const;
-+};
-+
-+typedef boost::transform_iterator<AttrPair, classad::AttrList::iterator> AttrItemIter;
-+
-+struct ClassAdWrapper : classad::ClassAd, boost::python::wrapper<classad::ClassAd>
-+{
-+ boost::python::object LookupWrap( const std::string &attr) const;
-+
-+ boost::python::object EvaluateAttrObject(const std::string &attr) const;
-+
-+ void InsertAttrObject( const std::string &attr, boost::python::object value);
-+
-+ boost::python::object LookupExpr(const std::string &attr) const;
-+
-+ std::string toRepr();
-+
-+ std::string toString();
-+
-+ std::string toOldString();
-+
-+ AttrKeyIter beginKeys();
-+
-+ AttrKeyIter endKeys();
-+
-+ AttrValueIter beginValues();
-+
-+ AttrValueIter endValues();
-+
-+ AttrItemIter beginItems();
-+
-+ AttrItemIter endItems();
-+
-+ ClassAdWrapper();
-+
-+ ClassAdWrapper(const std::string &str);
-+};
-+
-+#endif
-+
-diff --git a/src/condor_contrib/python-bindings/collector.cpp b/src/condor_contrib/python-bindings/collector.cpp
-new file mode 100644
-index 0000000..3c4fa39
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/collector.cpp
-@@ -0,0 +1,329 @@
-+
-+#include "condor_adtypes.h"
-+#include "dc_collector.h"
-+#include "condor_version.h"
-+
-+#include <memory>
-+#include <boost/python.hpp>
-+
-+#include "old_boost.h"
-+#include "classad_wrapper.h"
-+
-+using namespace boost::python;
-+
-+AdTypes convert_to_ad_type(daemon_t d_type)
-+{
-+ AdTypes ad_type = NO_AD;
-+ switch (d_type)
-+ {
-+ case DT_MASTER:
-+ ad_type = MASTER_AD;
-+ break;
-+ case DT_STARTD:
-+ ad_type = STARTD_AD;
-+ break;
-+ case DT_SCHEDD:
-+ ad_type = SCHEDD_AD;
-+ break;
-+ case DT_NEGOTIATOR:
-+ ad_type = NEGOTIATOR_AD;
-+ break;
-+ case DT_COLLECTOR:
-+ ad_type = COLLECTOR_AD;
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_ValueError, "Unknown daemon type.");
-+ throw_error_already_set();
-+ }
-+ return ad_type;
-+}
-+
-+struct Collector {
-+
-+ Collector(const std::string &pool="")
-+ : m_collectors(NULL)
-+ {
-+ if (pool.size())
-+ m_collectors = CollectorList::create(pool.c_str());
-+ else
-+ m_collectors = CollectorList::create();
-+ }
-+
-+ ~Collector()
-+ {
-+ if (m_collectors) delete m_collectors;
-+ }
-+
-+ object query(AdTypes ad_type, const std::string &constraint, list attrs)
-+ {
-+ CondorQuery query(ad_type);
-+ if (constraint.length())
-+ {
-+ query.addANDConstraint(constraint.c_str());
-+ }
-+ std::vector<const char *> attrs_char;
-+ std::vector<std::string> attrs_str;
-+ int len_attrs = py_len(attrs);
-+ if (len_attrs)
-+ {
-+ attrs_str.reserve(len_attrs);
-+ attrs_char.reserve(len_attrs+1);
-+ attrs_char[len_attrs] = NULL;
-+ for (int i=0; i<len_attrs; i++)
-+ {
-+ std::string str = extract<std::string>(attrs[i]);
-+ attrs_str.push_back(str);
-+ attrs_char[i] = attrs_str[i].c_str();
-+ }
-+ query.setDesiredAttrs(&attrs_char[0]);
-+ }
-+ ClassAdList adList;
-+
-+ QueryResult result = m_collectors->query(query, adList, NULL);
-+
-+ switch (result)
-+ {
-+ case Q_OK:
-+ break;
-+ case Q_INVALID_CATEGORY:
-+ PyErr_SetString(PyExc_RuntimeError, "Category not supported by query type.");
-+ boost::python::throw_error_already_set();
-+ case Q_MEMORY_ERROR:
-+ PyErr_SetString(PyExc_MemoryError, "Memory allocation error.");
-+ boost::python::throw_error_already_set();
-+ case Q_PARSE_ERROR:
-+ PyErr_SetString(PyExc_SyntaxError, "Query constraints could not be parsed.");
-+ boost::python::throw_error_already_set();
-+ case Q_COMMUNICATION_ERROR:
-+ PyErr_SetString(PyExc_IOError, "Failed communication with collector.");
-+ boost::python::throw_error_already_set();
-+ case Q_INVALID_QUERY:
-+ PyErr_SetString(PyExc_RuntimeError, "Invalid query.");
-+ boost::python::throw_error_already_set();
-+ case Q_NO_COLLECTOR_HOST:
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to determine collector host.");
-+ boost::python::throw_error_already_set();
-+ default:
-+ PyErr_SetString(PyExc_RuntimeError, "Unknown error from collector query.");
-+ boost::python::throw_error_already_set();
-+ }
-+
-+ list retval;
-+ ClassAd * ad;
-+ adList.Open();
-+ while ((ad = adList.Next()))
-+ {
-+ boost::shared_ptr<ClassAdWrapper> wrapper(new ClassAdWrapper());
-+ wrapper->CopyFrom(*ad);
-+ retval.append(wrapper);
-+ }
-+ return retval;
-+ }
-+
-+ object locateAll(daemon_t d_type)
-+ {
-+ AdTypes ad_type = convert_to_ad_type(d_type);
-+ return query(ad_type, "", list());
-+ }
-+
-+ object locate(daemon_t d_type, const std::string &name)
-+ {
-+ std::string constraint = ATTR_NAME " =?= \"" + name + "\"";
-+ object result = query(convert_to_ad_type(d_type), constraint, list());
-+ if (py_len(result) >= 1) {
-+ return result[0];
-+ }
-+ PyErr_SetString(PyExc_ValueError, "Unable to find daemon.");
-+ throw_error_already_set();
-+ return object();
-+ }
-+
-+ ClassAdWrapper *locateLocal(daemon_t d_type)
-+ {
-+ Daemon my_daemon( d_type, 0, 0 );
-+
-+ ClassAdWrapper *wrapper = new ClassAdWrapper();
-+ if (my_daemon.locate())
-+ {
-+ classad::ClassAd *daemonAd;
-+ if ((daemonAd = my_daemon.daemonAd()))
-+ {
-+ wrapper->CopyFrom(*daemonAd);
-+ }
-+ else
-+ {
-+ std::string addr = my_daemon.addr();
-+ if (!my_daemon.addr() || !wrapper->InsertAttr(ATTR_MY_ADDRESS, addr))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate daemon address.");
-+ throw_error_already_set();
-+ }
-+ std::string name = my_daemon.name() ? my_daemon.name() : "Unknown";
-+ if (!wrapper->InsertAttr(ATTR_NAME, name))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon name.");
-+ throw_error_already_set();
-+ }
-+ std::string hostname = my_daemon.fullHostname() ? my_daemon.fullHostname() : "Unknown";
-+ if (!wrapper->InsertAttr(ATTR_MACHINE, hostname))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon hostname.");
-+ throw_error_already_set();
-+ }
-+ std::string version = my_daemon.version() ? my_daemon.version() : "";
-+ if (!wrapper->InsertAttr(ATTR_VERSION, version))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon version.");
-+ throw_error_already_set();
-+ }
-+ const char * my_type = AdTypeToString(convert_to_ad_type(d_type));
-+ if (!my_type)
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Unable to determined daemon type.");
-+ throw_error_already_set();
-+ }
-+ std::string my_type_str = my_type;
-+ if (!wrapper->InsertAttr(ATTR_MY_TYPE, my_type_str))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon type.");
-+ throw_error_already_set();
-+ }
-+ std::string cversion = CondorVersion(); std::string platform = CondorPlatform();
-+ if (!wrapper->InsertAttr(ATTR_VERSION, cversion) || !wrapper->InsertAttr(ATTR_PLATFORM, platform))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert HTCondor version.");
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+ else
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate local daemon");
-+ boost::python::throw_error_already_set();
-+ }
-+ return wrapper;
-+ }
-+
-+
-+ // Overloads for the Collector; can't be done in boost.python and provide
-+ // docstrings.
-+ object query0()
-+ {
-+ return query(ANY_AD, "", list());
-+ }
-+ object query1(AdTypes ad_type)
-+ {
-+ return query(ad_type, "", list());
-+ }
-+ object query2(AdTypes ad_type, const std::string &constraint)
-+ {
-+ return query(ad_type, constraint, list());
-+ }
-+
-+ // TODO: this has crappy error handling when there are multiple collectors.
-+ void advertise(list ads, const std::string &command_str="UPDATE_AD_GENERIC", bool use_tcp=false)
-+ {
-+ m_collectors->rewind();
-+ Daemon *collector;
-+ std::auto_ptr<Sock> sock;
-+
-+ int command = getCollectorCommandNum(command_str.c_str());
-+ if (command == -1)
-+ {
-+ PyErr_SetString(PyExc_ValueError, ("Invalid command " + command_str).c_str());
-+ throw_error_already_set();
-+ }
-+
-+ if (command == UPDATE_STARTD_AD_WITH_ACK)
-+ {
-+ PyErr_SetString(PyExc_NotImplementedError, "Startd-with-ack protocol is not implemented at this time.");
-+ }
-+
-+ int list_len = py_len(ads);
-+ if (!list_len)
-+ return;
-+
-+ compat_classad::ClassAd ad;
-+ while (m_collectors->next(collector))
-+ {
-+ if(!collector->locate()) {
-+ PyErr_SetString(PyExc_ValueError, "Unable to locate collector.");
-+ throw_error_already_set();
-+ }
-+ int list_len = py_len(ads);
-+ sock.reset();
-+ for (int i=0; i<list_len; i++)
-+ {
-+ ClassAdWrapper &wrapper = extract<ClassAdWrapper &>(ads[i]);
-+ ad.CopyFrom(wrapper);
-+ if (use_tcp)
-+ {
-+ if (!sock.get())
-+ sock.reset(collector->startCommand(command,Stream::reli_sock,20));
-+ else
-+ {
-+ sock->encode();
-+ sock->put(command);
-+ }
-+ }
-+ else
-+ {
-+ sock.reset(collector->startCommand(command,Stream::safe_sock,20));
-+ }
-+ int result = 0;
-+ if (sock.get()) {
-+ result += ad.put(*sock);
-+ result += sock->end_of_message();
-+ }
-+ if (result != 2) {
-+ PyErr_SetString(PyExc_ValueError, "Failed to advertise to collector");
-+ throw_error_already_set();
-+ }
-+ }
-+ sock->encode();
-+ sock->put(DC_NOP);
-+ sock->end_of_message();
-+ }
-+ }
-+
-+private:
-+
-+ CollectorList *m_collectors;
-+
-+};
-+
-+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(advertise_overloads, advertise, 1, 3);
-+
-+void export_collector()
-+{
-+ class_<Collector>("Collector", "Client-side operations for the HTCondor collector")
-+ .def(init<std::string>(":param pool: Name of collector to query; if not specified, uses the local one."))
-+ .def("query", &Collector::query0)
-+ .def("query", &Collector::query1)
-+ .def("query", &Collector::query2)
-+ .def("query", &Collector::query,
-+ "Query the contents of a collector.\n"
-+ ":param ad_type: Type of ad to return from the AdTypes enum; if not specified, uses ANY_AD.\n"
-+ ":param constraint: A constraint for the ad query; defaults to true.\n"
-+ ":param attrs: A list of attributes; if specified, the returned ads will be "
-+ "projected along these attributes.\n"
-+ ":return: A list of ads in the collector matching the constraint.")
-+ .def("locate", &Collector::locateLocal, return_value_policy<manage_new_object>())
-+ .def("locate", &Collector::locate,
-+ "Query the collector for a particular daemon.\n"
-+ ":param daemon_type: Type of daemon; must be from the DaemonTypes enum.\n"
-+ ":param name: Name of daemon to locate. If not specified, it searches for the local daemon.\n"
-+ ":return: The ad of the corresponding daemon.")
-+ .def("locateAll", &Collector::locateAll,
-+ "Query the collector for all ads of a particular type.\n"
-+ ":param daemon_type: Type of daemon; must be from the DaemonTypes enum.\n"
-+ ":return: A list of matching ads.")
-+ .def("advertise", &Collector::advertise, advertise_overloads(
-+ "Advertise a list of ClassAds into the collector.\n"
-+ ":param ad_list: A list of ClassAds.\n"
-+ ":param command: A command for the collector; defaults to UPDATE_AD_GENERIC;"
-+ " other commands, such as UPDATE_STARTD_AD, may require reduced authorization levels.\n"
-+ ":param use_tcp: When set to true, updates are sent via TCP."))
-+ ;
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/condor.cpp b/src/condor_contrib/python-bindings/condor.cpp
-new file mode 100644
-index 0000000..f4a4fd4
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/condor.cpp
-@@ -0,0 +1,25 @@
-+
-+#include <boost/python.hpp>
-+
-+#include "old_boost.h"
-+#include "export_headers.h"
-+
-+using namespace boost::python;
-+
-+
-+BOOST_PYTHON_MODULE(condor)
-+{
-+ scope().attr("__doc__") = "Utilities for interacting with the HTCondor system.";
-+
-+ py_import("classad");
-+
-+ // TODO: old boost doesn't have this; conditionally compile only one newer systems.
-+ //docstring_options local_docstring_options(true, false, false);
-+
-+ export_config();
-+ export_daemon_and_ad_types();
-+ export_collector();
-+ export_schedd();
-+ export_dc_tool();
-+ export_secman();
-+}
-diff --git a/src/condor_contrib/python-bindings/config.cpp b/src/condor_contrib/python-bindings/config.cpp
-new file mode 100644
-index 0000000..0afdfc4
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/config.cpp
-@@ -0,0 +1,60 @@
-+
-+#include "condor_common.h"
-+#include "condor_config.h"
-+#include "condor_version.h"
-+
-+#include <boost/python.hpp>
-+
-+using namespace boost::python;
-+
-+struct Param
-+{
-+ std::string getitem(const std::string &attr)
-+ {
-+ std::string result;
-+ if (!param(result, attr.c_str()))
-+ {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ throw_error_already_set();
-+ }
-+ return result;
-+ }
-+
-+ void setitem(const std::string &attr, const std::string &val)
-+ {
-+ param_insert(attr.c_str(), val.c_str());
-+ }
-+
-+ std::string setdefault(const std::string &attr, const std::string &def)
-+ {
-+ std::string result;
-+ if (!param(result, attr.c_str()))
-+ {
-+ param_insert(attr.c_str(), def.c_str());
-+ return def;
-+ }
-+ return result;
-+ }
-+};
-+
-+std::string CondorVersionWrapper() { return CondorVersion(); }
-+
-+std::string CondorPlatformWrapper() { return CondorPlatform(); }
-+
-+BOOST_PYTHON_FUNCTION_OVERLOADS(config_overloads, config, 0, 3);
-+
-+void export_config()
-+{
-+ config();
-+ def("version", CondorVersionWrapper, "Returns the version of HTCondor this module is linked against.");
-+ def("platform", CondorPlatformWrapper, "Returns the platform of HTCondor this module is running on.");
-+ def("reload_config", config, config_overloads("Reload the HTCondor configuration from disk."));
-+ class_<Param>("_Param")
-+ .def("__getitem__", &Param::getitem)
-+ .def("__setitem__", &Param::setitem)
-+ .def("setdefault", &Param::setdefault)
-+ ;
-+ object param = object(Param());
-+ param.attr("__doc__") = "A dictionary-like object containing the HTCondor configuration.";
-+ scope().attr("param") = param;
-+}
-diff --git a/src/condor_contrib/python-bindings/daemon_and_ad_types.cpp b/src/condor_contrib/python-bindings/daemon_and_ad_types.cpp
-new file mode 100644
-index 0000000..f2b0bab
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/daemon_and_ad_types.cpp
-@@ -0,0 +1,30 @@
-+
-+#include <condor_adtypes.h>
-+#include <daemon_types.h>
-+#include <boost/python.hpp>
-+
-+using namespace boost::python;
-+
-+void export_daemon_and_ad_types()
-+{
-+ enum_<daemon_t>("DaemonTypes")
-+ .value("None", DT_NONE)
-+ .value("Any", DT_ANY)
-+ .value("Master", DT_MASTER)
-+ .value("Schedd", DT_SCHEDD)
-+ .value("Startd", DT_STARTD)
-+ .value("Collector", DT_COLLECTOR)
-+ .value("Negotiator", DT_NEGOTIATOR)
-+ ;
-+
-+ enum_<AdTypes>("AdTypes")
-+ .value("None", NO_AD)
-+ .value("Any", ANY_AD)
-+ .value("Generic", GENERIC_AD)
-+ .value("Startd", STARTD_AD)
-+ .value("Schedd", SCHEDD_AD)
-+ .value("Master", MASTER_AD)
-+ .value("Collector", COLLECTOR_AD)
-+ .value("Negotiator", NEGOTIATOR_AD)
-+ ;
-+}
-diff --git a/src/condor_contrib/python-bindings/dc_tool.cpp b/src/condor_contrib/python-bindings/dc_tool.cpp
-new file mode 100644
-index 0000000..973c1e3
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/dc_tool.cpp
-@@ -0,0 +1,129 @@
-+
-+#include "condor_common.h"
-+
-+#include <boost/python.hpp>
-+
-+#include "daemon.h"
-+#include "daemon_types.h"
-+#include "condor_commands.h"
-+#include "condor_attributes.h"
-+#include "compat_classad.h"
-+
-+#include "classad_wrapper.h"
-+
-+using namespace boost::python;
-+
-+enum DaemonCommands {
-+ DDAEMONS_OFF = DAEMONS_OFF,
-+ DDAEMONS_OFF_FAST = DAEMONS_OFF_FAST,
-+ DDAEMONS_OFF_PEACEFUL = DAEMONS_OFF_PEACEFUL,
-+ DDAEMON_OFF = DAEMON_OFF,
-+ DDAEMON_OFF_FAST = DAEMON_OFF_FAST,
-+ DDAEMON_OFF_PEACEFUL = DAEMON_OFF_PEACEFUL,
-+ DDC_OFF_FAST = DC_OFF_FAST,
-+ DDC_OFF_PEACEFUL = DC_OFF_PEACEFUL,
-+ DDC_OFF_GRACEFUL = DC_OFF_GRACEFUL,
-+ DDC_SET_PEACEFUL_SHUTDOWN = DC_SET_PEACEFUL_SHUTDOWN,
-+ DDC_RECONFIG_FULL = DC_RECONFIG_FULL,
-+ DRESTART = RESTART,
-+ DRESTART_PEACEFUL = RESTART_PEACEFUL
-+};
-+
-+void send_command(const ClassAdWrapper & ad, DaemonCommands dc, const std::string &target="")
-+{
-+ std::string addr;
-+ if (!ad.EvaluateAttrString(ATTR_MY_ADDRESS, addr))
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Address not available in location ClassAd.");
-+ throw_error_already_set();
-+ }
-+ std::string ad_type_str;
-+ if (!ad.EvaluateAttrString(ATTR_MY_TYPE, ad_type_str))
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Daemon type not available in location ClassAd.");
-+ throw_error_already_set();
-+ }
-+ int ad_type = AdTypeFromString(ad_type_str.c_str());
-+ if (ad_type == NO_AD)
-+ {
-+ printf("ad type %s.\n", ad_type_str.c_str());
-+ PyErr_SetString(PyExc_ValueError, "Unknown ad type.");
-+ throw_error_already_set();
-+ }
-+ daemon_t d_type;
-+ switch (ad_type) {
-+ case MASTER_AD: d_type = DT_MASTER; break;
-+ case STARTD_AD: d_type = DT_STARTD; break;
-+ case SCHEDD_AD: d_type = DT_SCHEDD; break;
-+ case NEGOTIATOR_AD: d_type = DT_NEGOTIATOR; break;
-+ case COLLECTOR_AD: d_type = DT_COLLECTOR; break;
-+ default:
-+ d_type = DT_NONE;
-+ PyErr_SetString(PyExc_ValueError, "Unknown daemon type.");
-+ throw_error_already_set();
-+ }
-+
-+ ClassAd ad_copy; ad_copy.CopyFrom(ad);
-+ Daemon d(&ad_copy, d_type, NULL);
-+ if (!d.locate())
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate daemon.");
-+ throw_error_already_set();
-+ }
-+ ReliSock sock;
-+ if (!sock.connect(d.addr()))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to connect to the remote daemon");
-+ throw_error_already_set();
-+ }
-+ if (!d.startCommand(dc, &sock, 0, NULL))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to start command.");
-+ throw_error_already_set();
-+ }
-+ if (target.size())
-+ {
-+ std::vector<unsigned char> target_cstr; target_cstr.reserve(target.size()+1);
-+ memcpy(&target_cstr[0], target.c_str(), target.size()+1);
-+ if (!sock.code(&target_cstr[0]))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to send target.");
-+ throw_error_already_set();
-+ }
-+ if (!sock.end_of_message())
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to send end-of-message.");
-+ throw_error_already_set();
-+ }
-+ }
-+ sock.close();
-+}
-+
-+BOOST_PYTHON_FUNCTION_OVERLOADS(send_command_overloads, send_command, 2, 3);
-+
-+void
-+export_dc_tool()
-+{
-+ enum_<DaemonCommands>("DaemonCommands")
-+ .value("DaemonsOff", DDAEMONS_OFF)
-+ .value("DaemonsOffFast", DDAEMONS_OFF_FAST)
-+ .value("DaemonsOffPeaceful", DDAEMONS_OFF_PEACEFUL)
-+ .value("DaemonOff", DDAEMON_OFF)
-+ .value("DaemonOffFast", DDAEMON_OFF_FAST)
-+ .value("DaemonOffPeaceful", DDAEMON_OFF_PEACEFUL)
-+ .value("OffGraceful", DDC_OFF_GRACEFUL)
-+ .value("OffPeaceful", DDC_OFF_PEACEFUL)
-+ .value("OffFast", DDC_OFF_FAST)
-+ .value("SetPeacefulShutdown", DDC_SET_PEACEFUL_SHUTDOWN)
-+ .value("Reconfig", DDC_RECONFIG_FULL)
-+ .value("Restart", DRESTART)
-+ .value("RestartPeacful", DRESTART_PEACEFUL)
-+ ;
-+
-+ def("send_command", send_command, send_command_overloads("Send a command to a HTCondor daemon specified by a location ClassAd\n"
-+ ":param ad: An ad specifying the location of the daemon; typically, found by using Collector.locate(...).\n"
-+ ":param dc: A command type; must be a member of the enum DaemonCommands.\n"
-+ ":param target: Some commands require additional arguments; for example, sending DaemonOff to a master requires one to specify which subsystem to turn off."
-+ " If this parameter is given, the daemon is sent an additional argument."))
-+ ;
-+}
-diff --git a/src/condor_contrib/python-bindings/export_headers.h b/src/condor_contrib/python-bindings/export_headers.h
-new file mode 100644
-index 0000000..4480495
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/export_headers.h
-@@ -0,0 +1,8 @@
-+
-+void export_collector();
-+void export_schedd();
-+void export_dc_tool();
-+void export_daemon_and_ad_types();
-+void export_config();
-+void export_secman();
-+
-diff --git a/src/condor_contrib/python-bindings/exprtree_wrapper.h b/src/condor_contrib/python-bindings/exprtree_wrapper.h
-new file mode 100644
-index 0000000..e3d2bc0
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/exprtree_wrapper.h
-@@ -0,0 +1,30 @@
-+
-+#ifndef __EXPRTREE_WRAPPER_H_
-+#define __EXPRTREE_WRAPPER_H_
-+
-+#include <classad/exprTree.h>
-+#include <boost/python.hpp>
-+
-+struct ExprTreeHolder
-+{
-+ ExprTreeHolder(const std::string &str);
-+
-+ ExprTreeHolder(classad::ExprTree *expr);
-+
-+ ~ExprTreeHolder();
-+
-+ boost::python::object Evaluate() const;
-+
-+ std::string toRepr();
-+
-+ std::string toString();
-+
-+ classad::ExprTree *get();
-+
-+private:
-+ classad::ExprTree *m_expr;
-+ bool m_owns;
-+};
-+
-+#endif
-+
-diff --git a/src/condor_contrib/python-bindings/old_boost.h b/src/condor_contrib/python-bindings/old_boost.h
-new file mode 100644
-index 0000000..7d159bc
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/old_boost.h
-@@ -0,0 +1,25 @@
-+
-+#include <boost/python.hpp>
-+
-+/*
-+ * This header contains all boost.python constructs missing in
-+ * older versions of boost.
-+ *
-+ * We'll eventually not compile these if the version of boost
-+ * is sufficiently recent.
-+ */
-+
-+inline ssize_t py_len(boost::python::object const& obj)
-+{
-+ ssize_t result = PyObject_Length(obj.ptr());
-+ if (PyErr_Occurred()) boost::python::throw_error_already_set();
-+ return result;
-+}
-+
-+inline boost::python::object py_import(boost::python::str name)
-+{
-+ char * n = boost::python::extract<char *>(name);
-+ boost::python::handle<> module(PyImport_ImportModule(n));
-+ return boost::python::object(module);
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/schedd.cpp b/src/condor_contrib/python-bindings/schedd.cpp
-new file mode 100644
-index 0000000..9bbc830
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/schedd.cpp
-@@ -0,0 +1,402 @@
-+
-+#include "condor_attributes.h"
-+#include "condor_q.h"
-+#include "condor_qmgr.h"
-+#include "daemon.h"
-+#include "daemon_types.h"
-+#include "enum_utils.h"
-+#include "dc_schedd.h"
-+
-+#include <boost/python.hpp>
-+
-+#include "old_boost.h"
-+#include "classad_wrapper.h"
-+#include "exprtree_wrapper.h"
-+
-+using namespace boost::python;
-+
-+#define DO_ACTION(action_name) \
-+ reason_str = extract<std::string>(reason); \
-+ if (use_ids) \
-+ result = schedd. action_name (&ids, reason_str.c_str(), NULL, AR_TOTALS); \
-+ else \
-+ result = schedd. action_name (constraint.c_str(), reason_str.c_str(), NULL, AR_TOTALS);
-+
-+struct Schedd {
-+
-+ Schedd()
-+ {
-+ Daemon schedd( DT_SCHEDD, 0, 0 );
-+
-+ if (schedd.locate())
-+ {
-+ if (schedd.addr())
-+ {
-+ m_addr = schedd.addr();
-+ }
-+ else
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate schedd address.");
-+ throw_error_already_set();
-+ }
-+ m_name = schedd.name() ? schedd.name() : "Unknown";
-+ m_version = schedd.version() ? schedd.version() : "";
-+ }
-+ else
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate local daemon");
-+ boost::python::throw_error_already_set();
-+ }
-+ }
-+
-+ Schedd(const ClassAdWrapper &ad)
-+ : m_addr(), m_name("Unknown"), m_version("")
-+ {
-+ if (!ad.EvaluateAttrString(ATTR_SCHEDD_IP_ADDR, m_addr))
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Schedd address not specified.");
-+ throw_error_already_set();
-+ }
-+ ad.EvaluateAttrString(ATTR_NAME, m_name);
-+ ad.EvaluateAttrString(ATTR_VERSION, m_version);
-+ }
-+
-+ object query(const std::string &constraint="", list attrs=list())
-+ {
-+ CondorQ q;
-+
-+ if (constraint.size())
-+ q.addAND(constraint.c_str());
-+
-+ StringList attrs_list(NULL, "\n");
-+ // Must keep strings alive; StringList does not create an internal copy.
-+ int len_attrs = py_len(attrs);
-+ std::vector<std::string> attrs_str; attrs_str.reserve(len_attrs);
-+ for (int i=0; i<len_attrs; i++)
-+ {
-+ std::string attrName = extract<std::string>(attrs[i]);
-+ attrs_str.push_back(attrName);
-+ attrs_list.append(attrs_str[i].c_str());
-+ }
-+
-+ ClassAdList jobs;
-+
-+ int fetchResult = q.fetchQueueFromHost(jobs, attrs_list, m_addr.c_str(), m_version.c_str(), NULL);
-+ switch (fetchResult)
-+ {
-+ case Q_OK:
-+ break;
-+ case Q_PARSE_ERROR:
-+ case Q_INVALID_CATEGORY:
-+ PyErr_SetString(PyExc_RuntimeError, "Parse error in constraint.");
-+ throw_error_already_set();
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_IOError, "Failed to fetch ads from schedd.");
-+ throw_error_already_set();
-+ break;
-+ }
-+
-+ list retval;
-+ ClassAd *job;
-+ jobs.Open();
-+ while ((job = jobs.Next()))
-+ {
-+ boost::shared_ptr<ClassAdWrapper> wrapper(new ClassAdWrapper());
-+ wrapper->CopyFrom(*job);
-+ retval.append(wrapper);
-+ }
-+ return retval;
-+ }
-+
-+ object actOnJobs(JobAction action, object job_spec, object reason=object())
-+ {
-+ if (reason == object())
-+ {
-+ reason = object("Python-initiated action");
-+ }
-+ StringList ids;
-+ std::vector<std::string> ids_list;
-+ std::string constraint, reason_str, reason_code;
-+ bool use_ids = false;
-+ extract<std::string> constraint_extract(job_spec);
-+ if (constraint_extract.check())
-+ {
-+ constraint = constraint_extract();
-+ }
-+ else
-+ {
-+ int id_len = py_len(job_spec);
-+ ids_list.reserve(id_len);
-+ for (int i=0; i<id_len; i++)
-+ {
-+ std::string str = extract<std::string>(job_spec[i]);
-+ ids_list.push_back(str);
-+ ids.append(ids_list[i].c_str());
-+ }
-+ use_ids = true;
-+ }
-+ DCSchedd schedd(m_addr.c_str());
-+ ClassAd *result = NULL;
-+ VacateType vacate_type;
-+ tuple reason_tuple;
-+ const char *reason_char, *reason_code_char = NULL;
-+ extract<tuple> try_extract_tuple(reason);
-+ switch (action)
-+ {
-+ case JA_HOLD_JOBS:
-+ if (try_extract_tuple.check())
-+ {
-+ reason_tuple = extract<tuple>(reason);
-+ if (py_len(reason_tuple) != 2)
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Hold action requires (hold string, hold code) tuple as the reason.");
-+ throw_error_already_set();
-+ }
-+ reason_str = extract<std::string>(reason_tuple[0]); reason_char = reason_str.c_str();
-+ reason_code = extract<std::string>(reason_tuple[1]); reason_code_char = reason_code.c_str();
-+ }
-+ else
-+ {
-+ reason_str = extract<std::string>(reason);
-+ reason_char = reason_str.c_str();
-+ }
-+ if (use_ids)
-+ result = schedd.holdJobs(&ids, reason_char, reason_code_char, NULL, AR_TOTALS);
-+ else
-+ result = schedd.holdJobs(constraint.c_str(), reason_char, reason_code_char, NULL, AR_TOTALS);
-+ break;
-+ case JA_RELEASE_JOBS:
-+ DO_ACTION(releaseJobs)
-+ break;
-+ case JA_REMOVE_JOBS:
-+ DO_ACTION(removeJobs)
-+ break;
-+ case JA_REMOVE_X_JOBS:
-+ DO_ACTION(removeXJobs)
-+ break;
-+ case JA_VACATE_JOBS:
-+ case JA_VACATE_FAST_JOBS:
-+ vacate_type = action == JA_VACATE_JOBS ? VACATE_GRACEFUL : VACATE_FAST;
-+ if (use_ids)
-+ result = schedd.vacateJobs(&ids, vacate_type, NULL, AR_TOTALS);
-+ else
-+ result = schedd.vacateJobs(constraint.c_str(), vacate_type, NULL, AR_TOTALS);
-+ break;
-+ case JA_SUSPEND_JOBS:
-+ DO_ACTION(suspendJobs)
-+ break;
-+ case JA_CONTINUE_JOBS:
-+ DO_ACTION(continueJobs)
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_NotImplementedError, "Job action not implemented.");
-+ throw_error_already_set();
-+ }
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Error when querying the schedd.");
-+ throw_error_already_set();
-+ }
-+
-+ boost::shared_ptr<ClassAdWrapper> wrapper(new ClassAdWrapper());
-+ wrapper->CopyFrom(*result);
-+ object wrapper_obj(wrapper);
-+
-+ boost::shared_ptr<ClassAdWrapper> result_ptr(new ClassAdWrapper());
-+ object result_obj(result_ptr);
-+
-+ result_obj["TotalError"] = wrapper_obj["result_total_0"];
-+ result_obj["TotalSuccess"] = wrapper_obj["result_total_1"];
-+ result_obj["TotalNotFound"] = wrapper_obj["result_total_2"];
-+ result_obj["TotalBadStatus"] = wrapper_obj["result_total_3"];
-+ result_obj["TotalAlreadyDone"] = wrapper_obj["result_total_4"];
-+ result_obj["TotalPermissionDenied"] = wrapper_obj["result_total_5"];
-+ result_obj["TotalJobAds"] = wrapper_obj["TotalJobAds"];
-+ result_obj["TotalChangedAds"] = wrapper_obj["ActionResult"];
-+ return result_obj;
-+ }
-+
-+ object actOnJobs2(JobAction action, object job_spec)
-+ {
-+ return actOnJobs(action, job_spec, object("Python-initiated action."));
-+ }
-+
-+ int submit(ClassAdWrapper &wrapper, int count=1)
-+ {
-+ ConnectionSentry sentry(*this); // Automatically connects / disconnects.
-+
-+ int cluster = NewCluster();
-+ if (cluster < 0)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to create new cluster.");
-+ throw_error_already_set();
-+ }
-+ ClassAd ad; ad.CopyFrom(wrapper);
-+ for (int idx=0; idx<count; idx++)
-+ {
-+ int procid = NewProc (cluster);
-+ if (procid < 0)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to create new proc id.");
-+ throw_error_already_set();
-+ }
-+ ad.InsertAttr(ATTR_CLUSTER_ID, cluster);
-+ ad.InsertAttr(ATTR_PROC_ID, procid);
-+
-+ classad::ClassAdUnParser unparser;
-+ unparser.SetOldClassAd( true );
-+ for (classad::ClassAd::const_iterator it = ad.begin(); it != ad.end(); it++)
-+ {
-+ std::string rhs;
-+ unparser.Unparse(rhs, it->second);
-+ if (-1 == SetAttribute(cluster, procid, it->first.c_str(), rhs.c_str(), SetAttribute_NoAck))
-+ {
-+ PyErr_SetString(PyExc_ValueError, it->first.c_str());
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+
-+ return cluster;
-+ }
-+
-+ void edit(object job_spec, std::string attr, object val)
-+ {
-+ std::vector<int> clusters;
-+ std::vector<int> procs;
-+ std::string constraint;
-+ bool use_ids = false;
-+ extract<std::string> constraint_extract(job_spec);
-+ if (constraint_extract.check())
-+ {
-+ constraint = constraint_extract();
-+ }
-+ else
-+ {
-+ int id_len = py_len(job_spec);
-+ clusters.reserve(id_len);
-+ procs.reserve(id_len);
-+ for (int i=0; i<id_len; i++)
-+ {
-+ object id_list = job_spec[i].attr("split")(".");
-+ if (py_len(id_list) != 2)
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Invalid ID");
-+ throw_error_already_set();
-+ }
-+ clusters.push_back(extract<int>(long_(id_list[0])));
-+ procs.push_back(extract<int>(long_(id_list[1])));
-+ }
-+ use_ids = true;
-+ }
-+
-+ std::string val_str;
-+ extract<ExprTreeHolder &> exprtree_extract(val);
-+ if (exprtree_extract.check())
-+ {
-+ classad::ClassAdUnParser unparser;
-+ unparser.Unparse(val_str, exprtree_extract().get());
-+ }
-+ else
-+ {
-+ val_str = extract<std::string>(val);
-+ }
-+
-+ ConnectionSentry sentry(*this);
-+
-+ if (use_ids)
-+ {
-+ for (unsigned idx=0; idx<clusters.size(); idx++)
-+ {
-+ if (-1 == SetAttribute(clusters[idx], procs[idx], attr.c_str(), val_str.c_str()))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to edit job");
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+ else
-+ {
-+ if (-1 == SetAttributeByConstraint(constraint.c_str(), attr.c_str(), val_str.c_str()))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to edit jobs matching constraint");
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+
-+private:
-+ struct ConnectionSentry
-+ {
-+ public:
-+ ConnectionSentry(Schedd &schedd) : m_connected(false)
-+ {
-+ if (ConnectQ(schedd.m_addr.c_str(), 0, false, NULL, NULL, schedd.m_version.c_str()) == 0)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to connect to schedd.");
-+ throw_error_already_set();
-+ }
-+ m_connected = true;
-+ }
-+
-+ void disconnect()
-+ {
-+ if (m_connected && !DisconnectQ(NULL))
-+ {
-+ m_connected = false;
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to commmit and disconnect from queue.");
-+ throw_error_already_set();
-+ }
-+ m_connected = false;
-+ }
-+
-+ ~ConnectionSentry()
-+ {
-+ disconnect();
-+ }
-+ private:
-+ bool m_connected;
-+ };
-+
-+ std::string m_addr, m_name, m_version;
-+};
-+
-+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(query_overloads, query, 0, 2);
-+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(submit_overloads, submit, 1, 2);
-+
-+void export_schedd()
-+{
-+ enum_<JobAction>("JobAction")
-+ .value("Hold", JA_HOLD_JOBS)
-+ .value("Release", JA_RELEASE_JOBS)
-+ .value("Remove", JA_REMOVE_JOBS)
-+ .value("RemoveX", JA_REMOVE_X_JOBS)
-+ .value("Vacate", JA_VACATE_JOBS)
-+ .value("VacateFast", JA_VACATE_FAST_JOBS)
-+ .value("Suspend", JA_SUSPEND_JOBS)
-+ .value("Continue", JA_CONTINUE_JOBS)
-+ ;
-+
-+ class_<Schedd>("Schedd", "A client class for the HTCondor schedd")
-+ .def(init<const ClassAdWrapper &>(":param ad: An ad containing the location of the schedd"))
-+ .def("query", &Schedd::query, query_overloads("Query the HTCondor schedd for jobs.\n"
-+ ":param constraint: An optional constraint for filtering out jobs; defaults to 'true'\n"
-+ ":param attr_list: A list of attributes for the schedd to project along. Defaults to having the schedd return all attributes.\n"
-+ ":return: A list of matching jobs, containing the requested attributes."))
-+ .def("act", &Schedd::actOnJobs2)
-+ .def("act", &Schedd::actOnJobs, "Change status of job(s) in the schedd.\n"
-+ ":param action: Action to perform; must be from enum JobAction.\n"
-+ ":param job_spec: Job specification; can either be a list of job IDs or a string specifying a constraint to match jobs.\n"
-+ ":return: Number of jobs changed.")
-+ .def("submit", &Schedd::submit, submit_overloads("Submit one or more jobs to the HTCondor schedd.\n"
-+ ":param ad: ClassAd describing job cluster.\n"
-+ ":param count: Number of jobs to submit to cluster.\n"
-+ ":return: Newly created cluster ID."))
-+ .def("edit", &Schedd::edit, "Edit one or more jobs in the queue.\n"
-+ ":param job_spec: Either a list of jobs (CLUSTER.PROC) or a string containing a constraint to match jobs against.\n"
-+ ":param attr: Attribute name to edit.\n"
-+ ":param value: The new value of the job attribute; should be a string (which will be converted to a ClassAds expression) or a ClassAds expression.");
-+ ;
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/secman.cpp b/src/condor_contrib/python-bindings/secman.cpp
-new file mode 100644
-index 0000000..343fba8
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/secman.cpp
-@@ -0,0 +1,35 @@
-+
-+#include "condor_common.h"
-+
-+#include <boost/python.hpp>
-+
-+// Note - condor_secman.h can't be included directly. The following headers must
-+// be loaded first. Sigh.
-+#include "condor_ipverify.h"
-+#include "sock.h"
-+
-+#include "condor_secman.h"
-+
-+using namespace boost::python;
-+
-+struct SecManWrapper
-+{
-+public:
-+ SecManWrapper() : m_secman() {}
-+
-+ void
-+ invalidateAllCache()
-+ {
-+ m_secman.invalidateAllCache();
-+ }
-+
-+private:
-+ SecMan m_secman;
-+};
-+
-+void
-+export_secman()
-+{
-+ class_<SecManWrapper>("SecMan", "Access to the internal security state information.")
-+ .def("invalidateAllSessions", &SecManWrapper::invalidateAllCache, "Invalidate all security sessions.");
-+}
-diff --git a/src/condor_contrib/python-bindings/tests/classad_tests.py b/src/condor_contrib/python-bindings/tests/classad_tests.py
-new file mode 100644
-index 0000000..7641190
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/tests/classad_tests.py
-@@ -0,0 +1,79 @@
-+#!/usr/bin/python
-+
-+import re
-+import classad
-+import unittest
-+
-+class TestClassad(unittest.TestCase):
-+
-+ def test_load_classad_from_file(self):
-+ ad = classad.parse(open("tests/test.ad"))
-+ self.assertEqual(ad["foo"], "bar")
-+ self.assertEqual(ad["baz"], classad.Value.Undefined)
-+ self.assertRaises(KeyError, ad.__getitem__, "bar")
-+
-+ def test_old_classad(self):
-+ ad = classad.parseOld(open("tests/test.old.ad"))
-+ contents = open("tests/test.old.ad").read()
-+ self.assertEqual(ad.printOld(), contents)
-+
-+ def test_exprtree(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree("2+2")
-+ expr = ad["foo"]
-+ self.assertEqual(expr.__repr__(), "2 + 2")
-+ self.assertEqual(expr.eval(), 4)
-+
-+ def test_exprtree_func(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree('regexps("foo (bar)", "foo bar", "\\\\1")')
-+ self.assertEqual(ad.eval("foo"), "bar")
-+
-+ def test_ad_assignment(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = 2.1
-+ self.assertEqual(ad["foo"], 2.1)
-+ ad["foo"] = 2
-+ self.assertEqual(ad["foo"], 2)
-+ ad["foo"] = "bar"
-+ self.assertEqual(ad["foo"], "bar")
-+ self.assertRaises(TypeError, ad.__setitem__, {})
-+
-+ def test_ad_refs(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree("bar + baz")
-+ ad["bar"] = 2.1
-+ ad["baz"] = 4
-+ self.assertEqual(ad["foo"].__repr__(), "bar + baz")
-+ self.assertEqual(ad.eval("foo"), 6.1)
-+
-+ def test_ad_special_values(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree('regexp(12, 34)')
-+ ad["bar"] = classad.Value.Undefined
-+ self.assertEqual(ad["foo"].eval(), classad.Value.Error)
-+ self.assertNotEqual(ad["foo"].eval(), ad["bar"])
-+ self.assertEqual(classad.Value.Undefined, ad["bar"])
-+
-+ def test_ad_iterator(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = 1
-+ ad["bar"] = 2
-+ self.assertEqual(len(ad), 2)
-+ self.assertEqual(len(list(ad)), 2)
-+ self.assertEqual(list(ad)[1], "foo")
-+ self.assertEqual(list(ad)[0], "bar")
-+ self.assertEqual(list(ad.items())[1][1], 1)
-+ self.assertEqual(list(ad.items())[0][1], 2)
-+ self.assertEqual(list(ad.values())[1], 1)
-+ self.assertEqual(list(ad.values())[0], 2)
-+
-+ def test_ad_lookup(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.Value.Error
-+ self.assertTrue(isinstance(ad.lookup("foo"), classad.ExprTree))
-+ self.assertEquals(ad.lookup("foo").eval(), classad.Value.Error)
-+
-+if __name__ == '__main__':
-+ unittest.main()
-+
-diff --git a/src/condor_contrib/python-bindings/tests/condor_tests.py b/src/condor_contrib/python-bindings/tests/condor_tests.py
-new file mode 100644
-index 0000000..2293fc2
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/tests/condor_tests.py
-@@ -0,0 +1,173 @@
-+#!/usr/bin/python
-+
-+import os
-+import re
-+import time
-+import condor
-+import errno
-+import signal
-+import classad
-+import unittest
-+
-+class TestConfig(unittest.TestCase):
-+
-+ def setUp(self):
-+ os.environ["_condor_FOO"] = "BAR"
-+ condor.reload_config()
-+
-+ def test_config(self):
-+ self.assertEquals(condor.param["FOO"], "BAR")
-+
-+ def test_reconfig(self):
-+ condor.param["FOO"] = "BAZ"
-+ self.assertEquals(condor.param["FOO"], "BAZ")
-+ os.environ["_condor_FOO"] = "1"
-+ condor.reload_config()
-+ self.assertEquals(condor.param["FOO"], "1")
-+
-+class TestVersion(unittest.TestCase):
-+
-+ def setUp(self):
-+ fd = os.popen("condor_version")
-+ self.lines = []
-+ for line in fd.readlines():
-+ self.lines.append(line.strip())
-+ if fd.close():
-+ raise RuntimeError("Unable to invoke condor_version")
-+
-+ def test_version(self):
-+ self.assertEquals(condor.version(), self.lines[0])
-+
-+ def test_platform(self):
-+ self.assertEquals(condor.platform(), self.lines[1])
-+
-+def makedirs_ignore_exist(directory):
-+ try:
-+ os.makedirs(directory)
-+ except OSError, oe:
-+ if oe.errno != errno.EEXIST:
-+ raise
-+
-+def remove_ignore_missing(file):
-+ try:
-+ os.unlink(file)
-+ except OSError, oe:
-+ if oe.errno != errno.ENOENT:
-+ raise
-+
-+class TestWithDaemons(unittest.TestCase):
-+
-+ def setUp(self):
-+ self.pid = -1
-+ testdir = os.path.join(os.getcwd(), "tests_tmp")
-+ makedirs_ignore_exist(testdir)
-+ os.environ["_condor_LOCAL_DIR"] = testdir
-+ os.environ["_condor_LOG"] = '$(LOCAL_DIR)/log'
-+ os.environ["_condor_LOCK"] = '$(LOCAL_DIR)/lock'
-+ os.environ["_condor_RUN"] = '$(LOCAL_DIR)/run'
-+ os.environ["_condor_COLLECTOR_NAME"] = "python_classad_tests"
-+ os.environ["_condor_SCHEDD_NAME"] = "python_classad_tests"
-+ condor.reload_config()
-+ condor.SecMan().invalidateAllSessions()
-+
-+ def launch_daemons(self, daemons=["MASTER", "COLLECTOR"]):
-+ makedirs_ignore_exist(condor.param["LOG"])
-+ makedirs_ignore_exist(condor.param["LOCK"])
-+ makedirs_ignore_exist(condor.param["EXECUTE"])
-+ makedirs_ignore_exist(condor.param["SPOOL"])
-+ makedirs_ignore_exist(condor.param["RUN"])
-+ remove_ignore_missing(condor.param["MASTER_ADDRESS_FILE"])
-+ remove_ignore_missing(condor.param["COLLECTOR_ADDRESS_FILE"])
-+ remove_ignore_missing(condor.param["SCHEDD_ADDRESS_FILE"])
-+ if "COLLECTOR" in daemons:
-+ os.environ["_condor_PORT"] = "9622"
-+ os.environ["_condor_COLLECTOR_ARGS"] = "-port $(PORT)"
-+ os.environ["_condor_COLLECTOR_HOST"] = "$(CONDOR_HOST):$(PORT)"
-+ if 'MASTER' not in daemons:
-+ daemons.append('MASTER')
-+ os.environ["_condor_DAEMON_LIST"] = ", ".join(daemons)
-+ condor.reload_config()
-+ self.pid = os.fork()
-+ if not self.pid:
-+ try:
-+ try:
-+ os.execvp("condor_master", ["condor_master", "-f"])
-+ except Exception, e:
-+ print str(e)
-+ finally:
-+ os._exit(1)
-+ for daemon in daemons:
-+ self.waitLocalDaemon(daemon)
-+
-+ def tearDown(self):
-+ if self.pid > 1:
-+ os.kill(self.pid, signal.SIGQUIT)
-+ pid, exit_status = os.waitpid(self.pid, 0)
-+ self.assertTrue(os.WIFEXITED(exit_status))
-+ code = os.WEXITSTATUS(exit_status)
-+ self.assertEquals(code, 0)
-+
-+ def waitLocalDaemon(self, daemon, timeout=5):
-+ address_file = condor.param[daemon + "_ADDRESS_FILE"]
-+ for i in range(timeout):
-+ if os.path.exists(address_file):
-+ return
-+ time.sleep(1)
-+ if not os.path.exists(address_file):
-+ raise RuntimeError("Waiting for daemon %s timed out." % daemon)
-+
-+ def waitRemoteDaemon(self, dtype, dname, pool=None, timeout=5):
-+ if pool:
-+ coll = condor.Collector(pool)
-+ else:
-+ coll = condor.Collector()
-+ for i in range(timeout):
-+ try:
-+ return coll.locate(dtype, dname)
-+ except Exception:
-+ pass
-+ time.sleep(1)
-+ return coll.locate(dtype, dname)
-+
-+ def testDaemon(self):
-+ self.launch_daemons(["COLLECTOR"])
-+
-+ def testLocate(self):
-+ self.launch_daemons(["COLLECTOR"])
-+ coll = condor.Collector()
-+ coll_ad = coll.locate(condor.DaemonTypes.Collector)
-+ self.assertTrue("MyAddress" in coll_ad)
-+ self.assertEquals(coll_ad["Name"].split(":")[-1], os.environ["_condor_PORT"])
-+
-+ def testRemoteLocate(self):
-+ self.launch_daemons(["COLLECTOR"])
-+ coll = condor.Collector()
-+ coll_ad = coll.locate(condor.DaemonTypes.Collector)
-+ remote_ad = self.waitRemoteDaemon(condor.DaemonTypes.Collector, "%s@%s" % (condor.param["COLLECTOR_NAME"], condor.param["CONDOR_HOST"]))
-+ self.assertEquals(remote_ad["MyAddress"], coll_ad["MyAddress"])
-+
-+ def testScheddLocate(self):
-+ self.launch_daemons(["SCHEDD", "COLLECTOR"])
-+ coll = condor.Collector()
-+ name = "%s@%s" % (condor.param["SCHEDD_NAME"], condor.param["CONDOR_HOST"])
-+ schedd_ad = self.waitRemoteDaemon(condor.DaemonTypes.Schedd, name, timeout=10)
-+ self.assertEquals(schedd_ad["Name"], name)
-+
-+ def testCollectorAdvertise(self):
-+ self.launch_daemons(["COLLECTOR"])
-+ print condor.param["COLLECTOR_HOST"]
-+ coll = condor.Collector()
-+ now = time.time()
-+ ad = classad.ClassAd('[MyType="GenericAd"; Name="Foo"; Foo=1; Bar=%f; Baz="foo"]' % now)
-+ coll.advertise([ad])
-+ for i in range(5):
-+ ads = coll.query(condor.AdTypes.Any, 'Name =?= "Foo"', ["Bar"])
-+ if ads: break
-+ time.sleep
-+ self.assertEquals(len(ads), 1)
-+ self.assertEquals(ads[0]["Bar"], now)
-+ self.assertTrue("Foo" not in ads[0])
-+
-+if __name__ == '__main__':
-+ unittest.main()
-+
-diff --git a/src/condor_contrib/python-bindings/tests/test.ad b/src/condor_contrib/python-bindings/tests/test.ad
-new file mode 100644
-index 0000000..06eeeb5
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/tests/test.ad
-@@ -0,0 +1,4 @@
-+[
-+foo = "bar";
-+baz = undefined;
-+]
diff --git a/python-boost.patch b/python-boost.patch
deleted file mode 100644
index 20e79ed..0000000
--- a/python-boost.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-diff --git a/src/python-bindings/CMakeLists.txt b/src/python-bindings/CMakeLists.txt
-index 781580d8f9..93d297ee92 100644
---- a/src/python-bindings/CMakeLists.txt
-+++ b/src/python-bindings/CMakeLists.txt
-@@ -224,7 +224,7 @@ else()
- if (${SYSTEM_NAME} MATCHES "Debian" OR ${SYSTEM_NAME} MATCHES "Ubuntu")
- set ( PYTHON_BOOST_LIB "boost_python-py${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}" )
- endif()
-- if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10")
-+ if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "fc3[0-9]" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10")
- set ( PYTHON_BOOST_LIB "boost_python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}" )
- endif()
- if (${SYSTEM_NAME} MATCHES "rhel8" OR ${SYSTEM_NAME} MATCHES "centos8" )
-@@ -309,7 +309,7 @@ else()
- if (${SYSTEM_NAME} MATCHES "Debian" OR ${SYSTEM_NAME} MATCHES "Ubuntu")
- set ( PYTHON3_BOOST_LIB "boost_python-py${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR}" )
- endif()
-- if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10" OR ${SYSTEM_NAME} MATCHES "Ubuntu.*20")
-+ if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "fc3[0-9]" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10" OR ${SYSTEM_NAME} MATCHES "Ubuntu.*20")
- set ( PYTHON3_BOOST_LIB "boost_python${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR}" )
- endif()
- if (${SYSTEM_NAME} MATCHES "rhel8" OR ${SYSTEM_NAME} MATCHES "centos8" )
-
diff --git a/python-executable.patch b/python-executable.patch
deleted file mode 100644
index 2533a50..0000000
--- a/python-executable.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/build/cmake/CondorConfigure.cmake b/build/cmake/CondorConfigure.cmake
-index 83e68eb..2ee07bc 100644
---- a/build/cmake/CondorConfigure.cmake
-+++ b/build/cmake/CondorConfigure.cmake
-@@ -146,7 +146,7 @@ if(NOT WINDOWS)
- message(STATUS "PYTHON_INCLUDE_PATH = ${PYTHON_INCLUDE_PATH}")
- message(STATUS "PYTHON_VERSION_STRING = ${PYTHON_VERSION_STRING}")
- endif()
-- find_program(PYTHON3_EXECUTABLE python3)
-+ find_program(PYTHON3_EXECUTABLE python)
- if (PYTHON3_EXECUTABLE)
- set(PYTHON3INTERP_FOUND TRUE)
- set(PYTHON_QUERY_PART_01 "from distutils import sysconfig;")
diff --git a/python-scripts.patch b/python-scripts.patch
deleted file mode 100644
index 8c7773f..0000000
--- a/python-scripts.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-Patch scripts to use the python3.
-
-diff --git a/src/azure_gahp/AzureGAHPServer.py b/src/azure_gahp/AzureGAHPServer.py
-index ec89591c38..274a8d3929 100644
---- a/src/azure_gahp/AzureGAHPServer.py
-+++ b/src/azure_gahp/AzureGAHPServer.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python -u
-+#!/usr/bin/env python3 -u
-
- from __future__ import print_function
-
-diff --git a/src/condor_gridmanager/slurm_status.py b/src/condor_gridmanager/slurm_status.py
-index 61e68682ff..9ca5bd3cee 100755
---- a/src/condor_gridmanager/slurm_status.py
-+++ b/src/condor_gridmanager/slurm_status.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/python
-+#!/usr/bin/python3
-
- # File: slurm_status.py
- #
-diff --git a/src/condor_job_router/condor_router_history b/src/condor_job_router/condor_router_history
-index 8120fde566..452f0b6aca 100755
---- a/src/condor_job_router/condor_router_history
-+++ b/src/condor_job_router/condor_router_history
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- ##**************************************************************
- ##
- ## Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
-diff --git a/src/condor_scripts/condor_top b/src/condor_scripts/condor_top
-index c31f14d6a7..bd647c2d00 100755
---- a/src/condor_scripts/condor_top
-+++ b/src/condor_scripts/condor_top
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
-
- import htcondor
- import classad
diff --git a/python3.patch b/python3.patch
deleted file mode 100644
index 9fadbf2..0000000
--- a/python3.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/src/python-bindings/CMakeLists.txt b/src/python-bindings/CMakeLists.txt
-index 5c2b104363..b610a3ac4e 100644
---- a/src/python-bindings/CMakeLists.txt
-+++ b/src/python-bindings/CMakeLists.txt
-@@ -204,7 +204,7 @@ if(WINDOWS)
- endif(NOT (MSVC_VERSION LESS 1700))
- endif()
- else()
-- if ( WITH_PYTHON_BINDINGS AND PYTHONLIBS_FOUND AND Boost_PYTHON_LIBRARY AND NOT SOLARIS )
-+ if ( WITH_PYTHON_BINDINGS AND PYTHON3LIBS_FOUND )
- configure_file (
- "${PROJECT_SOURCE_DIR}/src/python-bindings/test_driver.in"
- "${CMAKE_CURRENT_BINARY_DIR}/test_driver"
diff --git a/sources b/sources
index a5e7b88..54537c9 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (htcondor-8.8.15.tar.gz) = 47aa05c48138bebd911c10da834736c0cae5540db3a0a4f48960fffc31c5d09e2ab99560bb3b775b986a8d56496e9d8b59ab6a72ee957b833ffde6607808c61a
+SHA512 (htcondor-23.0.0.tar.gz) = 767b1769e81f2a9aced274877330999b25c182d7cfe8f27b6d841d501ae50a9c388083e25f710527db62e402b63d82f2525ed993fe4a78d33cc9e6226d6ad233
7 months, 1 week
Architecture specific change in rpms/condor.git
by githook-noreply@fedoraproject.org
The package rpms/condor.git has added or updated architecture specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/condor.git/commit/?id=98d727a48da....
Change:
+%ifarch s390x
Thanks.
Full change:
============
commit 98d727a48da241bc1a2798d5e050cd9ee8c586dd
Author: Tim Theisen <ttheisen(a)fedoraproject.org>
Date: Sun Oct 1 00:01:12 2023 -0500
Get s390x to build
diff --git a/condor.spec b/condor.spec
index 1928f1e..62dbe95 100644
--- a/condor.spec
+++ b/condor.spec
@@ -285,6 +285,11 @@ function populate {
rm -rf %{buildroot}
%cmake_install
+# TODO: Fix up cmake and remove this hack
+%ifarch s390x
+mv %{buildroot}/usr/lib %{buildroot}/usr/%{_lib}
+%endif
+
# Drop in a symbolic link for backward compatibility
ln -s ../..%{_libdir}/condor/condor_ssh_to_job_sshd_config_template %{buildroot}/%_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
7 months, 1 week
[Report] Packages Restricting Arches
by root
List of packages currently excluding arches (1143)
===========================================
- 0ad
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 ppc64le
- 90-Second-Portraits
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- BareBonesBrowserLaunch
ExclusiveArch: %{java_arches} noarch
- CFR
ExclusiveArch: %{java_arches} noarch
- CardManager
ExclusiveArch: %{java_arches} noarch
- GAPDoc
ExclusiveArch: %{gap_arches} noarch
- GoldenCheetah
ExclusiveArch: %{qt5_qtwebengine_arches}
- GtkAda
ExclusiveArch: %{GPRbuild_arches}
- GtkAda3
ExclusiveArch: %{GPRbuild_arches}
- IPAddress
ExclusiveArch: %{java_arches} noarch
- Mars
ExclusiveArch: %{java_arches} noarch
- OpenColorIO
ExclusiveArch: x86_64 ppc64le
- OpenImageIO
ExclusiveArch: x86_64 ppc64le
- OpenMolcas
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- OpenStego
ExclusiveArch: %{java_arches} noarch
- PragmARC
ExclusiveArch: %{GPRbuild_arches}
- R-V8
ExclusiveArch: %{nodejs_arches}
- R-rJava
ExclusiveArch: %{java_arches}
- RdRand
ExclusiveArch: %{ix86} x86_64
- RediSearch
ExclusiveArch: x86_64
- SLOF
ExclusiveArch: ppc64le
- YafaRay
ExclusiveArch: %{ix86} x86_64
- aardvark-dns
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- accel-config
ExclusiveArch: %{ix86} x86_64
- acpid
ExclusiveArch: ia64 x86_64 %{ix86} %{arm} aarch64
- ahven
ExclusiveArch: %{GPRbuild_arches}
- algobox
ExclusiveArch: %{qt5_qtwebengine_arches}
- american-fuzzy-lop
ExclusiveArch: %{ix86} x86_64 s390x
- anet
ExclusiveArch: %{GPRbuild_arches}
- ant
ExclusiveArch: %{java_arches} noarch
- ant-antunit
ExclusiveArch: %{java_arches} noarch
- ant-contrib
ExclusiveArch: %{java_arches} noarch
- antlr
ExclusiveArch: %{java_arches}
- antlr-maven-plugin
ExclusiveArch: %{java_arches} noarch
- antlr3
ExclusiveArch: %{java_arches}
- antlr4-project
ExclusiveArch: %{java_arches}
- antlrworks
ExclusiveArch: %{java_arches} noarch
- aopalliance
ExclusiveArch: %{java_arches} noarch
- apache-commons-beanutils
ExclusiveArch: %{java_arches} noarch
- apache-commons-cli
ExclusiveArch: %{java_arches} noarch
- apache-commons-codec
ExclusiveArch: %{java_arches} noarch
- apache-commons-collections
ExclusiveArch: %{java_arches} noarch
- apache-commons-collections4
ExclusiveArch: %{java_arches} noarch
- apache-commons-compress
ExclusiveArch: %{java_arches} noarch
- apache-commons-configuration
ExclusiveArch: %{java_arches} noarch
- apache-commons-digester
ExclusiveArch: %{java_arches} noarch
- apache-commons-exec
ExclusiveArch: %{java_arches} noarch
- apache-commons-io
ExclusiveArch: %{java_arches} noarch
- apache-commons-jexl
ExclusiveArch: %{java_arches} noarch
- apache-commons-jxpath
ExclusiveArch: %{java_arches} noarch
- apache-commons-lang3
ExclusiveArch: %{java_arches} noarch
- apache-commons-logging
ExclusiveArch: %{java_arches} noarch
- apache-commons-math
ExclusiveArch: %{java_arches} noarch
- apache-commons-modeler
ExclusiveArch: %{java_arches} noarch
- apache-commons-net
ExclusiveArch: %{java_arches} noarch
- apache-commons-parent
ExclusiveArch: %{java_arches} noarch
- apache-commons-pool
ExclusiveArch: %{java_arches} noarch
- apache-commons-vfs
ExclusiveArch: %{java_arches} noarch
- apache-ivy
ExclusiveArch: %{java_arches} noarch
- apache-parent
ExclusiveArch: %{java_arches} noarch
- apache-resource-bundles
ExclusiveArch: %{java_arches} noarch
- apache-sshd
ExclusiveArch: %{java_arches} noarch
- apiguardian
ExclusiveArch: %{java_arches} noarch
- apmd
ExclusiveArch: %{ix86}
- appstream-generator
ExclusiveArch: %{ldc_arches}
- aqute-bnd
ExclusiveArch: %{java_arches} noarch
- args4j
ExclusiveArch: %{java_arches} noarch
- arianna
ExclusiveArch: %{qt5_qtwebengine_arches}
- arm-trusted-firmware
ExclusiveArch: aarch64
- assertj-core
ExclusiveArch: %{java_arches} noarch
- atinject
ExclusiveArch: %{java_arches} noarch
- aunit
ExclusiveArch: %GPRbuild_arches
- auto
ExclusiveArch: %{java_arches} noarch
- autolink-java
ExclusiveArch: noarch %{java_arches}
- avgtime
ExclusiveArch: %{ldc_arches}
- aws
ExclusiveArch: %GPRbuild_arches
- azure-cli
ExclusiveArch: %{java_arches} noarch
- batik
ExclusiveArch: %{java_arches} noarch
- bcal
ExclusiveArch: x86_64 aarch64 ia64 ppc64 ppc64le s390x
- bcc
ExclusiveArch: x86_64 %{power64} aarch64 s390x armv7hl
- bcel
ExclusiveArch: %{java_arches} noarch
- bcm283x-firmware
ExclusiveArch: aarch64
- beansbinding
ExclusiveArch: %{java_arches} noarch
- belle-sip
ExclusiveArch: %{java_arches}
- berusky2
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 %{mips}
- beust-jcommander
ExclusiveArch: %{java_arches} noarch
- bibletime
ExclusiveArch: %{java_arches}
- biosdevname
ExclusiveArch: %{ix86} x86_64
- bless
ExclusiveArch: %mono_arches
- bodhi-server
ExclusiveArch: %{golang_arches_future}
- bolzplatz2006
ExclusiveArch: %{java_arches}
- bouncycastle
ExclusiveArch: %{java_arches} noarch
- box64
ExclusiveArch: aarch64 ppc64le x86_64
- bpftrace
ExclusiveArch: x86_64 %{power64} aarch64 s390x
- brazil
ExclusiveArch: %{java_arches} noarch
- bsf
ExclusiveArch: %{java_arches} noarch
- bsh
ExclusiveArch: %{java_arches} noarch
- build-helper-maven-plugin
ExclusiveArch: %{java_arches} noarch
- buildah
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- byte-buddy
ExclusiveArch: %{java_arches} noarch
- byteman
ExclusiveArch: %{java_arches} noarch
- cachelib
ExclusiveArch: x86_64 aarch64 ppc64le
- caddy
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: %{golang_arches}
- caffe
ExclusiveArch: x86_64 aarch64 ppc64le
- calamares
ExclusiveArch: %{ix86} x86_64 aarch64
- calibre
ExclusiveArch: aarch64 x86_64
- cambozola
ExclusiveArch: %{java_arches} noarch
- canl-java
ExclusiveArch: %{java_arches} noarch
- catatonit
ExclusiveArch: aarch64 ppc64le s390x x86_64
ExclusiveArch: %{golang_arches_future}
- ccdciel
ExclusiveArch: %{fpc_arches}
- cdcollect
ExclusiveArch: %{mono_arches}
- cdi-api
ExclusiveArch: %{java_arches} noarch
- ceph
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- cglib
ExclusiveArch: %{java_arches} noarch
- chromium
ExclusiveArch: x86_64
ExclusiveArch: x86_64 aarch64
ExclusiveArch: x86_64 aarch64
- cjdns
ExclusiveArch: %{nodejs_arches}
- classloader-leak-test-framework
ExclusiveArch: %{java_arches} noarch
- classpathless-compiler
ExclusiveArch: %{java_arches} noarch
- clevis-pin-tpm2
ExclusiveArch: %{rust_arches}
- clojure
ExclusiveArch: %{java_arches} noarch
- clojure-core-specs-alpha
ExclusiveArch: %{java_arches} noarch
- clojure-maven-plugin
ExclusiveArch: %{java_arches} noarch
- clojure-spec-alpha
ExclusiveArch: %{java_arches} noarch
- cmospwd
ExclusiveArch: %{ix86} x86_64
- cmrt
ExclusiveArch: %{ix86} x86_64 ia64
- codehaus-parent
ExclusiveArch: %{java_arches} noarch
- colorful
ExclusiveArch: %{fpc_arches}
- colossus
ExclusiveArch: %{java_arches} noarch
- conmon
ExclusiveArch: %{golang_arches_future}
- console-image-viewer
ExclusiveArch: %{java_arches} noarch
- containernetworking-plugins
ExclusiveArch: %{golang_arches}
- coq
ExclusiveArch: %{ocaml_native_compiler}
- cortado
ExclusiveArch: %{java_arches} noarch
- cpu-x
ExclusiveArch: i686 x86_64
- cpufetch
ExclusiveArch: %{arm} aarch64 x86_64 ppc ppc64 ppc64le
- cpuid
ExclusiveArch: %{ix86} x86_64
- cpuinfo
ExclusiveArch: x86_64 aarch64
- cqrlog
ExclusiveArch: %{fpc_arches}
- crash
ExclusiveArch: %{ix86} ia64 x86_64 ppc ppc64 s390 s390x %{arm} aarch64 ppc64le
- crash-gcore-command
ExclusiveArch: aarch64 ppc64le x86_64
- crash-trace-command
ExclusiveArch: aarch64 ppc64le s390x x86_64
- credentials-fetcher
ExclusiveArch: x86_64 aarch64 s390x
- cri-o
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- cri-tools
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- criu
ExclusiveArch: x86_64 %{arm} ppc64le aarch64 s390x
- crun
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le riscv64 s390x x86_64
- cryptlib
ExclusiveArch: x86_64 aarch64 ppc64le
- crypto-policies
ExclusiveArch: %{java_arches} noarch
- cryptobone
ExclusiveArch: x86_64 ppc64le aarch64
- csslint
ExclusiveArch: %{nodejs_arches} noarch
- daq
ExclusiveArch: x86_64 aarch64
- dbus-sharp
ExclusiveArch: %mono_arches
- dbus-sharp-glib
ExclusiveArch: %mono_arches
- decentxml
ExclusiveArch: %{java_arches} noarch
- deepin-daemon
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- deepin-desktop-schemas
ExclusiveArch: %{go_arches}
- directory-maven-plugin
ExclusiveArch: %{java_arches} noarch
- dirgra
ExclusiveArch: %{java_arches} noarch
- disruptor
ExclusiveArch: %{java_arches} noarch
- ditaa
ExclusiveArch: %{java_arches} noarch
- dlm
ExclusiveArch: i686 x86_64
- dmidecode
ExclusiveArch: %{ix86} x86_64 ia64 aarch64
- dmtcp
ExclusiveArch: x86_64
- docker-distribution
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- dogtag-pki
ExclusiveArch: %{java_arches}
- dolphin-emu
ExclusiveArch: x86_64 aarch64
- dom4j
ExclusiveArch: %{java_arches} noarch
- dotnet6.0
ExclusiveArch: aarch64 x86_64 s390x
ExclusiveArch: x86_64
- dotnet7.0
ExclusiveArch: aarch64 ppc64le s390x x86_64
ExclusiveArch: x86_64
- doublecmd
ExclusiveArch: %{ix86} x86_64
- dpdk
ExclusiveArch: x86_64 i686 aarch64 ppc64le
- dub
ExclusiveArch: %{ldc_arches}
- dxvk-native
ExclusiveArch: %{ix86} x86_64
- dyninst
ExclusiveArch: %{ix86} x86_64 ppc64le aarch64
- e3
ExclusiveArch: %{ix86} x86_64
- easymock
ExclusiveArch: %{java_arches} noarch
- ecj
ExclusiveArch: %{java_arches} noarch
- eclipse-swt
ExclusiveArch: %{java_arches}
- ed25519-java
ExclusiveArch: %{java_arches} noarch
- edk2
ExclusiveArch: x86_64 aarch64 riscv64
- efibootmgr
ExclusiveArch: %{efi}
- efifs
ExclusiveArch: %{efi}
- efitools
ExclusiveArch: %{efi}
- efivar
ExclusiveArch: %{efi}
- elk
ExclusiveArch: x86_64 %{ix86}
ExclusiveArch: x86_64 %{ix86} aarch64 %{arm} %{power64}
- emacs-slime
ExclusiveArch: %{arm} %{ix86} x86_64 ppc sparcv9 aarch64
- embree
ExclusiveArch: aarch64 x86_64
- embree3
ExclusiveArch: aarch64 x86_64
- enjarify
ExclusiveArch: %{java_arches} noarch
- enki
ExclusiveArch: %{qt5_qtwebengine_arches} noarch
- envytools
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- erlang-corba
ExclusiveArch: %{java_arches}
- esmi_ib_library
ExclusiveArch: x86_64
- eth-tools
ExclusiveArch: x86_64
- exec-maven-plugin
ExclusiveArch: %{java_arches} noarch
- external-editor-revived
ExclusiveArch: %{rust_arches}
- extra-enforcer-rules
ExclusiveArch: %{java_arches} noarch
- fasterxml-oss-parent
ExclusiveArch: %{java_arches} noarch
- fb303
ExclusiveArch: x86_64 aarch64 ppc64le
- fbthrift
ExclusiveArch: x86_64 aarch64 ppc64le
- fcitx-libpinyin
ExclusiveArch: %{qt5_qtwebengine_arches}
- fedora-dockerfiles
ExclusiveArch: %{go_arches}
- felix-parent
ExclusiveArch: %{java_arches} noarch
- felix-utils
ExclusiveArch: %{java_arches} noarch
- fernflower
ExclusiveArch: %{java_arches} noarch
- fes
ExclusiveArch: %{ix86} x86_64
- filedrop
ExclusiveArch: %{java_arches} noarch
- firecracker
ExclusiveArch: aarch64 x86_64
- fishbowl
ExclusiveArch: %{java_arches} noarch
- fizz
ExclusiveArch: x86_64 aarch64 ppc64le
- flexmark-java
ExclusiveArch: noarch %{java_arches}
- flocq
ExclusiveArch: %{ocaml_native_compiler}
- florist
ExclusiveArch: %{GPRbuild_arches}
- fluent-bit
ExclusiveArch: x86_64 aarch64
- flute
ExclusiveArch: %{java_arches} noarch
- folly
ExclusiveArch: x86_64 aarch64 ppc64le
- fop
ExclusiveArch: %{java_arches} noarch
- forge-parent
ExclusiveArch: %{java_arches} noarch
- fpc
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64 ppc64le
- frama-c
ExclusiveArch: %{ocaml_native_compiler}
- freecol
ExclusiveArch: %{java_arches} noarch
- freemarker
ExclusiveArch: %{java_arches} noarch
- freerouting
ExclusiveArch: %{java_arches} noarch
- frescobaldi
ExclusiveArch: %{qt5_qtwebengine_arches}
- frysk
ExclusiveArch: x86_64 ppc64
- fuse-overlayfs
ExclusiveArch: %{arm64} ppc64le s390x x86_64
- fusesource-pom
ExclusiveArch: %{java_arches} noarch
- fwts
ExclusiveArch: x86_64 %{arm} aarch64 s390x riscv64 %{power64}
- fwupd-efi
ExclusiveArch: x86_64 aarch64
- ga
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 ppc64le
- gap
ExclusiveArch: %{gap_arches}
- gap-pkg-ace
ExclusiveArch: %{gap_arches}
- gap-pkg-aclib
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-alnuth
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-atlasrep
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-autodoc
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-automata
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-autpgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-browse
ExclusiveArch: %{gap_arches}
- gap-pkg-caratinterface
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-circle
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-cohomolo
ExclusiveArch: %{gap_arches}
- gap-pkg-congruence
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-corelg
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crime
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crisp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crypting
ExclusiveArch: %{gap_arches}
- gap-pkg-cryst
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-crystcat
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-ctbllib
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-curlinterface
ExclusiveArch: %{gap_arches}
- gap-pkg-cvec
ExclusiveArch: %{gap_arches}
- gap-pkg-datastructures
ExclusiveArch: %{gap_arches}
- gap-pkg-design
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-digraphs
ExclusiveArch: %{gap_arches}
- gap-pkg-edim
ExclusiveArch: %{gap_arches}
- gap-pkg-factint
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-ferret
ExclusiveArch: %{gap_arches}
- gap-pkg-fga
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-fining
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-float
ExclusiveArch: %{gap_arches}
- gap-pkg-format
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-forms
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-fr
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-francy
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-gbnp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-genss
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-grape
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-groupoids
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-grpconst
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-guava
ExclusiveArch: %{gap_arches}
- gap-pkg-hap
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-hapcryst
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-hecke
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-images
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-io
ExclusiveArch: %{gap_arches}
- gap-pkg-irredsol
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-json
ExclusiveArch: %{gap_arches}
- gap-pkg-jupyterkernel
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-jupyterviz
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-laguna
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-liealgdb
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-liepring
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-liering
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-loops
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-lpres
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-mapclass
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-nautytracesinterface
ExclusiveArch: %{gap_arches}
- gap-pkg-nq
ExclusiveArch: %{gap_arches}
- gap-pkg-openmath
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-orb
ExclusiveArch: %{gap_arches}
- gap-pkg-polenta
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-polycyclic
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-polymaking
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-primgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-profiling
ExclusiveArch: %{gap_arches}
- gap-pkg-qpa
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-quagroup
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-radiroot
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-recog
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-repsn
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-resclasses
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-scscp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-semigroups
ExclusiveArch: %{gap_arches}
- gap-pkg-singular
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-sla
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-smallgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-smallsemi
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-sonata
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-sophus
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-spinsym
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-standardff
ExclusiveArch: %{gap_arches}
- gap-pkg-tomlib
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-toric
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-transgrp
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-utils
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-uuid
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-xmod
ExclusiveArch: %{gap_arches} noarch
- gap-pkg-zeromqinterface
ExclusiveArch: %{gap_arches}
- gappalib-coq
ExclusiveArch: %{ocaml_native_compiler}
- gbrainy
ExclusiveArch: %mono_arches
- gdb
ExclusiveArch: %{ix86} x86_64
- gdb-exploitable
ExclusiveArch: x86_64 i386
ExclusiveArch: x86_64 noarch
- gearhead1
ExclusiveArch: %{fpc_arches}
- gearhead2
ExclusiveArch: %{fpc_arches}
- ghdl
ExclusiveArch: %{GNAT_arches}
- ghostwriter
ExclusiveArch: %{qt5_qtwebengine_arches}
- gio-sharp
ExclusiveArch: %mono_arches
- gir-to-d
ExclusiveArch: %{ldc_arches}
- git-octopus
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- gitqlient
ExclusiveArch: %{qt5_qtwebengine_arches}
- giver
ExclusiveArch: %{mono_arches}
- gkeyfile-sharp
ExclusiveArch: %mono_arches
- glibc32
ExclusiveArch: x86_64
- glibd
ExclusiveArch: %{ldc_arches}
- gnatcoll
ExclusiveArch: %{GPRbuild_arches}
- gnatcoll-bindings
ExclusiveArch: %{GPRbuild_arches}
- gnatcoll-db
ExclusiveArch: %{GPRbuild_arches}
- gnome-boxes
ExclusiveArch: x86_64
- gnome-desktop-sharp
ExclusiveArch: %mono_arches
- gnome-do
ExclusiveArch: %mono_arches
- gnome-keyring-sharp
ExclusiveArch: %mono_arches
- gnome-rdp
ExclusiveArch: %{mono_arches}
- gnome-sharp
ExclusiveArch: %mono_arches
- gnome-subtitles
ExclusiveArch: %mono_arches
- gnu-efi
ExclusiveArch: %{efi}
- go-bindata
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- godep
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- golang
ExclusiveArch: %{golang_arches}
- google-gson
ExclusiveArch: %{java_arches} noarch
- google-guice
ExclusiveArch: %{java_arches} noarch
- gotun
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
ExclusiveArch: x86_64
- goverlay
ExclusiveArch: %{fpc_arches}
- gprbuild
ExclusiveArch: %{GPRbuild_arches}
- gprolog
ExclusiveArch: x86_64 %{ix86} ppc alpha aarch64
- grafana
ExclusiveArch: %{grafana_arches}
- grafana-pcp
ExclusiveArch: %{grafanapcp_arches}
- gtk-sharp-beans
ExclusiveArch: %mono_arches
- gtk-sharp2
ExclusiveArch: %mono_arches
- gtk-sharp3
ExclusiveArch: %{mono_arches}
- gtkd
ExclusiveArch: %{ldc_arches}
- guava
ExclusiveArch: %{java_arches} noarch
- gudev-sharp
ExclusiveArch: %mono_arches
- guestfs-tools
ExclusiveArch: %{kernel_arches}
- gvisor-tap-vsock
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- hamcrest
ExclusiveArch: %{java_arches} noarch
- harec
ExclusiveArch: x86_64 aarch64
- hawtjni
ExclusiveArch: %{java_arches} noarch
- hedgewars
ExclusiveArch: %{fpc_arches}
- hibernate-jpa-2.0-api
ExclusiveArch: %{java_arches} noarch
- hid4java
ExclusiveArch: %{java_arches} noarch
- hipcub
ExclusiveArch: x86_64
- hsakmt
ExclusiveArch: x86_64 aarch64 ppc64le
- httpcomponents-client
ExclusiveArch: %{java_arches} noarch
- httpcomponents-core
ExclusiveArch: %{java_arches} noarch
- httpcomponents-project
ExclusiveArch: %{java_arches} noarch
- hyena
ExclusiveArch: %{mono_arches}
- hyperscan
ExclusiveArch: x86_64
- hyperv-daemons
ExclusiveArch: i686 x86_64 aarch64
- icaro
ExclusiveArch: %{ix86} x86_64 noarch
- icedtea-web
ExclusiveArch: %{java_arches}
- icu4j
ExclusiveArch: %{java_arches} noarch
- imagej
ExclusiveArch: %{java_arches} noarch
- imhex
ExclusiveArch: x86_64 %{arm64}
- imvirt
ExclusiveArch: %{ix86} x86_64 ia64
- indistarter
ExclusiveArch: %{fpc_arches}
- infinipath-psm
ExclusiveArch: x86_64
- intel-cm-compiler
ExclusiveArch: i686 x86_64
- intel-cmt-cat
ExclusiveArch: x86_64
- intel-compute-runtime
ExclusiveArch: x86_64
- intel-gmmlib
ExclusiveArch: x86_64 i686
- intel-igc
ExclusiveArch: x86_64
- intel-ipp-crypto-mb
ExclusiveArch: x86_64
- intel-ipsec-mb
ExclusiveArch: x86_64
- intel-mediasdk
ExclusiveArch: x86_64
- intel-undervolt
ExclusiveArch: i386 x86_64
- ioport
ExclusiveArch: %{ix86} x86_64
- ipmctl
ExclusiveArch: x86_64
- ispc
ExclusiveArch: x86_64 aarch64
- iucode-tool
ExclusiveArch: %{ix86} x86_64
- iyfct
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- jFormatString
ExclusiveArch: %{java_arches} noarch
- jackson-annotations
ExclusiveArch: %{java_arches} noarch
- jackson-bom
ExclusiveArch: %{java_arches} noarch
- jackson-core
ExclusiveArch: %{java_arches} noarch
- jackson-databind
ExclusiveArch: %{java_arches} noarch
- jackson-dataformats-binary
ExclusiveArch: %{java_arches} noarch
- jackson-dataformats-text
ExclusiveArch: %{java_arches} noarch
- jackson-jaxrs-providers
ExclusiveArch: %{java_arches} noarch
- jackson-modules-base
ExclusiveArch: %{java_arches} noarch
- jackson-parent
ExclusiveArch: %{java_arches} noarch
- jacoco
ExclusiveArch: %{java_arches} noarch
- jacop
ExclusiveArch: %{java_arches} noarch
- jakarta-activation
ExclusiveArch: %{java_arches} noarch
- jakarta-activation1
ExclusiveArch: %{java_arches} noarch
- jakarta-annotations
ExclusiveArch: %{java_arches} noarch
- jakarta-el
ExclusiveArch: %{java_arches} noarch
- jakarta-interceptors
ExclusiveArch: %{java_arches} noarch
- jakarta-json
ExclusiveArch: %{java_arches} noarch
- jakarta-mail
ExclusiveArch: %{java_arches} noarch
- jakarta-mail1
ExclusiveArch: %{java_arches} noarch
- jakarta-oro
ExclusiveArch: %{java_arches} noarch
- jakarta-saaj
ExclusiveArch: %{java_arches} noarch
- jakarta-server-pages
ExclusiveArch: %{java_arches} noarch
- jakarta-servlet
ExclusiveArch: %{java_arches} noarch
- jakarta-xml-ws
ExclusiveArch: %{java_arches} noarch
- janino
ExclusiveArch: %{java_arches} noarch
- jansi
ExclusiveArch: %{java_arches}
- jansi-native
ExclusiveArch: %{java_arches}
- jansi1
ExclusiveArch: %{java_arches} noarch
- java-1.8.0-openjdk
ExclusiveArch: %{java_arches}
- java-1.8.0-openjdk-aarch32
ExclusiveArch: %{arm}
- java-1.8.0-openjdk-portable
ExclusiveArch: %{java_arches}
- java-11-openjdk
ExclusiveArch: %{java_arches}
- java-11-openjdk-portable
ExclusiveArch: %{java_arches}
- java-17-openjdk
ExclusiveArch: %{java_arches}
- java-17-openjdk-portable
ExclusiveArch: %{java_arches}
- java-diff-utils
ExclusiveArch: %{java_arches} noarch
- java-dirq
ExclusiveArch: %{java_arches} noarch
- java-jd-decompiler
ExclusiveArch: %{java_arches} noarch
- java-latest-openjdk
ExclusiveArch: %{java_arches}
- java-latest-openjdk-portable
ExclusiveArch: %{java_arches}
- java-runtime-decompiler
ExclusiveArch: %{java_arches} noarch
- java-scrypt
ExclusiveArch: %{java_arches} noarch
- java_cup
ExclusiveArch: %{java_arches} noarch
- javacc
ExclusiveArch: %{java_arches} noarch
- javacc-maven-plugin
ExclusiveArch: %{java_arches} noarch
- javaewah
ExclusiveArch: %{java_arches} noarch
- javapackages-bootstrap
ExclusiveArch: %{java_arches}
- javaparser
ExclusiveArch: %{java_arches} noarch
- javapoet
ExclusiveArch: %{java_arches} noarch
- javassist
ExclusiveArch: %{java_arches} noarch
- jaxb
ExclusiveArch: %{java_arches} noarch
- jaxb-api
ExclusiveArch: %{java_arches} noarch
- jaxb-api2
ExclusiveArch: %{java_arches} noarch
- jaxb-dtd-parser
ExclusiveArch: %{java_arches} noarch
- jaxb-fi
ExclusiveArch: %{java_arches} noarch
- jaxb-istack-commons
ExclusiveArch: %{java_arches} noarch
- jaxb-stax-ex
ExclusiveArch: %{java_arches} noarch
- jaxen
ExclusiveArch: %{java_arches} noarch
- jboss-jaxrs-2.0-api
ExclusiveArch: %{java_arches} noarch
- jboss-logging
ExclusiveArch: %{java_arches} noarch
- jboss-logging-tools
ExclusiveArch: %{java_arches} noarch
- jboss-parent
ExclusiveArch: %{java_arches} noarch
- jchardet
ExclusiveArch: %{java_arches} noarch
- jcip-annotations
ExclusiveArch: %{java_arches} noarch
- jctools
ExclusiveArch: %{java_arches} noarch
- jcuber
ExclusiveArch: %{java_arches} noarch
- jdeparser
ExclusiveArch: %{java_arches} noarch
- jdepend
ExclusiveArch: %{java_arches} noarch
- jdependency
ExclusiveArch: %{java_arches} noarch
- jdom
ExclusiveArch: %{java_arches} noarch
- jdom2
ExclusiveArch: %{java_arches} noarch
- jedit
ExclusiveArch: %{java_arches} noarch
- jericho-html
ExclusiveArch: %{java_arches} noarch
- jetbrains-annotations
ExclusiveArch: noarch %{java_arches}
- jetty
ExclusiveArch: %{java_arches} noarch
- jflex
ExclusiveArch: %{java_arches} noarch
- jfreechart
ExclusiveArch: %{java_arches} noarch
- jgit
ExclusiveArch: %{java_arches} noarch
- jglobus
ExclusiveArch: %{java_arches} noarch
- jgoodies-common
ExclusiveArch: %{java_arches} noarch
- jgoodies-forms
ExclusiveArch: %{java_arches} noarch
- jgoodies-looks
ExclusiveArch: %{java_arches} noarch
- jigawatts
ExclusiveArch: x86_64 %{arm} ppc64le aarch64 s390x
- jline
ExclusiveArch: %{java_arches}
- jline2
ExclusiveArch: %{java_arches} noarch
- jmock
ExclusiveArch: %{java_arches} noarch
- jmol
ExclusiveArch: %{java_arches} noarch
- jna
ExclusiveArch: %{java_arches}
- jneuroml-core
ExclusiveArch: %{java_arches} noarch
- jni-inchi
ExclusiveArch: %{java_arches}
- jol
ExclusiveArch: %{java_arches} noarch
- jolokia-jvm-agent
ExclusiveArch: %{java_arches} noarch
- jopt-simple
ExclusiveArch: %{java_arches} noarch
- jorbis
ExclusiveArch: %{java_arches} noarch
- jowl
ExclusiveArch: %{nodejs_arches} noarch
- jpanoramamaker
ExclusiveArch: %{java_arches} noarch
- jsch
ExclusiveArch: %{java_arches} noarch
- jsch-agent-proxy
ExclusiveArch: %{java_arches} noarch
- json_simple
ExclusiveArch: %{java_arches} noarch
- jsoup
ExclusiveArch: %{java_arches} noarch
- jsr-305
ExclusiveArch: %{java_arches} noarch
- jss
ExclusiveArch: %{java_arches}
- jssc
ExclusiveArch: %{java_arches}
- jtidy
ExclusiveArch: %{java_arches} noarch
- julia
ExclusiveArch: x86_64
- junit
ExclusiveArch: %{java_arches} noarch
- junit5
ExclusiveArch: %{java_arches} noarch
- juniversalchardet
ExclusiveArch: %{java_arches} noarch
- jzlib
ExclusiveArch: %{java_arches} noarch
- kaidan
ExclusiveArch: %{qt5_qtwebengine_arches}
- kchmviewer
ExclusiveArch: %{qt5_qtwebengine_arches}
- kernel
ExclusiveArch: noarch x86_64 s390x aarch64 ppc64le
ExclusiveArch: noarch i386 i686 x86_64 s390x aarch64 ppc64le
- keylime-agent-rust
ExclusiveArch: %{rust_arches}
- keyring-ima-signer
ExclusiveArch: %{rust_arches}
- kf5-akonadi-search
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-audiocd-kio
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kblog
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kcalendarcore
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kcalendarutils
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-kitinerary
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-ktnef
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- kf5-libkdcraw
ExclusiveArch: x86_64 ppc64le %{arm}
- kicad
ExclusiveArch: x86_64 aarch64 ppc64le
- kiwix-desktop
ExclusiveArch: %{qt5_qtwebengine_arches}
- knot-resolver
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64
- knotes
ExclusiveArch: x86_64 %{arm}
- kubernetes
ExclusiveArch: x86_64 aarch64 ppc64le s390x %{arm}
- laf-plugin
ExclusiveArch: %{java_arches} noarch
- lazarus
ExclusiveArch: %{fpc_arches}
- lazpaint
ExclusiveArch: %{fpc_arches}
- ldapjdk
ExclusiveArch: %{java_arches} noarch
- ldc
ExclusiveArch: %{ldc_arches} ppc64le
- ldc1.30
ExclusiveArch: %{ldc_arches} ppc64le
- ldc1.32
ExclusiveArch: %{ldc_arches} ppc64le
- libbase
ExclusiveArch: %{java_arches} noarch
- libclc
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64 %{power64} s390x
- libcxl
ExclusiveArch: %{power64}
- libdfp
ExclusiveArch: ppc ppc64 ppc64le s390 s390x x86_64
- libdispatch
ExclusiveArch: x86_64 aarch64 ppc64le
- libfonts
ExclusiveArch: %{java_arches} noarch
- libformula
ExclusiveArch: %{java_arches} noarch
- libguestfs
ExclusiveArch: %{kernel_arches}
- libica
ExclusiveArch: s390 s390x
- libipt
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{ix86} x86_64
- libkgapi
ExclusiveArch: x86_64 ppc64le aarch64 %{arm}
- libkrun
ExclusiveArch: x86_64 aarch64
- libkrunfw
ExclusiveArch: x86_64 aarch64
- liblayout
ExclusiveArch: %{java_arches} noarch
- libloader
ExclusiveArch: %{java_arches} noarch
- libnxz
ExclusiveArch: ppc64le
- libocxl
ExclusiveArch: ppc64le
- libpsm2
ExclusiveArch: x86_64
- libquentier
ExclusiveArch: %{qt5_qtwebengine_arches}
- libreoffice-TexMaths
ExclusiveArch: %{java_arches}
- librepository
ExclusiveArch: %{java_arches} noarch
- libretro-desmume2015
ExclusiveArch: i686 x86_64
- librtas
ExclusiveArch: %{power64}
- libserializer
ExclusiveArch: %{java_arches} noarch
- libservicelog
ExclusiveArch: ppc %{power64}
- libsmbios
ExclusiveArch: x86_64 %{ix86}
- libunicode
ExclusiveArch: x86_64 aarch64
- libunwind
ExclusiveArch: %{arm} aarch64 hppa ia64 mips ppc %{power64} s390x %{ix86} x86_64
- libva-nvidia-driver
ExclusiveArch: %{x86_64} %{ix86} %{arm64} ppc64le
- libvirt-java
ExclusiveArch: %{java_arches} noarch
- libvma
ExclusiveArch: x86_64 ppc64le ppc64 aarch64
- libvmi
ExclusiveArch: x86_64
- libvpd
ExclusiveArch: %{power64}
- libxsmm
ExclusiveArch: x86_64
- libzdnn
ExclusiveArch: s390x
- libzfcphbaapi
ExclusiveArch: s390 s390x
- libzpc
ExclusiveArch: s390x
- llhttp
ExclusiveArch: %{nodejs_arches}
- log4j
ExclusiveArch: %{java_arches} noarch
- log4net
ExclusiveArch: %mono_arches
- lrmi
ExclusiveArch: %{ix86}
- lsvpd
ExclusiveArch: %{power64}
- luajit
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
- lucene
ExclusiveArch: %{java_arches} noarch
- lujavrite
ExclusiveArch: %{java_arches}
- luxcorerender
ExclusiveArch: x86_64
- mactel-boot
ExclusiveArch: x86_64
- magicmirror
ExclusiveArch: %{nodejs_arches} noarch
- magicmirror-module-airnow
ExclusiveArch: %{nodejs_arches} noarch
- magicmirror-module-onthisday
ExclusiveArch: %{nodejs_arches} noarch
- magicmirror-module-singlestock
ExclusiveArch: %{nodejs_arches} noarch
- manifest-tool
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- mariadb-java-client
ExclusiveArch: %{java_arches} noarch
- marked
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- matrix-synapse
ExclusiveArch: %{rust_arches}
- maui-mauikit
ExclusiveArch: %{ix86} s390x aarch64 x86_64
- maven
ExclusiveArch: %{java_arches} noarch
- maven-antrun-plugin
ExclusiveArch: %{java_arches} noarch
- maven-archetype
ExclusiveArch: %{java_arches} noarch
- maven-archiver
ExclusiveArch: %{java_arches} noarch
- maven-artifact-transfer
ExclusiveArch: %{java_arches} noarch
- maven-assembly-plugin
ExclusiveArch: %{java_arches} noarch
- maven-bundle-plugin
ExclusiveArch: %{java_arches} noarch
- maven-clean-plugin
ExclusiveArch: %{java_arches} noarch
- maven-common-artifact-filters
ExclusiveArch: %{java_arches} noarch
- maven-compiler-plugin
ExclusiveArch: %{java_arches} noarch
- maven-dependency-analyzer
ExclusiveArch: %{java_arches} noarch
- maven-dependency-plugin
ExclusiveArch: %{java_arches} noarch
- maven-dependency-tree
ExclusiveArch: %{java_arches} noarch
- maven-doxia
ExclusiveArch: %{java_arches} noarch
- maven-doxia-sitetools
ExclusiveArch: %{java_arches} noarch
- maven-enforcer
ExclusiveArch: %{java_arches} noarch
- maven-file-management
ExclusiveArch: %{java_arches} noarch
- maven-filtering
ExclusiveArch: %{java_arches} noarch
- maven-invoker
ExclusiveArch: %{java_arches} noarch
- maven-invoker-plugin
ExclusiveArch: %{java_arches} noarch
- maven-jar-plugin
ExclusiveArch: %{java_arches} noarch
- maven-mapping
ExclusiveArch: %{java_arches} noarch
- maven-native
ExclusiveArch: %{java_arches} noarch
- maven-parent
ExclusiveArch: %{java_arches} noarch
- maven-patch-plugin
ExclusiveArch: %{java_arches} noarch
- maven-plugin-testing
ExclusiveArch: %{java_arches} noarch
- maven-plugin-tools
ExclusiveArch: %{java_arches} noarch
- maven-remote-resources-plugin
ExclusiveArch: %{java_arches} noarch
- maven-reporting-api
ExclusiveArch: %{java_arches} noarch
- maven-reporting-impl
ExclusiveArch: %{java_arches} noarch
- maven-resolver
ExclusiveArch: %{java_arches} noarch
- maven-resources-plugin
ExclusiveArch: %{java_arches} noarch
- maven-script-interpreter
ExclusiveArch: %{java_arches} noarch
- maven-shade-plugin
ExclusiveArch: %{java_arches} noarch
- maven-shared-incremental
ExclusiveArch: %{java_arches} noarch
- maven-shared-io
ExclusiveArch: %{java_arches} noarch
- maven-shared-utils
ExclusiveArch: %{java_arches} noarch
- maven-source-plugin
ExclusiveArch: %{java_arches} noarch
- maven-surefire
ExclusiveArch: %{java_arches} noarch
- maven-verifier
ExclusiveArch: %{java_arches} noarch
- maven-verifier-plugin
ExclusiveArch: %{java_arches} noarch
- maven-wagon
ExclusiveArch: %{java_arches} noarch
- maxima
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc sparcv9
ExclusiveArch: %{ix86} x86_64 ppc sparcv9
- mbpfan
ExclusiveArch: x86_64
- mcelog
ExclusiveArch: i686 x86_64
- mcrouter
ExclusiveArch: x86_64 aarch64 ppc64le
- mecab-java
ExclusiveArch: %java_arches
- mediaconch
ExclusiveArch: %{qt5_qtwebengine_arches}
- mellowplayer
ExclusiveArch: %{qt5_qtwebengine_arches}
- memkind
ExclusiveArch: x86_64 ppc64 ppc64le s390x aarch64
- memtest86+
ExclusiveArch: x86_64 %{ix86}
- merkuro
ExclusiveArch: %{qt5_qtwebengine_arches}
- microcode_ctl
ExclusiveArch: %{ix86} x86_64
- micropython
ExclusiveArch: %{arm} x86_64 riscv64
- miglayout
ExclusiveArch: %{java_arches} noarch
- mine_detector
ExclusiveArch: %{GPRbuild_arches}
- minetest
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
- mingw-libidn2
ExclusiveArch: %{ix86} x86_64 %{arm}
- mingw-wine-gecko
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- mkbootdisk
ExclusiveArch: %{ix86} sparc sparc64 x86_64
- moby-engine
ExclusiveArch: %{golang_arches}
- mockito
ExclusiveArch: %{java_arches} noarch
- mod_mono
ExclusiveArch: %mono_arches
- modello
ExclusiveArch: %{java_arches} noarch
- moditect
ExclusiveArch: %{java_arches} noarch
- module-build-service
ExclusiveArch: %{ix86} x86_64 noarch
- modulemaker-maven-plugin
ExclusiveArch: %{java_arches} noarch
- mojo-executor
ExclusiveArch: %{java_arches} noarch
- mojo-parent
ExclusiveArch: %{java_arches} noarch
- mokutil
ExclusiveArch: %{ix86} x86_64 aarch64 %{arm}
- mono
ExclusiveArch: %mono_arches
- mono-addins
ExclusiveArch: %mono_arches
- mono-basic
ExclusiveArch: %{mono_arches}
- mono-bouncycastle
ExclusiveArch: %mono_arches
- mono-cecil
ExclusiveArch: %mono_arches
- mono-cecil-flowanalysis
ExclusiveArch: %mono_arches
- mono-reflection
ExclusiveArch: %mono_arches
- mono-tools
ExclusiveArch: %mono_arches
- mono-zeroconf
ExclusiveArch: %mono_arches
- monodevelop
ExclusiveArch: %mono_arches
- monodevelop-debugger-gdb
ExclusiveArch: %{mono_arches}
- mrrescue
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- msr-tools
ExclusiveArch: %{ix86} x86_64
- mvfst
ExclusiveArch: x86_64 aarch64 ppc64le
- mxparser
ExclusiveArch: %{java_arches} noarch
- mysql-connector-java
ExclusiveArch: %{java_arches} noarch
- mysql-connector-net
ExclusiveArch: %{mono_arches}
- naev
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
- naga
ExclusiveArch: %{java_arches} noarch
- nant
ExclusiveArch: %mono_arches
- nativejit
ExclusiveArch: x86_64
- nbc
ExclusiveArch: %{fpc_arches}
- nbdkit
ExclusiveArch: x86_64
- ndesk-dbus
ExclusiveArch: %{mono_arches}
- ndesk-dbus-glib
ExclusiveArch: %{mono_arches}
- nekohtml
ExclusiveArch: %{java_arches} noarch
- netavark
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- newtonsoft-json
ExclusiveArch: %{mono_arches}
- nodejs-acorn-object-spread
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- nodejs-backbone
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-bash-language-server
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-buble
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-colors
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-generic-pool
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-less
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-linefix
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- nodejs-nodemon
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-packaging
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-replace-require-self
ExclusiveArch: %{nodejs_arches} noarch
- nodejs-underscore
ExclusiveArch: %{nodejs_arches} noarch
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
- nodejs18
ExclusiveArch: %{nodejs_arches}
- nodejs20
ExclusiveArch: %{nodejs_arches}
- nom-tam-fits
ExclusiveArch: %{java_arches} noarch
- notify-sharp
ExclusiveArch: %{mono_arches}
- notify-sharp3
ExclusiveArch: %{mono_arches}
- nuget
ExclusiveArch: %{mono_arches}
- numatop
ExclusiveArch: x86_64 ppc64le
- nunit
ExclusiveArch: %{mono_arches}
- nunit2
ExclusiveArch: %{mono_arches}
- nvml
ExclusiveArch: x86_64 ppc64le
- objectweb-asm
ExclusiveArch: %{java_arches} noarch
- objenesis
ExclusiveArch: %{java_arches} noarch
- obs-service-rust2rpm
ExclusiveArch: %{rust_arches} noarch
- oci-seccomp-bpf-hook
ExclusiveArch: x86_64 %{power64} aarch64 s390x armv7hl
ExclusiveArch: %{golang_arches_future}
- oidn
ExclusiveArch: x86_64
- olpc-utils
ExclusiveArch: %{ix86} %{arm}
- oneVPL
ExclusiveArch: x86_64
- oneVPL-intel-gpu
ExclusiveArch: x86_64
- oneapi-level-zero
ExclusiveArch: x86_64
- onednn
ExclusiveArch: x86_64 aarch64 ppc64le s390x
- onedrive
ExclusiveArch: %{ldc_arches}
- ongres-scram
ExclusiveArch: %{java_arches} noarch
- ongres-stringprep
ExclusiveArch: %{java_arches} noarch
- opae
ExclusiveArch: x86_64
- opal-prd
ExclusiveArch: ppc64le
- open-vm-tools
ExclusiveArch: x86_64 aarch64
ExclusiveArch: %{ix86} x86_64 aarch64
ExclusiveArch: x86_64
- openblas
ExclusiveArch: %{openblas_arches}
- openjdk-asmtools
ExclusiveArch: %{java_arches} noarch
- openjdk-asmtools7
ExclusiveArch: %{java_arches} noarch
- openjfx
ExclusiveArch: %{java_arches}
- openjfx8
ExclusiveArch: x86_64
- openlibm
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 %{power64}
- openms
ExclusiveArch: %{qt5_qtwebengine_arches}
- openni
ExclusiveArch: x86_64 %{arm}
- openni-primesense
ExclusiveArch: %{ix86} x86_64 %{arm}
- openoffice.org-diafilter
ExclusiveArch: %{java_arches}
- openpgl
ExclusiveArch: aarch64 x86_64
- openssl-ibmca
ExclusiveArch: s390 s390x
- openstack-java-sdk
ExclusiveArch: %{java_arches} noarch
- opentest4j
ExclusiveArch: %{java_arches} noarch
- openvkl
ExclusiveArch: aarch64 x86_64
- optee_client
ExclusiveArch: aarch64
- optee_os
ExclusiveArch: aarch64
- options
ExclusiveArch: %{java_arches} noarch
- orthorobot
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- osgi-annotation
ExclusiveArch: %{java_arches} noarch
- osgi-compendium
ExclusiveArch: %{java_arches} noarch
- osgi-core
ExclusiveArch: %{java_arches} noarch
- pacemaker
ExclusiveArch: aarch64 i686 ppc64le s390x x86_64 %{arm}
ExclusiveArch: aarch64 i686 ppc64le s390x x86_64
- pageedit
ExclusiveArch: %{qt5_qtwebengine_arches}
- parserng
ExclusiveArch: %{java_arches} noarch
- pasdoc
ExclusiveArch: %{fpc_arches}
- pcc
ExclusiveArch: %{ix86} x86_64
- pcfi
ExclusiveArch: %{java_arches} noarch
- pcm
ExclusiveArch: %{ix86} x86_64
- pcmciautils
ExclusiveArch: %{ix86} x86_64 ia64 ppc ppc64 %{arm}
- pdbg
ExclusiveArch: ppc64le
- pdfbox
ExclusiveArch: %{java_arches} noarch
- pdfmod
ExclusiveArch: %mono_arches
- pdftk-java
ExclusiveArch: %{java_arches} noarch
- pentaho-libxml
ExclusiveArch: %{java_arches} noarch
- pentaho-reporting-flow-engine
ExclusiveArch: %{java_arches} noarch
- perl-Dumbbench
ExclusiveArch: %{ix86} x86_64 noarch
- perl-Parse-DMIDecode
ExclusiveArch: %{ix86} x86_64 ia64 aarch64
- pesign
ExclusiveArch: %{ix86} x86_64 ia64 aarch64 %{arm}
- pesign-test-app
ExclusiveArch: x86_64
- picocli
ExclusiveArch: %{java_arches} noarch
- pinta
ExclusiveArch: %mono_arches
- pioneer
ExclusiveArch: %{ix86} x86_64
- plantuml
ExclusiveArch: %{java_arches} noarch
- plasma-dialer
ExclusiveArch: %{java_arches}
- playonlinux
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64
- plexus-archiver
ExclusiveArch: %{java_arches} noarch
- plexus-build-api
ExclusiveArch: %{java_arches} noarch
- plexus-build-api0
ExclusiveArch: %{java_arches} noarch
- plexus-cipher
ExclusiveArch: %{java_arches} noarch
- plexus-classworlds
ExclusiveArch: %{java_arches} noarch
- plexus-compiler
ExclusiveArch: %{java_arches} noarch
- plexus-component-api
ExclusiveArch: %{java_arches} noarch
- plexus-components-pom
ExclusiveArch: %{java_arches} noarch
- plexus-containers
ExclusiveArch: %{java_arches} noarch
- plexus-i18n
ExclusiveArch: %{java_arches} noarch
- plexus-interpolation
ExclusiveArch: %{java_arches} noarch
- plexus-io
ExclusiveArch: %{java_arches} noarch
- plexus-languages
ExclusiveArch: %{java_arches} noarch
- plexus-pom
ExclusiveArch: %{java_arches} noarch
- plexus-resources
ExclusiveArch: %{java_arches} noarch
- plexus-sec-dispatcher
ExclusiveArch: %{java_arches} noarch
- plexus-utils
ExclusiveArch: %{java_arches} noarch
- plexus-velocity
ExclusiveArch: %{java_arches} noarch
- podman
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- poppler-sharp
ExclusiveArch: %mono_arches
- popub
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- postgresql-jdbc
ExclusiveArch: %{java_arches} noarch
- powerpc-utils
ExclusiveArch: ppc %{power64}
- powerstat
ExclusiveArch: %{ix86} x86_64
- ppc64-diag
ExclusiveArch: ppc %{power64}
- procyon
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
ExclusiveArch: %{java_arches} noarch
- proguard
ExclusiveArch: %{java_arches} noarch
- prometheus-jmx-exporter
ExclusiveArch: %{java_arches} noarch
- prometheus-simpleclient-java
ExclusiveArch: %{java_arches} noarch
- proxygen
ExclusiveArch: x86_64 aarch64 ppc64le
- pveclib
ExclusiveArch: ppc %{power64}
- pyqtwebengine
ExclusiveArch: %{qt5_qtwebengine_arches}
- python-ast-monitor
ExclusiveArch: %{qt6_qtwebengine_arches} noarch
- python-cryptography
ExclusiveArch: %{rust_arches}
- python-damo
ExclusiveArch: x86_64 aarch64 ppc64le s390x noarch
- python-etcd
ExclusiveArch: noarch %{ix86} x86_64 %{arm} aarch64 ppc64le s390x
- python-javaobj
ExclusiveArch: %{java_arches} noarch
- python-jnius
ExclusiveArch: %{java_arches}
- python-jupyter-polymake
ExclusiveArch: noarch aarch64 ppc64le s390x x86_64
- python-openoffice
ExclusiveArch: noarch x86_64
- python-pymoc
ExclusiveArch: aarch64 ppc64 ppc64le x86_64 s390x
- python-pyqt6-webengine
ExclusiveArch: aarch64 x86_64
- q4wine
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- qatengine
ExclusiveArch: x86_64
- qatzip
ExclusiveArch: x86_64
- qbe
ExclusiveArch: x86_64 aarch64
- qcint
ExclusiveArch: x86_64
- qclib
ExclusiveArch: s390 s390x
- qdox
ExclusiveArch: %{java_arches} noarch
- qemu-sanity-check
ExclusiveArch: %{kernel_arches}
- qevercloud
ExclusiveArch: %{qt5_qtwebengine_arches}
- qmapshack
ExclusiveArch: %{qt5_qtwebengine_arches}
- qt4pas
ExclusiveArch: %{fpc_arches}
- qt5-qtwebengine
ExclusiveArch: %{qt5_qtwebengine_arches}
- qt6-qtwebengine
ExclusiveArch: aarch64 x86_64
- qt6-qtwebview
ExclusiveArch: aarch64 x86_64
- quantum-espresso
ExclusiveArch: x86_64 %{ix86} aarch64 %{power64}
- quentier
ExclusiveArch: %{qt5_qtwebengine_arches}
- rEFInd
ExclusiveArch: %{efi}
- rachota
ExclusiveArch: %{java_arches} noarch
- rear
ExclusiveArch: %ix86 x86_64 ppc ppc64 ppc64le ia64
- reflections
ExclusiveArch: %{java_arches} noarch
- reg
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 aarch64 %{arm}}
- regexp
ExclusiveArch: %{java_arches} noarch
- relaxng-datatype-java
ExclusiveArch: %{java_arches} noarch
- replacer
ExclusiveArch: %{java_arches} noarch
- reptyr
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- rescene
ExclusiveArch: %{mono_arches}
- resteasy
ExclusiveArch: %{java_arches} noarch
- restool
ExclusiveArch: aarch64
- restsharp
ExclusiveArch: %{mono_arches}
- retsnoop
ExclusiveArch: %{rust_arches}
- rhino
ExclusiveArch: %{java_arches} noarch
- river
ExclusiveArch: %{zig_arches}
- rkcommon
ExclusiveArch: x86_64 aarch64
- rocclr
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-compilersupport
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-device-libs
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-runtime
ExclusiveArch: x86_64 aarch64 ppc64le
- rocm-smi
ExclusiveArch: x86_64 aarch64 ppc64le
- rocminfo
ExclusiveArch: x86_64 aarch64 ppc64le
- rocprim
ExclusiveArch: x86_64
- rocrand
ExclusiveArch: x86_64
- rocthrust
ExclusiveArch: x86_64
- rpm-ostree
ExclusiveArch: %{rust_arches}
- rr
ExclusiveArch: %{ix86} x86_64 aarch64
- rssguard
ExclusiveArch: %{qt5_qtwebengine_arches}
- rstudio
ExclusiveArch: %{java_arches}
- rsyntaxtextarea
ExclusiveArch: %{java_arches} noarch
- rubygem-childprocess
ExclusiveArch: %{ix86} x86_64 noarch
- runc
ExclusiveArch: %{golang_arches_future}
- rundoc
ExclusiveArch: %{java_arches} noarch
- rust
ExclusiveArch: %{rust_arches}
- rust-actix-router
ExclusiveArch: %{rust_arches}
- rust-argmax
ExclusiveArch: %{rust_arches}
- rust-askalono-cli
ExclusiveArch: %{rust_arches}
- rust-base-x
ExclusiveArch: %{rust_arches}
- rust-bit-set
ExclusiveArch: %{rust_arches}
- rust-bootupd
ExclusiveArch: x86_64 aarch64
- rust-brotli
ExclusiveArch: %{rust_arches}
- rust-capnp-futures
ExclusiveArch: %{rust_arches}
- rust-cascade
ExclusiveArch: %{rust_arches}
- rust-cast
ExclusiveArch: %{rust_arches}
- rust-combine
ExclusiveArch: %{rust_arches}
- rust-compress-tools
ExclusiveArch: %{rust_arches}
- rust-const-oid0.6
ExclusiveArch: %{rust_arches}
- rust-coreos-installer
ExclusiveArch: %{rust_arches}
- rust-crossbeam
ExclusiveArch: %{rust_arches}
- rust-cssparser-macros
ExclusiveArch: %{rust_arches}
- rust-ctr
ExclusiveArch: %{rust_arches}
- rust-dashmap4
ExclusiveArch: %{rust_arches}
- rust-deflate
ExclusiveArch: %{rust_arches}
- rust-deflate0.8
ExclusiveArch: %{rust_arches}
- rust-drg
ExclusiveArch: %{rust_arches}
- rust-event-listener
ExclusiveArch: %{rust_arches}
- rust-fail
ExclusiveArch: %{rust_arches}
- rust-gag
ExclusiveArch: %{rust_arches}
- rust-getch
ExclusiveArch: %{rust_arches}
- rust-gzip-header
ExclusiveArch: %{rust_arches}
- rust-hamming
ExclusiveArch: %{rust_arches}
- rust-hidapi
ExclusiveArch: %{rust_arches}
- rust-httparse
ExclusiveArch: %{rust_arches}
- rust-humantime-serde
ExclusiveArch: %{rust_arches}
- rust-hyperlocal
ExclusiveArch: %{rust_arches}
- rust-image0.23
ExclusiveArch: %{rust_arches}
- rust-inflate
ExclusiveArch: %{rust_arches}
- rust-jpeg-decoder0.1
ExclusiveArch: %{rust_arches}
- rust-k9
ExclusiveArch: %{rust_arches}
- rust-krunvm
ExclusiveArch: x86_64 aarch64
- rust-kstring
ExclusiveArch: %{rust_arches}
- rust-kvm-ioctls
ExclusiveArch: x86_64 aarch64
- rust-lebe
ExclusiveArch: %{rust_arches}
- rust-libslirp-sys
ExclusiveArch: %{rust_arches}
- rust-msru
ExclusiveArch: x86_64
- rust-nanorand
ExclusiveArch: %{rust_arches}
- rust-netstat2
ExclusiveArch: %{rust_arches}
- rust-onig
ExclusiveArch: %{rust_arches}
- rust-openat-ext
ExclusiveArch: %{rust_arches}
- rust-ordered-float2
ExclusiveArch: %{rust_arches}
- rust-os_type
ExclusiveArch: %{rust_arches}
- rust-pager
ExclusiveArch: %{rust_arches}
- rust-pathsearch
ExclusiveArch: %{rust_arches}
- rust-pretty-bytes
ExclusiveArch: %{rust_arches}
- rust-pretty-hex
ExclusiveArch: %{rust_arches}
- rust-primal-bit
ExclusiveArch: %{rust_arches}
- rust-primal-check
ExclusiveArch: %{rust_arches}
- rust-primal-estimate
ExclusiveArch: %{rust_arches}
- rust-procfs0.9
ExclusiveArch: %{rust_arches}
- rust-prost
ExclusiveArch: %{rust_arches}
- rust-prost-derive
ExclusiveArch: %{rust_arches}
- rust-prost-derive0.8
ExclusiveArch: %{rust_arches}
- rust-prost0.8
ExclusiveArch: %{rust_arches}
- rust-psl-types
ExclusiveArch: %{rust_arches}
- rust-psm
ExclusiveArch: %{rust_arches}
- rust-pwd
ExclusiveArch: %{rust_arches}
- rust-rand_core
ExclusiveArch: %{rust_arches}
- rust-schemafy_core
ExclusiveArch: %{rust_arches}
- rust-schemafy_lib
ExclusiveArch: %{rust_arches}
- rust-seahash
ExclusiveArch: %{rust_arches}
- rust-serde-big-array
ExclusiveArch: %{rust_arches}
- rust-serde_qs
ExclusiveArch: %{rust_arches}
- rust-serde_yaml0.8
ExclusiveArch: %{rust_arches}
- rust-sev
ExclusiveArch: x86_64
- rust-sevctl
ExclusiveArch: x86_64
- rust-silver
ExclusiveArch: %{rust_arches}
- rust-sinit
ExclusiveArch: %{rust_arches}
- rust-snphost
ExclusiveArch: x86_64
- rust-ssh-key-dir
ExclusiveArch: %{rust_arches}
- rust-stacker
ExclusiveArch: %{rust_arches}
- rust-structopt
ExclusiveArch: %{rust_arches}
- rust-tiff0.6
ExclusiveArch: %{rust_arches}
- rust-totp-lite
ExclusiveArch: %{rust_arches}
- rust-tower-layer
ExclusiveArch: %{rust_arches}
- rust-tpm2-policy
ExclusiveArch: %{rust_arches}
- rust-tree-sitter
ExclusiveArch: %{rust_arches}
- rust-tree-sitter-cli
ExclusiveArch: %{rust_arches}
- rust-ubyte
ExclusiveArch: %{rust_arches}
- rust-unicode-normalization
ExclusiveArch: %{rust_arches}
- rust-unicode-xid
ExclusiveArch: %{rust_arches}
- rust-uriparse
ExclusiveArch: %{rust_arches}
- rust-uucore
ExclusiveArch: %{rust_arches}
- rust-varlink_generator
ExclusiveArch: %{rust_arches}
- rust-varlink_parser
ExclusiveArch: %{rust_arches}
- rust-virtio-bindings
ExclusiveArch: x86_64 aarch64 ppc64le
- rust-vmm-sys-util
ExclusiveArch: x86_64 aarch64 ppc64le
- rust-vtparse
ExclusiveArch: %{rust_arches}
- rust-web-ext-native-messaging
ExclusiveArch: %{rust_arches}
- rust-wezterm-color-types
ExclusiveArch: %{rust_arches}
- rust-wezterm-dynamic
ExclusiveArch: %{rust_arches}
- rust-wezterm-dynamic-derive
ExclusiveArch: %{rust_arches}
- rust-zbus1
ExclusiveArch: %{rust_arches}
- rust-zbus_macros1
ExclusiveArch: %{rust_arches}
- rust-zincati
ExclusiveArch: %{rust_arches}
- rust-zvariant2
ExclusiveArch: %{rust_arches}
- rust-zvariant_derive2
ExclusiveArch: %{rust_arches}
- s390utils
ExclusiveArch: s390 s390x
- sac
ExclusiveArch: %{java_arches} noarch
- safetyblanket
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc64le
- sat4j
ExclusiveArch: %{java_arches} noarch
- sbcl
ExclusiveArch: %{ix86} x86_64 aarch64 %{power64}
- sbd
ExclusiveArch: i686 x86_64 s390x aarch64 ppc64le
- sblim-cim-client
ExclusiveArch: %{java_arches} noarch
- sblim-cim-client2
ExclusiveArch: %{java_arches} noarch
- sbsigntools
ExclusiveArch: x86_64 aarch64 %{arm} %{ix86}
- scala
ExclusiveArch: %{java_arches} noarch
- scalacheck
ExclusiveArch: %{java_arches} noarch
- scannotation
ExclusiveArch: %{java_arches} noarch
- sdljava
ExclusiveArch: %{java_arches}
- sdrangel
ExclusiveArch: %{qt5_qtwebengine_arches}
- sdsl-lite
ExclusiveArch: %{power64} x86_64 aarch64
- sdubby
ExclusiveArch: %{efi}
- seabios
ExclusiveArch: x86_64
- seadrive-gui
ExclusiveArch: %{qt5_qtwebengine_arches}
- seafile-client
ExclusiveArch: %{qt5_qtwebengine_arches}
- seamonkey
ExclusiveArch: x86_64
- seqan3
ExclusiveArch: %{power64} x86_64 aarch64
- sequence-library
ExclusiveArch: %{java_arches} noarch
- servicelog
ExclusiveArch: ppc %{power64}
- shaman
ExclusiveArch: %{java_arches} noarch
- sharpfont
ExclusiveArch: %mono_arches
- sharpziplib
ExclusiveArch: %{mono_arches}
- shim
ExclusiveArch: %{efi}
- shim-unsigned-aarch64
ExclusiveArch: aarch64
- shim-unsigned-x64
ExclusiveArch: x86_64
- sigul
ExclusiveArch: x86_64
- singularity-ce
ExclusiveArch: %{go_arches}
- sisu
ExclusiveArch: %{java_arches} noarch
- sisu-mojos
ExclusiveArch: %{java_arches} noarch
- skopeo
ExclusiveArch: %{golang_arches_future}
ExclusiveArch: aarch64 ppc64le s390x x86_64
- skychart
ExclusiveArch: %{fpc_arches}
- slf4j
ExclusiveArch: %{java_arches} noarch
- slirp4netns
ExclusiveArch: %{golang_arches_future}
- snakeyaml
ExclusiveArch: %{java_arches} noarch
- snapd
ExclusiveArch: %{?golang_arches}%{!?golang_arches:%{ix86} x86_64 %{arm} aarch64 ppc64le s390x}
- snip
ExclusiveArch: %{java_arches} noarch
- softnet-stat
ExclusiveArch: %{rust_arches}
- soup-sharp
ExclusiveArch: %{mono_arches}
- spacebar
ExclusiveArch: %{java_arches}
- sparkleshare
ExclusiveArch: %{mono_arches}
- spec-version-maven-plugin
ExclusiveArch: %{java_arches} noarch
- spicctrl
ExclusiveArch: %{ix86} x86_64
- spice
ExclusiveArch: x86_64
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- spice-parent
ExclusiveArch: %{java_arches} noarch
- springlobby
ExclusiveArch: %{ix86} x86_64
- sqljet
ExclusiveArch: %{java_arches} noarch
- squeekboard
ExclusiveArch: %{rust_arches}
- startdde
ExclusiveArch: %{?go_arches:%{go_arches}}%{!?go_arches:%{ix86} x86_64 %{arm}}
- stats-collect
ExclusiveArch: %{ix86} x86_64 noarch
- statsd
ExclusiveArch: %{nodejs_arches} noarch
- stratis-cli
ExclusiveArch: %{rust_arches} noarch
- stratisd
ExclusiveArch: %{rust_arches}
ExclusiveArch: %{rust_arches}
ExclusiveArch: %{rust_arches}
- string-template-maven-plugin
ExclusiveArch: %{java_arches} noarch
- stringtemplate
ExclusiveArch: %{java_arches} noarch
- stringtemplate4
ExclusiveArch: %{java_arches} noarch
- stripesnoop
ExclusiveArch: %{ix86} x86_64
- subscription-manager-cockpit
ExclusiveArch: %{nodejs_arches} noarch
- supercollider
ExclusiveArch: %{qt5_qtwebengine_arches}
- supermin
ExclusiveArch: %{kernel_arches}
- svnkit
ExclusiveArch: %{java_arches} noarch
- svt-vp9
ExclusiveArch: x86_64
- swift-lang
ExclusiveArch: x86_64 aarch64
- swing-layout
ExclusiveArch: %{java_arches} noarch
- sysbench
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips}
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips} aarch64 ppc64le s390x
- syslinux
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{ix86} x86_64
- system-rules
ExclusiveArch: %{java_arches} noarch
- systemd-boot
ExclusiveArch: %efi
- t-digest
ExclusiveArch: %{java_arches} noarch
- taglib-sharp
ExclusiveArch: %{mono_arches}
- tagsoup
ExclusiveArch: %{java_arches} noarch
- tarantool
ExclusiveArch: %{ix86} x86_64
- tboot
ExclusiveArch: %{ix86} x86_64
- tdlib
ExclusiveArch: x86_64 aarch64
- templates_parser
ExclusiveArch: %GPRbuild_arches
- ternimal
ExclusiveArch: %{rust_arches}
- testcloud
ExclusiveArch: %{kernel_arches} noarch
- testng
ExclusiveArch: %{java_arches} noarch
- texlive
ExclusiveArch: %{java_arches} noarch
- thermald
ExclusiveArch: %{ix86} x86_64
- tilix
ExclusiveArch: %{ldc_arches}
- tomboy
ExclusiveArch: %{mono_arches}
- tomcat
ExclusiveArch: %{java_arches} noarch
- tomcat-native
ExclusiveArch: %{java_arches}
- tomcat-taglibs-parent
ExclusiveArch: %{java_arches} noarch
- tomcatjss
ExclusiveArch: %{java_arches} noarch
- torbrowser-launcher
ExclusiveArch: %{ix86} x86_64
- treelayout
ExclusiveArch: %{java_arches} noarch
- trilead-ssh2
ExclusiveArch: %{java_arches} noarch
- truth
ExclusiveArch: %{java_arches} noarch
- tuned-profiles-nfv-host-bin
ExclusiveArch: %{ix86} x86_64
- typescript
ExclusiveArch: %{nodejs_arches} noarch
- uClibc
ExclusiveArch: %{arm} %{ix86} x86_64 %{mips}
- ucx
ExclusiveArch: aarch64 ppc64le x86_64
- uglify-js
ExclusiveArch: %{nodejs_arches} noarch
- umr
ExclusiveArch: x86_64 aarch64 ppc64le
- unetbootin
ExclusiveArch: %{ix86} x86_64
- univocity-parsers
ExclusiveArch: %{java_arches} noarch
- ursa-major
ExclusiveArch: noarch aarch64 ppc64le s390x x86_64
- usd
ExclusiveArch: aarch64 x86_64
- v8-314
ExclusiveArch: %{ix86} x86_64 %{arm} mips mipsel ppc ppc64
- vakzination
ExclusiveArch: %{java_arches}
- valgrind
ExclusiveArch: %{ix86} x86_64 ppc ppc64 ppc64le s390x armv7hl aarch64
- vboot-utils
ExclusiveArch: %{arm} aarch64 %{ix86} x86_64
- vecmath1.2
ExclusiveArch: %{java_arches} noarch
- velocity
ExclusiveArch: %{java_arches} noarch
- vim-go
ExclusiveArch: %{?golang_arches}%{!?golang_arches:%{ix86} x86_64 %{arm}}
- vim-syntastic
ExclusiveArch: %{java_arches} noarch
- virt-p2v
ExclusiveArch: x86_64
- virt-v2v
ExclusiveArch: x86_64
- virtiofsd
ExclusiveArch: %{rust_arches}
- virtualbox-guest-additions
ExclusiveArch: i686 x86_64
- vkd3d
ExclusiveArch: %{ix86} x86_64 %{arm}
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- vmaf
ExclusiveArch: x86_64
- voms-api-java
ExclusiveArch: %{java_arches} noarch
- voms-clients-java
ExclusiveArch: %{java_arches} noarch
- vrq
ExclusiveArch: %{ix86} x86_64
- wangle
ExclusiveArch: x86_64 aarch64 ppc64le
- warsow
ExclusiveArch: %{ix86} x86_64 %{arm}
- warsow-data
ExclusiveArch: %{ix86} x86_64 %{arm} noarch
ExclusiveArch: %{ix86} x86_64 %{arm}
- wasmedge
ExclusiveArch: x86_64 aarch64
- watchman
ExclusiveArch: x86_64 aarch64 ppc64le
- wdt
ExclusiveArch: x86_64 aarch64 ppc64le
- webkit2-sharp
ExclusiveArch: %mono_arches
- weld-parent
ExclusiveArch: %{java_arches} noarch
- why3
ExclusiveArch: %{ocaml_native_compiler}
- wine
ExclusiveArch: %{ix86} x86_64 aarch64
ExclusiveArch: %{ix86} x86_64
ExclusiveArch: %{ix86}
- wine-dxvk
ExclusiveArch: %{ix86} x86_64
- winetricks
ExclusiveArch: %{ix86} x86_64 %{arm} aarch64
- ws-commons-util
ExclusiveArch: %{java_arches} noarch
- wsdl4j
ExclusiveArch: %{java_arches} noarch
- wult
ExclusiveArch: x86_64
- wxMaxima
ExclusiveArch: %{arm} %{ix86} x86_64 aarch64 ppc sparcv9
- x2goclient
ExclusiveArch: x86_64
- x86-simd-sort
ExclusiveArch: x86_64
- xalan-j2
ExclusiveArch: %{java_arches} noarch
- xbean
ExclusiveArch: %{java_arches} noarch
- xbyak
ExclusiveArch: x86_64
- xbyak_aarch64
ExclusiveArch: aarch64
- xe-guest-utilities-latest
ExclusiveArch: %{ix86} x86_64
- xen
ExclusiveArch: x86_64 aarch64
- xerces-j2
ExclusiveArch: %{java_arches} noarch
- xgap
ExclusiveArch: %{gap_arches}
- xml-commons-apis
ExclusiveArch: %{java_arches} noarch
- xml-commons-resolver
ExclusiveArch: %{java_arches} noarch
- xml-maven-plugin
ExclusiveArch: %{java_arches} noarch
- xmlada
ExclusiveArch: %{GPRbuild_arches}
- xmlgraphics-commons
ExclusiveArch: %{java_arches} noarch
- xmlpull
ExclusiveArch: %{java_arches} noarch
- xmlstreambuffer
ExclusiveArch: %{java_arches} noarch
- xmlunit
ExclusiveArch: %{java_arches} noarch
- xmvn
ExclusiveArch: %{java_arches} noarch
- xmvn-connector-ivy
ExclusiveArch: %{java_arches} noarch
- xmvn-generator
ExclusiveArch: %{java_arches}
- xorg-x11-drv-armsoc
ExclusiveArch: %{arm} aarch64
- xorg-x11-drv-intel
ExclusiveArch: %{ix86} x86_64
- xorg-x11-drv-openchrome
ExclusiveArch: %{ix86} x86_64
- xorg-x11-drv-vesa
ExclusiveArch: %{ix86} x86_64
- xorg-x11-drv-vmware
ExclusiveArch: %{ix86} x86_64 ia64
- xsp
ExclusiveArch: %mono_arches
- xstream
ExclusiveArch: %{java_arches} noarch
- xz-java
ExclusiveArch: %{java_arches} noarch
- yarnpkg
ExclusiveArch: %{nodejs_arches} noarch
- zcfan
ExclusiveArch: x86_64
- zeal
ExclusiveArch: aarch64 x86_64
- zenon
ExclusiveArch: %{ocaml_native_compiler}
- zeromq-ada
ExclusiveArch: %{GPRbuild_arches}
- zig
ExclusiveArch: %{zig_arches}
- zlib-ada
ExclusiveArch: %{GPRbuild_arches}
- zlib-ng
ExclusiveArch: aarch64 i686 ppc64le s390x x86_64
- zola
ExclusiveArch: %{rust_arches}
7 months, 1 week
Architecture specific change in rpms/condor.git
by githook-noreply@fedoraproject.org
The package rpms/condor.git has added or updated architecture specific content in its
spec file (ExclusiveArch/ExcludeArch or %ifarch/%ifnarch) in commit(s):
https://src.fedoraproject.org/cgit/rpms/condor.git/commit/?id=ad0b762dbb3fe163d3385a7dff4a563c84f742af
Change:
-%ifarch %{arm} %{ix86} x86_64
Thanks.
Full change:
============
commit ad0b762dbb3fe163d3385a7dff4a563c84f742af
Author: Tim Theisen <ttheisen@fedoraproject.org>
Date: Sat Sep 30 21:25:56 2023 -0500
Update to latest upstream 23.0.0 - rhbz#1959462
Fix build issues - rhbz#2114520, rhbz#2172630, rhbz#2172684
Update to PCRE2 - rhbz#2128284
diff --git a/.gitignore b/.gitignore
index 62ba8ec..1f2a152 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,3 +40,4 @@ _build
/htcondor-8.8.8.tar.gz
/htcondor-8.8.10.tar.gz
/htcondor-8.8.15.tar.gz
+/htcondor-23.0.0.tar.gz
diff --git a/0001-Apply-the-user-s-condor_config-last-rather-than-firs.patch b/0001-Apply-the-user-s-condor_config-last-rather-than-firs.patch
deleted file mode 100644
index 82906b5..0000000
--- a/0001-Apply-the-user-s-condor_config-last-rather-than-firs.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From 47a7bb8fb64885d46c995a18d2c4601fbf9609f9 Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Tue, 24 Jul 2012 09:40:06 -0500
-Subject: [PATCH] Apply the user's condor_config last, rather than first.
-
----
- src/condor_utils/condor_config.cpp | 55 +++++++++++++++++++++++++++++------
- 1 files changed, 45 insertions(+), 10 deletions(-)
-
-diff --git a/src/condor_utils/condor_config.cpp b/src/condor_utils/condor_config.cpp
-index ef35572..455bdfa 100644
---- a/src/condor_utils/condor_config.cpp
-+++ b/src/condor_utils/condor_config.cpp
-@@ -110,6 +110,7 @@ void check_params();
- // External variables
- extern int ConfigLineNo;
- } /* End extern "C" */
-+bool find_user_file(std::string &);
-
- // Global variables
- BUCKET *ConfigTab[TABLESIZE];
-@@ -654,6 +655,14 @@ real_config(char* host, int wantsQuiet, bool wantExtraInfo)
- if(dirlist) { free(dirlist); dirlist = NULL; }
- if(newdirlist) { free(newdirlist); newdirlist = NULL; }
-
-+ // Now, insert overrides from the user config file
-+ std::string file_location;
-+ if (find_user_file(file_location))
-+ {
-+ process_config_source( file_location.c_str(), "user local source", host, false );
-+ local_config_sources.append(file_location.c_str());
-+ }
-+
- // Now, insert any macros defined in the environment.
- char **my_environ = GetEnviron();
- for( int i = 0; my_environ[i]; i++ ) {
-@@ -996,6 +1005,38 @@ find_global()
- }
-
-
-+// Find user-specific location of a file
-+// Returns true if found, and puts the location in the file_location argument.
-+// If not found, returns false. The contents of file_location are undefined.
-+bool
-+find_user_file(std::string &file_location)
-+{
-+#ifdef UNIX
-+ // $HOME/.condor/condor_config
-+ struct passwd *pw = getpwuid( geteuid() );
-+ std::stringstream ss;
-+ if ( can_switch_ids() || !pw || !pw->pw_dir ) {
-+ return false;
-+ }
-+ ss << pw->pw_dir << "/." << myDistro->Get() << "/" << myDistro->Get() << "_config";
-+ file_location = ss.str();
-+
-+ int fd;
-+ if ((fd = safe_open_wrapper_follow(file_location.c_str(), O_RDONLY)) < 0) {
-+ return false;
-+ } else {
-+ close(fd);
-+ dprintf(D_FULLDEBUG, "Reading condor configuration from '%s'\n", file_location.c_str());
-+ }
-+
-+ return true;
-+#else
-+ // To get rid of warnings...
-+ file_location = "";
-+ return false;
-+#endif
-+}
-+
- // Find location of specified file
- char*
- find_file(const char *env_name, const char *file_name)
-@@ -1052,21 +1093,15 @@ find_file(const char *env_name, const char *file_name)
- if (!config_source) {
- // List of condor_config file locations we'll try to open.
- // As soon as we find one, we'll stop looking.
-- const int locations_length = 4;
-+ const int locations_length = 3;
- MyString locations[locations_length];
-- // 1) $HOME/.condor/condor_config
-- struct passwd *pw = getpwuid( geteuid() );
-- if ( !can_switch_ids() && pw && pw->pw_dir ) {
-- formatstr( locations[0], "%s/.%s/%s", pw->pw_dir, myDistro->Get(),
-- file_name );
-- }
- // 2) /etc/condor/condor_config
-- locations[1].formatstr( "/etc/%s/%s", myDistro->Get(), file_name );
-+ locations[0].formatstr( "/etc/%s/%s", myDistro->Get(), file_name );
- // 3) /usr/local/etc/condor_config (FreeBSD)
-- locations[2].formatstr( "/usr/local/etc/%s", file_name );
-+ locations[1].formatstr( "/usr/local/etc/%s", file_name );
- if (tilde) {
- // 4) ~condor/condor_config
-- locations[3].formatstr( "%s/%s", tilde, file_name );
-+ locations[2].formatstr( "%s/%s", tilde, file_name );
- }
-
- int ctr;
---
-1.7.4.1
-
diff --git a/00personal_condor.config b/00personal_condor.config
deleted file mode 100644
index 57604b9..0000000
--- a/00personal_condor.config
+++ /dev/null
@@ -1,34 +0,0 @@
-## What machine is your central manager?
-
-CONDOR_HOST = $(FULL_HOSTNAME)
-
-## Pool's short description
-
-COLLECTOR_NAME = Personal Condor at $(FULL_HOSTNAME)
-
-## When is this machine willing to start a job?
-
-START = TRUE
-
-
-## When to suspend a job?
-
-SUSPEND = FALSE
-
-
-## When to nicely stop a job?
-## (as opposed to killing it instantaneously)
-
-PREEMPT = FALSE
-
-
-## When to instantaneously kill a preempting job
-## (e.g. if a job is in the pre-empting stage for too long)
-
-KILL = FALSE
-
-## This macro determines what daemons the condor_master will start and keep its watchful eyes on.
-## The list is a comma or space separated list of subsystem names
-
-DAEMON_LIST = COLLECTOR, MASTER, NEGOTIATOR, SCHEDD, STARTD
-
diff --git a/BZ1000106.patch b/BZ1000106.patch
deleted file mode 100644
index 004d9c2..0000000
--- a/BZ1000106.patch
+++ /dev/null
@@ -1,9 +0,0 @@
-diff --git a/src/condor_examples/condor_config.generic.redhat b/src/condor_examples/condor_config.generic.redhat
-index 6f3caaa..a3ac4e7 100644
---- a/src/condor_examples/condor_config.generic.redhat
-+++ b/src/condor_examples/condor_config.generic.redhat
-@@ -2315,3 +2315,4 @@ UNHIBERNATE = CurrentTime - MachineLastMatchTime < 1200
- ##--------------------------------------------------------------------
- FILETRANSFER_PLUGINS = $(LIBEXEC)/curl_plugin, $(LIBEXEC)/data_plugin
-
-+USE_CLONE_TO_CREATE_PROCESSES = False
diff --git a/NOTICE.txt b/NOTICE.txt
deleted file mode 100644
index cc4365b..0000000
--- a/NOTICE.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Condor
-
-Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
-University of Wisconsin-Madison, WI.
-
-This source code is covered by the Apache License, Version 2.0, which
-can be found in the accompanying LICENSE-2.0.txt file, or online at
-http://www.apache.org/licenses/ .
-
-This product includes software developed by and/or derived from the
-Globus Project (http://www.globus.org/) to which the U.S. Government
-retains certain rights. Copyright (c) 1999 University of Chicago and
-The University of Southern California. All Rights Reserved.
-
-This product includes software developed by the OpenSSL Project for
-use in the OpenSSL Toolkit (http://www.openssl.org/). Complete
-conditions and disclaimers for OpenSSL can be found at
-http://www.openssl.org/source/license.html
-
-Some distributions of Condor include software developed by the
-Info-ZIP Project (http://www.info-zip.org/). Complete conditions
-and disclaimers for Info-ZIP can be found at
-http://www.info-zip.org/doc/LICENSE
-
-Some distributions of Condor include MAKEMSI software developed by
-Dennis Bareis (http://dennisbareis.com/makemsi.htm). Complete
-conditions and disclaimers for MAKEMSI can be found at
-http://makemsi-manual.dennisbareis.com/disclaimer.htm
-
-Some distributions of Condor include a compiled, unmodified version
-of the GNU C library. The complete source code to GNU glibc can be
-found at http://www.gnu.org/software/libc/.
-
-Part of the software embedded in this product is gSOAP software.
-Portions created by gSOAP are Copyright (C) 2001-2004 Robert A. van
-Engelen, Genivia inc. All Rights Reserved.
-THE SOFTWARE IN THIS PRODUCT WAS IN PART PROVIDED BY GENIVIA INC AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
-GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
-IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Werror_replace.patch b/Werror_replace.patch
deleted file mode 100644
index 4c6f6a3..0000000
--- a/Werror_replace.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/src/condor_contrib/CMakeLists.txt b/src/condor_contrib/CMakeLists.txt
-index 627334660f..5f02bcc581 100644
---- a/src/condor_contrib/CMakeLists.txt
-+++ b/src/condor_contrib/CMakeLists.txt
-@@ -21,7 +21,7 @@
-
- if (WANT_CONTRIB)
- dprint("building contrib modules")
-- string(REGEX REPLACE "-Werror" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
-+ #string(REGEX REPLACE "-Werror" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- #dprint( "CMAKE_CXX_FLAGS = ${CMAKE_CXX_FLAGS}")
-
- include_directories("${CMAKE_CURRENT_SOURCE_DIR}/utils")
diff --git a/boost-python38.patch b/boost-python38.patch
deleted file mode 100644
index d62a797..0000000
--- a/boost-python38.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/externals/bundles/boost/1.66.0/CMakeLists.txt b/externals/bundles/boost/1.66.0/CMakeLists.txt
-index 4a9a20b..5a972ca 100644
---- a/externals/bundles/boost/1.66.0/CMakeLists.txt
-+++ b/externals/bundles/boost/1.66.0/CMakeLists.txt
-@@ -52,7 +52,7 @@ else (WINDOWS)
- set (BOOST_COMPONENTS unit_test_framework ${BOOST_COMPONENTS})
- endif()
- if (WITH_PYTHON_BINDINGS)
-- set (BOOST_COMPONENTS python ${BOOST_COMPONENTS})
-+ set (BOOST_COMPONENTS python38 ${BOOST_COMPONENTS})
- endif()
-
- endif()
diff --git a/cgroup_reset_stats.patch b/cgroup_reset_stats.patch
deleted file mode 100644
index 458e7ad..0000000
--- a/cgroup_reset_stats.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-diff --git a/src/condor_procd/proc_family.cpp b/src/condor_procd/proc_family.cpp
-index d35ffcc..29d9471 100644
---- a/src/condor_procd/proc_family.cpp
-+++ b/src/condor_procd/proc_family.cpp
-@@ -54,7 +54,9 @@ ProcFamily::ProcFamily(ProcFamilyMonitor* monitor,
- m_member_list(NULL)
- #if defined(HAVE_EXT_LIBCGROUP)
- , m_cgroup_string(""),
-- m_cm(CgroupManager::getInstance())
-+ m_cm(CgroupManager::getInstance()),
-+ m_initial_user_cpu(0),
-+ m_initial_sys_cpu(0)
- #endif
- {
- #if !defined(WIN32)
-@@ -188,6 +190,7 @@ after_migrate:
- cgroup_free(&orig_cgroup);
- }
-
-+
- after_restore:
- if (orig_cgroup_string != NULL) {
- free(orig_cgroup_string);
-@@ -231,6 +234,27 @@ ProcFamily::set_cgroup(const std::string &cgroup_string)
- member = member->m_next;
- }
-
-+ // Record the amount of pre-existing CPU usage here.
-+ m_initial_user_cpu = 0;
-+ m_initial_sys_cpu = 0;
-+ get_cpu_usage_cgroup(m_initial_user_cpu, m_initial_sys_cpu);
-+
-+ // Reset block IO controller
-+ if (m_cm.isMounted(CgroupManager::BLOCK_CONTROLLER)) {
-+ struct cgroup *tmp_cgroup = cgroup_new_cgroup(m_cgroup_string.c_str());
-+ struct cgroup_controller *blkio_controller = cgroup_add_controller(tmp_cgroup, BLOCK_CONTROLLER_STR);
-+ ASSERT (blkio_controller != NULL); // Block IO controller should already exist.
-+ cgroup_add_value_uint64(blkio_controller, "blkio.reset_stats", 0);
-+ int err;
-+ if ((err = cgroup_modify_cgroup(tmp_cgroup))) {
-+ // Not allowed to reset stats?
-+ dprintf(D_ALWAYS,
-+ "Unable to reset cgroup %s block IO statistics. "
-+ "Some block IO accounting will be inaccurate (ProcFamily %u): %u %s\n",
-+ m_cgroup_string.c_str(), m_root_pid, err, cgroup_strerror(err));
-+ }
-+ }
-+
- return 0;
- }
-
-@@ -486,6 +510,40 @@ ProcFamily::aggregate_usage_cgroup_blockio(ProcFamilyUsage* usage)
- return 0;
- }
-
-+int ProcFamily::get_cpu_usage_cgroup(long &user_time, long &sys_time) {
-+
-+ if (!m_cm.isMounted(CgroupManager::CPUACCT_CONTROLLER)) {
-+ return 1;
-+ }
-+
-+ void * handle = NULL;
-+ u_int64_t tmp = 0;
-+ struct cgroup_stat stats;
-+ int err = cgroup_read_stats_begin(CPUACCT_CONTROLLER_STR, m_cgroup_string.c_str(), &handle, &stats);
-+ while (err != ECGEOF) {
-+ if (err > 0) {
-+ dprintf(D_PROCFAMILY,
-+ "Unable to read cgroup %s cpuacct stats (ProcFamily %u): %s.\n",
-+ m_cgroup_string.c_str(), m_root_pid, cgroup_strerror(err));
-+ break;
-+ }
-+ if (_check_stat_uint64(stats, "user", &tmp)) {
-+ user_time = tmp/clock_tick-m_initial_user_cpu;
-+ } else if (_check_stat_uint64(stats, "system", &tmp)) {
-+ sys_time = tmp/clock_tick-m_initial_sys_cpu;
-+ }
-+ err = cgroup_read_stats_next(&handle, &stats);
-+ }
-+ if (handle != NULL) {
-+ cgroup_read_stats_end(&handle);
-+ }
-+ if (err != ECGEOF) {
-+ dprintf(D_ALWAYS, "Internal cgroup error when retrieving CPU statistics: %s\n", cgroup_strerror(err));
-+ return 1;
-+ }
-+ return 0;
-+}
-+
- int
- ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
- {
-@@ -496,16 +554,13 @@ ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
-
- int err;
- struct cgroup_stat stats;
-- void **handle;
-+ void *handle = NULL;
- u_int64_t tmp = 0, image = 0;
- bool found_rss = false;
-
- // Update memory
-- handle = (void **)malloc(sizeof(void*));
-- ASSERT (handle != NULL);
-- *handle = NULL;
-
-- err = cgroup_read_stats_begin(MEMORY_CONTROLLER_STR, m_cgroup_string.c_str(), handle, &stats);
-+ err = cgroup_read_stats_begin(MEMORY_CONTROLLER_STR, m_cgroup_string.c_str(), &handle, &stats);
- while (err != ECGEOF) {
- if (err > 0) {
- dprintf(D_PROCFAMILY,
-@@ -522,10 +577,10 @@ ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
- } else if (_check_stat_uint64(stats, "total_swap", &tmp)) {
- image += tmp;
- }
-- err = cgroup_read_stats_next(handle, &stats);
-+ err = cgroup_read_stats_next(&handle, &stats);
- }
-- if (*handle != NULL) {
-- cgroup_read_stats_end(handle);
-+ if (handle != NULL) {
-+ cgroup_read_stats_end(&handle);
- }
- if (found_rss) {
- usage->total_image_size = image/1024;
-@@ -540,29 +595,12 @@ ProcFamily::aggregate_usage_cgroup(ProcFamilyUsage* usage)
- m_max_image_size = image/1024;
- }
- // Try updating the max size using cgroups
-- update_max_image_size_cgroup();
-+ // XXX: This is taken out for now - kernel calculates max INCLUDING
-+ // the filesystem cache. Not what you want.
-+ //update_max_image_size_cgroup();
-
- // Update CPU
-- *handle = NULL;
-- err = cgroup_read_stats_begin(CPUACCT_CONTROLLER_STR, m_cgroup_string.c_str(), handle, &stats);
-- while (err != ECGEOF) {
-- if (err > 0) {
-- dprintf(D_PROCFAMILY,
-- "Unable to read cgroup %s cpuacct stats (ProcFamily %u): %s.\n",
-- m_cgroup_string.c_str(), m_root_pid, cgroup_strerror(err));
-- break;
-- }
-- if (_check_stat_uint64(stats, "user", &tmp)) {
-- usage->user_cpu_time = tmp/clock_tick;
-- } else if (_check_stat_uint64(stats, "system", &tmp)) {
-- usage->sys_cpu_time = tmp/clock_tick;
-- }
-- err = cgroup_read_stats_next(handle, &stats);
-- }
-- if (*handle != NULL) {
-- cgroup_read_stats_end(handle);
-- }
-- free(handle);
-+ get_cpu_usage_cgroup(usage->user_cpu_time, usage->sys_cpu_time);
-
- aggregate_usage_cgroup_blockio(usage);
-
---- a/src/condor_procd/proc_family.h
-+++ b/src/condor_procd/proc_family.h
-@@ -181,6 +181,11 @@ private:
- std::string m_cgroup_string;
- CgroupManager &m_cm;
- static long clock_tick;
-+ // Sometimes Condor doesn't successfully clear out the cgroup from the
-+ // previous run. Hence, we subtract off any CPU usage found at the
-+ // start of the job.
-+ long m_initial_user_cpu;
-+ long m_initial_sys_cpu;
- static bool have_warned_about_memsw;
-
- int count_tasks_cgroup();
-@@ -190,6 +195,7 @@ private:
- int spree_cgroup(int);
- int migrate_to_cgroup(pid_t);
- void update_max_image_size_cgroup();
-+ int get_cpu_usage_cgroup(long &user_cpu, long &sys_cpu);
- #endif
- };
-
diff --git a/cgroups_noswap.patch b/cgroups_noswap.patch
deleted file mode 100644
index b94e7df..0000000
--- a/cgroups_noswap.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-diff --git a/src/condor_procd/proc_family.cpp b/src/condor_procd/proc_family.cpp
-index d35ffcc..2a5839f 100644
---- a/src/condor_procd/proc_family.cpp
-+++ b/src/condor_procd/proc_family.cpp
-@@ -36,6 +36,10 @@
-
- #include <unistd.h>
- long ProcFamily::clock_tick = sysconf( _SC_CLK_TCK );
-+
-+// Swap accounting is sometimes turned off. We use this variable so we
-+// warn about that situation only once.
-+bool ProcFamily::have_warned_about_memsw = false;
- #endif
-
- ProcFamily::ProcFamily(ProcFamilyMonitor* monitor,
-@@ -425,10 +429,19 @@ ProcFamily::update_max_image_size_cgroup()
- return;
- }
- if ((err = cgroup_get_value_uint64(memct, "memory.memsw.max_usage_in_bytes", &max_image))) {
-- dprintf(D_PROCFAMILY,
-- "Unable to load max memory usage for cgroup %s (ProcFamily %u): %u %s\n",
-- m_cgroup_string.c_str(), m_root_pid, err, cgroup_strerror(err));
-- return;
-+ // On newer nodes, swap accounting is disabled by default.
-+ // In some cases, swap accounting causes a kernel oops at the time of writing.
-+ // So, we check memory.max_usage_in_bytes instead.
-+ int err2 = cgroup_get_value_uint64(memct, "memory.max_usage_in_bytes", &max_image);
-+ if (err2) {
-+ dprintf(D_PROCFAMILY,
-+ "Unable to load max memory usage for cgroup %s (ProcFamily %u): %u %s\n",
-+ m_cgroup_string.c_str(), m_root_pid, err, cgroup_strerror(err));
-+ return;
-+ } else if (!have_warned_about_memsw) {
-+ have_warned_about_memsw = true;
-+ dprintf(D_ALWAYS, "Swap acounting is not available; only doing RAM accounting.\n");
-+ }
- }
- m_max_image_size = max_image/1024;
- }
-diff --git a/src/condor_procd/proc_family.h b/src/condor_procd/proc_family.h
-index 28a854c..d831d8e 100644
---- a/src/condor_procd/proc_family.h
-+++ b/src/condor_procd/proc_family.h
-@@ -181,6 +181,7 @@ private:
- std::string m_cgroup_string;
- CgroupManager &m_cm;
- static long clock_tick;
-+ static bool have_warned_about_memsw;
-
- int count_tasks_cgroup();
- int aggregate_usage_cgroup_blockio(ProcFamilyUsage*);
-diff --git a/src/condor_starter.V6.1/cgroup.linux.cpp b/src/condor_starter.V6.1/cgroup.linux.cpp
-index 97407b3..4fbd00d 100644
---- a/src/condor_starter.V6.1/cgroup.linux.cpp
-+++ b/src/condor_starter.V6.1/cgroup.linux.cpp
-@@ -185,13 +185,6 @@ int CgroupManager::create(const std::string &cgroup_string, Cgroup &cgroup,
- has_cgroup, changed_cgroup)) {
- return -1;
- }
-- if ((preferred_controllers & CPUACCT_CONTROLLER) &&
-- initialize_controller(*cgroupp, CPUACCT_CONTROLLER,
-- CPUACCT_CONTROLLER_STR,
-- required_controllers & CPUACCT_CONTROLLER,
-- has_cgroup, changed_cgroup)) {
-- return -1;
-- }
- if ((preferred_controllers & BLOCK_CONTROLLER) &&
- initialize_controller(*cgroupp, BLOCK_CONTROLLER,
- BLOCK_CONTROLLER_STR,
-diff --git a/src/condor_starter.V6.1/cgroup_limits.cpp b/src/condor_starter.V6.1/cgroup_limits.cpp
-index 71830a5..93e311c 100644
---- a/src/condor_starter.V6.1/cgroup_limits.cpp
-+++ b/src/condor_starter.V6.1/cgroup_limits.cpp
-@@ -20,7 +20,7 @@ CgroupLimits::CgroupLimits(std::string &cgroup) : m_cgroup_string(cgroup)
- int CgroupLimits::set_memory_limit_bytes(uint64_t mem_bytes, bool soft)
- {
- if (!m_cgroup.isValid() || !CgroupManager::getInstance().isMounted(CgroupManager::MEMORY_CONTROLLER)) {
-- dprintf(D_ALWAYS, "Unable to set memory limit because cgroup is invalid.");
-+ dprintf(D_ALWAYS, "Unable to set memory limit because cgroup is invalid.\n");
- return 1;
- }
-
-@@ -55,7 +55,7 @@ int CgroupLimits::set_memory_limit_bytes(uint64_t mem_bytes, bool soft)
- int CgroupLimits::set_cpu_shares(uint64_t shares)
- {
- if (!m_cgroup.isValid() || !CgroupManager::getInstance().isMounted(CgroupManager::CPU_CONTROLLER)) {
-- dprintf(D_ALWAYS, "Unable to set CPU shares because cgroup is invalid.");
-+ dprintf(D_ALWAYS, "Unable to set CPU shares because cgroup is invalid.\n");
- return 1;
- }
-
-@@ -89,7 +89,7 @@ int CgroupLimits::set_cpu_shares(uint64_t shares)
- int CgroupLimits::set_blockio_weight(uint64_t weight)
- {
- if (!m_cgroup.isValid() || !CgroupManager::getInstance().isMounted(CgroupManager::BLOCK_CONTROLLER)) {
-- dprintf(D_ALWAYS, "Unable to set blockio weight because cgroup is invalid.");
-+ dprintf(D_ALWAYS, "Unable to set blockio weight because cgroup is invalid.\n");
- return 1;
- }
-
diff --git a/condor-1605-v2.patch b/condor-1605-v2.patch
deleted file mode 100644
index 1702b1d..0000000
--- a/condor-1605-v2.patch
+++ /dev/null
@@ -1,1457 +0,0 @@
-diff --git a/src/condor_c-gahp/schedd_client.cpp b/src/condor_c-gahp/schedd_client.cpp
-index 32f0059..b8fda84 100644
---- a/src/condor_c-gahp/schedd_client.cpp
-+++ b/src/condor_c-gahp/schedd_client.cpp
-@@ -562,7 +562,7 @@ doContactSchedd()
- // Try connecting to the queue
- Qmgr_connection * qmgr_connection;
-
-- if ((qmgr_connection = ConnectQ(dc_schedd.addr(), QMGMT_TIMEOUT, false, NULL, NULL, dc_schedd.version() )) == NULL) {
-+ if ((qmgr_connection = ConnectQ(dc_schedd, QMGMT_TIMEOUT, false, NULL, NULL, dc_schedd.version() )) == NULL) {
- error = TRUE;
- sprintf( error_msg, "Error connecting to schedd %s", ScheddAddr );
- dprintf( D_ALWAYS, "%s\n", error_msg.c_str() );
-diff --git a/src/condor_contrib/triggerd/src/Triggerd.cpp b/src/condor_contrib/triggerd/src/Triggerd.cpp
-index 40e1197..59f2e55 100644
---- a/src/condor_contrib/triggerd/src/Triggerd.cpp
-+++ b/src/condor_contrib/triggerd/src/Triggerd.cpp
-@@ -819,7 +819,7 @@ Triggerd::PerformQueries()
- // Perform the query and check the result
- if (NULL != query_collector)
- {
-- status = query->fetchAds(result, query_collector->addr(), &errstack);
-+ status = query->fetchAds(result, *query_collector, &errstack);
- }
- else
- {
-diff --git a/src/condor_credd/credd.cpp b/src/condor_credd/credd.cpp
-index 7f0973f..c3b7b1f 100644
---- a/src/condor_credd/credd.cpp
-+++ b/src/condor_credd/credd.cpp
-@@ -80,7 +80,7 @@ store_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
-
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, WRITE, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-@@ -236,7 +236,7 @@ get_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
- // Authenticate
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, READ, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, READ, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-@@ -351,7 +351,7 @@ query_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
-
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, READ, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, READ, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-@@ -426,7 +426,7 @@ rm_cred_handler(Service * /*service*/, int /*i*/, Stream *stream) {
-
- if (!socket->triedAuthentication()) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(socket, READ, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(socket, READ, &errstack, NULL) ) {
- dprintf (D_ALWAYS, "Unable to authenticate, qutting\n");
- goto EXIT;
- }
-diff --git a/src/condor_daemon_client/daemon.cpp b/src/condor_daemon_client/daemon.cpp
-index e2afded..5ccb2aa 100644
---- a/src/condor_daemon_client/daemon.cpp
-+++ b/src/condor_daemon_client/daemon.cpp
-@@ -162,7 +162,7 @@ Daemon::Daemon( const ClassAd* tAd, daemon_t tType, const char* tPool )
-
- getInfoFromAd( tAd );
-
-- dprintf( D_HOSTNAME, "New Daemon obj (%s) name: \"%s\", pool: "
-+ dprintf( D_HOSTNAME, "From ClassAd, new Daemon obj (%s) name: \"%s\", pool: "
- "\"%s\", addr: \"%s\"\n", daemonString(_type),
- _name ? _name : "NULL", _pool ? _pool : "NULL",
- _addr ? _addr : "NULL" );
-@@ -533,7 +533,7 @@ Daemon::connectSock(Sock *sock, int sec, CondorError* errstack, bool non_blockin
-
-
- StartCommandResult
--Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *, SecMan *sec_man, bool raw_protocol, char const *sec_session_id )
-+Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *, SecMan *sec_man, bool raw_protocol, char const *sec_session_id, const char * hostname )
- {
- // This function may be either blocking or non-blocking, depending
- // on the flag that is passed in. All versions of Daemon::startCommand()
-@@ -555,7 +555,7 @@ Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, S
- sock->timeout( timeout );
- }
-
-- start_command_result = sec_man->startCommand(cmd, sock, raw_protocol, errstack, 0, callback_fn, misc_data, nonblocking, cmd_description, sec_session_id);
-+ start_command_result = sec_man->startCommand(cmd, sock, raw_protocol, errstack, 0, callback_fn, misc_data, nonblocking, cmd_description, sec_session_id, hostname);
-
- if(callback_fn) {
- // SecMan::startCommand() called the callback function, so we just return here
-@@ -619,7 +619,8 @@ Daemon::startCommand( int cmd, Stream::stream_type st,Sock **sock,int timeout, C
- _version,
- &_sec_man,
- raw_protocol,
-- sec_session_id);
-+ sec_session_id,
-+ _full_hostname);
- }
-
- Sock*
-@@ -662,7 +663,7 @@ Daemon::startCommand_nonblocking( int cmd, Sock* sock, int timeout, CondorError
- {
- // This is the nonblocking version of startCommand().
- const bool nonblocking = true;
-- return startCommand(cmd,sock,timeout,errstack,callback_fn,misc_data,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id);
-+ return startCommand(cmd,sock,timeout,errstack,callback_fn,misc_data,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id, _full_hostname);
- }
-
- bool
-@@ -670,7 +671,7 @@ Daemon::startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, c
- {
- // This is a blocking version of startCommand().
- const bool nonblocking = false;
-- StartCommandResult rc = startCommand(cmd,sock,timeout,errstack,NULL,NULL,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id);
-+ StartCommandResult rc = startCommand(cmd,sock,timeout,errstack,NULL,NULL,nonblocking,cmd_description,_version,&_sec_man,raw_protocol,sec_session_id, _full_hostname);
- switch(rc) {
- case StartCommandSucceeded:
- return true;
-@@ -2097,7 +2098,7 @@ Daemon::forceAuthentication( ReliSock* rsock, CondorError* errstack )
- return true;
- }
-
-- return SecMan::authenticate_sock(rsock, CLIENT_PERM, errstack );
-+ return SecMan::authenticate_sock(rsock, CLIENT_PERM, errstack, _full_hostname);
- }
-
-
-diff --git a/src/condor_daemon_client/daemon.h b/src/condor_daemon_client/daemon.h
-index 57fcd8a..9aa3b9f 100644
---- a/src/condor_daemon_client/daemon.h
-+++ b/src/condor_daemon_client/daemon.h
-@@ -761,7 +761,7 @@ protected:
- It may be either blocking or nonblocking, depending on the
- nonblocking flag. This version uses an existing socket.
- */
-- static StartCommandResult startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *version, SecMan *sec_man, bool raw_protocol, char const *sec_session_id );
-+ static StartCommandResult startCommand( int cmd, Sock* sock, int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, char *version, SecMan *sec_man, bool raw_protocol, char const *sec_session_id, char const *hostname );
-
- /**
- Internal function used by public versions of startCommand().
-@@ -769,7 +769,7 @@ protected:
- nonblocking flag. This version creates a socket of the
- specified type and connects it.
- */
-- StartCommandResult startCommand( int cmd, Stream::stream_type st,Sock **sock,int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description=NULL, bool raw_protocol=false, char const *sec_session_id=NULL );
-+ StartCommandResult startCommand( int cmd, Stream::stream_type st,Sock **sock,int timeout, CondorError *errstack, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking, char const *cmd_description, bool raw_protocol, char const *sec_session_id );
-
- /**
- Class used internally to handle non-blocking connects for
-diff --git a/src/condor_daemon_client/daemon_list.cpp b/src/condor_daemon_client/daemon_list.cpp
-index 244d58a..597d981 100644
---- a/src/condor_daemon_client/daemon_list.cpp
-+++ b/src/condor_daemon_client/daemon_list.cpp
-@@ -330,7 +330,7 @@ CollectorList::query(CondorQuery & cQuery, ClassAdList & adList, CondorError *er
- }
-
- result =
-- cQuery.fetchAds (adList, daemon->addr(), errstack);
-+ cQuery.fetchAds (adList, *daemon, errstack);
-
- if( num_collectors > 1 ) {
- daemon->blacklistMonitorQueryFinished( result == Q_OK );
-diff --git a/src/condor_daemon_client/dc_schedd.cpp b/src/condor_daemon_client/dc_schedd.cpp
-index af1049e..769e12c 100644
---- a/src/condor_daemon_client/dc_schedd.cpp
-+++ b/src/condor_daemon_client/dc_schedd.cpp
-@@ -41,6 +41,9 @@ DCSchedd::DCSchedd( const char* the_name, const char* the_pool )
- {
- }
-
-+DCSchedd::DCSchedd( const ClassAd* ad, const char* pool )
-+ : Daemon( ad, DT_SCHEDD ,pool ) // Surprise! DT_SCHEDD is the second argument.
-+{}
-
- DCSchedd::~DCSchedd( void )
- {
-diff --git a/src/condor_daemon_client/dc_schedd.h b/src/condor_daemon_client/dc_schedd.h
-index 201c9b4..6389e09 100644
---- a/src/condor_daemon_client/dc_schedd.h
-+++ b/src/condor_daemon_client/dc_schedd.h
-@@ -62,6 +62,13 @@ public:
- */
- DCSchedd( const char* const name = NULL, const char* pool = NULL );
-
-+ /** Constructor. Same as a Daemon object.
-+ @param ad The classad of the schedd object; saves a query
-+ to the collector.
-+ @param pool The name of the pool, NULL if you want local
-+ */
-+ DCSchedd( const ClassAd* ad, const char* pool = NULL );
-+
- /// Destructor
- ~DCSchedd();
-
-diff --git a/src/condor_daemon_client/dc_startd.cpp b/src/condor_daemon_client/dc_startd.cpp
-index ec3ab14..9a4b44c 100644
---- a/src/condor_daemon_client/dc_startd.cpp
-+++ b/src/condor_daemon_client/dc_startd.cpp
-@@ -914,7 +914,6 @@ DCStartd::getAds( ClassAdList &adsList )
- // fetch the query
- QueryResult q;
- CondorQuery* query;
-- char* ad_addr;
-
- // instantiate query object
- if (!(query = new CondorQuery (STARTD_AD))) {
-@@ -923,8 +922,7 @@ DCStartd::getAds( ClassAdList &adsList )
- }
-
- if( this->locate() ){
-- ad_addr = this->addr();
-- q = query->fetchAds(adsList, ad_addr, &errstack);
-+ q = query->fetchAds(adsList, *this, &errstack);
- if (q != Q_OK) {
- if (q == Q_COMMUNICATION_ERROR) {
- dprintf( D_ALWAYS, "%s\n", errstack.getFullText(true) );
-diff --git a/src/condor_daemon_core.V6/daemon_command.cpp b/src/condor_daemon_core.V6/daemon_command.cpp
-index e6da114..3a96315 100644
---- a/src/condor_daemon_core.V6/daemon_command.cpp
-+++ b/src/condor_daemon_core.V6/daemon_command.cpp
-@@ -991,7 +991,7 @@ DaemonCommandProtocol::CommandProtocolResult DaemonCommandProtocol::Authenticate
- int auth_timeout = daemonCore->getSecMan()->getSecTimeout( m_comTable[cmd_index].perm );
-
- char *method_used = NULL;
-- bool auth_success = m_sock->authenticate(m_key, auth_methods, &errstack, auth_timeout, &method_used);
-+ bool auth_success = m_sock->authenticate(m_key, auth_methods, &errstack, auth_timeout, &method_used, NULL);
-
- if ( method_used ) {
- m_policy->Assign(ATTR_SEC_AUTHENTICATION_METHODS, method_used);
-@@ -1279,7 +1279,7 @@ DaemonCommandProtocol::CommandProtocolResult DaemonCommandProtocol::ExecCommand(
- m_comTable[cmd_index].force_authentication &&
- !m_sock->triedAuthentication() )
- {
-- SecMan::authenticate_sock(m_sock, WRITE, &errstack);
-+ SecMan::authenticate_sock(m_sock, WRITE, &errstack, NULL);
- // we don't check the return value, because the code below
- // handles what to do with unauthenticated connections
- }
-diff --git a/src/condor_eventd.V2/admin_event.cpp b/src/condor_eventd.V2/admin_event.cpp
-index 3fa1c46..83abc49 100644
---- a/src/condor_eventd.V2/admin_event.cpp
-+++ b/src/condor_eventd.V2/admin_event.cpp
-@@ -1089,7 +1089,7 @@ AdminEvent::FetchAds_ByConstraint( const char *constraint )
-
- query->addORConstraint( constraint );
-
-- q = query->fetchAds( m_collector_query_ads, pool->addr(), &errstack);
-+ q = query->fetchAds( m_collector_query_ads, pool, &errstack);
-
- if( q != Q_OK ){
- dprintf(D_ALWAYS, "Trouble fetching Ads with<<%s>><<%d>>\n",
-diff --git a/src/condor_gridmanager/gridmanager.cpp b/src/condor_gridmanager/gridmanager.cpp
-index 1f97ef7..03db810 100644
---- a/src/condor_gridmanager/gridmanager.cpp
-+++ b/src/condor_gridmanager/gridmanager.cpp
-@@ -644,7 +644,7 @@ doContactSchedd()
- }
-
-
-- schedd = ConnectQ( ScheddAddr, QMGMT_TIMEOUT, false, NULL, myUserName, CondorVersion() );
-+ schedd = ConnectQ( *ScheddObj, QMGMT_TIMEOUT, false, NULL, myUserName, CondorVersion() );
- if ( !schedd ) {
- error_str = "Failed to connect to schedd!";
- goto contact_schedd_failure;
-diff --git a/src/condor_includes/authentication.h b/src/condor_includes/authentication.h
-index d2b976a..6167e84 100644
---- a/src/condor_includes/authentication.h
-+++ b/src/condor_includes/authentication.h
-@@ -40,7 +40,7 @@ class Authentication {
-
- ~Authentication();
-
-- int authenticate( char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
-+ int authenticate( const char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
- //------------------------------------------
- // PURPOSE: authenticate with the other side
- // REQUIRE: hostAddr -- host to authenticate
-@@ -50,7 +50,7 @@ class Authentication {
- // RETURNS: -1 -- failure
- //------------------------------------------
-
-- int authenticate( char *hostAddr, KeyInfo *& key, const char* auth_methods, CondorError* errstack, int timeout);
-+ int authenticate( const char *hostAddr, KeyInfo *& key, const char* auth_methods, CondorError* errstack, int timeout);
- //------------------------------------------
- // PURPOSE: To send the secret key over. this method
- // is written to keep compatibility issues
-@@ -161,7 +161,7 @@ class Authentication {
-
- #endif /* !SKIP_AUTHENTICATION */
-
-- int authenticate_inner( char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
-+ int authenticate_inner( const char *hostAddr, const char* auth_methods, CondorError* errstack, int timeout);
-
- //------------------------------------------
- // Data (private)
-diff --git a/src/condor_includes/condor_auth_x509.h b/src/condor_includes/condor_auth_x509.h
-index bdbc545..dac8aa5 100644
---- a/src/condor_includes/condor_auth_x509.h
-+++ b/src/condor_includes/condor_auth_x509.h
-@@ -103,7 +103,7 @@ class Condor_Auth_X509 : public Condor_Auth_Base {
-
- int authenticate_self_gss(CondorError* errstack);
-
-- int authenticate_client_gss(CondorError* errstack);
-+ int authenticate_client_gss(const char *remoteHost, CondorError* errstack);
-
- int authenticate_server_gss(CondorError* errstack);
-
-diff --git a/src/condor_includes/condor_qmgr.h b/src/condor_includes/condor_qmgr.h
-index 5e5012e..642b602 100644
---- a/src/condor_includes/condor_qmgr.h
-+++ b/src/condor_includes/condor_qmgr.h
-@@ -25,7 +25,7 @@
- #include "proc.h"
- #include "../condor_utils/CondorError.h"
- #include "condor_classad.h"
--
-+#include "daemon.h"
-
- typedef struct {
- bool dummy;
-@@ -54,8 +54,7 @@ int InitializeConnection(const char *, const char *);
- int InitializeReadOnlyConnection(const char * );
-
- /** Initiate connection to schedd job queue and begin transaction.
-- @param qmgr_location can be the name or sinful string of a schedd or
-- NULL to connect to the local schedd
-+ @param daemon a daemon object of type DT_SCHEDD
- @param timeout specifies the maximum time (in seconds) to wait for TCP
- connection establishment
- @param read_only can be set to true to skip the potentially slow
-@@ -64,7 +63,7 @@ int InitializeReadOnlyConnection(const char * );
- @param schedd_version_str Version of schedd if known (o.w. NULL).
- @return opaque Qmgr_connection structure
- */
--Qmgr_connection *ConnectQ(const char *qmgr_location, int timeout=0,
-+Qmgr_connection *ConnectQ(Daemon &daemon, int timeout=0,
- bool read_only=false, CondorError* errstack=NULL,
- const char *effective_owner=NULL,
- char const *schedd_version_str=NULL);
-diff --git a/src/condor_includes/condor_secman.h b/src/condor_includes/condor_secman.h
-index b59519e..9891497 100644
---- a/src/condor_includes/condor_secman.h
-+++ b/src/condor_includes/condor_secman.h
-@@ -106,12 +106,12 @@ public:
- // spawn off a non-blocking attempt to create a security
- // session so that in the future, a UDP command could succeed
- // without StartCommandWouldBlock.
-- StartCommandResult startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id);
-+ StartCommandResult startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id, const char *hostname);
-
- // Authenticate a socket using whatever authentication methods
- // have been configured for the specified perm level.
-- static int authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack);
-- static int authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack);
-+ static int authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack, const char * hostname);
-+ static int authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack, const char * hostname);
-
-
- //------------------------------------------
-diff --git a/src/condor_includes/reli_sock.h b/src/condor_includes/reli_sock.h
-index b48ce8d..f5cdca8 100644
---- a/src/condor_includes/reli_sock.h
-+++ b/src/condor_includes/reli_sock.h
-@@ -218,9 +218,9 @@ public:
- virtual int peek(char &);
-
- ///
-- int authenticate( const char* methods, CondorError* errstack, int auth_timeout );
-+ int authenticate( const char* methods, CondorError* errstack, int auth_timeout, const char * hostname );
- ///
-- int authenticate( KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used=NULL );
-+ int authenticate( KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used, const char * hostname );
- ///
- int isClient() { return is_client; };
-
-@@ -254,7 +254,8 @@ protected:
- int prepare_for_nobuffering( stream_coding = stream_unknown);
- int perform_authenticate( bool with_key, KeyInfo *& key,
- const char* methods, CondorError* errstack,
-- int auth_timeout, char **method_used );
-+ int auth_timeout, char **method_used,
-+ const char* hostname );
-
- // This is used internally to recover sanity on the stream after
- // failing to open a file in put_file().
-diff --git a/src/condor_includes/sock.h b/src/condor_includes/sock.h
-index ca1b1c8..53f8b23 100644
---- a/src/condor_includes/sock.h
-+++ b/src/condor_includes/sock.h
-@@ -347,10 +347,10 @@ public:
- bool isAuthenticated() const;
-
- ///
-- virtual int authenticate(const char * auth_methods, CondorError* errstack, int timeout);
-+ virtual int authenticate(const char * auth_methods, CondorError* errstack, int timeout, const char *hostname);
- ///
- // method_used should be freed by the caller when finished with it
-- virtual int authenticate(KeyInfo *&ki, const char * auth_methods, CondorError* errstack, int timeout, char **method_used=NULL);
-+ virtual int authenticate(KeyInfo *&ki, const char * auth_methods, CondorError* errstack, int timeout, char **method_used, const char *hostname);
-
- /// if we are connecting, merges together Stream::get_deadline
- /// and connect_timeout_time()
-diff --git a/src/condor_io/authentication.cpp b/src/condor_io/authentication.cpp
-index 4a11db0..0374857 100644
---- a/src/condor_io/authentication.cpp
-+++ b/src/condor_io/authentication.cpp
-@@ -85,7 +85,7 @@ Authentication::~Authentication()
- #endif
- }
-
--int Authentication::authenticate( char *hostAddr, KeyInfo *& key,
-+int Authentication::authenticate( const char *hostAddr, KeyInfo *& key,
- const char* auth_methods, CondorError* errstack, int timeout)
- {
- int retval = authenticate(hostAddr, auth_methods, errstack, timeout);
-@@ -106,7 +106,7 @@ int Authentication::authenticate( char *hostAddr, KeyInfo *& key,
- return retval;
- }
-
--int Authentication::authenticate( char *hostAddr, const char* auth_methods,
-+int Authentication::authenticate( const char *hostAddr, const char* auth_methods,
- CondorError* errstack, int timeout)
- {
- int retval;
-@@ -124,7 +124,7 @@ int Authentication::authenticate( char *hostAddr, const char* auth_methods,
- return retval;
- }
-
--int Authentication::authenticate_inner( char *hostAddr, const char* auth_methods,
-+int Authentication::authenticate_inner( const char *hostAddr, const char* auth_methods,
- CondorError* errstack, int timeout)
- {
- #if defined(SKIP_AUTHENTICATION)
-diff --git a/src/condor_io/condor_auth_x509.cpp b/src/condor_io/condor_auth_x509.cpp
-index ee80b9d..7c81cea 100644
---- a/src/condor_io/condor_auth_x509.cpp
-+++ b/src/condor_io/condor_auth_x509.cpp
-@@ -92,7 +92,7 @@ Condor_Auth_X509 :: ~Condor_Auth_X509()
- }
- }
-
--int Condor_Auth_X509 :: authenticate(const char * /* remoteHost */, CondorError* errstack)
-+int Condor_Auth_X509 :: authenticate(const char * remoteHost, CondorError* errstack)
- {
- int status = 1;
- int reply = 0;
-@@ -171,7 +171,7 @@ int Condor_Auth_X509 :: authenticate(const char * /* remoteHost */, CondorError*
-
- switch ( mySock_->isClient() ) {
- case 1:
-- status = authenticate_client_gss(errstack);
-+ status = authenticate_client_gss(remoteHost, errstack);
- break;
- default:
- status = authenticate_server_gss(errstack);
-@@ -655,7 +655,7 @@ int Condor_Auth_X509::authenticate_self_gss(CondorError* errstack)
- return TRUE;
- }
-
--int Condor_Auth_X509::authenticate_client_gss(CondorError* errstack)
-+int Condor_Auth_X509::authenticate_client_gss(const char * remoteHost, CondorError* errstack)
- {
- OM_uint32 major_status = 0;
- OM_uint32 minor_status = 0;
-@@ -775,31 +775,48 @@ int Condor_Auth_X509::authenticate_client_gss(CondorError* errstack)
- }
- }
-
-- std::string fqh = get_full_hostname(mySock_->peer_addr());
-- StringList * daemonNames = getDaemonList("GSI_DAEMON_NAME",fqh.c_str());
--
-- // Now, let's see if the name is in the list, I am not using
-- // anycase here, so if the host name and what we are looking for
-- // are in different cases, then we will run into problems.
-- if( daemonNames ) {
-- status = daemonNames->contains_withwildcard(server) == TRUE? 1 : 0;
--
-- if( !status ) {
-- errstack->pushf("GSI", GSI_ERR_UNAUTHORIZED_SERVER,
-- "Failed to authenticate because the subject '%s' is not currently trusted by you. "
-- "If it should be, add it to GSI_DAEMON_NAME or undefine GSI_DAEMON_NAME.", server);
-- dprintf(D_SECURITY,
-- "GSI_DAEMON_NAME is defined and the server %s is not specified in the GSI_DAEMON_NAME parameter\n",
-- server);
-- }
-+ std::vector<MyString> fqhs;
-+ if (remoteHost)
-+ {
-+ std::vector<MyString> fqhs_copy = get_hostname_with_alias(mySock_->peer_addr());
-+ fqhs.push_back(remoteHost);
-+ fqhs.insert(fqhs.begin()+1, fqhs_copy.begin(), fqhs_copy.end());
- }
-- else {
-- status = CheckServerName(fqh.c_str(),mySock_->peer_ip_str(),mySock_,errstack);
-+ else
-+ {
-+ fqhs = get_hostname_with_alias(mySock_->peer_addr());
- }
-+ dprintf(D_FULLDEBUG, "Number of aliases: %zu\n", fqhs.size());
-+ for(std::vector<MyString>::const_iterator it = fqhs.begin(); it != fqhs.end(); ++it) {
-+ dprintf(D_FULLDEBUG, "Checking validity of alias %s\n", it->Value());
-+ std::string fqh = it->Value();
-+ StringList * daemonNames = getDaemonList("GSI_DAEMON_NAME",fqh.c_str());
-+
-+ // Now, let's see if the name is in the list, I am not using
-+ // anycase here, so if the host name and what we are looking for
-+ // are in different cases, then we will run into problems.
-+ if( daemonNames ) {
-+ status = daemonNames->contains_withwildcard(server) == TRUE? 1 : 0;
-+
-+ if( !status ) {
-+ errstack->pushf("GSI", GSI_ERR_UNAUTHORIZED_SERVER,
-+ "Failed to authenticate because the subject '%s' is not currently trusted by you. "
-+ "If it should be, add it to GSI_DAEMON_NAME or undefine GSI_DAEMON_NAME.", server);
-+ dprintf(D_SECURITY,
-+ "GSI_DAEMON_NAME is defined and the server %s is not specified in the GSI_DAEMON_NAME parameter\n",
-+ server);
-+ }
-+ }
-+ else {
-+ status = CheckServerName(fqh.c_str(),mySock_->peer_ip_str(),mySock_,errstack);
-+ }
-+ delete daemonNames;
-
-- if (status) {
-- dprintf(D_SECURITY, "valid GSS connection established to %s\n", server);
-- }
-+ if (status) {
-+ dprintf(D_SECURITY, "valid GSS connection established to %s\n", server);
-+ break;
-+ }
-+ }
-
- mySock_->encode();
- if (!mySock_->code(status) || !mySock_->end_of_message()) {
-@@ -810,7 +827,6 @@ int Condor_Auth_X509::authenticate_client_gss(CondorError* errstack)
- }
-
- delete [] server;
-- delete daemonNames;
- }
- clear:
- return (status == 0) ? FALSE : TRUE;
-diff --git a/src/condor_io/condor_secman.cpp b/src/condor_io/condor_secman.cpp
-index 21607fe..ea768bf 100644
---- a/src/condor_io/condor_secman.cpp
-+++ b/src/condor_io/condor_secman.cpp
-@@ -855,7 +855,7 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- SecManStartCommand (
- int cmd,Sock *sock,bool raw_protocol,
- CondorError *errstack,int subcmd,StartCommandCallbackType *callback_fn,
-- void *misc_data,bool nonblocking,char const *cmd_description,char const *sec_session_id_hint,SecMan *sec_man):
-+ void *misc_data,bool nonblocking,char const *cmd_description,char const *sec_session_id_hint,SecMan *sec_man, const std::string &hostname):
-
- m_cmd(cmd),
- m_subcmd(subcmd),
-@@ -867,7 +867,8 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- m_nonblocking(nonblocking),
- m_pending_socket_registered(false),
- m_sec_man(*sec_man),
-- m_use_tmp_sec_session(false)
-+ m_use_tmp_sec_session(false),
-+ m_hostname(hostname)
- {
- m_sec_session_id_hint = sec_session_id_hint ? sec_session_id_hint : "";
- if( m_sec_session_id_hint == USE_TMP_SEC_SESSION ) {
-@@ -972,6 +973,7 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- KeyCacheEntry *m_enc_key;
- KeyInfo* m_private_key;
- MyString m_sec_session_id_hint;
-+ std::string m_hostname;
-
- enum StartCommandState {
- SendAuthInfo,
-@@ -1023,7 +1025,7 @@ class SecManStartCommand: Service, public ClassyCountedPtr {
- };
-
- StartCommandResult
--SecMan::startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id_hint)
-+SecMan::startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errstack, int subcmd, StartCommandCallbackType *callback_fn, void *misc_data, bool nonblocking,char const *cmd_description,char const *sec_session_id_hint, const char *hostname)
- {
- // This function is simply a convenient wrapper around the
- // SecManStartCommand class, which does the actual work.
-@@ -1032,7 +1034,8 @@ SecMan::startCommand( int cmd, Sock* sock, bool raw_protocol, CondorError* errst
- // The blocking case could avoid use of the heap, but for simplicity,
- // we just do the same in both cases.
-
-- classy_counted_ptr<SecManStartCommand> sc = new SecManStartCommand(cmd,sock,raw_protocol,errstack,subcmd,callback_fn,misc_data,nonblocking,cmd_description,sec_session_id_hint,this);
-+ std::string hostname_str = hostname ? hostname : "";
-+ classy_counted_ptr<SecManStartCommand> sc = new SecManStartCommand(cmd,sock,raw_protocol,errstack,subcmd,callback_fn,misc_data,nonblocking,cmd_description,sec_session_id_hint,this, hostname_str);
-
- ASSERT(sc.get());
-
-@@ -1829,7 +1832,7 @@ SecManStartCommand::authenticate_inner()
- }
-
- int auth_timeout = m_sec_man.getSecTimeout( CLIENT_PERM );
-- bool auth_success = m_sock->authenticate(m_private_key, auth_methods, m_errstack,auth_timeout);
-+ bool auth_success = m_sock->authenticate(m_private_key, auth_methods, m_errstack,auth_timeout, NULL, m_hostname.c_str());
-
- if (auth_methods) {
- free(auth_methods);
-@@ -2159,7 +2162,8 @@ SecManStartCommand::DoTCPAuth_inner()
- m_nonblocking,
- m_cmd_description.Value(),
- m_sec_session_id_hint.Value(),
-- &m_sec_man);
-+ &m_sec_man,
-+ m_hostname);
-
- StartCommandResult auth_result = m_tcp_auth_command->startCommand();
-
-@@ -2796,23 +2800,23 @@ char* SecMan::my_parent_unique_id() {
- }
-
- int
--SecMan::authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack)
-+SecMan::authenticate_sock(Sock *s,DCpermission perm, CondorError* errstack, const char * hostname)
- {
- MyString methods;
- getAuthenticationMethods( perm, &methods );
- ASSERT(s);
- int auth_timeout = getSecTimeout(perm);
-- return s->authenticate(methods.Value(),errstack,auth_timeout);
-+ return s->authenticate(methods.Value(),errstack,auth_timeout, hostname);
- }
-
- int
--SecMan::authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack)
-+SecMan::authenticate_sock(Sock *s,KeyInfo *&ki, DCpermission perm, CondorError* errstack, const char * hostname)
- {
- MyString methods;
- getAuthenticationMethods( perm, &methods );
- ASSERT(s);
- int auth_timeout = getSecTimeout(perm);
-- return s->authenticate(ki,methods.Value(),errstack,auth_timeout);
-+ return s->authenticate(ki,methods.Value(),errstack,auth_timeout, NULL, hostname);
- }
-
- int
-diff --git a/src/condor_io/reli_sock.cpp b/src/condor_io/reli_sock.cpp
-index d80bab4..00a6d10 100644
---- a/src/condor_io/reli_sock.cpp
-+++ b/src/condor_io/reli_sock.cpp
-@@ -967,11 +967,11 @@ ReliSock::prepare_for_nobuffering(stream_coding direction)
-
- int ReliSock::perform_authenticate(bool with_key, KeyInfo *& key,
- const char* methods, CondorError* errstack,
-- int auth_timeout, char **method_used)
-+ int auth_timeout, char **method_used,
-+ const char * hostname )
- {
- int in_encode_mode;
- int result;
--
- if( method_used ) {
- *method_used = NULL;
- }
-@@ -984,9 +984,9 @@ int ReliSock::perform_authenticate(bool with_key, KeyInfo *& key,
-
- // actually perform the authentication
- if ( with_key ) {
-- result = authob.authenticate( hostAddr, key, methods, errstack, auth_timeout );
-+ result = authob.authenticate( hostname, key, methods, errstack, auth_timeout );
- } else {
-- result = authob.authenticate( hostAddr, methods, errstack, auth_timeout );
-+ result = authob.authenticate( hostname, methods, errstack, auth_timeout );
- }
- // restore stream mode (either encode or decode)
- if ( in_encode_mode && is_decode() ) {
-@@ -1010,16 +1010,16 @@ int ReliSock::perform_authenticate(bool with_key, KeyInfo *& key,
- }
- }
-
--int ReliSock::authenticate(KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used)
-+int ReliSock::authenticate(KeyInfo *& key, const char* methods, CondorError* errstack, int auth_timeout, char **method_used, const char *hostname)
- {
-- return perform_authenticate(true,key,methods,errstack,auth_timeout,method_used);
-+ return perform_authenticate(true,key,methods,errstack,auth_timeout,method_used, hostname);
- }
-
- int
--ReliSock::authenticate(const char* methods, CondorError* errstack,int auth_timeout )
-+ReliSock::authenticate(const char* methods, CondorError* errstack,int auth_timeout, const char * hostname)
- {
- KeyInfo *key = NULL;
-- return perform_authenticate(false,key,methods,errstack,auth_timeout,NULL);
-+ return perform_authenticate(false,key,methods,errstack,auth_timeout,NULL, hostname);
- }
-
- bool
-diff --git a/src/condor_io/sock.cpp b/src/condor_io/sock.cpp
-index c4dcb0b..e743139 100644
---- a/src/condor_io/sock.cpp
-+++ b/src/condor_io/sock.cpp
-@@ -2235,12 +2235,12 @@ bool Sock :: is_hdr_encrypt(){
- return FALSE;
- }
-
--int Sock :: authenticate(KeyInfo *&, const char * /* methods */, CondorError* /* errstack */, int /*timeout*/, char ** /*method_used*/)
-+int Sock :: authenticate(KeyInfo *&, const char * /* methods */, CondorError* /* errstack */, int /*timeout*/, char ** /*method_used*/, const char * /*hostname*/)
- {
- return -1;
- }
-
--int Sock :: authenticate(const char * /* methods */, CondorError* /* errstack */, int /*timeout*/)
-+int Sock :: authenticate(const char * /* methods */, CondorError* /* errstack */, int /*timeout*/, const char * /*hostname*/)
- {
- /*
- errstack->push("AUTHENTICATE", AUTHENTICATE_ERR_NOT_BUILT,
-diff --git a/src/condor_job_router/submit_job.cpp b/src/condor_job_router/submit_job.cpp
-index ccad171..393bafc 100644
---- a/src/condor_job_router/submit_job.cpp
-+++ b/src/condor_job_router/submit_job.cpp
-@@ -175,7 +175,7 @@ ClaimJobResult claim_job(int cluster, int proc, MyString * error_details, const
- static Qmgr_connection *open_q_as_owner(char const *effective_owner,DCSchedd &schedd,FailObj &failobj)
- {
- CondorError errstack;
-- Qmgr_connection * qmgr = ConnectQ(schedd.addr(), 0 /*timeout==default*/, false /*read-only*/, & errstack, effective_owner, schedd.version());
-+ Qmgr_connection * qmgr = ConnectQ(schedd, 0 /*timeout==default*/, false /*read-only*/, & errstack, effective_owner, schedd.version());
- if( ! qmgr ) {
- failobj.fail("Unable to connect\n%s\n", errstack.getFullText(true));
- return NULL;
-diff --git a/src/condor_prio/prio.cpp b/src/condor_prio/prio.cpp
-index deec9b1..c27aa94 100644
---- a/src/condor_prio/prio.cpp
-+++ b/src/condor_prio/prio.cpp
-@@ -157,8 +157,7 @@ main( int argc, char *argv[] )
- }
-
- // Open job queue
-- DaemonName = schedd.addr();
-- q = ConnectQ(DaemonName.Value());
-+ q = ConnectQ(schedd);
- if( !q ) {
- fprintf( stderr, "Failed to connect to queue manager %s\n",
- DaemonName.Value() );
-diff --git a/src/condor_q.V6/queue.cpp b/src/condor_q.V6/queue.cpp
-index 58ff6ed..85ad9cf 100644
---- a/src/condor_q.V6/queue.cpp
-+++ b/src/condor_q.V6/queue.cpp
-@@ -119,13 +119,13 @@ static char * bufferJobShort (ClassAd *);
- /* if useDB is false, then v1 =scheddAddress, v2=scheddName, v3=scheddMachine, v4=scheddVersion;
- if useDB is true, then v1 =quill_name, v2=db_ipAddr, v3=db_name, v4=db_password
- */
--static bool show_queue (const char* v1, const char* v2, const char* v3, const char* v4, bool useDB);
--static bool show_queue_buffered (const char* v1, const char* v2, const char* v3, const char* v4, bool useDB);
-+static bool show_queue (const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd *ad, bool useDB);
-+static bool show_queue_buffered (const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd *ad, bool useDB);
- static void init_output_mask();
-
-
- /* a type used to point to one of the above two functions */
--typedef bool (*show_queue_fp)(const char* v1, const char* v2, const char* v3, const char* v4, bool useDB);
-+typedef bool (*show_queue_fp)(const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd *ad, bool useDB);
-
- static bool read_classad_file(const char *filename, ClassAdList &classads);
-
-@@ -506,7 +506,7 @@ int main (int argc, char **argv)
-
- /* ask the database for the queue */
-
-- if ( (retval = sqfp( NULL, NULL, NULL, NULL, TRUE) ) ) {
-+ if ( (retval = sqfp( NULL, NULL, NULL, NULL, NULL, TRUE) ) ) {
- /* if the queue was retrieved, then I am done */
- freeConnectionStrings();
- exit(retval?EXIT_SUCCESS:EXIT_FAILURE);
-@@ -557,7 +557,7 @@ int main (int argc, char **argv)
- (quill.name()):tmp_char,
- (quill.fullHostname())?
- (quill.fullHostname()):tmp_char,
-- NULL, FALSE) ) ) )
-+ NULL, NULL, FALSE) ) ) )
- {
- /* if the queue was retrieved, then I am done */
- freeConnectionStrings();
-@@ -600,7 +600,7 @@ int main (int argc, char **argv)
- #endif /* HAVE_EXT_POSTGRESQL */
- case DIRECT_SCHEDD:
- retval = sqfp(scheddAddr, scheddName, scheddMachine,
-- scheddVersion.Value(), FALSE);
-+ scheddVersion.Value(), NULL, FALSE);
-
- /* Hopefully I got the queue from the schedd... */
- freeConnectionStrings();
-@@ -793,7 +793,7 @@ int main (int argc, char **argv)
- case DIRECT_RDBMS:
- if (useDB) {
- if ( (retval = sqfp(quillName, dbIpAddr, dbName,
-- queryPassword, TRUE) ) )
-+ queryPassword, ad, TRUE) ) )
- {
- /* processed correctly, so do the next ad */
- continue;
-@@ -840,7 +840,7 @@ int main (int argc, char **argv)
-
- if((result2 == Q_OK) && quillAddr &&
- (retval = sqfp(quillAddr, quillName, quillMachine,
-- NULL, FALSE) ) )
-+ NULL, ad, FALSE) ) )
- {
- /* processed correctly, so do the next ad */
- continue;
-@@ -896,7 +896,7 @@ int main (int argc, char **argv)
- case DIRECT_SCHEDD:
- /* database not configured or could not be reached,
- query the schedd daemon directly */
-- retval = sqfp(scheddAddr, scheddName, scheddMachine, scheddVersion.Value(), FALSE);
-+ retval = sqfp(scheddAddr, scheddName, scheddMachine, scheddVersion.Value(), ad, FALSE);
-
- break;
-
-@@ -2536,7 +2536,7 @@ static void init_output_mask()
- */
-
- static bool
--show_queue_buffered( const char* v1, const char* v2, const char* v3, const char* v4, bool useDB )
-+show_queue_buffered( const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd* ad, bool useDB )
- {
- const char *scheddAddress = 0;
- const char *scheddName = 0;
-@@ -2709,7 +2709,13 @@ show_queue_buffered( const char* v1, const char* v2, const char* v3, const char*
- #endif /* HAVE_EXT_POSTGRESQL */
- } else {
- // fetch queue from schedd and stash it in output_buffer.
-- Daemon schedd(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ Daemon *schedd_ptr = NULL;
-+ if (ad)
-+ schedd_ptr = new Daemon(ad, DT_SCHEDD, pool ? pool->addr() : NULL );
-+ else
-+ schedd_ptr = new Daemon(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ ASSERT(schedd_ptr);
-+ Daemon & schedd = *schedd_ptr;
- const char *version = schedd.version();
- bool useFastPath = false;
- if (version) {
-@@ -2719,9 +2725,12 @@ show_queue_buffered( const char* v1, const char* v2, const char* v3, const char*
-
- // stash the schedd daemon object for use by process_buffer_line
- g_cur_schedd_for_process_buffer_line = new Daemon( schedd );
-+ ASSERT( g_cur_schedd_for_process_buffer_line );
-+
-+ delete schedd_ptr;
-
- int fetchResult;
-- if( (fetchResult = Q.fetchQueueFromHostAndProcess( scheddAddress, attrs,
-+ if( (fetchResult = Q.fetchQueueFromDaemonAndProcess( *g_cur_schedd_for_process_buffer_line, attrs,
- process_buffer_line,
- useFastPath,
- &errstack)) != Q_OK) {
-@@ -2967,7 +2976,7 @@ process_buffer_line( ClassAd *job )
- refer to the prototype of this function on the top of this file
- */
- static bool
--show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool useDB )
-+show_queue( const char* v1, const char* v2, const char* v3, const char* v4, const ClassAd * ad, bool useDB )
- {
- const char *scheddAddress;
- const char *scheddName;
-@@ -3046,7 +3055,13 @@ show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool
- } else {
- // fetch queue from schedd
- int fetchResult;
-- if( (fetchResult = Q.fetchQueueFromHost(jobs, attrs,scheddAddress, scheddVersion, &errstack) != Q_OK)) {
-+ Daemon *schedd_daemon_ptr;
-+ if (ad)
-+ schedd_daemon_ptr = new Daemon(ad, DT_SCHEDD, pool ? pool->addr() : NULL );
-+ else
-+ schedd_daemon_ptr = new Daemon(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ Daemon &schedd_daemon = *schedd_daemon_ptr;
-+ if( (fetchResult = Q.fetchQueueFromDaemon(jobs, attrs, schedd_daemon, scheddVersion, &errstack) != Q_OK)) {
- // The parse + fetch failed, print out why
- switch(fetchResult) {
- case Q_PARSE_ERROR:
-@@ -3081,7 +3096,12 @@ show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool
- scheddAddress, scheddMachine);
- }
-
-- Daemon schedd_daemon(DT_SCHEDD,scheddName,pool ? pool->addr() : NULL);
-+ Daemon *schedd_daemon_ptr;
-+ if (ad)
-+ schedd_daemon_ptr = new Daemon(ad, DT_SCHEDD, pool ? pool->addr() : NULL );
-+ else
-+ schedd_daemon_ptr = new Daemon(DT_SCHEDD, scheddName, pool ? pool->addr() : NULL );
-+ Daemon &schedd_daemon = *schedd_daemon_ptr;
- schedd_daemon.locate();
-
- jobs.Open();
-@@ -3089,6 +3109,7 @@ show_queue( const char* v1, const char* v2, const char* v3, const char* v4, bool
- doRunAnalysis( job, &schedd_daemon );
- }
- jobs.Close();
-+ delete schedd_daemon_ptr;
-
- if(lastUpdate) {
- free(lastUpdate);
-diff --git a/src/condor_schedd.V6/qmgmt_receivers.cpp b/src/condor_schedd.V6/qmgmt_receivers.cpp
-index 98e1960..db6314f 100644
---- a/src/condor_schedd.V6/qmgmt_receivers.cpp
-+++ b/src/condor_schedd.V6/qmgmt_receivers.cpp
-@@ -71,7 +71,7 @@ do_Q_request(ReliSock *syscall_sock,bool &may_fork)
- dprintf(D_SECURITY,"Calling authenticate(%s) in qmgmt_receivers\n", methods.Value());
- }
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(syscall_sock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(syscall_sock, WRITE, &errstack, NULL) ) {
- // Failed to authenticate
- dprintf( D_ALWAYS, "SCHEDD: authentication failed: %s\n",
- errstack.getFullText() );
-diff --git a/src/condor_schedd.V6/qmgr_job_updater.cpp b/src/condor_schedd.V6/qmgr_job_updater.cpp
-index f9c100a..25126dd 100644
---- a/src/condor_schedd.V6/qmgr_job_updater.cpp
-+++ b/src/condor_schedd.V6/qmgr_job_updater.cpp
-@@ -47,8 +47,10 @@ QmgrJobUpdater::QmgrJobUpdater( ClassAd* job, const char* schedd_address,
- schedd_addr(schedd_address?strdup(schedd_address):0),
- schedd_ver(schedd_version?strdup(schedd_version):0),
- cluster(-1), proc(-1),
-- q_update_tid(-1)
-+ q_update_tid(-1),
-+ m_daemon(DT_SCHEDD, schedd_address)
- {
-+
- if( ! is_valid_sinful(schedd_address) ) {
- EXCEPT( "schedd_addr not specified with valid address (%s)",
- schedd_address );
-@@ -252,7 +254,7 @@ QmgrJobUpdater::updateAttr( const char *name, const char *expr, bool updateMaste
- if (log) {
- flags = SHOULDLOG;
- }
-- if( ConnectQ(schedd_addr,SHADOW_QMGMT_TIMEOUT,false,NULL,m_owner.Value(),schedd_ver) ) {
-+ if( ConnectQ(m_daemon,SHADOW_QMGMT_TIMEOUT,false,NULL,m_owner.Value(),schedd_ver) ) {
- if( SetAttribute(cluster,p,name,expr,flags) < 0 ) {
- err_msg = "SetAttribute() failed";
- result = FALSE;
-@@ -338,7 +340,7 @@ QmgrJobUpdater::updateJob( update_t type, SetAttributeFlags_t commit_flags )
- job_queue_attrs->contains_anycase(name)) ) {
-
- if( ! is_connected ) {
-- if( ! ConnectQ(schedd_addr, SHADOW_QMGMT_TIMEOUT, false, NULL, m_owner.Value(),schedd_ver) ) {
-+ if( ! ConnectQ(m_daemon, SHADOW_QMGMT_TIMEOUT, false, NULL, m_owner.Value(),schedd_ver) ) {
- return false;
- }
- is_connected = true;
-@@ -351,7 +353,7 @@ QmgrJobUpdater::updateJob( update_t type, SetAttributeFlags_t commit_flags )
- m_pull_attrs->rewind();
- while ( (name = m_pull_attrs->next()) ) {
- if ( !is_connected ) {
-- if ( !ConnectQ( schedd_addr, SHADOW_QMGMT_TIMEOUT, true, NULL, NULL, schedd_ver ) ) {
-+ if ( !ConnectQ( m_daemon, SHADOW_QMGMT_TIMEOUT, true, NULL, NULL, schedd_ver ) ) {
- return false;
- }
- is_connected = true;
-@@ -392,7 +394,7 @@ QmgrJobUpdater::retrieveJobUpdates( void )
- ProcIdToStr(cluster, proc, id_str);
- job_ids.insert(id_str);
-
-- if ( !ConnectQ( schedd_addr, SHADOW_QMGMT_TIMEOUT, false ) ) {
-+ if ( !ConnectQ( m_daemon, SHADOW_QMGMT_TIMEOUT, false ) ) {
- return false;
- }
- if ( GetDirtyAttributes( cluster, proc, &updates ) < 0 ) {
-diff --git a/src/condor_schedd.V6/qmgr_job_updater.h b/src/condor_schedd.V6/qmgr_job_updater.h
-index e487688..ad86fe2 100644
---- a/src/condor_schedd.V6/qmgr_job_updater.h
-+++ b/src/condor_schedd.V6/qmgr_job_updater.h
-@@ -47,7 +47,7 @@ class QmgrJobUpdater : public Service
- {
- public:
- QmgrJobUpdater( ClassAd* job_a, const char*schedd_address, char const *schedd_version);
-- QmgrJobUpdater( ) : common_job_queue_attrs(0), hold_job_queue_attrs(0), evict_job_queue_attrs(0), remove_job_queue_attrs(0), requeue_job_queue_attrs(0), terminate_job_queue_attrs(0), checkpoint_job_queue_attrs(0), x509_job_queue_attrs(0), m_pull_attrs(0), job_ad(0), schedd_addr(0), schedd_ver(0), cluster(-1), proc(-1), q_update_tid(-1) {}
-+ QmgrJobUpdater( ) : common_job_queue_attrs(0), hold_job_queue_attrs(0), evict_job_queue_attrs(0), remove_job_queue_attrs(0), requeue_job_queue_attrs(0), terminate_job_queue_attrs(0), checkpoint_job_queue_attrs(0), x509_job_queue_attrs(0), m_pull_attrs(0), job_ad(0), schedd_addr(0), schedd_ver(0), cluster(-1), proc(-1), q_update_tid(-1), m_daemon(DT_SCHEDD, NULL) {}
- virtual ~QmgrJobUpdater();
-
- virtual void startUpdateTimer( void );
-@@ -148,6 +148,8 @@ private:
- int proc;
-
- int q_update_tid;
-+
-+ Daemon m_daemon;
- };
-
- // usefull if you don't want to update the job queue
-diff --git a/src/condor_schedd.V6/qmgr_lib_support.cpp b/src/condor_schedd.V6/qmgr_lib_support.cpp
-index 64bfffd..4afd1a6 100644
---- a/src/condor_schedd.V6/qmgr_lib_support.cpp
-+++ b/src/condor_schedd.V6/qmgr_lib_support.cpp
-@@ -34,8 +34,9 @@ ReliSock *qmgmt_sock = NULL;
- static Qmgr_connection connection;
-
- Qmgr_connection *
--ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* errstack, const char *effective_owner, const char* schedd_version_str )
-+ConnectQ(Daemon &d, int timeout, bool read_only, CondorError* errstack, const char *effective_owner, const char* schedd_version_str )
- {
-+
- int rval, ok;
- int cmd = read_only ? QMGMT_READ_CMD : QMGMT_WRITE_CMD;
-
-@@ -54,15 +55,10 @@ ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* er
- }
-
- // no connection active as of now; create a new one
-- Daemon d( DT_SCHEDD, qmgr_location );
-+ dprintf(D_HOSTNAME, "Hostname of new connection: %s\n", d.fullHostname());
- if( ! d.locate() ) {
- ok = FALSE;
-- if( qmgr_location ) {
-- dprintf( D_ALWAYS, "Can't find address of queue manager %s\n",
-- qmgr_location );
-- } else {
-- dprintf( D_ALWAYS, "Can't find address of local queue manager\n" );
-- }
-+ dprintf( D_ALWAYS, "Can't find address of queue manager\n" );
- } else {
- // QMGMT_WRITE_CMD didn't exist before 7.5.0, so use QMGMT_READ_CMD
- // when talking to older schedds
-@@ -104,7 +100,7 @@ ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* er
- // the connection, because this command is registered with
- // force_authentication=true on the server side.
- if( cmd == QMGMT_WRITE_CMD && !qmgmt_sock->triedAuthentication()) {
-- if( !SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select ) )
-+ if( !SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select, d.fullHostname()) )
- {
- delete qmgmt_sock;
- qmgmt_sock = NULL;
-@@ -155,7 +151,7 @@ ConnectQ(const char *qmgr_location, int timeout, bool read_only, CondorError* er
- }
-
- if ( !read_only ) {
-- if (!SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select)) {
-+ if (!SecMan::authenticate_sock(qmgmt_sock, CLIENT_PERM, errstack_select, d.fullHostname())) {
- delete qmgmt_sock;
- qmgmt_sock = NULL;
- if (!errstack) {
-diff --git a/src/condor_schedd.V6/schedd.cpp b/src/condor_schedd.V6/schedd.cpp
-index b855407..6731e6c 100644
---- a/src/condor_schedd.V6/schedd.cpp
-+++ b/src/condor_schedd.V6/schedd.cpp
-@@ -3434,7 +3434,7 @@ Scheduler::spoolJobFiles(int mode, Stream* s)
-
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-@@ -3690,7 +3690,7 @@ Scheduler::updateGSICred(int cmd, Stream* s)
-
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-@@ -3901,7 +3901,7 @@ Scheduler::actOnJobs(int, Stream* s)
- rsock->decode();
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-@@ -12104,7 +12104,7 @@ Scheduler::get_job_connect_info_handler_implementation(int, Stream* s) {
- // force authentication
- if( !sock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(sock, WRITE, &errstack) ||
-+ if( ! SecMan::authenticate_sock(sock, WRITE, &errstack, NULL) ||
- ! sock->getFullyQualifiedUser() )
- {
- dprintf( D_ALWAYS,
-@@ -13145,7 +13145,7 @@ Scheduler::claimLocalStartd()
- CondorQuery query(STARTD_AD);
- QueryResult q;
- ClassAdList result;
-- q = query.fetchAds(result, startd_addr, &errstack);
-+ q = query.fetchAds(result, startd, &errstack);
- if ( q != Q_OK ) {
- dprintf(D_FULLDEBUG,
- "ERROR: could not fetch ads from local startd : %s (%s)\n",
-@@ -13654,7 +13654,7 @@ Scheduler::RecycleShadow(int /*cmd*/, Stream *stream)
- sock->decode();
- if( !sock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(sock, WRITE, &errstack) ||
-+ if( ! SecMan::authenticate_sock(sock, WRITE, &errstack, NULL) ||
- ! sock->getFullyQualifiedUser() )
- {
- dprintf( D_ALWAYS,
-@@ -13864,7 +13864,7 @@ Scheduler::clear_dirty_job_attrs_handler(int /*cmd*/, Stream *stream)
- sock->decode();
- if( !sock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(sock, WRITE, &errstack) ||
-+ if( ! SecMan::authenticate_sock(sock, WRITE, &errstack, NULL) ||
- ! sock->getFullyQualifiedUser() )
- {
- dprintf( D_ALWAYS,
-diff --git a/src/condor_schedd.V6/schedd_td.cpp b/src/condor_schedd.V6/schedd_td.cpp
-index 3e30f27..a9b0e4b 100644
---- a/src/condor_schedd.V6/schedd_td.cpp
-+++ b/src/condor_schedd.V6/schedd_td.cpp
-@@ -76,7 +76,7 @@ Scheduler::requestSandboxLocation(int mode, Stream* s)
-
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_schedd.V6/tdman.cpp b/src/condor_schedd.V6/tdman.cpp
-index 53c6000..eb1e205 100644
---- a/src/condor_schedd.V6/tdman.cpp
-+++ b/src/condor_schedd.V6/tdman.cpp
-@@ -869,7 +869,7 @@ TDMan::transferd_registration(int cmd, Stream *sock)
- ///////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_status.V6/status.cpp b/src/condor_status.V6/status.cpp
-index dea2c92..187e843 100644
---- a/src/condor_status.V6/status.cpp
-+++ b/src/condor_status.V6/status.cpp
-@@ -442,14 +442,19 @@ main (int argc, char *argv[])
- // subsystem that corresponds to a daemon (above).
- // Here 'addr' represents either the host:port of requested pool, or
- // alternatively the host:port of daemon associated with requested subsystem (direct mode)
-- q = query->fetchAds (result, addr, &errstack);
-+
-+ // If we are querying the same collector as before, reuse that object.
-+ // This is important for hostname resolution.
-+ if (!direct)
-+ q = query->fetchAds (result, *pool, &errstack);
-+ else
-+ q = query->fetchAds (result, addr, &errstack);
- } else {
- // otherwise obtain list of collectors and submit query that way
- CollectorList * collectors = CollectorList::create();
- q = collectors->query (*query, result, &errstack);
- delete collectors;
- }
--
-
- // if any error was encountered during the query, report it and exit
- if (Q_OK != q) {
-diff --git a/src/condor_submit.V6/submit.cpp b/src/condor_submit.V6/submit.cpp
-index 76bbabf..f5e118b 100644
---- a/src/condor_submit.V6/submit.cpp
-+++ b/src/condor_submit.V6/submit.cpp
-@@ -6237,7 +6237,8 @@ connect_to_the_schedd()
- setupAuthentication();
-
- CondorError errstack;
-- if( ConnectQ(MySchedd->addr(), 0 /* default */, false /* default */, &errstack, NULL, MySchedd->version() ) == 0 ) {
-+ ASSERT(MySchedd);
-+ if( ConnectQ(*MySchedd, 0 /* default */, false /* default */, &errstack, NULL, MySchedd->version() ) == 0 ) {
- if( ScheddName ) {
- fprintf( stderr,
- "\nERROR: Failed to connect to queue manager %s\n%s\n",
-@@ -7202,7 +7203,8 @@ DoCleanup(int,int,const char*)
- // DoCleanup(). This lead to infinite recursion which is bad.
- ClusterCreated = 0;
- if (!ActiveQueueConnection) {
-- ActiveQueueConnection = (ConnectQ(MySchedd->addr()) != 0);
-+ ASSERT( MySchedd );
-+ ActiveQueueConnection = (ConnectQ(*MySchedd) != 0);
- }
- if (ActiveQueueConnection) {
- // Call DestroyCluster() now in an attempt to get the schedd
-diff --git a/src/condor_tools/preen.cpp b/src/condor_tools/preen.cpp
-index 57fcd04..e2f9774 100644
---- a/src/condor_tools/preen.cpp
-+++ b/src/condor_tools/preen.cpp
-@@ -356,7 +356,8 @@ check_spool_dir()
- well_known_list.append( ".pgpass" );
-
- // connect to the Q manager
-- if (!(qmgr = ConnectQ (0))) {
-+ Daemon d(DT_SCHEDD, 0);
-+ if (!(qmgr = ConnectQ (d))) {
- dprintf( D_ALWAYS, "Not cleaning spool directory: Can't contact schedd\n" );
- return;
- }
-diff --git a/src/condor_tools/qedit.cpp b/src/condor_tools/qedit.cpp
-index e36d844..61d1b63 100644
---- a/src/condor_tools/qedit.cpp
-+++ b/src/condor_tools/qedit.cpp
-@@ -131,7 +131,7 @@ main(int argc, char *argv[])
- }
-
- // Open job queue
-- q = ConnectQ( schedd.addr(), 0, false, NULL, NULL, schedd.version() );
-+ q = ConnectQ( schedd, 0, false, NULL, NULL, schedd.version() );
- if( !q ) {
- fprintf( stderr, "Failed to connect to queue manager %s\n",
- schedd.addr() );
-diff --git a/src/condor_tools/tool.cpp b/src/condor_tools/tool.cpp
-index 5e63dc7..8b3ba37 100644
---- a/src/condor_tools/tool.cpp
-+++ b/src/condor_tools/tool.cpp
-@@ -1167,8 +1167,8 @@ resolveNames( DaemonList* daemon_list, StringList* name_list )
- }
-
-
-- if (pool_addr) {
-- q_result = query.fetchAds(ads, pool_addr, &errstack);
-+ if (pool) {
-+ q_result = query.fetchAds(ads, *pool, &errstack);
- } else {
- CollectorList * collectors = CollectorList::create();
- q_result = collectors->query (query, ads);
-diff --git a/src/condor_transferd/td_init.cpp b/src/condor_transferd/td_init.cpp
-index 1fccebd..f2330e1 100644
---- a/src/condor_transferd/td_init.cpp
-+++ b/src/condor_transferd/td_init.cpp
-@@ -277,7 +277,7 @@ TransferD::setup_transfer_request_handler(int /*cmd*/, Stream *sock)
- ///////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_transferd/td_read_files.cpp b/src/condor_transferd/td_read_files.cpp
-index a6c7f87..4febc43 100644
---- a/src/condor_transferd/td_read_files.cpp
-+++ b/src/condor_transferd/td_read_files.cpp
-@@ -67,7 +67,7 @@ TransferD::read_files_handler(int cmd, Stream *sock)
- /////////////////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_transferd/td_write_files.cpp b/src/condor_transferd/td_write_files.cpp
-index 412a552..572cc79 100644
---- a/src/condor_transferd/td_write_files.cpp
-+++ b/src/condor_transferd/td_write_files.cpp
-@@ -67,7 +67,7 @@ TransferD::write_files_handler(int cmd, Stream *sock)
- /////////////////////////////////////////////////////////////////////////
- if( ! rsock->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(rsock, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_utils/classad_command_util.cpp b/src/condor_utils/classad_command_util.cpp
-index 56d7ddb..1ae11a8 100644
---- a/src/condor_utils/classad_command_util.cpp
-+++ b/src/condor_utils/classad_command_util.cpp
-@@ -92,7 +92,7 @@ getCmdFromReliSock( ReliSock* s, ClassAd* ad, bool force_auth )
- s->decode();
- if( force_auth && ! s->triedAuthentication() ) {
- CondorError errstack;
-- if( ! SecMan::authenticate_sock(s, WRITE, &errstack) ) {
-+ if( ! SecMan::authenticate_sock(s, WRITE, &errstack, NULL) ) {
- // we failed to authenticate, we should bail out now
- // since we don't know what user is trying to perform
- // this action.
-diff --git a/src/condor_utils/condor_q.cpp b/src/condor_utils/condor_q.cpp
-index 886b664..c540bbd 100644
---- a/src/condor_utils/condor_q.cpp
-+++ b/src/condor_utils/condor_q.cpp
-@@ -28,6 +28,7 @@
- #include "CondorError.h"
- #include "condor_classad.h"
- #include "quill_enums.h"
-+#include "daemon.h"
-
- #ifdef HAVE_EXT_POSTGRESQL
- #include "pgsqldatabase.h"
-@@ -230,7 +231,8 @@ fetchQueue (ClassAdList &list, StringList &attrs, ClassAd *ad, CondorError* errs
- if (ad == 0)
- {
- // local case
-- if( !(qmgr = ConnectQ( 0, connect_timeout, true, errstack)) ) {
-+ Daemon d(DT_SCHEDD, 0, 0);
-+ if( !(qmgr = ConnectQ( d, connect_timeout, true, errstack)) ) {
- errstack->push("TEST", 0, "FOO");
- return Q_SCHEDD_COMMUNICATION_ERROR;
- }
-@@ -241,8 +243,9 @@ fetchQueue (ClassAdList &list, StringList &attrs, ClassAd *ad, CondorError* errs
- // remote case to handle condor_globalq
- if (!ad->LookupString (ATTR_SCHEDD_IP_ADDR, scheddString))
- return Q_NO_SCHEDD_IP_ADDR;
-+ Daemon d(ad, DT_SCHEDD, NULL);
-
-- if( !(qmgr = ConnectQ( scheddString, connect_timeout, true, errstack)) )
-+ if( !(qmgr = ConnectQ( d, connect_timeout, true, errstack)) )
- return Q_SCHEDD_COMMUNICATION_ERROR;
-
- }
-@@ -255,7 +258,7 @@ fetchQueue (ClassAdList &list, StringList &attrs, ClassAd *ad, CondorError* errs
- }
-
- int CondorQ::
--fetchQueueFromHost (ClassAdList &list, StringList &attrs, const char *host, char const *schedd_version, CondorError* errstack)
-+fetchQueueFromDaemon (ClassAdList &list, StringList &attrs, Daemon &daemon, char const *schedd_version, CondorError* errstack)
- {
- Qmgr_connection *qmgr;
- ExprTree *tree;
-@@ -276,7 +279,7 @@ fetchQueueFromHost (ClassAdList &list, StringList &attrs, const char *host, char
- optimal. :^).
- */
- init(); // needed to get default connect_timeout
-- if( !(qmgr = ConnectQ( host, connect_timeout, true, errstack)) )
-+ if( !(qmgr = ConnectQ( daemon, connect_timeout, true, errstack)) )
- return Q_SCHEDD_COMMUNICATION_ERROR;
-
- bool useFastPath = false;
-@@ -353,7 +356,7 @@ CondorQ::fetchQueueFromDB (ClassAdList &list,
- }
-
- int
--CondorQ::fetchQueueFromHostAndProcess ( const char *host,
-+CondorQ::fetchQueueFromDaemonAndProcess ( Daemon &daemon,
- StringList &attrs,
- process_function process_func,
- bool useFastPath,
-@@ -378,7 +381,7 @@ CondorQ::fetchQueueFromHostAndProcess ( const char *host,
- optimal. :^).
- */
- init(); // needed to get default connect_timeout
-- if( !(qmgr = ConnectQ( host, connect_timeout, true, errstack)) )
-+ if( !(qmgr = ConnectQ( daemon, connect_timeout, true, errstack)) )
- return Q_SCHEDD_COMMUNICATION_ERROR;
-
- // get the ads and filter them
-diff --git a/src/condor_utils/condor_q.h b/src/condor_utils/condor_q.h
-index 7f6a620..ccd9196 100644
---- a/src/condor_utils/condor_q.h
-+++ b/src/condor_utils/condor_q.h
-@@ -23,6 +23,7 @@
- #include "condor_common.h"
- #include "generic_query.h"
- #include "CondorError.h"
-+#include "daemon.h"
-
- #define MAXOWNERLEN 20
- #define MAXSCHEDDLEN 255
-@@ -90,8 +91,8 @@ class CondorQ
- // which pass the criterion specified by the constraints; default is
- // from the local schedd
- int fetchQueue (ClassAdList &, StringList &attrs, ClassAd * = 0, CondorError* errstack = 0);
-- int fetchQueueFromHost (ClassAdList &, StringList &attrs, const char * = 0, char const *schedd_version = 0,CondorError* errstack = 0);
-- int fetchQueueFromHostAndProcess ( const char *, StringList &attrs, process_function process_func, bool useFastPath, CondorError* errstack = 0);
-+ int fetchQueueFromDaemon (ClassAdList &, StringList &attrs, Daemon &, char const *schedd_version = 0,CondorError* errstack = 0);
-+ int fetchQueueFromDaemonAndProcess ( Daemon &, StringList &attrs, process_function process_func, bool useFastPath, CondorError* errstack = 0);
-
- // fetch the job ads from database
- int fetchQueueFromDB (ClassAdList &, char *&lastUpdate, const char * = 0, CondorError* errstack = 0);
-diff --git a/src/condor_utils/condor_query.cpp b/src/condor_utils/condor_query.cpp
-index 95bc78a..acc6201 100644
---- a/src/condor_utils/condor_query.cpp
-+++ b/src/condor_utils/condor_query.cpp
-@@ -386,10 +386,6 @@ addORConstraint (const char *value)
- QueryResult CondorQuery::
- fetchAds (ClassAdList &adList, const char *poolName, CondorError* errstack)
- {
-- Sock* sock;
-- int more;
-- QueryResult result;
-- ClassAd queryAd(extraAttrs), *ad;
-
- if ( !poolName ) {
- return Q_NO_COLLECTOR_HOST;
-@@ -402,7 +398,16 @@ fetchAds (ClassAdList &adList, const char *poolName, CondorError* errstack)
- return Q_NO_COLLECTOR_HOST;
- }
-
-+ return fetchAds(adList, my_collector, errstack);
-+}
-
-+QueryResult CondorQuery::
-+fetchAds (ClassAdList &adList, Daemon &my_collector, CondorError* errstack)
-+{
-+ Sock* sock;
-+ int more;
-+ QueryResult result;
-+ ClassAd queryAd(extraAttrs), *ad;
- // make the query ad
- result = getQueryAd (queryAd);
- if (result != Q_OK) return result;
-diff --git a/src/condor_utils/condor_query.h b/src/condor_utils/condor_query.h
-index 7e58eef..9fedcad 100644
---- a/src/condor_utils/condor_query.h
-+++ b/src/condor_utils/condor_query.h
-@@ -156,6 +156,7 @@ class CondorQuery
-
- // fetch from collector
- QueryResult fetchAds (ClassAdList &adList, const char * pool, CondorError* errstack = NULL);
-+ QueryResult fetchAds (ClassAdList &adList, Daemon &daemon, CondorError* errstack = NULL);
-
-
- // filter list of ads; arg1 is 'in', arg2 is 'out'
-diff --git a/src/condor_utils/ipv6_hostname.cpp b/src/condor_utils/ipv6_hostname.cpp
-index cfefb4b..3666bd4 100644
---- a/src/condor_utils/ipv6_hostname.cpp
-+++ b/src/condor_utils/ipv6_hostname.cpp
-@@ -197,10 +197,13 @@ int get_fqdn_and_ip_from_hostname(const MyString& hostname,
- MyString ret;
- condor_sockaddr ret_addr;
- bool found_ip = false;
-+ bool use_given_name = false;
-
- // if the hostname contains dot, hostname is assumed to be full hostname
- if (hostname.FindChar('.') != -1) {
- ret = hostname;
-+ fqdn = hostname;
-+ use_given_name = true;
- }
-
- if (nodns_enabled()) {
-@@ -219,7 +222,9 @@ int get_fqdn_and_ip_from_hostname(const MyString& hostname,
-
- while (addrinfo* info = ai.next()) {
- if (info->ai_canonname) {
-- fqdn = info->ai_canonname;
-+ dprintf(D_HOSTNAME, "Found canon addr: %s\n", info->ai_canonname);
-+ if (!use_given_name)
-+ fqdn = info->ai_canonname;
- addr = condor_sockaddr(info->ai_addr);
- return 1;
- }
-diff --git a/src/condor_who/who.cpp b/src/condor_who/who.cpp
-index 08f19b6..870668a 100644
---- a/src/condor_who/who.cpp
-+++ b/src/condor_who/who.cpp
-@@ -681,7 +681,7 @@ main( int argc, char *argv[] )
- ClassAdList result;
- if (addr || App.diagnostic) {
- CondorError errstack;
-- QueryResult qr = query->fetchAds (result, addr, &errstack);
-+ QueryResult qr = dae->locate() ? query->fetchAds (result, *dae, &errstack) : query->fetchAds (result, addr, &errstack);
- if (Q_OK != qr) {
- fprintf( stderr, "Error: %s\n", getStrQueryResult(qr) );
- fprintf( stderr, "%s\n", errstack.getFullText(true) );
diff --git a/condor-gahp.patch b/condor-gahp.patch
deleted file mode 100644
index 4d45bf2..0000000
--- a/condor-gahp.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-diff --git a/src/condor_gridmanager/gahp-client.cpp b/src/condor_gridmanager/gahp-client.cpp
-index d2c66ce629..a2a694a6b9 100644
---- a/src/condor_gridmanager/gahp-client.cpp
-+++ b/src/condor_gridmanager/gahp-client.cpp
-@@ -820,6 +820,16 @@ GahpServer::Startup()
- free( tmp_char );
- }
-
-+ // GLOBUS_LOCATION needs to be set for the blahp; otherwise, it defaults to /opt/globus,
-+ // which is likely never correct
-+ tmp_char = param("GLOBUS_LOCATION");
-+ if ( tmp_char ) {
-+ newenv.SetEnv( "GLOBUS_LOCATION", tmp_char );
-+ free( tmp_char );
-+ } else if (getenv("GLOBUS_LOCATION") == NULL) {
-+ newenv.SetEnv( "GLOBUS_LOCATION", "/usr" );
-+ }
-+
- // For amazon ec2 ca authentication
- tmp_char = param("GAHP_SSL_CAFILE");
- if( tmp_char ) {
diff --git a/condor-tmpfiles.conf b/condor-tmpfiles.conf
deleted file mode 100644
index a0095ba..0000000
--- a/condor-tmpfiles.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-d /var/run/condor 0775 condor condor -
-d /var/lock/condor 0775 condor condor -
-d /var/lock/condor/local 0775 condor condor -
\ No newline at end of file
diff --git a/condor.service b/condor.service
deleted file mode 100644
index f39c3e7..0000000
--- a/condor.service
+++ /dev/null
@@ -1,37 +0,0 @@
-
-[Unit]
-Description=Condor Distributed High-Throughput-Computing
-After=syslog.target network.target
-Wants=network.target
-
-[Service]
-EnvironmentFile=-/etc/sysconfig/condor
-ExecStart=/usr/sbin/condor_master -f
-ExecStop=/usr/sbin/condor_off -master
-ExecReload=/bin/kill -HUP $MAINPID
-Restart=always
-RestartSec=5
-StandardOutput=syslog
-LimitNOFILE=16384
-
-#######################################
-# Note: Below are cgroup options
-#######################################
-#Slice=condor
-#CPUAccounting=true
-#CPUShares=1024
-
-#MemoryAccounting=true
-#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
-
-#BlockIOAccounting=true
-#BlockIOWeight=??
-#BlockIODeviceWeight=??
-#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
-
-#DeviceAllow=
-#DevicePolicy=auto|closed|strict
-
-[Install]
-WantedBy=multi-user.target
-
diff --git a/condor.spec b/condor.spec
index f537a33..7f57bbf 100644
--- a/condor.spec
+++ b/condor.spec
@@ -1,136 +1,106 @@
%global newname htcondor
-%global srcver 8_8_15
-
-%ifarch %{arm} %{ix86} x86_64
-%global with_mongodb 1
-%endif
-
-# disable plumage (and need for mongodb)
-%global with_mongodb 0
-
-# enable aviary
-%global with_aviary 0
-
-# enable BOSCO
-%global with_bosco 0
-
-# enable CREAM gahp
-%global with_cream_gahp 0
+%global version 23.0.0
+%global version_ %(tr . _ <<< %{version})
#######################
Name: condor
-Version: 8.8.15
-Release: 10%{?dist}
+Version: 23.0.0
+Release: 1%{?dist}
Summary: HTCondor: High Throughput Computing
License: ASL 2.0
-URL: http://research.cs.wisc.edu/htcondor/
+URL: http://htcondor.org
##############################################################
# NOTE: If you wish to setup a debug build either add a patch
# or adjust the URL to a private github location
##############################################################
-Source0: https://github.com/htcondor/htcondor/archive/V%{srcver}/%{newname}-%{vers...
-Source1: %{name}-tmpfiles.conf
-Source2: %{name}.service
-Source3: 00personal_condor.config
-
-Patch1: condor-gahp.patch
-# turn off the cmake regex-replace hack that removes "-Werror", as it
-# breaks the new cflag "-Werror=format-security" passed in from build system:
-Patch2: Werror_replace.patch
-Patch5: python-scripts.patch
-Patch6: boost-python38.patch
-Patch7: doc-conf.patch
+Source0: https://github.com/htcondor/htcondor/archive/V%{version}/%{newname}-%{ver...
#######################
BuildRequires: gcc gcc-c++
-BuildRequires: cmake
-BuildRequires: flex
-BuildRequires: byacc
-BuildRequires: pcre-devel
+BuildRequires: cmake >= 3.16
+BuildRequires: pcre2-devel
BuildRequires: openssl-devel
BuildRequires: krb5-devel
BuildRequires: libvirt-devel
BuildRequires: bind-utils
-BuildRequires: m4
BuildRequires: libX11-devel
+BuildRequires: libXScrnSaver-devel
BuildRequires: libcurl-devel
BuildRequires: expat-devel
-BuildRequires: openldap-devel
BuildRequires: python3-setuptools
BuildRequires: python3-sphinx
BuildRequires: python3-sphinx_rtd_theme
BuildRequires: boost-devel
-BuildRequires: boost-python3
+BuildRequires: boost-python3-devel
+BuildRequires: boost-static
+BuildRequires: glibc-static
BuildRequires: libuuid-devel
BuildRequires: sqlite-devel
+BuildRequires: patch
# needed for param table generator
BuildRequires: perl-generators
+BuildRequires: perl(Archive::Tar)
BuildRequires: perl(Data::Dumper)
+BuildRequires: perl(Digest::MD5)
+BuildRequires: perl(XML::Parser)
-# Globus GSI build requirements
-BuildRequires: globus-gssapi-gsi-devel
-BuildRequires: globus-gass-server-ez-devel
-BuildRequires: globus-gass-transfer-devel
-BuildRequires: globus-gram-client-devel
-BuildRequires: globus-rsl-devel
-BuildRequires: globus-gram-protocol
-BuildRequires: globus-io-devel
-BuildRequires: globus-xio-devel
-BuildRequires: globus-gssapi-error-devel
-BuildRequires: globus-gss-assist-devel
-BuildRequires: globus-gsi-proxy-core-devel
-BuildRequires: globus-gsi-credential-devel
-BuildRequires: globus-gsi-callback-devel
-BuildRequires: globus-gsi-sysconfig-devel
-BuildRequires: globus-gsi-cert-utils-devel
-BuildRequires: globus-openssl-module-devel
-BuildRequires: globus-gsi-openssl-error-devel
-BuildRequires: globus-gsi-proxy-ssl-devel
-BuildRequires: globus-callout-devel
-BuildRequires: globus-common-devel
-BuildRequires: globus-ftp-client-devel
-BuildRequires: globus-ftp-control-devel
-BuildRequires: libtool-ltdl-devel
BuildRequires: munge-devel
BuildRequires: voms-devel
-# support for aviary
-%if 0%{?with_aviary}
-BuildRequires: wso2-wsf-cpp-devel
-BuildRequires: wso2-axis2-devel
-%endif
-# support for plumage
-%if 0%{?with_mongodb}
-BuildRequires: mongodb-devel
-%endif
-# support for cream (glite-ce-cream-client-devel doesn't exist in Fedora)
-#BuildRequires: glite-ce-cream-client-devel
-#BuildRequires: glite-lbjp-common-gsoap-plugin-devel
-#BuildRequires: glite-ce-cream-utils
-#BuildRequires: log4cpp-devel
-#BuildRequires: gridsite-devel
+BuildRequires: nss-devel
+BuildRequires: openldap-devel
+BuildRequires: scitokens-cpp-devel
# we now need to request the python libs and includes explicitly:
BuildRequires: python3-devel
-BuildRequires: python3-libs
# Added by B.DeKnuydt (Jan 2020)
-BuildRequires: zlib zlib-devel
BuildRequires: libxml2 libxml2-devel
-#BuildRequires: libcgroup libcgroup-devel
BuildRequires: pam-devel
BuildRequires: make
+BuildRequires: systemd-devel
+BuildRequires: systemd-units
+
#######################
# Installation requirements.
-Requires: mailx
+Requires: /usr/sbin/sendmail
Requires: python3
-Requires: condor-classads = %{version}-%{release}
-Requires: condor-procd = %{version}-%{release}
+
+# Require libraries that we dlopen
+# Ganglia is optional as well as nVidia and cuda libraries
Requires: voms
+Requires: krb5-libs
+Requires: libcom_err
+Requires: munge-libs
+Requires: openssl-libs
+Requires: scitokens-cpp >= 0.6.2
+Requires: systemd-libs
-# doesn't exist in fedora
-#Requires: blahp
-#Requires: glexec
+# openssh-server needed for condor_ssh_to_job
+Requires: openssh-server
+
+# net-tools needed to provide netstat for condor_who
+Requires: net-tools
+
+# Useful tools are using the Python bindings
+Requires: python3-condor = %{version}-%{release}
+Requires: python3-requests
+
+# procd package discontinued as of 10.8.0
+Obsoletes: %{name}-procd < 10.8.0
+Provides: %{name}-procd = %{version}-%{release}
+
+# all package discontinued as of 10.8.0
+Obsoletes: %{name}-openstack-gahp < 10.8.0
+Provides: %{name}-openstack-gahp = %{version}-%{release}
+
+# classads package discontinued as of 10.8.0
+Obsoletes: %{name}-classads < 10.8.0
+Provides: %{name}-classads = %{version}-%{release}
+
+# classads-devel package discontinued as of 10.8.0
+Obsoletes: %{name}-classads-devel < 10.8.0
+Provides: %{name}-classads-devel = %{version}-%{release}
%description
HTCondor is a workload management system for high-throughput and
@@ -143,64 +113,12 @@ monitors their progress, and ultimately informs the user upon
completion.
#######################
-%package procd
-Summary: HTCondor Process tracking Daemon
-%description procd
-A daemon for tracking child processes started by a parent.
-Part of HTCondor, but able to be stand-alone
+%package devel
+Summary: Development files for HTCondor
+Group: Applications/System
-#######################
-%if 0%{?with_aviary}
-%package aviary-common
-Summary: HTCondor Aviary development components
-Requires: %name = %version-%release
-Requires: python2-suds
-
-%description aviary-common
-Components to develop against simplified WS interface to HTCondor.
-
-%package aviary
-Summary: HTCondor Aviary components
-Requires: %name = %version-%release
-Requires: condor = %{version}-%{release}
-Requires: condor-aviary-common = %{version}-%{release}
-
-%description aviary
-Components to provide simplified WS interface to HTCondor.
-
-%package aviary-hadoop-common
-Summary: HTCondor Aviary Hadoop development components
-Requires: %name = %version-%release
-Requires: python2-suds
-Requires: condor-aviary-common = %{version}-%{release}
-Requires: tar
-
-%description aviary-hadoop-common
-Components to develop against simplified WS interface to HTCondor.
-
-%package aviary-hadoop
-Summary: HTCondor Aviary Hadoop components
-Requires: %name = %version-%release
-Requires: condor-aviary = %{version}-%{release}
-Requires: condor-aviary-hadoop-common = %{version}-%{release}
-
-%description aviary-hadoop
-Aviary Hadoop plugin and components.
-%endif
-
-#######################
-%if 0%{?with_mongodb}
-%package plumage
-Summary: HTCondor Plumage components
-Requires: %name = %version-%release
-Requires: condor-classads = %{version}-%{release}
-Requires: mongodb
-Requires: pymongo
-Requires: python2-dateutil
-
-%description plumage
-Components to provide a NoSQL operational data store for HTCondor.
-%endif
+%description devel
+Development files for HTCondor
#######################
%package kbdd
@@ -213,6 +131,15 @@ The condor_kbdd monitors logged in X users for activity. It is only
useful on systems where no device (e.g. /dev/*) can be used to
determine console idle time.
+#######################
+%package test
+Summary: HTCondor Self Tests
+Group: Applications/System
+Requires: %name = %version-%release
+
+%description test
+A collection of tests to verify that HTCondor is operating properly.
+
#######################
%package vm-gahp
Summary: HTCondor's VM Gahp
@@ -225,67 +152,9 @@ The condor_vm-gahp enables the Virtual Machine Universe feature of
HTCondor. The VM Universe uses libvirt to start and control VMs under
HTCondor's Startd.
-#######################
-%package openstack-gahp
-Summary: HTCondor's OpenStack Gahp
-Requires: %name = %version-%release
-Requires: condor = %{version}-%{release}
-
-%description openstack-gahp
-The openstack_gahp enables HTCondor's ability to manage jobs run on
-resources exposed by the OpenStack API.
-
-#######################
-%package classads
-Summary: HTCondor's classified advertisement language
-Obsoletes: classads <= 1.0.8
-Obsoletes: classads-static <= 1.0.8
-
-%description classads
-Classified Advertisements (classads) are the lingua franca of
-HTCondor. They are used for describing jobs, workstations, and other
-resources. They are exchanged by HTCondor processes to schedule
-jobs. They are logged to files for statistical and debugging
-purposes. They are used to enquire about current state of the system.
-
-A classad is a mapping from attribute names to expressions. In the
-simplest cases, the expressions are simple constants (integer,
-floating point, or string). A classad is thus a form of property
-list. Attribute expressions can also be more complicated. There is a
-protocol for evaluating an attribute expression of a classad vis a vis
-another ad. For example, the expression "other.size > 3" in one ad
-evaluates to true if the other ad has an attribute named size and the
-value of that attribute is (or evaluates to) an integer greater than
-three. Two classads match if each ad has an attribute requirements
-that evaluates to true in the context of the other ad. Classad
-matching is used by the HTCondor central manager to determine the
-compatibility of jobs and workstations where they may be run.
-
-#######################
-%package classads-devel
-Summary: Headers for HTCondor's classified advertisement language
-Requires: %name-classads = %version-%release
-Requires: pcre-devel
-Obsoletes: classads-devel <= 1.0.8
-
-%description classads-devel
-Header files for HTCondor's ClassAd Library, a powerful and flexible,
-semi-structured representation of data.
-
-#######################
-%if 0%{?with_cream_gahp}
-%%package cream-gahp
-Summary: Allows Condor to act as a client to CREAM.
-Requires: %%name = %%version-%%release
-
-%%description cream-gahp
-The cream_gahp enables the Condor grid universe to communicate with a remote
-CREAM server.
-%endif
-
#######################
%package -n python3-condor
-Summary: Python bindings for Condor.
+Summary: Python bindings for HTCondor
Requires: %name = %version-%release
%{?python_provide:%python_provide python3-condor}
@@ -293,6 +162,42 @@ Requires: %name = %version-%release
The python bindings allow one to directly invoke the C++ implementations of
the ClassAd library and HTCondor from python
+#######################
+%package credmon-oauth
+Summary: OAuth2 credmon for HTCondor
+Group: Applications/System
+Requires: %name = %version-%release
+Requires: python3-condor = %{version}-%{release}
+Requires: python3-requests-oauthlib
+Requires: python3-six
+Requires: python3-flask
+Requires: python3-cryptography
+Requires: python3-scitokens
+Requires: python3-mod_wsgi
+Requires: httpd
+
+%description credmon-oauth
+The OAuth2 credmon allows users to obtain credentials from configured
+OAuth2 endpoints and to use those credentials securely inside running jobs.
+
+#######################
+%package credmon-vault
+Summary: Vault credmon for HTCondor
+Group: Applications/System
+Requires: %name = %version-%release
+Requires: python3-condor = %{version}-%{release}
+Requires: python3-six
+Requires: python3-cryptography
+# Although htgettoken is only needed on the submit machine and
+# condor-credmon-vault is needed on both the submit and credd machines,
+# htgettoken is small so it doesn't hurt to require it in both places.
+Requires: htgettoken >= 1.1
+Conflicts: %name-credmon-oauth
+
+%description credmon-vault
+The Vault credmon allows users to obtain credentials from Vault using
+htgettoken and to use those credentials securely inside running jobs.
+
#######################
%package -n minicondor
Summary: Configuration for a single-node HTCondor
@@ -304,34 +209,10 @@ This example configuration is good for trying out HTCondor for the first time.
It only configures the IPv4 loopback address, turns on basic security, and
shortens many timers to be more responsive.
-#######################
-# The bosco subpkg is currently dropping file that breaks the out-of-box condor
-# configuration (60-campus_factory.config). The file looks somewhat site-
-# specific. I'm going to disable bosco until it can be made more generic for
-# fedora, and/or not break default condor config out of box.
-%if 0%{?with_bosco}
-%package bosco
-Summary: BOSCO, a Condor overlay system for managing jobs at remote clusters
-Url: http://bosco.opensciencegrid.org
-Requires: %name = %version-%release
-
-%description bosco
-BOSCO allows a locally-installed Condor to submit jobs to remote clusters,
-using SSH as a transit mechanism. It is designed for cases where the remote
-cluster is using a different batch system such as PBS, SGE, LSF, or another
-Condor system.
-
-BOSCO provides an overlay system so the remote clusters appear to be a Condor
-cluster. This allows the user to run their workflows using Condor tools across
-multiple clusters.
-%endif
-
#######################
%package annex-ec2
Summary: Configuration and scripts to make an EC2 image annex-compatible.
Requires: %name = %version-%release
-Requires(post): /sbin/chkconfig
-Requires(preun): /sbin/chkconfig
%description annex-ec2
Configures HTCondor to make an EC2 image annex-compatible. Do NOT install
@@ -344,13 +225,29 @@ on a non-EC2 image.
%config(noreplace) %_sysconfdir/condor/master_shutdown_script.sh
%post annex-ec2
-/bin/systemctl enable condor-annex-ec2
+#/bin/systemctl enable condor-annex-ec2
%preun annex-ec2
if [ $1 == 0 ]; then
/bin/systemctl disable condor-annex-ec2
fi
+#######################
+%package upgrade-checks
+Summary: Script to check for manual interventions needed to upgrade
+Group: Applications/System
+Requires: python3-condor
+Requires: pcre2-tools
+
+%description upgrade-checks
+HTCondor V9 to V10 check for for known breaking changes:
+1. IDToken TRUST_DOMAIN default value change
+2. Upgrade to PCRE2 breaking map file regex sequences
+3. The way to request GPU resources for a job
+
+%files upgrade-checks
+%_bindir/condor_upgrade_check
+
%pre
getent group %{name} >/dev/null || groupadd -r %{name}
getent passwd %{name} >/dev/null || \
@@ -359,54 +256,20 @@ getent passwd %{name} >/dev/null || \
exit 0
%prep
-%setup -q -n %{newname}-%{srcver}
-%patch1 -p1
-%patch2 -p1
-%patch5 -p1
-%patch7 -p1
-cp %{SOURCE1} %{name}-tmpfiles.conf
-cp %{SOURCE2} %{name}.service
-cp %{SOURCE3} .
+%setup -q -n %{newname}-%{version}
+
+# fix errant execute permissions
+find src -perm /a+x -type f -name "*.[Cch]" -exec chmod a-x {} \;
%build
make -C docs man
-%cmake -DNO_PHONE_HOME:BOOL=TRUE \
+%cmake -DBUILDID:STRING=RH-%{version}-%{release} \
-DBUILD_TESTING:BOOL=FALSE \
- -DBUILDID:STRING=RH-%{version}-%{release} \
- -D_VERBOSE:BOOL=TRUE \
-DCMAKE_SKIP_RPATH:BOOL=TRUE \
- -DHAVE_BACKFILL:BOOL=FALSE \
- -DHAVE_BOINC:BOOL=FALSE \
- -DWITH_GSOAP:BOOL=FALSE \
- -DWITH_POSTGRESQL:BOOL=FALSE \
- -DHAVE_KBDD:BOOL=TRUE \
- -DHAVE_HIBERNATION:BOOL=TRUE \
- -DWANT_LEASE_MANAGER:BOOL=FALSE \
- -DWANT_HDFS:BOOL=FALSE \
- -DWANT_QUILL:BOOL=FALSE \
- -DWITH_QPID:BOOL=FALSE \
- -DWITH_ZLIB:BOOL=FALSE \
- -DWITH_POSTGRESQL:BOOL=FALSE \
- -DWANT_CONTRIB:BOOL=ON \
- -DWITH_BOSCO:BOOL=FALSE \
- -DWITH_PIGEON:BOOL=FALSE \
- -DWITH_MANAGEMENT:BOOL=FALSE \
-%if 0%{?with_mongodb}
- -DWITH_PLUMAGE:BOOL=TRUE \
-%endif
-%if 0%{?with_aviary}
- -DWITH_AVIARY:BOOL=TRUE \
-%endif
- -DWANT_FULL_DEPLOYMENT:BOOL=TRUE \
- -DBLAHP_FOUND=/usr/libexec/BLClient \
- -DWITH_BLAHP:BOOL=TRUE \
- -DWITH_CREAM:BOOL=FALSE \
- -DWANT_GLEXEC:BOOL=TRUE \
- -DWANT_MAN_PAGES:BOOL=TRUE \
- -DWITH_LIBDELTACLOUD:BOOL=TRUE \
- -DWITH_GLOBUS:BOOL=TRUE \
- -DWITH_PYTHON_BINDINGS:BOOL=TRUE \
- -DWITH_LIBCGROUP:BOOL=FALSE
+ -DPACKAGEID:STRING=%{version}-%{release} \
+ -DCONDOR_PACKAGE_BUILD:BOOL=TRUE \
+ -DCONDOR_RPMBUILD:BOOL=TRUE \
+ -DCMAKE_INSTALL_PREFIX:PATH=/
%cmake_build
@@ -422,32 +285,11 @@ function populate {
rm -rf %{buildroot}
%cmake_install
-# The install target puts etc/ under usr/, let's fix that.
-mv %{buildroot}/usr/etc %{buildroot}/%{_sysconfdir}
-
-populate %_sysconfdir/condor %{buildroot}/%{_usr}/lib/condor_ssh_to_job_sshd_config_template
-
-# Things in /usr/lib really belong in /usr/share/condor
-populate %{_datadir}/condor %{buildroot}/%{_usr}/lib/*
-# Except for the shared libs
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libclassad.s*
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libcondor_utils*.so
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libpy3classad%{python3_version}_%{srcver}.so
-# and python site-packages
-if [ -d %{buildroot}/%{_datadir}/condor/python3.* ]; then
- mv %{buildroot}/%{_datadir}/condor/python3.* %{buildroot}/%{_libdir}/
-fi
-rm -f %{buildroot}/%{_datadir}/condor/libclassad.a
-
-# Remove the small shadow if built
-rm -f %{buildroot}/%{_sbindir}/condor_shadow_s
-
-# It is proper to put HTCondor specific libexec binaries under libexec/condor/
-populate %_libexecdir/condor %{buildroot}/usr/libexec/*
+# Drop in a symbolic link for backward compatibility
+ln -s ../..%{_libdir}/condor/condor_ssh_to_job_sshd_config_template %{buildroot}/%_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
-# man pages
-mkdir -p %{buildroot}/%{_mandir}
-mv %{buildroot}/usr/man %{buildroot}/%{_mandir}/man1
+mv %{buildroot}/usr/share/doc/condor-%{version} %{buildroot}/usr/share/doc/condor
+populate /usr/share/doc/condor/examples %{buildroot}/usr/share/doc/condor/etc/examples/*
mkdir -p %{buildroot}/%{_sysconfdir}/condor
# the default condor_config file is not architecture aware and thus
@@ -460,236 +302,254 @@ if [ "$LIB" = "%_libdir" ]; then
echo "_libdir does not contain /usr, sed expression needs attention"
exit 1
fi
-sed -e "s:^LIB\s*=.*:LIB = \$(RELEASE_DIR)/$LIB/condor:" \
- %{buildroot}/etc/examples/condor_config.generic.redhat \
- > %{buildroot}/%{_sysconfdir}/condor/condor_config
# Install the basic configuration, a Personal HTCondor config. Allows for
# yum install condor + service condor start and go.
-#mkdir -m0755 %{buildroot}/%{_sysconfdir}/condor/config.d
-install -m 0644 00personal_condor.config %{buildroot}/%{_sysconfdir}/condor/config.d/00personal_condor.config
-
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/00-minicondor
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/50ec2.config
+mkdir -p -m0755 %{buildroot}/%{_sysconfdir}/condor/config.d
+mkdir -p -m0700 %{buildroot}/%{_sysconfdir}/condor/passwords.d
+mkdir -p -m0700 %{buildroot}/%{_sysconfdir}/condor/tokens.d
-%if 0%{?with_aviary}
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/61aviary.config
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/63aviary-hadoop.config
-
-mkdir -p %{buildroot}/%{_var}/lib/condor/aviary
-populate %{_var}/lib/condor/aviary %{buildroot}/usr/axis2.xml
-populate %{_var}/lib/condor/aviary %{buildroot}/usr/services/
-
-populate %{_libdir}/condor/plugins %{buildroot}/%{_usr}/libexec/condor/*-plugin.so
-populate %{_libdir}/ %{buildroot}/%{_datadir}/condor/libaviary_*
-%endif
+populate %_sysconfdir/condor/config.d %{buildroot}/usr/share/doc/condor/examples/00-htcondor-9.0.config
+populate %_sysconfdir/condor/config.d %{buildroot}/usr/share/doc/condor/examples/00-minicondor
+populate %_sysconfdir/condor/config.d %{buildroot}/usr/share/doc/condor/examples/50ec2.config
+# Install a second config.d directory under /usr/share, used for the
+# convenience of software built on top of Condor such as GlideinWMS.
+mkdir -p -m0755 %{buildroot}/usr/share/condor/config.d
-%if 0%{?with_mongodb}
-# Install condor-plumage's base plugin configuration
-populate %_sysconfdir/condor/config.d %{buildroot}/etc/examples/62plumage.config
-%endif
-rm -f %{buildroot}/%{_bindir}/ods_job_etl_tool
-rm -f %{buildroot}/%{_sbindir}/ods_job_etl_server
-mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/ViewHist
-
-mkdir -p -m0755 %{buildroot}/%{_var}/run/condor
mkdir -p -m0755 %{buildroot}/%{_var}/log/condor
-mkdir -p -m0755 %{buildroot}/%{_var}/lock/condor
-mkdir -p -m1777 %{buildroot}/%{_var}/lock/condor/local
+# Note we use %{_var}/lib instead of %{_sharedstatedir} for RHEL5 compatibility
mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/spool
-mkdir -p -m1777 %{buildroot}/%{_var}/lib/condor/execute
+mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/execute
+mkdir -p -m0755 %{buildroot}/%{_var}/lib/condor/krb_credentials
+mkdir -p -m2770 %{buildroot}/%{_var}/lib/condor/oauth_credentials
-# not packaging standard universe
-rm %{buildroot}/%{_mandir}/man1/condor_compile.1
-rm %{buildroot}/%{_mandir}/man1/condor_checkpoint.1
# not packaging configure/install scripts
-rm %{buildroot}/%{_mandir}/man1/condor_configure.1
+rm -f %{buildroot}%{_bindir}/make-personal-from-tarball
+rm -f %{buildroot}%{_sbindir}/condor_configure
+rm -f %{buildroot}%{_sbindir}/condor_install
+rm -f %{buildroot}/%{_mandir}/man1/condor_configure.1
+rm -f %{buildroot}/%{_mandir}/man1/condor_install.1
-# Remove junk
-rm -r %{buildroot}/%{_sysconfdir}/sysconfig
-rm -r %{buildroot}/%{_sysconfdir}/init.d
+mkdir -p %{buildroot}/%{_var}/www/wsgi-scripts/condor_credmon_oauth
+mv %{buildroot}/%{_libexecdir}/condor/condor_credmon_oauth.wsgi %{buildroot}/%{_var}/www/wsgi-scripts/condor_credmon_oauth/condor_credmon_oauth.wsgi
-# install tmpfiles.d/condor.conf
-mkdir -p %{buildroot}%{_tmpfilesdir}/tmpfiles.d
-install -m 0644 %{name}-tmpfiles.conf %{buildroot}%{_tmpfilesdir}/%{name}.conf
+# Move oauth credmon config files out of examples and into config.d
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/config/condor/40-oauth-credmon.conf %{buildroot}/%{_sysconfdir}/condor/config.d/40-oauth-credmon.conf
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/config/condor/40-oauth-tokens.conf %{buildroot}/%{_sysconfdir}/condor/config.d/40-oauth-tokens.conf
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/README.credentials %{buildroot}/%{_var}/lib/condor/oauth_credentials/README.credentials
-install -Dp -m0755 %{buildroot}/etc/examples/condor-annex-ec2 %{buildroot}%{_libexecdir}/condor/condor-annex-ec2
+# Move vault credmon config file out of examples and into config.d
+mv %{buildroot}/usr/share/doc/condor/examples/condor_credmon_oauth/config/condor/40-vault-credmon.conf %{buildroot}/%{_sysconfdir}/condor/config.d/40-vault-credmon.conf
-mkdir -p %{buildroot}%{_unitdir}
-install -m 0644 %{buildroot}/etc/examples/condor-annex-ec2.service %{buildroot}%{_unitdir}/condor-annex-ec2.service
+# install tmpfiles.d/condor.conf
+mkdir -p %{buildroot}%{_tmpfilesdir}
+install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor-tmpfiles.conf %{buildroot}%{_tmpfilesdir}/%{name}.conf
-mkdir -p %{buildroot}%{_localstatedir}/run/
-install -d -m 0710 %{buildroot}%{_localstatedir}/run/%{name}/
+install -Dp -m0755 %{buildroot}/usr/share/doc/condor/examples/condor-annex-ec2 %{buildroot}%{_libexecdir}/condor/condor-annex-ec2
mkdir -p %{buildroot}%{_unitdir}
-install -m 0644 %{name}.service %{buildroot}%{_unitdir}/condor.service
-
-mv %{buildroot}%{python3_sitearch}/py3htcondor.so %{buildroot}%{python3_sitearch}/htcondor.so
-mv %{buildroot}%{python3_sitearch}/py3classad.so %{buildroot}%{python3_sitearch}/classad.so
-
-# Remove stuff that comes from the full-deploy
-rm -rf %{buildroot}%{_sbindir}/cleanup_release
-rm -rf %{buildroot}%{_sbindir}/condor_cleanup_local
-rm -rf %{buildroot}%{_sbindir}/condor_cold_start
-rm -rf %{buildroot}%{_sbindir}/condor_cold_stop
-rm -rf %{buildroot}%{_sbindir}/condor_config_bind
-rm -rf %{buildroot}%{_sbindir}/condor_configure
-rm -rf %{buildroot}%{_sbindir}/condor_credd
-rm -rf %{buildroot}%{_sbindir}/condor_install
-rm -rf %{buildroot}%{_sbindir}/condor_install_local
-rm -rf %{buildroot}%{_sbindir}/condor_local_start
-rm -rf %{buildroot}%{_sbindir}/condor_local_stop
-rm -rf %{buildroot}%{_sbindir}/condor_startd_factory
-rm -rf %{buildroot}%{_sbindir}/condor_vm-gahp-vmware
-rm -rf %{buildroot}%{_sbindir}/condor_vm_vmwar*
-rm -rf %{buildroot}%{_sbindir}/filelock_midwife
-rm -rf %{buildroot}%{_sbindir}/filelock_undertaker
-rm -rf %{buildroot}%{_sbindir}/install_release
-rm -rf %{buildroot}%{_sbindir}/uniq_pid_command
-rm -rf %{buildroot}%{_sbindir}/uniq_pid_midwife
-rm -rf %{buildroot}%{_sbindir}/uniq_pid_undertaker
-rm -rf %{buildroot}%{_datadir}/condor/*.pm
-rm -rf %{buildroot}%{_datadir}/condor/Chirp.jar
-rm -rf %{buildroot}%{_usrsrc}/chirp/chirp_*
-rm -rf %{buildroot}%{_usrsrc}/startd_factory
-rm -rf %{buildroot}/usr/DOC
-rm -rf %{buildroot}/usr/INSTALL
-rm -rf %{buildroot}/usr/LICENSE-2.0.txt
-rm -rf %{buildroot}/usr/NOTICE.txt
-rm -rf %{buildroot}/usr/README
-rm -rf %{buildroot}/usr/examples/
-rm -rf %{buildroot}%{_includedir}/MyString.h
-rm -rf %{buildroot}%{_includedir}/chirp_client.h
-rm -rf %{buildroot}%{_includedir}/compat_classad*
-rm -rf %{buildroot}%{_includedir}/condor_classad.h
-rm -rf %{buildroot}%{_includedir}/condor_constants.h
-rm -rf %{buildroot}%{_includedir}/condor_event.h
-rm -rf %{buildroot}%{_includedir}/condor_header_features.h
-rm -rf %{buildroot}%{_includedir}/condor_holdcodes.h
-rm -rf %{buildroot}%{_includedir}/file_lock.h
-rm -rf %{buildroot}%{_includedir}/iso_dates.h
-rm -rf %{buildroot}%{_includedir}/read_user_log.h
-rm -rf %{buildroot}%{_includedir}/stl_string_utils.h
-rm -rf %{buildroot}%{_includedir}/user_log.README
-rm -rf %{buildroot}%{_includedir}/user_log.c++.h
-rm -rf %{buildroot}%{_includedir}/write_user_log.h
-rm -rf %{buildroot}%{_libexecdir}/condor/bgp_*
-rm -rf %{buildroot}%{_datadir}/condor/libchirp_client.*
-rm -rf %{buildroot}%{_datadir}/condor/libcondorapi.a
-rm -rf %{buildroot}%{_datadir}/condor/python/{htcondor,classad}.so
-rm -rf %{buildroot}%{_datadir}/condor/{libpy*classad_*,htcondor,classad}.so
-rm %{buildroot}%{_libexecdir}/condor/condor_schedd.init
-rm -rf %{buildroot}%{_libexecdir}/condor/pandad
-rm -rf %{buildroot}%{_libexecdir}/condor/libclassad_python*_user.so
-
-# Install BOSCO
-%if 0%{?with_bosco}
-mv %{buildroot}%{_libexecdir}/condor/campus_factory/share/condor/condor_config.factory %{buildroot}%{_sysconfdir}/condor/config.d/60-campus_factory.config
-mv %{buildroot}%{_libexecdir}/condor/campus_factory/etc/campus_factory.conf %{buildroot}%{_sysconfdir}/condor/
-mv %{buildroot}%{_libexecdir}/condor/campus_factory/share %{buildroot}%{_datadir}/condor/campus_factory
+install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor-annex-ec2.service %{buildroot}%{_unitdir}/condor-annex-ec2.service
+install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor.service %{buildroot}%{_unitdir}/condor.service
+# Disabled until HTCondor security fixed.
+# install -m 0644 %{buildroot}/usr/share/doc/condor/examples/condor.socket %{buildroot}%{_unitdir}/condor.socket
+
+%if 0%{?rhel} >= 7
+mkdir -p %{buildroot}%{_datadir}/condor/
+cp %{SOURCE8} %{buildroot}%{_datadir}/condor/
%endif
-rm -rf %{buildroot}%{_libexecdir}/condor/campus_factory
-rm -rf %{buildroot}/%{_sbindir}/bosco_install
-rm -rf %{buildroot}/%{_sbindir}/campus_factory
-rm -rf %{buildroot}/%{_sbindir}/condor_ft-gahp
-rm -rf %{buildroot}/%{_sbindir}/glidein_creation
-rm -rf %{buildroot}/%{_sbindir}/runfactory
-rm -rf %{buildroot}/%{_mandir}/man1/bosco*
-
-# we must place the config examples in builddir
-cp -rf %{buildroot}/etc/examples %{_builddir}/%{name}-%{tarball_version}
-rm -rf %{buildroot}/etc/examples
+#Fixups for packaged build, should have been done by cmake
+
+mkdir -p %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/lib64/condor/Chirp.jar %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/lib64/condor/CondorJava*.class %{buildroot}/usr/share/condor
+mv %{buildroot}/usr/lib64/condor/libchirp_client.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/lib64/condor/libcondorapi.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/lib64/condor/libcondor_utils_*.so %{buildroot}/usr/lib64
+mv %{buildroot}/usr/lib64/condor/libpyclassad3*.so %{buildroot}/usr/lib64
+
+rm -rf %{buildroot}/usr/share/doc/condor/LICENSE
+rm -rf %{buildroot}/usr/share/doc/condor/NOTICE.txt
+rm -rf %{buildroot}/usr/share/doc/condor/README
+
+# Move batch system customization files to /etc, with symlinks in the
+# original location. Admins will need to edit these.
+install -m 0755 -d -p %{buildroot}%{_sysconfdir}/blahp
+for batch_system in condor kubernetes lsf nqs pbs sge slurm; do
+ mv %{buildroot}%{_libexecdir}/blahp/${batch_system}_local_submit_attributes.sh %{buildroot}%{_sysconfdir}/blahp
+ ln -s %{_sysconfdir}/blahp/${batch_system}_local_submit_attributes.sh \
+ %{buildroot}%{_libexecdir}/blahp/${batch_system}_local_submit_attributes.sh
+done
+
+#################
%files
-%doc LICENSE-2.0.txt NOTICE.txt
-%config(noreplace) %_sysconfdir/bash_completion.d/condor
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
+%doc /usr/share/doc/condor/examples
%dir %_sysconfdir/condor/
-%config(noreplace) %_sysconfdir/condor/condor_config
-%config(noreplace) %{_tmpfilesdir}/%{name}.conf
+%config %_sysconfdir/condor/condor_config
+%{_tmpfilesdir}/%{name}.conf
%{_unitdir}/condor.service
+# Disabled until HTCondor security fixed.
+# % {_unitdir}/condor.socket
%dir %_datadir/condor/
+%_datadir/condor/Chirp.jar
%_datadir/condor/CondorJavaInfo.class
%_datadir/condor/CondorJavaWrapper.class
-%_datadir/condor/scimark2lib.jar
+%if 0%{?rhel} >= 7
+%_datadir/condor/htcondor.pp
+%endif
+%dir %_sysconfdir/condor/passwords.d/
+%dir %_sysconfdir/condor/tokens.d/
%dir %_sysconfdir/condor/config.d/
-%dir %_sysconfdir/condor/ganglia.d
-%config(noreplace) %_sysconfdir/condor/ganglia.d/00_default_metrics
-%config(noreplace) %_sysconfdir/condor/config.d/00personal_condor.config
-%config(noreplace) %_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
+%config(noreplace) %{_sysconfdir}/condor/config.d/00-htcondor-9.0.config
+%dir /usr/share/condor/config.d/
+%_libdir/condor/condor_ssh_to_job_sshd_config_template
+%_sysconfdir/condor/condor_ssh_to_job_sshd_config_template
+%_sysconfdir/bash_completion.d/condor
+%_libdir/libchirp_client.so
+%_libdir/libcondor_utils_%{version_}.so
+%_libdir/libcondorapi.so
+%_libdir/condor/libgetpwnam.so
%dir %_libexecdir/condor/
+%_libexecdir/condor/linux_kernel_tuning
+%_libexecdir/condor/accountant_log_fixer
%_libexecdir/condor/condor_chirp
%_libexecdir/condor/condor_ssh
%_libexecdir/condor/sshd.sh
%_libexecdir/condor/get_orted_cmd.sh
%_libexecdir/condor/orted_launcher.sh
+%_libexecdir/condor/set_batchtok_cmd
+%_libexecdir/condor/cred_producer_krb
%_libexecdir/condor/condor_job_router
-%_libexecdir/condor/condor_gangliad
-%_libexecdir/condor/condor_glexec_setup
-%_libexecdir/condor/condor_glexec_run
-%_libexecdir/condor/condor_glexec_job_wrapper
-%_libexecdir/condor/condor_glexec_update_proxy
-%_libexecdir/condor/condor_glexec_cleanup
-%_libexecdir/condor/condor_glexec_kill
-%_libexecdir/condor/glite/bin/*
+%_libexecdir/condor/condor_pid_ns_init
+%_libexecdir/condor/condor_urlfetch
+%_libexecdir/condor/htcondor_docker_test
+%_libexecdir/condor/exit_37.sif
+%dir %_libexecdir/condor/singularity_test_sandbox/
+%dir %_libexecdir/condor/singularity_test_sandbox/dev/
+%dir %_libexecdir/condor/singularity_test_sandbox/proc/
+%_libexecdir/condor/singularity_test_sandbox/exit_37
%_libexecdir/condor/condor_limits_wrapper.sh
%_libexecdir/condor/condor_rooster
+%_libexecdir/condor/condor_schedd.init
%_libexecdir/condor/condor_ssh_to_job_shell_setup
%_libexecdir/condor/condor_ssh_to_job_sshd_setup
%_libexecdir/condor/condor_power_state
%_libexecdir/condor/condor_kflops
%_libexecdir/condor/condor_mips
%_libexecdir/condor/data_plugin
+%_libexecdir/condor/box_plugin.py
+%_libexecdir/condor/gdrive_plugin.py
+%_libexecdir/condor/common-cloud-attributes-google.py
+%_libexecdir/condor/common-cloud-attributes-aws.py
+%_libexecdir/condor/common-cloud-attributes-aws.sh
+%_libexecdir/condor/onedrive_plugin.py
+# TODO: get rid of these
+# Not sure where these are getting built
+%if 0%{?rhel} <= 7 && ! 0%{?fedora}
+%_libexecdir/condor/box_plugin.pyc
+%_libexecdir/condor/box_plugin.pyo
+%_libexecdir/condor/gdrive_plugin.pyc
+%_libexecdir/condor/gdrive_plugin.pyo
+%_libexecdir/condor/onedrive_plugin.pyc
+%_libexecdir/condor/onedrive_plugin.pyo
+%_libexecdir/condor/adstash/__init__.pyc
+%_libexecdir/condor/adstash/__init__.pyo
+%_libexecdir/condor/adstash/ad_sources/__init__.pyc
+%_libexecdir/condor/adstash/ad_sources/__init__.pyo
+%_libexecdir/condor/adstash/ad_sources/registry.pyc
+%_libexecdir/condor/adstash/ad_sources/registry.pyo
+%_libexecdir/condor/adstash/interfaces/__init__.pyc
+%_libexecdir/condor/adstash/interfaces/__init__.pyo
+%_libexecdir/condor/adstash/interfaces/generic.pyc
+%_libexecdir/condor/adstash/interfaces/generic.pyo
+%_libexecdir/condor/adstash/interfaces/null.pyc
+%_libexecdir/condor/adstash/interfaces/null.pyo
+%_libexecdir/condor/adstash/interfaces/registry.pyc
+%_libexecdir/condor/adstash/interfaces/registry.pyo
+%_libexecdir/condor/adstash/interfaces/opensearch.pyc
+%_libexecdir/condor/adstash/interfaces/opensearch.pyo
+%endif
%_libexecdir/condor/curl_plugin
-%_libexecdir/condor/multifile_curl_plugin
%_libexecdir/condor/condor_shared_port
-%_libexecdir/condor/condor_sinful
-%_libexecdir/condor/condor_testingd
-%_libexecdir/condor/test_user_mapping
-%_libexecdir/condor/condor_glexec_wrapper
-%_libexecdir/condor/glexec_starter_setup.sh
%_libexecdir/condor/condor_defrag
%_libexecdir/condor/interactive.sub
-%_libexecdir/condor/linux_kernel_tuning
-%_libexecdir/condor/condor_dagman_metrics_reporter
-%_libexecdir/condor/condor_pid_ns_init
-%_libexecdir/condor/condor_urlfetch
-%_libexecdir/condor/test_user_mapping
+%_libexecdir/condor/condor_gangliad
+%_libexecdir/condor/ce-audit.so
+%_libexecdir/condor/adstash/__init__.py
+%_libexecdir/condor/adstash/adstash.py
+%_libexecdir/condor/adstash/config.py
+%_libexecdir/condor/adstash/convert.py
+%_libexecdir/condor/adstash/utils.py
+%_libexecdir/condor/adstash/ad_sources/__init__.py
+%_libexecdir/condor/adstash/ad_sources/ad_file.py
+%_libexecdir/condor/adstash/ad_sources/generic.py
+%_libexecdir/condor/adstash/ad_sources/registry.py
+%_libexecdir/condor/adstash/ad_sources/schedd_history.py
+%_libexecdir/condor/adstash/ad_sources/startd_history.py
+%_libexecdir/condor/adstash/interfaces/__init__.py
+%_libexecdir/condor/adstash/interfaces/elasticsearch.py
+%_libexecdir/condor/adstash/interfaces/opensearch.py
+%_libexecdir/condor/adstash/interfaces/generic.py
+%_libexecdir/condor/adstash/interfaces/json_file.py
+%_libexecdir/condor/adstash/interfaces/null.py
+%_libexecdir/condor/adstash/interfaces/registry.py
+%_libexecdir/condor/annex
%_mandir/man1/condor_advertise.1.gz
%_mandir/man1/condor_annex.1.gz
+%_mandir/man1/condor_check_password.1.gz
%_mandir/man1/condor_check_userlogs.1.gz
%_mandir/man1/condor_chirp.1.gz
-%_mandir/man1/condor_convert_history.1*
-%_mandir/man1/condor_cod.1.gz
%_mandir/man1/condor_config_val.1.gz
%_mandir/man1/condor_dagman.1.gz
%_mandir/man1/condor_fetchlog.1.gz
%_mandir/man1/condor_findhost.1.gz
+%_mandir/man1/condor_gpu_discovery.1.gz
%_mandir/man1/condor_history.1.gz
%_mandir/man1/condor_hold.1.gz
+%_mandir/man1/condor_job_router_info.1.gz
%_mandir/man1/condor_master.1.gz
%_mandir/man1/condor_off.1.gz
%_mandir/man1/condor_on.1.gz
+%_mandir/man1/condor_pool_job_report.1.gz
%_mandir/man1/condor_preen.1.gz
%_mandir/man1/condor_prio.1.gz
%_mandir/man1/condor_q.1.gz
+%_mandir/man1/condor_qsub.1.gz
%_mandir/man1/condor_qedit.1.gz
%_mandir/man1/condor_reconfig.1.gz
%_mandir/man1/condor_release.1.gz
+%_mandir/man1/condor_remote_cluster.1.gz
%_mandir/man1/condor_reschedule.1.gz
%_mandir/man1/condor_restart.1.gz
%_mandir/man1/condor_rm.1.gz
%_mandir/man1/condor_run.1.gz
%_mandir/man1/condor_set_shutdown.1.gz
+%_mandir/man1/condor_ssh_start.1.gz
+%_mandir/man1/condor_sos.1.gz
+%_mandir/man1/condor_ssl_fingerprint.1.gz
%_mandir/man1/condor_stats.1.gz
%_mandir/man1/condor_status.1.gz
%_mandir/man1/condor_store_cred.1.gz
%_mandir/man1/condor_submit.1.gz
%_mandir/man1/condor_submit_dag.1.gz
+%_mandir/man1/condor_test_token.1.gz
+%_mandir/man1/condor_token_create.1.gz
+%_mandir/man1/condor_token_fetch.1.gz
+%_mandir/man1/condor_token_list.1.gz
+%_mandir/man1/condor_token_request.1.gz
+%_mandir/man1/condor_token_request_approve.1.gz
+%_mandir/man1/condor_token_request_auto_approve.1.gz
+%_mandir/man1/condor_token_request_list.1.gz
%_mandir/man1/condor_top.1.gz
%_mandir/man1/condor_transfer_data.1.gz
+%_mandir/man1/condor_transform_ads.1.gz
+%_mandir/man1/condor_update_machine_ad.1.gz
%_mandir/man1/condor_updates_stats.1.gz
+%_mandir/man1/condor_urlfetch.1.gz
%_mandir/man1/condor_userlog.1.gz
%_mandir/man1/condor_userprio.1.gz
%_mandir/man1/condor_vacate.1.gz
@@ -704,26 +564,20 @@ rm -rf %{buildroot}/etc/examples
%_mandir/man1/condor_power.1.gz
%_mandir/man1/condor_gather_info.1.gz
%_mandir/man1/condor_router_rm.1.gz
-%_mandir/man1/condor_qsub.1.gz
%_mandir/man1/condor_drain.1.gz
-%_mandir/man1/condor_install.1.gz
%_mandir/man1/condor_ping.1.gz
%_mandir/man1/condor_rmdir.1.gz
%_mandir/man1/condor_tail.1.gz
%_mandir/man1/condor_who.1.gz
%_mandir/man1/condor_now.1.gz
-%_mandir/man1/condor_dagman_metrics_reporter.1.gz
-%_mandir/man1/condor_gpu_discovery.1.gz
-%_mandir/man1/condor_pool_job_report.1.gz
-%_mandir/man1/condor_sos.1.gz
-%_mandir/man1/condor_urlfetch.1.gz
-%_mandir/man1/condor_job_router_info.1.gz
-%_mandir/man1/condor_update_machine_ad.1.gz
-%_mandir/man1/condor_transform_ads.1.gz
+%_mandir/man1/classad_eval.1.gz
+%_mandir/man1/classads.1.gz
+%_mandir/man1/condor_adstash.1.gz
+%_mandir/man1/condor_evicted_files.1.gz
+%_mandir/man1/condor_watch_q.1.gz
+%_mandir/man1/get_htcondor.1.gz
+%_mandir/man1/htcondor.1.gz
# bin/condor is a link for checkpoint, reschedule, vacate
-%_libdir/libcondor_utils*.so
-%_libexecdir/condor/panda-plugin.so
-%_libexecdir/condor/libcollector_python3_plugin.so
%_bindir/condor_submit_dag
%_bindir/condor_who
%_bindir/condor_now
@@ -732,15 +586,17 @@ rm -rf %{buildroot}/etc/examples
%_bindir/condor_check_userlogs
%_bindir/condor_q
%_libexecdir/condor/condor_transferer
-%_bindir/condor_cod
%_bindir/condor_docker_enter
%_bindir/condor_qedit
+%_bindir/condor_qusers
%_bindir/condor_userlog
%_bindir/condor_release
%_bindir/condor_userlog_job_counter
%_bindir/condor_config_val
%_bindir/condor_reschedule
%_bindir/condor_userprio
+%_bindir/condor_check_password
+%_bindir/condor_check_config
%_bindir/condor_dagman
%_bindir/condor_rm
%_bindir/condor_vacate
@@ -751,7 +607,6 @@ rm -rf %{buildroot}/etc/examples
%_bindir/condor_vacate_job
%_bindir/condor_findhost
%_bindir/condor_stats
-%_bindir/condor_transform_ads
%_bindir/condor_version
%_bindir/condor_history
%_bindir/condor_status
@@ -762,24 +617,44 @@ rm -rf %{buildroot}/etc/examples
%_bindir/condor_power
%_bindir/condor_gather_info
%_bindir/condor_continue
+%_bindir/condor_ssl_fingerprint
%_bindir/condor_suspend
%_bindir/condor_test_match
+%_bindir/condor_token_create
+%_bindir/condor_token_fetch
+%_bindir/condor_token_request
+%_bindir/condor_token_request_approve
+%_bindir/condor_token_request_auto_approve
+%_bindir/condor_token_request_list
+%_bindir/condor_token_list
+%_bindir/condor_scitoken_exchange
%_bindir/condor_drain
%_bindir/condor_ping
-%_bindir/condor_qsub
%_bindir/condor_tail
+%_bindir/condor_qsub
%_bindir/condor_pool_job_report
%_bindir/condor_job_router_info
+%_bindir/condor_transform_ads
%_bindir/condor_update_machine_ad
%_bindir/condor_annex
%_bindir/condor_nsenter
+%_bindir/condor_evicted_files
+%_bindir/condor_adstash
+%_bindir/condor_remote_cluster
+%_bindir/bosco_cluster
+%_bindir/condor_ssh_start
+%_bindir/condor_test_token
+# sbin/condor is a link for master_off, off, on, reconfig,
+# reconfig_schedd, restart
%_sbindir/condor_advertise
%_sbindir/condor_aklog
+%_sbindir/condor_credmon_krb
%_sbindir/condor_c-gahp
%_sbindir/condor_c-gahp_worker_thread
%_sbindir/condor_collector
-%_sbindir/condor_convert_history
+%_sbindir/condor_credd
%_sbindir/condor_fetchlog
+%_sbindir/condor_ft-gahp
%_sbindir/condor_had
%_sbindir/condor_master
%_sbindir/condor_negotiator
@@ -792,38 +667,52 @@ rm -rf %{buildroot}/etc/examples
%_sbindir/condor_schedd
%_sbindir/condor_set_shutdown
%_sbindir/condor_shadow
+%_sbindir/condor_sos
%_sbindir/condor_startd
%_sbindir/condor_starter
%_sbindir/condor_store_cred
-%_sbindir/condor_transferd
+%_sbindir/condor_testwritelog
%_sbindir/condor_updates_stats
%_sbindir/ec2_gahp
%_sbindir/condor_gridmanager
-%_sbindir/condor_gridshell
-%_sbindir/gahp_server
-%_sbindir/grid_monitor
-%_sbindir/grid_monitor.sh
%_sbindir/remote_gahp
-%_sbindir/nordugrid_gahp
+%_sbindir/rvgahp_client
+%_sbindir/rvgahp_proxy
+%_sbindir/rvgahp_server
%_sbindir/AzureGAHPServer
-%_sbindir/condor_sos
-%_sbindir/condor_testwritelog
%_sbindir/gce_gahp
-#%%_bindir/condor_ping
+%_sbindir/arc_gahp
%_libexecdir/condor/condor_gpu_discovery
%_libexecdir/condor/condor_gpu_utilization
+%config(noreplace) %_sysconfdir/condor/ganglia.d/00_default_metrics
%defattr(-,condor,condor,-)
%dir %_var/lib/condor/
%dir %_var/lib/condor/execute/
-%dir %_var/log/condor/
%dir %_var/lib/condor/spool/
-%ghost %dir %_var/lock/condor/
-%dir %_var/run/condor/
-%_libexecdir/condor/accountant_log_fixer
-%_datadir/condor/libcondorapi.so
-
-#################
-%files procd
+%dir %_var/log/condor/
+%defattr(-,root,condor,-)
+%dir %_var/lib/condor/oauth_credentials
+%defattr(-,root,root,-)
+%dir %_var/lib/condor/krb_credentials
+
+###### blahp files #######
+%config %_sysconfdir/blah.config
+%config %_sysconfdir/blparser.conf
+%dir %_sysconfdir/blahp/
+%config %_sysconfdir/blahp/condor_local_submit_attributes.sh
+%config %_sysconfdir/blahp/kubernetes_local_submit_attributes.sh
+%config %_sysconfdir/blahp/lsf_local_submit_attributes.sh
+%config %_sysconfdir/blahp/nqs_local_submit_attributes.sh
+%config %_sysconfdir/blahp/pbs_local_submit_attributes.sh
+%config %_sysconfdir/blahp/sge_local_submit_attributes.sh
+%config %_sysconfdir/blahp/slurm_local_submit_attributes.sh
+%_bindir/blahpd
+%_sbindir/blah_check_config
+%_sbindir/blahpd_daemon
+%dir %_libexecdir/blahp
+%_libexecdir/blahp/*
+
+####### procd files #######
%_sbindir/condor_procd
%_sbindir/gidd_alloc
%_sbindir/procd_ctl
@@ -831,135 +720,24 @@ rm -rf %{buildroot}/etc/examples
%_mandir/man1/gidd_alloc.1.gz
%_mandir/man1/condor_procd.1.gz
-#################
-%if 0%{?with_aviary}
-%files aviary-common
-%doc LICENSE-2.0.txt NOTICE.txt
-%dir %_datadir/condor/aviary
-%_datadir/condor/aviary/jobcontrol.py*
-%_datadir/condor/aviary/jobquery.py*
-%_datadir/condor/aviary/submissions.py*
-%_datadir/condor/aviary/submission_ids.py*
-%_datadir/condor/aviary/subinventory.py*
-%_datadir/condor/aviary/submit.py*
-%_datadir/condor/aviary/setattr.py*
-%_datadir/condor/aviary/jobinventory.py*
-%_datadir/condor/aviary/locator.py*
-%_datadir/condor/aviary/collector_tool.py*
-%dir %_datadir/condor/aviary/dag
-%_datadir/condor/aviary/dag/diamond.dag
-%_datadir/condor/aviary/dag/dag-submit.py*
-%_datadir/condor/aviary/dag/job.sub
-%dir %_datadir/condor/aviary/module
-%_datadir/condor/aviary/module/aviary/util.py*
-%_datadir/condor/aviary/module/aviary/https.py*
-%_datadir/condor/aviary/module/aviary/__init__.py*
-%_datadir/condor/aviary/README
-%dir %_var/lib/condor/aviary
-%_var/lib/condor/aviary/axis2.xml
-%dir %_var/lib/condor/aviary/services
-%dir %_var/lib/condor/aviary/services/job
-%_var/lib/condor/aviary/services/job/services.xml
-%_var/lib/condor/aviary/services/job/aviary-common.xsd
-%_var/lib/condor/aviary/services/job/aviary-job.xsd
-%_var/lib/condor/aviary/services/job/aviary-job.wsdl
-%dir %_var/lib/condor/aviary/services/query
-%_var/lib/condor/aviary/services/query/services.xml
-%_var/lib/condor/aviary/services/query/aviary-common.xsd
-%_var/lib/condor/aviary/services/query/aviary-query.xsd
-%_var/lib/condor/aviary/services/query/aviary-query.wsdl
-%dir %_var/lib/condor/aviary/services/locator
-%_var/lib/condor/aviary/services/locator/services.xml
-%_var/lib/condor/aviary/services/locator/aviary-common.xsd
-%_var/lib/condor/aviary/services/locator/aviary-locator.xsd
-%_var/lib/condor/aviary/services/locator/aviary-locator.wsdl
-%dir %_var/lib/condor/aviary/services/collector
-%_var/lib/condor/aviary/services/collector/services.xml
-%_var/lib/condor/aviary/services/collector/aviary-common.xsd
-%_var/lib/condor/aviary/services/collector/aviary-collector.xsd
-%_var/lib/condor/aviary/services/collector/aviary-collector.wsdl
-
-#################
-%files aviary
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sysconfdir/condor/config.d/61aviary.config
-%_libdir/libaviary_axis_provider.so
-%_libdir/libaviary_wso2_common.so
-%dir %_libdir/condor/plugins
-%_libdir/condor/plugins/AviaryScheddPlugin-plugin.so
-%_libdir/condor/plugins/AviaryLocatorPlugin-plugin.so
-%_libdir/condor/plugins/AviaryCollectorPlugin-plugin.so
-%_sbindir/aviary_query_server
-%_var/lib/condor/aviary/services/job/libaviary_job_axis.so
-%_var/lib/condor/aviary/services/query/libaviary_query_axis.so
-%_var/lib/condor/aviary/services/locator/libaviary_locator_axis.so
-%_var/lib/condor/aviary/services/collector/libaviary_collector_axis.so
-
-#################
-%files aviary-hadoop-common
-%doc LICENSE-2.0.txt NOTICE.txt
-%_var/lib/condor/aviary/services/hadoop/services.xml
-%_var/lib/condor/aviary/services/hadoop/aviary-common.xsd
-%_var/lib/condor/aviary/services/hadoop/aviary-hadoop.xsd
-%_var/lib/condor/aviary/services/hadoop/aviary-hadoop.wsdl
-%_datadir/condor/aviary/hadoop_tool.py*
-
-#################
-%files aviary-hadoop
-%doc LICENSE-2.0.txt NOTICE.txt
-%_var/lib/condor/aviary/services/hadoop/libaviary_hadoop_axis.so
-%_libdir/condor/plugins/AviaryHadoopPlugin-plugin.so
-%_sysconfdir/condor/config.d/63aviary-hadoop.config
-%_datadir/condor/aviary/hdfs_datanode.sh
-%_datadir/condor/aviary/hdfs_namenode.sh
-%_datadir/condor/aviary/mapred_jobtracker.sh
-%_datadir/condor/aviary/mapred_tasktracker.sh
-%endif
-
-#################
-%if 0%{?with_mongodb}
-%files plumage
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sysconfdir/condor/config.d/62plumage.config
-%dir %_libdir/condor/plugins
-%_libdir/condor/plugins/PlumageCollectorPlugin-plugin.so
-%dir %_datadir/condor/plumage
-%_sbindir/plumage_job_etl_server
-%_bindir/plumage_history_load
-%_bindir/plumage_stats
-%_bindir/plumage_history
-%_datadir/condor/plumage/README
-%_datadir/condor/plumage/SCHEMA
-%_datadir/condor/plumage/plumage_accounting
-%_datadir/condor/plumage/plumage_scheduler
-%_datadir/condor/plumage/plumage_utilization
-%defattr(-,condor,condor,-)
-%endif
-
-#################
-%files kbdd
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/condor_kbdd
-
-#################
-%files vm-gahp
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/condor_vm-gahp
-%_libexecdir/condor/libvirt_simple_script.awk
-
-#################
-%files openstack-gahp
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/openstack_gahp
-
-#################
-%files classads
-%doc LICENSE-2.0.txt NOTICE.txt
+####### classads files #######
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
%_libdir/libclassad.so.*
#################
-%files classads-devel
-%doc LICENSE-2.0.txt NOTICE.txt
+%files devel
+%{_includedir}/condor/chirp_client.h
+%{_includedir}/condor/condor_event.h
+%{_includedir}/condor/file_lock.h
+%{_includedir}/condor/read_user_log.h
+%{_libdir}/condor/libchirp_client.a
+%{_libdir}/condor/libcondorapi.a
+%{_libdir}/libclassad.a
+
+####### classads-devel files #######
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
%_bindir/classad_functional_tester
%_bindir/classad_version
%_libdir/libclassad.so
@@ -971,7 +749,7 @@ rm -rf %{buildroot}/etc/examples
%_includedir/classad/classad.h
%_includedir/classad/classadItor.h
%_includedir/classad/classadCache.h
-%_includedir/classad/classad_stl.h
+%_includedir/classad/classad_containers.h
%_includedir/classad/collectionBase.h
%_includedir/classad/collection.h
%_includedir/classad/common.h
@@ -979,14 +757,14 @@ rm -rf %{buildroot}/etc/examples
%_includedir/classad/exprList.h
%_includedir/classad/exprTree.h
%_includedir/classad/fnCall.h
+%_includedir/classad/indexfile.h
%_includedir/classad/jsonSink.h
%_includedir/classad/jsonSource.h
-%_includedir/classad/indexfile.h
%_includedir/classad/lexer.h
%_includedir/classad/lexerSource.h
%_includedir/classad/literals.h
-
%_includedir/classad/matchClassad.h
+%_includedir/classad/natural_cmp.h
%_includedir/classad/operators.h
%_includedir/classad/query.h
%_includedir/classad/sink.h
@@ -1000,55 +778,71 @@ rm -rf %{buildroot}/etc/examples
%_includedir/classad/xmlSource.h
#################
-%if 0%{?with_cream_gahp}
-%files cream-gahp
-%doc LICENSE-2.0.txt NOTICE.txt
-%_sbindir/cream_gahp
+%files kbdd
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
+%_sbindir/condor_kbdd
+
+#################
+%if ! 0%{?amzn}
+%files vm-gahp
+%defattr(-,root,root,-)
+%doc LICENSE NOTICE.txt
+%_sbindir/condor_vm-gahp
+%_libexecdir/condor/libvirt_simple_script.awk
%endif
+#################
+%files test
+%defattr(-,root,root,-)
+%_libexecdir/condor/condor_sinful
+%_libexecdir/condor/condor_testingd
+%_libexecdir/condor/test_user_mapping
+%_bindir/condor_manifest
+
#################
%files -n python3-condor
+%defattr(-,root,root,-)
%_bindir/condor_top
-%{python3_sitearch}/classad.so
-%{python3_sitearch}/htcondor.so
-%{_libdir}/libpy3classad%{python3_version}_%{srcver}.so
+%_bindir/classad_eval
+%_bindir/condor_watch_q
+%_bindir/htcondor
+%_libdir/libpyclassad3*.so
+%_libexecdir/condor/libclassad_python_user.cpython-3*.so
+%_libexecdir/condor/libclassad_python3_user.so
+/usr/lib64/python%{python3_version}/site-packages/classad/
+/usr/lib64/python%{python3_version}/site-packages/htcondor/
+/usr/lib64/python%{python3_version}/site-packages/htcondor-*.egg-info/
+/usr/lib64/python%{python3_version}/site-packages/htcondor_cli/
+
+%files credmon-oauth
+%doc /usr/share/doc/condor/examples/condor_credmon_oauth
+%_sbindir/condor_credmon_oauth
+%_sbindir/scitokens_credential_producer
+%_var/www/wsgi-scripts/condor_credmon_oauth
+%_libexecdir/condor/credmon
+%_var/lib/condor/oauth_credentials/README.credentials
+%config(noreplace) %_sysconfdir/condor/config.d/40-oauth-credmon.conf
+%config(noreplace) %_sysconfdir/condor/config.d/40-oauth-tokens.conf
+%ghost %_var/lib/condor/oauth_credentials/wsgi_session_key
+%ghost %_var/lib/condor/oauth_credentials/CREDMON_COMPLETE
+%ghost %_var/lib/condor/oauth_credentials/pid
+
+%files credmon-vault
+%doc /usr/share/doc/condor/examples/condor_credmon_oauth
+%_sbindir/condor_credmon_vault
+%_bindir/condor_vault_storer
+%_libexecdir/condor/credmon
+%config(noreplace) %_sysconfdir/condor/config.d/40-vault-credmon.conf
+%ghost %_var/lib/condor/oauth_credentials/CREDMON_COMPLETE
+%ghost %_var/lib/condor/oauth_credentials/pid
-#################
%files -n minicondor
%config(noreplace) %_sysconfdir/condor/config.d/00-minicondor
-#################
-%if 0%{?with_bosco}
-%files bosco
-%config(noreplace) %_sysconfdir/condor/campus_factory.conf
-%config(noreplace) %_sysconfdir/condor/config.d/60-campus_factory.config
-%_libexecdir/condor/shellselector
-%_libexecdir/condor/campus_factory
-%_sbindir/bosco_install
-%_sbindir/campus_factory
-%_sbindir/condor_ft-gahp
-%_sbindir/runfactory
-%_bindir/bosco_cluster
-%_bindir/bosco_ssh_start
-%_bindir/bosco_start
-%_bindir/bosco_stop
-%_bindir/bosco_findplatform
-%_bindir/bosco_uninstall
-%_bindir/bosco_quickstart
-%_bindir/htsub
-%_sbindir/glidein_creation
-%_datadir/condor/campus_factory
-%_mandir/man1/bosco_cluster.1.gz
-%_mandir/man1/bosco_findplatform.1.gz
-%_mandir/man1/bosco_install.1.gz
-%_mandir/man1/bosco_ssh_start.1.gz
-%_mandir/man1/bosco_start.1.gz
-%_mandir/man1/bosco_stop.1.gz
-%_mandir/man1/bosco_uninstall.1.gz
-%endif
-
#################
%post
+/sbin/ldconfig
%systemd_post %{name}.service
%preun
@@ -1059,6 +853,11 @@ rm -rf %{buildroot}/etc/examples
/sbin/ldconfig
%changelog
+* Sat Sep 30 2023 Tim Theisen <ttheisen(a)fedoraproject.org> - 23.0.0-1
+- Update to latest upstream 23.0.0 - rhbz#1959462
+- Fix build issues - rhbz#2114520, rhbz#2172630, rhbz#2172684
+- Update to PCRE2 - rhbz#2128284
+
* Thu Jan 19 2023 Fedora Release Engineering <releng(a)fedoraproject.org> - 8.8.15-10
- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild
diff --git a/condor_oom_v2.patch b/condor_oom_v2.patch
deleted file mode 100644
index 0521be6..0000000
--- a/condor_oom_v2.patch
+++ /dev/null
@@ -1,340 +0,0 @@
-diff --git a/build/cmake/CondorConfigure.cmake b/build/cmake/CondorConfigure.cmake
-index e61fb4f..1094cb3 100644
---- a/build/cmake/CondorConfigure.cmake
-+++ b/build/cmake/CondorConfigure.cmake
-@@ -164,6 +164,7 @@ if( NOT WINDOWS)
- check_function_exists("setlinebuf" HAVE_SETLINEBUF)
- check_function_exists("snprintf" HAVE_SNPRINTF)
- check_function_exists("snprintf" HAVE_WORKING_SNPRINTF)
-+ check_function_exists("eventfd" HAVE_EVENTFD)
-
- check_function_exists("stat64" HAVE_STAT64)
- check_function_exists("_stati64" HAVE__STATI64)
-diff --git a/src/condor_includes/config.h.cmake b/src/condor_includes/config.h.cmake
-index b083945..3bd92b0 100644
---- a/src/condor_includes/config.h.cmake
-+++ b/src/condor_includes/config.h.cmake
-@@ -438,6 +438,9 @@
- /* Define to 1 if you have the 'snprintf' function. (USED)*/
- #cmakedefine HAVE_SNPRINTF 1
-
-+/* Define to 1 if you have the 'eventfd' function. (USED)*/
-+#cmakedefine HAVE_EVENTFD 1
-+
- /* Define to 1 if you have the 'stat64' function. (USED)*/
- #cmakedefine HAVE_STAT64 1
-
-diff --git a/src/condor_starter.V6.1/vanilla_proc.cpp b/src/condor_starter.V6.1/vanilla_proc.cpp
-index 2e5538f..fe02dd3 100644
---- a/src/condor_starter.V6.1/vanilla_proc.cpp
-+++ b/src/condor_starter.V6.1/vanilla_proc.cpp
-@@ -42,9 +42,16 @@
- extern dynuser* myDynuser;
- #endif
-
-+#if defined(HAVE_EVENTFD)
-+#include <sys/eventfd.h>
-+#endif
-+
- extern CStarter *Starter;
-
--VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd)
-+VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd),
-+ m_memory_limit(-1),
-+ m_oom_fd(-1),
-+ m_oom_efd(-1)
- {
- #if !defined(WIN32)
- m_escalation_tid = -1;
-@@ -215,6 +222,12 @@ VanillaProc::StartJob()
- }
- fi.group_ptr = &tracking_gid;
- }
-+
-+ // Increase the OOM score of this process; the child will inherit it.
-+ // This way, the job will be heavily preferred to be killed over a normal process.
-+ // OOM score is currently exponential - a score of 4 is a factor-16 increase in
-+ // the OOM score.
-+ setupOOMScore(4);
- #endif
-
- #if defined(HAVE_EXT_LIBCGROUP)
-@@ -406,6 +419,7 @@ VanillaProc::StartJob()
- int MemMb;
- if (MachineAd->LookupInteger(ATTR_MEMORY, MemMb)) {
- uint64_t MemMb_big = MemMb;
-+ m_memory_limit = MemMb_big;
- climits.set_memory_limit_bytes(1024*1024*MemMb_big, mem_is_soft);
- } else {
- dprintf(D_ALWAYS, "Not setting memory soft limit in cgroup because "
-@@ -425,6 +439,14 @@ VanillaProc::StartJob()
- } else {
- dprintf(D_FULLDEBUG, "Invalid value of SlotWeight in machine ClassAd; ignoring.\n");
- }
-+ setupOOMEvent(cgroup);
-+ }
-+
-+ // Now that the job is started, decrease the likelihood that the starter
-+ // is killed instead of the job itself.
-+ if (retval)
-+ {
-+ setupOOMScore(-4);
- }
-
- #endif
-@@ -611,5 +633,224 @@ VanillaProc::finishShutdownFast()
- // -gquinn, 2007-11-14
- daemonCore->Kill_Family(JobPid);
-
-+ if (m_oom_efd >= 0) {
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe in shutdown %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ m_oom_efd = -1;
-+ }
-+ if (m_oom_fd >= 0) {
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ }
-+
- return false; // shutdown is pending, so return false
- }
-+
-+/*
-+ * This will be called when the event fd fires, indicating an OOM event.
-+ */
-+int
-+VanillaProc::outOfMemoryEvent(int /* fd */)
-+{
-+ std::stringstream ss;
-+ if (m_memory_limit >= 0) {
-+ ss << "Job has gone over memory limit of " << m_memory_limit << " megabytes.";
-+ } else {
-+ ss << "Job has encountered an out-of-memory event.";
-+ }
-+ Starter->jic->holdJob(ss.str().c_str(), CONDOR_HOLD_CODE_JobOutOfResources, 0);
-+
-+ // this will actually clean up the job
-+ if ( Starter->Hold( ) ) {
-+ dprintf( D_FULLDEBUG, "All jobs were removed due to OOM event.\n" );
-+ Starter->allJobsDone();
-+ }
-+
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ close(m_oom_fd);
-+ m_oom_efd = -1;
-+ m_oom_fd = -1;
-+
-+ Starter->ShutdownFast();
-+
-+ return 0;
-+}
-+
-+int
-+VanillaProc::setupOOMScore(int new_score)
-+{
-+#if !defined(LINUX)
-+ if (new_score) // Done to suppress compiler warnings.
-+ return 0;
-+ return 0;
-+#endif
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ // oom_adj is deprecated on modern kernels and causes a deprecation warning when used.
-+ int oom_score_fd = open("/proc/self/oom_score_adj", O_WRONLY | O_CLOEXEC);
-+ if (oom_score_fd == -1) {
-+ if (errno != ENOENT) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_score_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ } else {
-+ int oom_score_fd = open("/proc/self/oom_adj", O_WRONLY | O_CLOEXEC);
-+ if (oom_score_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ }
-+ } else {
-+ // oom_score_adj is linear; oom_adj was exponential.
-+ if (new_score > 0)
-+ new_score = 1 << new_score;
-+ else
-+ new_score = -(1 << -new_score);
-+ }
-+
-+ std::stringstream ss;
-+ ss << new_score;
-+ std::string new_score_str = ss.str();
-+ ssize_t nwritten = full_write(oom_score_fd, new_score_str.c_str(), new_score_str.length());
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into oom_adj file for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ close(oom_score_fd);
-+ return 1;
-+ }
-+ close(oom_score_fd);
-+ return 0;
-+}
-+
-+int
-+VanillaProc::setupOOMEvent(const std::string &cgroup_string)
-+{
-+#if !(defined(HAVE_EVENTFD) && defined(HAVE_EXT_LIBCGROUP))
-+ return 0;
-+#endif
-+ // Initialize the event descriptor
-+ m_oom_efd = eventfd(0, EFD_CLOEXEC);
-+ if (m_oom_efd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to create new event FD for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Find the memcg location on disk
-+ void * handle = NULL;
-+ struct cgroup_mount_point mount_info;
-+ int ret = cgroup_get_controller_begin(&handle, &mount_info);
-+ std::stringstream oom_control;
-+ std::stringstream event_control;
-+ bool found_memcg = false;
-+ while (ret == 0) {
-+ if (strcmp(mount_info.name, MEMORY_CONTROLLER_STR) == 0) {
-+ found_memcg = true;
-+ oom_control << mount_info.path << "/";
-+ event_control << mount_info.path << "/";
-+ break;
-+ }
-+ cgroup_get_controller_next(&handle, &mount_info);
-+ }
-+ if (!found_memcg && (ret != ECGEOF)) {
-+ dprintf(D_ALWAYS,
-+ "Error while locating memcg controller for starter: %u %s\n",
-+ ret, cgroup_strerror(ret));
-+ return 1;
-+ }
-+ cgroup_get_controller_end(&handle);
-+ if (found_memcg == false) {
-+ dprintf(D_ALWAYS,
-+ "Memcg is not available; OOM notification disabled for starter.\n");
-+ return 1;
-+ }
-+
-+ // Finish constructing the location of the control files
-+ oom_control << cgroup_string << "/memory.oom_control";
-+ std::string oom_control_str = oom_control.str();
-+ event_control << cgroup_string << "/cgroup.event_control";
-+ std::string event_control_str = event_control.str();
-+
-+ // Open the oom_control and event control files
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ m_oom_fd = open(oom_control_str.c_str(), O_RDONLY | O_CLOEXEC);
-+ if (m_oom_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ int event_ctrl_fd = open(event_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (event_ctrl_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open event control for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Inform Linux we will be handling the OOM events for this container.
-+ int oom_fd2 = open(oom_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (oom_fd2 == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for writing for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ const char limits [] = "1";
-+ ssize_t nwritten = full_write(oom_fd2, &limits, 1);
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to set OOM control to %s for starter: %u %s\n",
-+ limits, errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ close(oom_fd2);
-+ return 1;
-+ }
-+ close(oom_fd2);
-+
-+ // Create the subscription string:
-+ std::stringstream sub_ss;
-+ sub_ss << m_oom_efd << " " << m_oom_fd;
-+ std::string sub_str = sub_ss.str();
-+
-+ if ((nwritten = full_write(event_ctrl_fd, sub_str.c_str(), sub_str.size())) < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into event control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ return 1;
-+ }
-+ close(event_ctrl_fd);
-+
-+ // Fool DC into talking to the eventfd
-+ int pipes[2]; pipes[0] = -1; pipes[1] = -1;
-+ int fd_to_replace = -1;
-+ if (daemonCore->Create_Pipe(pipes, true) == -1 || pipes[0] == -1) {
-+ dprintf(D_ALWAYS, "Unable to create a DC pipe\n");
-+ close(m_oom_efd);
-+ m_oom_efd = -1;
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ return 1;
-+ }
-+ if ( daemonCore->Get_Pipe_FD(pipes[0], &fd_to_replace) == -1 || fd_to_replace == -1) {
-+ dprintf(D_ALWAYS, "Unable to lookup pipe's FD\n");
-+ close(m_oom_efd); m_oom_efd = -1;
-+ close(m_oom_fd); m_oom_fd = -1;
-+ daemonCore->Close_Pipe(pipes[0]);
-+ daemonCore->Close_Pipe(pipes[1]);
-+ }
-+ dup3(m_oom_efd, fd_to_replace, O_CLOEXEC);
-+ close(m_oom_efd);
-+ m_oom_efd = pipes[0];
-+
-+ // Inform DC we want to recieve notifications from this FD.
-+ daemonCore->Register_Pipe(pipes[0],"OOM event fd", static_cast<PipeHandlercpp>(&VanillaProc::outOfMemoryEvent),"OOM Event Handler",this,HANDLE_READ);
-+ return 0;
-+}
-+
-diff --git a/src/condor_starter.V6.1/vanilla_proc.h b/src/condor_starter.V6.1/vanilla_proc.h
-index d524cf5..90b4741 100644
---- a/src/condor_starter.V6.1/vanilla_proc.h
-+++ b/src/condor_starter.V6.1/vanilla_proc.h
-@@ -74,6 +74,15 @@ private:
- #if !defined(WIN32)
- int m_escalation_tid;
- #endif
-+
-+ // Configure OOM killer for this job
-+ int m_memory_limit; // Memory limit, in MB.
-+ int m_oom_fd; // The file descriptor which recieves events
-+ int m_oom_efd; // The event FD to watch
-+ int setupOOMScore(int new_score);
-+ int outOfMemoryEvent(int fd);
-+ int setupOOMEvent(const std::string & cgroup_string);
-+
- };
-
- #endif
-diff --git a/src/condor_utils/condor_holdcodes.h b/src/condor_utils/condor_holdcodes.h
-index d788d6e..3083db3 100644
---- a/src/condor_utils/condor_holdcodes.h
-+++ b/src/condor_utils/condor_holdcodes.h
-@@ -128,4 +128,6 @@ const int CONDOR_HOLD_CODE_GlexecChownSandboxToCondor = 30;
-
- const int CONDOR_HOLD_CODE_PrivsepChownSandboxToCondor = 31;
-
-+const int CONDOR_HOLD_CODE_JobOutOfResources = 32;
-+
- #endif
diff --git a/condor_oom_v3.patch b/condor_oom_v3.patch
deleted file mode 100644
index 5f6db89..0000000
--- a/condor_oom_v3.patch
+++ /dev/null
@@ -1,342 +0,0 @@
-diff --git a/build/cmake/CondorConfigure.cmake b/build/cmake/CondorConfigure.cmake
-index e61fb4f..1094cb3 100644
---- a/build/cmake/CondorConfigure.cmake
-+++ b/build/cmake/CondorConfigure.cmake
-@@ -164,6 +164,7 @@ if( NOT WINDOWS)
- check_function_exists("setlinebuf" HAVE_SETLINEBUF)
- check_function_exists("snprintf" HAVE_SNPRINTF)
- check_function_exists("snprintf" HAVE_WORKING_SNPRINTF)
-+ check_function_exists("eventfd" HAVE_EVENTFD)
-
- check_function_exists("stat64" HAVE_STAT64)
- check_function_exists("_stati64" HAVE__STATI64)
-diff --git a/src/condor_includes/config.h.cmake b/src/condor_includes/config.h.cmake
-index b083945..3bd92b0 100644
---- a/src/condor_includes/config.h.cmake
-+++ b/src/condor_includes/config.h.cmake
-@@ -438,6 +438,9 @@
- /* Define to 1 if you have the 'snprintf' function. (USED)*/
- #cmakedefine HAVE_SNPRINTF 1
-
-+/* Define to 1 if you have the 'eventfd' function. (USED)*/
-+#cmakedefine HAVE_EVENTFD 1
-+
- /* Define to 1 if you have the 'stat64' function. (USED)*/
- #cmakedefine HAVE_STAT64 1
-
-diff --git a/src/condor_starter.V6.1/vanilla_proc.cpp b/src/condor_starter.V6.1/vanilla_proc.cpp
-index 2e5538f..0246e5e 100644
---- a/src/condor_starter.V6.1/vanilla_proc.cpp
-+++ b/src/condor_starter.V6.1/vanilla_proc.cpp
-@@ -42,9 +42,16 @@
- extern dynuser* myDynuser;
- #endif
-
-+#if defined(HAVE_EVENTFD)
-+#include <sys/eventfd.h>
-+#endif
-+
- extern CStarter *Starter;
-
--VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd)
-+VanillaProc::VanillaProc(ClassAd* jobAd) : OsProc(jobAd),
-+ m_memory_limit(-1),
-+ m_oom_fd(-1),
-+ m_oom_efd(-1)
- {
- #if !defined(WIN32)
- m_escalation_tid = -1;
-@@ -215,6 +222,12 @@ VanillaProc::StartJob()
- }
- fi.group_ptr = &tracking_gid;
- }
-+
-+ // Increase the OOM score of this process; the child will inherit it.
-+ // This way, the job will be heavily preferred to be killed over a normal process.
-+ // OOM score is currently exponential - a score of 4 is a factor-16 increase in
-+ // the OOM score.
-+ setupOOMScore(4);
- #endif
-
- #if defined(HAVE_EXT_LIBCGROUP)
-@@ -406,6 +419,7 @@ VanillaProc::StartJob()
- int MemMb;
- if (MachineAd->LookupInteger(ATTR_MEMORY, MemMb)) {
- uint64_t MemMb_big = MemMb;
-+ m_memory_limit = MemMb_big;
- climits.set_memory_limit_bytes(1024*1024*MemMb_big, mem_is_soft);
- } else {
- dprintf(D_ALWAYS, "Not setting memory soft limit in cgroup because "
-@@ -425,6 +439,14 @@ VanillaProc::StartJob()
- } else {
- dprintf(D_FULLDEBUG, "Invalid value of SlotWeight in machine ClassAd; ignoring.\n");
- }
-+ setupOOMEvent(cgroup);
-+ }
-+
-+ // Now that the job is started, decrease the likelihood that the starter
-+ // is killed instead of the job itself.
-+ if (retval)
-+ {
-+ setupOOMScore(-4);
- }
-
- #endif
-@@ -611,5 +633,226 @@ VanillaProc::finishShutdownFast()
- // -gquinn, 2007-11-14
- daemonCore->Kill_Family(JobPid);
-
-+ if (m_oom_efd >= 0) {
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe in shutdown %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ m_oom_efd = -1;
-+ }
-+ if (m_oom_fd >= 0) {
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ }
-+
- return false; // shutdown is pending, so return false
- }
-+
-+/*
-+ * This will be called when the event fd fires, indicating an OOM event.
-+ */
-+int
-+VanillaProc::outOfMemoryEvent(int /* fd */)
-+{
-+ std::stringstream ss;
-+ if (m_memory_limit >= 0) {
-+ ss << "Job has gone over memory limit of " << m_memory_limit << " megabytes.";
-+ } else {
-+ ss << "Job has encountered an out-of-memory event.";
-+ }
-+ Starter->jic->holdJob(ss.str().c_str(), CONDOR_HOLD_CODE_JobOutOfResources, 0);
-+
-+ // this will actually clean up the job
-+ if ( Starter->Hold( ) ) {
-+ dprintf( D_FULLDEBUG, "All jobs were removed due to OOM event.\n" );
-+ Starter->allJobsDone();
-+ }
-+
-+ dprintf(D_FULLDEBUG, "Closing event FD pipe %d.\n", m_oom_efd);
-+ daemonCore->Close_Pipe(m_oom_efd);
-+ close(m_oom_fd);
-+ m_oom_efd = -1;
-+ m_oom_fd = -1;
-+
-+ Starter->ShutdownFast();
-+
-+ return 0;
-+}
-+
-+int
-+VanillaProc::setupOOMScore(int new_score)
-+{
-+#if !defined(LINUX)
-+ if (new_score) // Done to suppress compiler warnings.
-+ return 0;
-+ return 0;
-+#else
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ // oom_adj is deprecated on modern kernels and causes a deprecation warning when used.
-+ int oom_score_fd = open("/proc/self/oom_score_adj", O_WRONLY);
-+ if (oom_score_fd == -1) {
-+ if (errno != ENOENT) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_score_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ } else {
-+ int oom_score_fd = open("/proc/self/oom_adj", O_WRONLY);
-+ if (oom_score_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open oom_adj for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ }
-+ } else {
-+ // oom_score_adj is linear; oom_adj was exponential.
-+ if (new_score > 0)
-+ new_score = 1 << new_score;
-+ else
-+ new_score = -(1 << -new_score);
-+ }
-+
-+ std::stringstream ss;
-+ ss << new_score;
-+ std::string new_score_str = ss.str();
-+ ssize_t nwritten = full_write(oom_score_fd, new_score_str.c_str(), new_score_str.length());
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into oom_adj file for the starter: (errno=%u, %s)\n",
-+ errno, strerror(errno));
-+ close(oom_score_fd);
-+ return 1;
-+ }
-+ close(oom_score_fd);
-+ return 0;
-+#endif
-+}
-+
-+int
-+VanillaProc::setupOOMEvent(const std::string &cgroup_string)
-+{
-+#if !(defined(HAVE_EVENTFD) && defined(HAVE_EXT_LIBCGROUP))
-+ return 0;
-+#else
-+ // Initialize the event descriptor
-+ m_oom_efd = eventfd(0, EFD_CLOEXEC);
-+ if (m_oom_efd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to create new event FD for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Find the memcg location on disk
-+ void * handle = NULL;
-+ struct cgroup_mount_point mount_info;
-+ int ret = cgroup_get_controller_begin(&handle, &mount_info);
-+ std::stringstream oom_control;
-+ std::stringstream event_control;
-+ bool found_memcg = false;
-+ while (ret == 0) {
-+ if (strcmp(mount_info.name, MEMORY_CONTROLLER_STR) == 0) {
-+ found_memcg = true;
-+ oom_control << mount_info.path << "/";
-+ event_control << mount_info.path << "/";
-+ break;
-+ }
-+ cgroup_get_controller_next(&handle, &mount_info);
-+ }
-+ if (!found_memcg && (ret != ECGEOF)) {
-+ dprintf(D_ALWAYS,
-+ "Error while locating memcg controller for starter: %u %s\n",
-+ ret, cgroup_strerror(ret));
-+ return 1;
-+ }
-+ cgroup_get_controller_end(&handle);
-+ if (found_memcg == false) {
-+ dprintf(D_ALWAYS,
-+ "Memcg is not available; OOM notification disabled for starter.\n");
-+ return 1;
-+ }
-+
-+ // Finish constructing the location of the control files
-+ oom_control << cgroup_string << "/memory.oom_control";
-+ std::string oom_control_str = oom_control.str();
-+ event_control << cgroup_string << "/cgroup.event_control";
-+ std::string event_control_str = event_control.str();
-+
-+ // Open the oom_control and event control files
-+ TemporaryPrivSentry sentry(PRIV_ROOT);
-+ m_oom_fd = open(oom_control_str.c_str(), O_RDONLY | O_CLOEXEC);
-+ if (m_oom_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ int event_ctrl_fd = open(event_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (event_ctrl_fd == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open event control for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+
-+ // Inform Linux we will be handling the OOM events for this container.
-+ int oom_fd2 = open(oom_control_str.c_str(), O_WRONLY | O_CLOEXEC);
-+ if (oom_fd2 == -1) {
-+ dprintf(D_ALWAYS,
-+ "Unable to open the OOM control file for writing for starter: %u %s\n",
-+ errno, strerror(errno));
-+ return 1;
-+ }
-+ const char limits [] = "1";
-+ ssize_t nwritten = full_write(oom_fd2, &limits, 1);
-+ if (nwritten < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to set OOM control to %s for starter: %u %s\n",
-+ limits, errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ close(oom_fd2);
-+ return 1;
-+ }
-+ close(oom_fd2);
-+
-+ // Create the subscription string:
-+ std::stringstream sub_ss;
-+ sub_ss << m_oom_efd << " " << m_oom_fd;
-+ std::string sub_str = sub_ss.str();
-+
-+ if ((nwritten = full_write(event_ctrl_fd, sub_str.c_str(), sub_str.size())) < 0) {
-+ dprintf(D_ALWAYS,
-+ "Unable to write into event control file for starter: %u %s\n",
-+ errno, strerror(errno));
-+ close(event_ctrl_fd);
-+ return 1;
-+ }
-+ close(event_ctrl_fd);
-+
-+ // Fool DC into talking to the eventfd
-+ int pipes[2]; pipes[0] = -1; pipes[1] = -1;
-+ int fd_to_replace = -1;
-+ if (daemonCore->Create_Pipe(pipes, true) == -1 || pipes[0] == -1) {
-+ dprintf(D_ALWAYS, "Unable to create a DC pipe\n");
-+ close(m_oom_efd);
-+ m_oom_efd = -1;
-+ close(m_oom_fd);
-+ m_oom_fd = -1;
-+ return 1;
-+ }
-+ if ( daemonCore->Get_Pipe_FD(pipes[0], &fd_to_replace) == -1 || fd_to_replace == -1) {
-+ dprintf(D_ALWAYS, "Unable to lookup pipe's FD\n");
-+ close(m_oom_efd); m_oom_efd = -1;
-+ close(m_oom_fd); m_oom_fd = -1;
-+ daemonCore->Close_Pipe(pipes[0]);
-+ daemonCore->Close_Pipe(pipes[1]);
-+ }
-+ dup3(m_oom_efd, fd_to_replace, O_CLOEXEC);
-+ close(m_oom_efd);
-+ m_oom_efd = pipes[0];
-+
-+ // Inform DC we want to recieve notifications from this FD.
-+ daemonCore->Register_Pipe(pipes[0],"OOM event fd", static_cast<PipeHandlercpp>(&VanillaProc::outOfMemoryEvent),"OOM Event Handler",this,HANDLE_READ);
-+ return 0;
-+#endif
-+}
-+
-diff --git a/src/condor_starter.V6.1/vanilla_proc.h b/src/condor_starter.V6.1/vanilla_proc.h
-index d524cf5..90b4741 100644
---- a/src/condor_starter.V6.1/vanilla_proc.h
-+++ b/src/condor_starter.V6.1/vanilla_proc.h
-@@ -74,6 +74,15 @@ private:
- #if !defined(WIN32)
- int m_escalation_tid;
- #endif
-+
-+ // Configure OOM killer for this job
-+ int m_memory_limit; // Memory limit, in MB.
-+ int m_oom_fd; // The file descriptor which recieves events
-+ int m_oom_efd; // The event FD to watch
-+ int setupOOMScore(int new_score);
-+ int outOfMemoryEvent(int fd);
-+ int setupOOMEvent(const std::string & cgroup_string);
-+
- };
-
- #endif
-diff --git a/src/condor_utils/condor_holdcodes.h b/src/condor_utils/condor_holdcodes.h
-index d788d6e..3083db3 100644
---- a/src/condor_utils/condor_holdcodes.h
-+++ b/src/condor_utils/condor_holdcodes.h
-@@ -128,4 +128,6 @@ const int CONDOR_HOLD_CODE_GlexecChownSandboxToCondor = 30;
-
- const int CONDOR_HOLD_CODE_PrivsepChownSandboxToCondor = 31;
-
-+const int CONDOR_HOLD_CODE_JobOutOfResources = 32;
-+
- #endif
diff --git a/condor_partial_defrag_v2.patch b/condor_partial_defrag_v2.patch
deleted file mode 100644
index d2b0016..0000000
--- a/condor_partial_defrag_v2.patch
+++ /dev/null
@@ -1,208 +0,0 @@
-diff --git a/src/condor_daemon_client/dc_startd.cpp b/src/condor_daemon_client/dc_startd.cpp
-index 7261c4a..09a2689 100644
---- a/src/condor_daemon_client/dc_startd.cpp
-+++ b/src/condor_daemon_client/dc_startd.cpp
-@@ -51,7 +51,7 @@ DCStartd::DCStartd( const char* tName, const char* tPool, const char* tAddr,
- }
- }
-
--DCStartd::DCStartd( ClassAd *ad, const char *tPool )
-+DCStartd::DCStartd( const ClassAd *ad, const char *tPool )
- : Daemon(ad,DT_STARTD,tPool),
- claim_id(NULL)
- {
-diff --git a/src/condor_daemon_client/dc_startd.h b/src/condor_daemon_client/dc_startd.h
-index c5f3e89..ff20892 100644
---- a/src/condor_daemon_client/dc_startd.h
-+++ b/src/condor_daemon_client/dc_startd.h
-@@ -49,7 +49,7 @@ public:
- DCStartd( const char* const name, const char* const pool,
- const char* const addr, const char* const id );
-
-- DCStartd( ClassAd *ad, const char *pool = NULL );
-+ DCStartd( const ClassAd *ad, const char *pool = NULL );
-
- /// Destructor.
- ~DCStartd();
-diff --git a/src/defrag/defrag.cpp b/src/defrag/defrag.cpp
-index 26aec0a..8710b5d 100644
---- a/src/defrag/defrag.cpp
-+++ b/src/defrag/defrag.cpp
-@@ -185,6 +185,8 @@ void Defrag::config()
- }
- }
-
-+ m_can_cancel = param_boolean("DEFRAG_CAN_CANCEL", true);
-+
- param(m_defrag_name,"DEFRAG_NAME");
-
- int stats_quantum = m_polling_interval;
-@@ -487,8 +489,17 @@ void Defrag::poll()
- int num_whole_machines = countMachines(m_whole_machine_expr.c_str(),"DEFRAG_WHOLE_MACHINE_EXPR",&whole_machines);
- m_stats.WholeMachines = num_whole_machines;
-
-+ MachineSet draining_whole_machines;
-+ std::stringstream draining_whole_machines_ss;
-+ draining_whole_machines_ss << m_whole_machine_expr << " && Draining && Offline=!=True";
-+ int num_draining_whole_machines = countMachines(draining_whole_machines_ss.str().c_str(),
-+ "<DEFRAG_WHOLE_MACHINE_EXPR Draining>", &draining_whole_machines);
-+
- dprintf(D_ALWAYS,"There are currently %d draining and %d whole machines.\n",
- num_draining,num_whole_machines);
-+ if (num_draining_whole_machines)
-+ dprintf(D_ALWAYS, "Of the %d whole machines, %d are in the draining state.\n",
-+ num_whole_machines, num_draining_whole_machines);
-
- queryDrainingCost();
-
-@@ -548,8 +559,7 @@ void Defrag::poll()
-
- ClassAdList startdAds;
- std::string requirements;
-- sprintf(requirements,"(%s) && Draining =!= true",m_defrag_requirements.c_str());
-- if( !queryMachines(requirements.c_str(),"DEFRAG_REQUIREMENTS",startdAds) ) {
-+ if( !queryMachines(m_defrag_requirements.c_str(),"DEFRAG_REQUIREMENTS",startdAds) ) {
- dprintf(D_ALWAYS,"Doing nothing, because the query to select machines matching DEFRAG_REQUIREMENTS failed.\n");
- return;
- }
-@@ -561,12 +571,26 @@ void Defrag::poll()
- int num_drained = 0;
- ClassAd *startd_ad;
- MachineSet machines_done;
-+ MachineSet draining_machines_done;
- while( (startd_ad=startdAds.Next()) ) {
- std::string machine;
- std::string name;
- startd_ad->LookupString(ATTR_NAME,name);
- slotNameToDaemonName(name,machine);
-
-+ if( !draining_machines_done.count(machine) && draining_whole_machines.count(machine) ) {
-+ cancel_drain(*startd_ad);
-+ draining_machines_done.insert(machine);
-+ continue;
-+ }
-+
-+ // Do not consider slots which are already draining.
-+ bool startd_currently_draining = false;
-+ startd_ad->LookupBool("Draining", startd_currently_draining);
-+ if( startd_currently_draining ) {
-+ continue;
-+ }
-+
- if( machines_done.count(machine) ) {
- dprintf(D_FULLDEBUG,
- "Skipping %s: already attempted to drain %s in this cycle.\n",
-@@ -581,14 +605,13 @@ void Defrag::poll()
- continue;
- }
-
-- if( drain(startd_ad) ) {
-+ if( (num_drained++ < num_to_drain) && drain(*startd_ad) ) {
- machines_done.insert(machine);
-
-- if( ++num_drained >= num_to_drain ) {
-+ if( num_drained >= num_to_drain ) {
- dprintf(D_ALWAYS,
- "Drained maximum number of machines allowed in this cycle (%d).\n",
- num_to_drain);
-- break;
- }
- }
- }
-@@ -601,26 +624,24 @@ void Defrag::poll()
- }
-
- bool
--Defrag::drain(ClassAd *startd_ad)
-+Defrag::drain(const ClassAd &startd_ad)
- {
-- ASSERT( startd_ad );
--
- std::string name;
-- startd_ad->LookupString(ATTR_NAME,name);
-+ startd_ad.LookupString(ATTR_NAME,name);
-
- dprintf(D_ALWAYS,"Initiating %s draining of %s.\n",
- m_draining_schedule_str.c_str(),name.c_str());
-
-- DCStartd startd( startd_ad );
-+ DCStartd startd( &startd_ad );
-
- int graceful_completion = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_COMPLETION,graceful_completion);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_COMPLETION,graceful_completion);
- int quick_completion = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_COMPLETION,quick_completion);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_COMPLETION,quick_completion);
- int graceful_badput = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_BADPUT,graceful_badput);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_GRACEFUL_DRAINING_BADPUT,graceful_badput);
- int quick_badput = 0;
-- startd_ad->LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_BADPUT,quick_badput);
-+ startd_ad.LookupInteger(ATTR_EXPECTED_MACHINE_QUICK_DRAINING_BADPUT,quick_badput);
-
- time_t now = time(NULL);
- std::string draining_check_expr;
-@@ -659,6 +680,27 @@ Defrag::drain(ClassAd *startd_ad)
- return true;
- }
-
-+bool
-+Defrag::cancel_drain(const ClassAd &startd_ad)
-+{
-+
-+ std::string name;
-+ startd_ad.LookupString(ATTR_NAME,name);
-+
-+ dprintf(D_ALWAYS,"Initiating %s draining of %s.\n",
-+ m_draining_schedule_str.c_str(),name.c_str());
-+
-+ DCStartd startd( &startd_ad );
-+
-+ bool rval = startd.cancelDrainJobs( NULL );
-+ if ( rval ) {
-+ dprintf(D_FULLDEBUG, "Sent request to cancel draining on %s\n", startd.name());
-+ } else {
-+ dprintf(D_ALWAYS, "Unable to cancel draining on %s: %s\n", startd.name(), startd.error());
-+ }
-+ return rval;
-+}
-+
- void
- Defrag::publish(ClassAd *ad)
- {
-diff --git a/src/defrag/defrag.h b/src/defrag/defrag.h
-index 8c7fd51..909b569 100644
---- a/src/defrag/defrag.h
-+++ b/src/defrag/defrag.h
-@@ -40,11 +40,11 @@ class Defrag: public Service {
- void stop();
-
- void poll(); // do the periodic policy evaluation
-- bool drain(ClassAd *startd_ad);
-
- typedef std::set< std::string > MachineSet;
-
- private:
-+
- int m_polling_interval; // delay between evaluations of the policy
- int m_polling_timer;
- double m_draining_per_hour;
-@@ -58,6 +58,7 @@ class Defrag: public Service {
- ClassAd m_rank_ad;
- int m_draining_schedule;
- std::string m_draining_schedule_str;
-+ bool m_can_cancel; // Whether condor_defrag can also cancel draining early.
-
- time_t m_last_poll;
-
-@@ -70,6 +71,9 @@ class Defrag: public Service {
- ClassAd m_public_ad;
- DefragStats m_stats;
-
-+ bool drain(const ClassAd &startd_ad);
-+ bool cancel_drain(const ClassAd &startd_ad);
-+
- void validateExpr(char const *constraint,char const *constraint_source);
- bool queryMachines(char const *constraint,char const *constraint_source,ClassAdList &startdAds);
-
diff --git a/condor_pid_namespaces_v7.patch b/condor_pid_namespaces_v7.patch
deleted file mode 100644
index 810a4b4..0000000
--- a/condor_pid_namespaces_v7.patch
+++ /dev/null
@@ -1,305 +0,0 @@
-diff --git a/src/condor_daemon_core.V6/condor_daemon_core.h b/src/condor_daemon_core.V6/condor_daemon_core.h
-index 3562577..d9d1736 100644
---- a/src/condor_daemon_core.V6/condor_daemon_core.h
-+++ b/src/condor_daemon_core.V6/condor_daemon_core.h
-@@ -192,6 +192,7 @@ struct FamilyInfo {
- gid_t* group_ptr;
- #endif
- const char* glexec_proxy;
-+ bool want_pid_namespace;
- const char* cgroup;
-
- FamilyInfo() {
-@@ -201,6 +202,7 @@ struct FamilyInfo {
- group_ptr = NULL;
- #endif
- glexec_proxy = NULL;
-+ want_pid_namespace = false;
- cgroup = NULL;
- }
- };
-diff --git a/src/condor_daemon_core.V6/daemon_core.cpp b/src/condor_daemon_core.V6/daemon_core.cpp
-index e058fd3..74fe8a0 100644
---- a/src/condor_daemon_core.V6/daemon_core.cpp
-+++ b/src/condor_daemon_core.V6/daemon_core.cpp
-@@ -34,6 +34,7 @@
- #if HAVE_CLONE
- #include <sched.h>
- #include <sys/syscall.h>
-+#include <sys/mount.h>
- #endif
-
- #if HAVE_RESOLV_H && HAVE_DECL_RES_INIT
-@@ -112,6 +113,10 @@ CRITICAL_SECTION Big_fat_mutex; // coarse grained mutex for debugging purposes
- #include <sched.h>
- #endif
-
-+#if !defined(CLONE_NEWPID)
-+#define CLONE_NEWPID 0x20000000
-+#endif
-+
- static const char* EMPTY_DESCRIP = "<NULL>";
-
- // special errno values that may be returned from Create_Process
-@@ -6566,7 +6571,9 @@ public:
- m_affinity_mask(affinity_mask),
- m_fs_remap(fs_remap),
- m_wrote_tracking_gid(false),
-- m_no_dprintf_allowed(false)
-+ m_no_dprintf_allowed(false),
-+ m_clone_newpid_pid(-1),
-+ m_clone_newpid_ppid(-1)
- {
- }
-
-@@ -6627,6 +6634,10 @@ private:
- bool m_wrote_tracking_gid;
- bool m_no_dprintf_allowed;
- priv_state m_priv_state;
-+ pid_t m_clone_newpid_pid;
-+ pid_t m_clone_newpid_ppid;
-+
-+ pid_t fork(int);
- };
-
- enum {
-@@ -6650,7 +6661,19 @@ pid_t CreateProcessForkit::clone_safe_getpid() {
- // the pid of the parent process (presumably due to internal
- // caching in libc). Therefore, use the syscall to get
- // the answer directly.
-- return syscall(SYS_getpid);
-+
-+ int retval = syscall(SYS_getpid);
-+
-+ // If we were fork'd with CLONE_NEWPID, we think our PID is 1.
-+ // In this case, ask the parent!
-+ if (retval == 1) {
-+ if (m_clone_newpid_pid == -1) {
-+ EXCEPT("getpid is 1!");
-+ }
-+ retval = m_clone_newpid_pid;
-+ }
-+
-+ return retval;
- #else
- return ::getpid();
- #endif
-@@ -6659,12 +6682,115 @@ pid_t CreateProcessForkit::clone_safe_getppid() {
- #if HAVE_CLONE
- // See above comment for clone_safe_getpid() for explanation of
- // why we need to do this.
-- return syscall(SYS_getppid);
-+
-+ int retval = syscall(SYS_getppid);
-+
-+ // If ppid is 0, then either Condor is init (DEAR GOD) or we
-+ // were created with CLONE_NEWPID; ask the parent!
-+ if (retval == 0) {
-+ if (m_clone_newpid_ppid == -1) {
-+ EXCEPT("getppid is 0!");
-+ }
-+ retval = m_clone_newpid_ppid;
-+ }
-+
-+ return retval;
- #else
- return ::getppid();
- #endif
- }
-
-+/**
-+ * fork allows one to use certain clone syscall flags, but provides more
-+ * familiar POSIX fork semantics.
-+ * NOTES:
-+ * - We whitelist the flags you are allowed to pass. Currently supported:
-+ * - CLONE_NEWPID. Implies CLONE_NEWNS.
-+ * If the clone succeeds but the remount fails, the child calls _exit(1),
-+ * but the parent will return successfully.
-+ * It would be a simple fix to have the parent return the failure, if
-+ * someone desired.
-+ * Flags are whitelisted to help us adhere to the fork-like semantics (no
-+ * shared memory between parent and child, for example). If you give other
-+ * flags, they are silently ignored.
-+ * - man pages indicate that clone on i386 is only fully functional when used
-+ * via ASM, not the vsyscall interface. This doesn't appear to be relevant
-+ * to this particular use case.
-+ * - To avoid linking with pthreads (or copy/pasting lots of glibc code), I
-+ * don't include integration with threads. This means various threading
-+ * calls in the child may not function correctly (pre-exec; post-exec
-+ * should be fine), and pthreads might not notice when the child exits.
-+ * Traditional POSIX calls like wait will still function because the
-+ * parent will receive the SIGCHLD.
-+ * This is simple to fix if someone desired, but I'd mostly rather not link
-+ * with pthreads.
-+ */
-+
-+#define ALLOWED_FLAGS (SIGCHLD | CLONE_NEWPID | CLONE_NEWNS )
-+
-+pid_t CreateProcessForkit::fork(int flags) {
-+
-+ // If you don't need any fancy flags, just do the old boring POSIX call
-+ if (flags == 0) {
-+ return ::fork();
-+ }
-+
-+#if HAVE_CLONE
-+
-+ int rw[2]; // Communication pipes for the CLONE_NEWPID case.
-+
-+ flags |= SIGCHLD; // The only necessary flag.
-+ if (flags & CLONE_NEWPID) {
-+ flags |= CLONE_NEWNS;
-+ if (pipe(rw)) {
-+ EXCEPT("UNABLE TO CREATE PIPE.");
-+ }
-+ }
-+
-+ // fork as root if we have our fancy flags.
-+ priv_state orig_state = set_priv(PRIV_ROOT);
-+ int retval = syscall(SYS_clone, ALLOWED_FLAGS & flags, 0, NULL, NULL);
-+
-+ // Child
-+ if ((retval == 0) && (flags & CLONE_NEWPID)) {
-+
-+ // If we should have forked as non-root, make things in life final.
-+ set_priv(orig_state);
-+
-+ if (full_read(rw[0], &m_clone_newpid_ppid, sizeof(pid_t)) != sizeof(pid_t)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+ if (full_read(rw[0], &m_clone_newpid_pid, sizeof(pid_t)) != sizeof(pid_t)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+
-+ // Parent
-+ } else if (retval > 0) {
-+ set_priv(orig_state);
-+ pid_t ppid = getpid(); // We are parent, so don't need clone_safe_pid.
-+ if (full_write(rw[1], &ppid, sizeof(ppid)) != sizeof(ppid)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+ if (full_write(rw[1], &retval, sizeof(ppid)) != sizeof(ppid)) {
-+ EXCEPT("Unable to write into pipe.");
-+ }
-+ }
-+ // retval=-1 falls through here.
-+ if (flags & CLONE_NEWPID) {
-+ close(rw[0]);
-+ close(rw[1]);
-+ }
-+ return retval;
-+
-+#else
-+
-+ // Note we silently ignore flags if there's no clone on the platform.
-+ return ::fork();
-+
-+#endif
-+
-+}
-+
- pid_t CreateProcessForkit::fork_exec() {
- pid_t newpid;
-
-@@ -6736,7 +6862,11 @@ pid_t CreateProcessForkit::fork_exec() {
- }
- #endif /* HAVE_CLONE */
-
-- newpid = fork();
-+ int fork_flags = 0;
-+ if (m_family_info) {
-+ fork_flags |= m_family_info->want_pid_namespace ? CLONE_NEWPID : 0;
-+ }
-+ newpid = this->fork(fork_flags);
- if( newpid == 0 ) {
- // in child
- enterCreateProcessChild(this);
-diff --git a/src/condor_starter.V6.1/vanilla_proc.cpp b/src/condor_starter.V6.1/vanilla_proc.cpp
-index 044cb10..8528ca7 100644
---- a/src/condor_starter.V6.1/vanilla_proc.cpp
-+++ b/src/condor_starter.V6.1/vanilla_proc.cpp
-@@ -360,6 +360,24 @@ VanillaProc::StartJob()
- }
- }
-
-+#if defined(LINUX)
-+ // On Linux kernel 2.6.24 and later, we can give each
-+ // job its own PID namespace
-+ if (param_boolean("USE_PID_NAMESPACES", false)) {
-+ if (!can_switch_ids()) {
-+ EXCEPT("USE_PID_NAMESPACES enabled, but can't perform this "
-+ "call in Linux unless running as root.");
-+ }
-+ fi.want_pid_namespace = true;
-+ if (!fs_remap) {
-+ fs_remap = new FilesystemRemap();
-+ }
-+ fs_remap->RemapProc();
-+ }
-+ dprintf(D_FULLDEBUG, "PID namespace option: %s\n", fi.want_pid_namespace ? "true" : "false");
-+#endif
-+
-+
- // have OsProc start the job
- //
- int retval = OsProc::StartJob(&fi, fs_remap);
-diff --git a/src/condor_utils/filesystem_remap.cpp b/src/condor_utils/filesystem_remap.cpp
-index e0f2e61..735c744 100644
---- a/src/condor_utils/filesystem_remap.cpp
-+++ b/src/condor_utils/filesystem_remap.cpp
-@@ -29,7 +29,8 @@
-
- FilesystemRemap::FilesystemRemap() :
- m_mappings(),
-- m_mounts_shared()
-+ m_mounts_shared(),
-+ m_remap_proc(false)
- {
- ParseMountinfo();
- }
-@@ -120,6 +121,9 @@ int FilesystemRemap::PerformMappings() {
- break;
- }
- }
-+ if ((!retval) && m_remap_proc) {
-+ retval = mount("proc", "/proc", "proc", 0, NULL);
-+ }
- #endif
- return retval;
- }
-@@ -148,6 +152,10 @@ std::string FilesystemRemap::RemapDir(std::string target) {
- return target;
- }
-
-+void FilesystemRemap::RemapProc() {
-+ m_remap_proc = true;
-+}
-+
- /*
- Sample mountinfo contents (from http://www.kernel.org/doc/Documentation/filesystems/proc.txt):
- 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
-diff --git a/src/condor_utils/filesystem_remap.h b/src/condor_utils/filesystem_remap.h
-index 5e9362d..2e17476 100644
---- a/src/condor_utils/filesystem_remap.h
-+++ b/src/condor_utils/filesystem_remap.h
-@@ -74,6 +74,12 @@ public:
- */
- std::string RemapFile(std::string);
-
-+ /**
-+ * Indicate that we should remount /proc in the child process.
-+ * Necessary for PID namespaces.
-+ */
-+ void RemapProc();
-+
- private:
-
- /**
-@@ -89,6 +95,7 @@ private:
- std::list<pair_strings> m_mappings;
- std::list<pair_str_bool> m_mounts_shared;
- std::list<pair_strings> m_mounts_autofs;
-+ bool m_remap_proc;
-
- };
- #endif
diff --git a/cream_sl6_build.patch b/cream_sl6_build.patch
deleted file mode 100644
index debc220..0000000
--- a/cream_sl6_build.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/externals/bundles/cream/1.12.1_14/CMakeLists.txt
-+++ b/externals/bundles/cream/1.12.1_14/CMakeLists.txt
-@@ -250,7 +250,7 @@ if ( NOT PROPER )
-
- else( NOT PROPER )
-
-- if ( ${SYSTEM_NAME} MATCHES "rhel6" OR ${SYSTEM_NAME} MATCHES "centos6")
-+ if ( ${SYSTEM_NAME} MATCHES "rhel6" OR ${SYSTEM_NAME} MATCHES "centos6" OR ${SYSTEM_NAME} MATCHES "sl6")
- find_multiple("glite_ce_cream_client_soap;glite_ce_cream_client_util;glite_security_gsoap_plugin_2716_cxx;glite_security_gss;gridsite" CREAM_FOUND )
- else()
- find_multiple("glite_ce_cream_client_soap;glite_ce_cream_client_util;glite_security_gsoap_plugin_2713_cxx;glite_security_gss;gridsite" CREAM_FOUND )
diff --git a/d10e85eada71599caebb56fde50dd42bbbf6b65d.patch b/d10e85eada71599caebb56fde50dd42bbbf6b65d.patch
deleted file mode 100644
index f0ab9a6..0000000
--- a/d10e85eada71599caebb56fde50dd42bbbf6b65d.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From d10e85eada71599caebb56fde50dd42bbbf6b65d Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Tue, 28 Jul 2015 21:24:36 -0500
-Subject: [PATCH] Allow compilation with both old and new Globus version. #5180
-
-The signature of globus_gsi_cred_write_proxy changed from
-
-globus_result_t
-globus_gsi_cred_write_proxy(globus_l_gsi_cred_handle_s*, char*)
-
-to
-
-globus_result_t
-globus_gsi_cred_write_proxy(globus_l_gsi_cred_handle_s*, const char*)
-
-This causes a function pointer assignment to fail. Since we want to support
-both the old and new interface, simply reinterpret_cast the pointer to the
-correct type.
-
-Tested compilation against both globus-gsi-credential 7.7 and 7.9.
----
- src/condor_utils/globus_utils.cpp | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/condor_utils/globus_utils.cpp b/src/condor_utils/globus_utils.cpp
-index 2027e3e..1810b74 100644
---- a/src/condor_utils/globus_utils.cpp
-+++ b/src/condor_utils/globus_utils.cpp
-@@ -354,7 +354,7 @@ activate_globus_gsi( void )
- globus_gsi_cred_handle_destroy_ptr = globus_gsi_cred_handle_destroy;
- globus_gsi_cred_handle_init_ptr = globus_gsi_cred_handle_init;
- globus_gsi_cred_read_proxy_ptr = globus_gsi_cred_read_proxy;
-- globus_gsi_cred_write_proxy_ptr = globus_gsi_cred_write_proxy;
-+ globus_gsi_cred_write_proxy_ptr = reinterpret_cast<globus_result_t (*)(globus_l_gsi_cred_handle_s*, char*)>(globus_gsi_cred_write_proxy);
- globus_gsi_proxy_assemble_cred_ptr = globus_gsi_proxy_assemble_cred;
- globus_gsi_proxy_create_req_ptr = globus_gsi_proxy_create_req;
- globus_gsi_proxy_handle_attrs_destroy_ptr = globus_gsi_proxy_handle_attrs_destroy;
diff --git a/doc-conf.patch b/doc-conf.patch
deleted file mode 100644
index 9358225..0000000
--- a/doc-conf.patch
+++ /dev/null
@@ -1,11 +0,0 @@
-diff --git a/docs/conf.py b/docs/conf.py
-index c1e082031a..8a044532dc 100644
---- a/docs/conf.py
-+++ b/docs/conf.py
-@@ -450,6 +450,5 @@ def modify_signature(app, what, name, obj, options, signature, return_annotation
- return signature, return_annotation
-
- def setup(app):
-- app.add_stylesheet('css/htcondor-manual.css')
- app.connect('autodoc-process-docstring', modify_docstring)
- app.connect('autodoc-process-signature', modify_signature)
diff --git a/dprintf_syslog.patch b/dprintf_syslog.patch
deleted file mode 100644
index c567266..0000000
--- a/dprintf_syslog.patch
+++ /dev/null
@@ -1,324 +0,0 @@
-From cf86dbaf75f4c81e406036b6695c717cf4fd1331 Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Wed, 24 Oct 2012 20:28:09 -0500
-Subject: [PATCH 1/3] First attempt at syslog code for dprintf.
-
----
- src/condor_includes/dprintf_internal.h | 11 ++++-
- src/condor_utils/dprintf_setup.cpp | 16 ++++++
- src/condor_utils/dprintf_syslog.cpp | 19 +++++++
- src/condor_utils/dprintf_syslog.h | 82 ++++++++++++++++++++++++++++++++
- 4 files changed, 127 insertions(+), 1 deletions(-)
- create mode 100644 src/condor_utils/dprintf_syslog.cpp
- create mode 100644 src/condor_utils/dprintf_syslog.h
-
-diff --git a/src/condor_includes/dprintf_internal.h b/src/condor_includes/dprintf_internal.h
-index c26a886..b0ecf48 100644
---- a/src/condor_includes/dprintf_internal.h
-+++ b/src/condor_includes/dprintf_internal.h
-@@ -17,6 +17,9 @@
- *
- ***************************************************************/
-
-+#ifndef __dprintf_internal_h_
-+#define __dprintf_internal_h_
-+
- // This #define doesn't actually do anything. This value needs to be
- // defined before any system header files are included in the source file
- // to have any effect.
-@@ -27,6 +30,7 @@ typedef _Longlong int64_t;
- #else
- #include <stdint.h>
- #endif
-+#include <ctime>
-
- struct DebugFileInfo;
-
-@@ -37,7 +41,8 @@ enum DebugOutput
- FILE_OUT,
- STD_OUT,
- STD_ERR,
-- OUTPUT_DEBUG_STR
-+ OUTPUT_DEBUG_STR,
-+ SYSLOG
- };
-
- /* future
-@@ -70,6 +75,7 @@ struct DebugFileInfo
- bool want_truncate;
- bool accepts_all;
- bool dont_panic;
-+ void *userData;
- DebugFileInfo() :
- outputTarget(FILE_OUT),
- debugFP(0),
-@@ -79,6 +85,7 @@ struct DebugFileInfo
- want_truncate(false),
- accepts_all(false),
- dont_panic(false),
-+ userData(NULL),
- dprintfFunc(NULL)
- {}
- DebugFileInfo(const DebugFileInfo &dfi) : outputTarget(dfi.outputTarget), debugFP(NULL), choice(dfi.choice),
-@@ -115,3 +122,5 @@ void _dprintf_global_func(int cat_and_flags, int hdr_flags, time_t clock_now, st
- void dprintf_to_outdbgstr(int cat_and_flags, int hdr_flags, time_t clock_now, struct tm *tm, const char* message, DebugFileInfo* dbgInfo);
- #endif
-
-+#endif
-+
-diff --git a/src/condor_utils/dprintf_setup.cpp b/src/condor_utils/dprintf_setup.cpp
-index 440ef98..b1ccd3a 100644
---- a/src/condor_utils/dprintf_setup.cpp
-+++ b/src/condor_utils/dprintf_setup.cpp
-@@ -24,6 +24,7 @@
- #include "condor_sys_types.h"
- #include "condor_debug.h"
- #include "dprintf_internal.h"
-+#include "dprintf_syslog.h"
- #include "condor_constants.h"
-
- #if HAVE_BACKTRACE
-@@ -134,6 +135,13 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- it->dprintfFunc = dprintf_to_outdbgstr;
- }
- #endif
-+ else if (logPath == "SYSLOG")
-+ {
-+ // Intention is to eventually user-selected
-+ it->dprintfFunc = DprintfSyslog::Log;
-+ it->outputTarget = SYSLOG;
-+ it->userData = static_cast<void*>(DprintfSyslogFactory::NewLog(LOG_DAEMON));
-+ }
- else
- {
- it->outputTarget = FILE_OUT;
-@@ -211,6 +219,14 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
-
- if(debugLogsOld)
- {
-+
-+ for (it = debugLogsOld->begin(); it != debugLogsOld->end(); it++)
-+ {
-+ if ((it->outputTarget == SYSLOG) && (it->userData))
-+ {
-+ delete static_cast<DprintfSyslog*>(it->userData);
-+ }
-+ }
- delete debugLogsOld;
- }
-
-diff --git a/src/condor_utils/dprintf_syslog.cpp b/src/condor_utils/dprintf_syslog.cpp
-new file mode 100644
-index 0000000..d0189f8
---- /dev/null
-+++ b/src/condor_utils/dprintf_syslog.cpp
-@@ -0,0 +1,19 @@
-+
-+#include "condor_common.h"
-+#include "condor_debug.h"
-+#include "dprintf_syslog.h"
-+
-+DprintfSyslogFactory * DprintfSyslogFactory::m_singleton = NULL;
-+
-+void
-+DprintfSyslog::Log(const char * message)
-+{
-+ syslog(LOG_INFO, "%s", message);
-+}
-+
-+DprintfSyslog::~DprintfSyslog()
-+{
-+ DprintfSyslogFactory &factory = DprintfSyslogFactory::getInstance();
-+ factory.DecCount();
-+}
-+
-diff --git a/src/condor_utils/dprintf_syslog.h b/src/condor_utils/dprintf_syslog.h
-new file mode 100644
-index 0000000..a10d42d
---- /dev/null
-+++ b/src/condor_utils/dprintf_syslog.h
-@@ -0,0 +1,82 @@
-+
-+#include "dprintf_internal.h"
-+#include <syslog.h>
-+
-+class DprintfSyslogFactory;
-+
-+class DprintfSyslog
-+{
-+ friend class DprintfSyslogFactory;
-+
-+public:
-+ static void Log(int, int, time_t, struct tm*, const char * message, DebugFileInfo* info)
-+ {
-+ if (!info || !info->userData)
-+ {
-+ return;
-+ }
-+ DprintfSyslog * logger = static_cast<DprintfSyslog*>(info->userData);
-+ logger->Log(message);
-+ }
-+
-+ ~DprintfSyslog();
-+
-+protected:
-+ DprintfSyslog() {}
-+
-+private:
-+ void Log(const char *);
-+};
-+
-+class DprintfSyslogFactory
-+{
-+ friend class DprintfSyslog;
-+
-+public:
-+ static DprintfSyslog *NewLog(int facility)
-+ {
-+ DprintfSyslogFactory & factory = getInstance();
-+ return factory.NewDprintfSyslog(facility);
-+ }
-+
-+protected:
-+ void DecCount()
-+ {
-+ m_count--;
-+ if (m_count == 0)
-+ {
-+ closelog();
-+ }
-+ }
-+
-+ static DprintfSyslogFactory & getInstance()
-+ {
-+ if (!m_singleton)
-+ {
-+ m_singleton = new DprintfSyslogFactory();
-+ }
-+ return *m_singleton;
-+ }
-+
-+private:
-+ DprintfSyslog * NewDprintfSyslog(int facility)
-+ {
-+ DprintfSyslog * logger = new DprintfSyslog();
-+ if (!logger) return NULL;
-+ if (m_count == 0)
-+ {
-+ openlog("condor", LOG_PID|LOG_NDELAY, facility);
-+ }
-+ m_count++;
-+ return logger;
-+ }
-+
-+ DprintfSyslogFactory() :
-+ m_count(0)
-+ {
-+ }
-+
-+ static DprintfSyslogFactory *m_singleton;
-+
-+ unsigned int m_count;
-+};
---
-1.7.4.1
-
-
-From 5b17f58b41722735bf1a7da34c728bfe3114479b Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Wed, 24 Oct 2012 20:46:52 -0500
-Subject: [PATCH 2/3] Don't provide an ident - it defaults to the binary name, which is more useful anyway.
-
----
- src/condor_utils/dprintf_syslog.h | 2 +-
- 1 files changed, 1 insertions(+), 1 deletions(-)
-
-diff --git a/src/condor_utils/dprintf_syslog.h b/src/condor_utils/dprintf_syslog.h
-index a10d42d..364a228 100644
---- a/src/condor_utils/dprintf_syslog.h
-+++ b/src/condor_utils/dprintf_syslog.h
-@@ -65,7 +65,7 @@ private:
- if (!logger) return NULL;
- if (m_count == 0)
- {
-- openlog("condor", LOG_PID|LOG_NDELAY, facility);
-+ openlog(NULL, LOG_PID|LOG_NDELAY, facility);
- }
- m_count++;
- return logger;
---
-1.7.4.1
-
-
-From d082fcc410b3729241dbe82912f526d51a96a2f5 Mon Sep 17 00:00:00 2001
-From: Brian Bockelman <bbockelm(a)cse.unl.edu>
-Date: Tue, 30 Oct 2012 18:15:21 -0500
-Subject: [PATCH 3/3] Prevent dprintf_syslog from compiling on Windows.
-
----
- src/condor_utils/CMakeLists.txt | 4 +++-
- src/condor_utils/dprintf_setup.cpp | 7 ++++++-
- 2 files changed, 9 insertions(+), 2 deletions(-)
-
-diff --git a/src/condor_utils/CMakeLists.txt b/src/condor_utils/CMakeLists.txt
-index 7ce1fd6..7de76fb 100644
---- a/src/condor_utils/CMakeLists.txt
-+++ b/src/condor_utils/CMakeLists.txt
-@@ -84,10 +84,12 @@ endif()
- ##################################################
- # condorapi & tests
-
--condor_selective_glob("my_username.*;condor_event.*;file_sql.*;misc_utils.*;user_log_header.*;write_user_log*;read_user_log*;iso_dates.*;file_lock.*;format_time.*;utc_time.*;stat_wrapper*;log_rotate.*;dprintf*;sig_install.*;basename.*;mkargv.*;except.*;strupr.*;lock_file.*;rotate_file.*;strcasestr.*;strnewp.*;condor_environ.*;setsyscalls.*;passwd_cache.*;uids.c*;chomp.*;subsystem_info.*;my_subsystem.*;distribution.*;my_distribution.*;get_random_num.*;libcondorapi_stubs.*;seteuid.*;setegid.*;condor_open.*;classad_merge.*;condor_attributes.*;simple_arg.*;compat_classad.*;compat_classad_util.*;classad_oldnew.*;condor_snutils.*;stringSpace.*;string_list.*;stl_string_utils.*;MyString.*;condor_xml_classads.*;directory*;param_functions.*;filename_tools_cpp.*;filename_tools.*;stat_info.*;${SAFE_OPEN_SRC}" ApiSrcs)
-+condor_selective_glob("my_username.*;condor_event.*;file_sql.*;misc_utils.*;user_log_header.*;write_user_log*;read_user_log*;iso_dates.*;file_lock.*;format_time.*;utc_time.*;stat_wrapper*;log_rotate.*;dprintf.cpp;dprintf_c*;dprintf_setup.cpp;sig_install.*;basename.*;mkargv.*;except.*;strupr.*;lock_file.*;rotate_file.*;strcasestr.*;strnewp.*;condor_environ.*;setsyscalls.*;passwd_cache.*;uids.c*;chomp.*;subsystem_info.*;my_subsystem.*;distribution.*;my_distribution.*;get_random_num.*;libcondorapi_stubs.*;seteuid.*;setegid.*;condor_open.*;classad_merge.*;condor_attributes.*;simple_arg.*;compat_classad.*;compat_classad_util.*;classad_oldnew.*;condor_snutils.*;stringSpace.*;string_list.*;stl_string_utils.*;MyString.*;condor_xml_classads.*;directory*;param_functions.*;filename_tools_cpp.*;filename_tools.*;stat_info.*;${SAFE_OPEN_SRC}" ApiSrcs)
- if(WINDOWS)
- condor_selective_glob("directory.WINDOWS.*;directory_util.*;dynuser.WINDOWS.*;lock_file.WINDOWS.*;lsa_mgr.*;my_dynuser.*;ntsysinfo.WINDOWS.*;posix.WINDOWS.*;stat.WINDOWS.*;store_cred.*;token_cache.WINDOWS.*;truncate.WINDOWS.*" ApiSrcs)
- set_property( TARGET utils_genparams PROPERTY FOLDER "libraries" )
-+else()
-+ condor_selective_glob("dprintf_syslog*" ApiSrcs)
- endif()
-
- condor_static_lib( condorapi "${ApiSrcs}" )
-diff --git a/src/condor_utils/dprintf_setup.cpp b/src/condor_utils/dprintf_setup.cpp
-index b1ccd3a..b5938e2 100644
---- a/src/condor_utils/dprintf_setup.cpp
-+++ b/src/condor_utils/dprintf_setup.cpp
-@@ -24,7 +24,9 @@
- #include "condor_sys_types.h"
- #include "condor_debug.h"
- #include "dprintf_internal.h"
-+#if !defined(WIN32)
- #include "dprintf_syslog.h"
-+#endif
- #include "condor_constants.h"
-
- #if HAVE_BACKTRACE
-@@ -134,7 +136,7 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- it->outputTarget = OUTPUT_DEBUG_STR;
- it->dprintfFunc = dprintf_to_outdbgstr;
- }
--#endif
-+#else
- else if (logPath == "SYSLOG")
- {
- // Intention is to eventually user-selected
-@@ -142,6 +144,7 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- it->outputTarget = SYSLOG;
- it->userData = static_cast<void*>(DprintfSyslogFactory::NewLog(LOG_DAEMON));
- }
-+#endif
- else
- {
- it->outputTarget = FILE_OUT;
-@@ -224,7 +227,9 @@ void dprintf_set_outputs(const struct dprintf_output_settings *p_info, int c_inf
- {
- if ((it->outputTarget == SYSLOG) && (it->userData))
- {
-+#if !defined(WIN32)
- delete static_cast<DprintfSyslog*>(it->userData);
-+#endif
- }
- }
- delete debugLogsOld;
---
-1.7.4.1
-
diff --git a/glexec_privsep_helper.patch b/glexec_privsep_helper.patch
deleted file mode 100644
index c2b664e..0000000
--- a/glexec_privsep_helper.patch
+++ /dev/null
@@ -1,19 +0,0 @@
-commit 3baf81cbdd3c86594382027cd5d075ca036da78b
-Author: Ben Cotton <bcotton(a)fedoraproject.org>
-Date: Thu Mar 23 20:34:00 2017 -0400
-
- Fix a build issue for Fedora
-
-diff --git a/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp b/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp
-index d4bb589..1093273 100644
---- a/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp
-+++ b/src/condor_starter.V6.1/glexec_privsep_helper.linux.cpp
-@@ -418,7 +418,7 @@ GLExecPrivSepHelper::create_process(const char* path,
- if( !retry ) {
- // return the most recent glexec error output
- if( error_msg ) {
-- error_msg->formatstr_cat(glexec_error_msg.Value());
-+ error_msg->formatstr_cat("%s", glexec_error_msg.Value());
- }
- return 0;
- }
diff --git a/hcc-condor-build b/hcc-condor-build
deleted file mode 100755
index 4100113..0000000
--- a/hcc-condor-build
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/python
-
-import os
-import sys
-import shutil
-import optparse
-
-def prep_dirs(base_dir):
- for i in ["BUILD", "INSTALL", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
- new_dir = os.path.join(base_dir, i)
- if not os.path.exists(new_dir):
- os.makedirs(new_dir)
-
-def prep_source(base_dir):
- source_dir = os.path.join(base_dir, "_build", "SOURCES")
- for file in os.listdir(base_dir):
- if file == "_build":
- continue
- full_name = os.path.join(base_dir, file)
- if not os.path.isfile(full_name):
- continue
- shutil.copy(full_name, os.path.join(source_dir, file))
-
-def prepare_condor_tarball(build_dir, source_dir, branch):
- cur_dir = os.getcwd()
- tarball_dir = os.path.join(build_dir, "SOURCES")
- fd = open(os.path.join(tarball_dir, "condor.tar.gz"), "w")
- fdnum = fd.fileno()
- try:
- os.chdir(source_dir)
- pid = os.fork()
- if not pid:
- try:
- os.dup2(fdnum, 1)
- os.execvp("/bin/sh", ["sh", "-c", "git archive %s | gzip -7" % branch])
- finally:
- os._exit(1)
- else:
- (pid, status) = os.waitpid(pid, 0)
- if status:
- raise Exception("git archive failed")
- finally:
- os.chdir(cur_dir)
-
-def get_rpmbuild_defines(results_dir):
- results_dir = os.path.abspath(results_dir)
- defines = []
- defines += ["--define=_topdir %s" % results_dir]
- return defines
-
-def parse_opts():
- parser = optparse.OptionParser()
- parser.add_option("-s", "--source-dir", help="Location of the Condor git repo clone.", dest="source_dir", default="~/projects/condor")
- parser.add_option("-b", "--branch", help="Name of the git branch to use for the condor build.", dest="branch", default="master")
-
- opts, args = parser.parse_args()
-
- opts.source_dir = os.path.expanduser(opts.source_dir)
-
- return args, opts
-
-def main():
-
- args, opts = parse_opts()
-
- if len(args) != 2:
- print "Usage: hcc_make_condor <action> <directory>"
- print "Valid commands are 'build', 'prep', and 'srpm'"
- print "<directory> should point at the fedpkg-condor-hcc clone."
- return 1
-
- build_dir = os.path.join(args[1], "_build")
-
- prep_dirs(build_dir)
- defines = get_rpmbuild_defines(build_dir)
- prep_source(args[1])
-
- prepare_condor_tarball(build_dir, opts.source_dir, opts.branch)
-
- if args[0] == 'build':
- os.execvp("rpmbuild", ["rpmbuild"] + defines + ["-ba", "condor.spec"])
- elif args[0] == 'srpm':
- os.execvp("rpmbuild", ["rpmbuild"] + defines + ["-bs", "condor.spec"])
- elif args[0] == "prep":
- os.execvp("rpmbuild", ["rpmbuild"] + defines + ["-bp", "condor.spec"])
- else:
- print "Unknown action: %s" % args[0]
-
- return 1
-
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/libdl.patch b/libdl.patch
deleted file mode 100644
index 6278707..0000000
--- a/libdl.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-diff --git a/src/gt2_gahp/CMakeLists.txt b/src/gt2_gahp/CMakeLists.txt
-index 1e3c2f2090..41c517dc3e 100644
---- a/src/gt2_gahp/CMakeLists.txt
-+++ b/src/gt2_gahp/CMakeLists.txt
-@@ -20,7 +20,7 @@
- if (HAVE_EXT_GLOBUS)
-
- condor_exe( gahp_server "gahp_server.cpp;my_ez.cpp" ${C_SBIN}
-- "${GLOBUS_GRID_UNIVERSE_GT2};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${KRB5_FOUND};${OPENSSL_FOUND}" OFF )
-+ "${GLOBUS_GRID_UNIVERSE_GT2};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${KRB5_FOUND};${OPENSSL_FOUND};${HAVE_LIBDL}" OFF )
-
- else()
-
-diff --git a/src/nordugrid_gahp/CMakeLists.txt b/src/nordugrid_gahp/CMakeLists.txt
-index 1fda9cd601..e91491f4ff 100644
---- a/src/nordugrid_gahp/CMakeLists.txt
-+++ b/src/nordugrid_gahp/CMakeLists.txt
-@@ -33,7 +33,7 @@ if (HAVE_EXT_GLOBUS)
- condor_exe( nordugrid_gahp
- "${HeaderFiles};${SourceFiles}"
- ${C_SBIN}
-- "${GLOBUS_GRID_UNIVERSE_NORDUGRID};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${OPENSSL_FOUND};${LDAP_FOUND}"
-+ "${GLOBUS_GRID_UNIVERSE_NORDUGRID};${GLOBUS_GRID_UNIVERSE_COMMON};${GLOBUS_FOUND};${OPENSSL_FOUND};${LDAP_FOUND};${HAVE_LIBDL}"
- OFF )
-
- # Check nordugrid_gahp's shared library dependencies and copy a
diff --git a/python-bindings-v1.patch b/python-bindings-v1.patch
deleted file mode 100644
index 7eed28c..0000000
--- a/python-bindings-v1.patch
+++ /dev/null
@@ -1,2058 +0,0 @@
-diff --git a/.gitignore b/.gitignore
-index 8fe6157..5a569ca 100644
---- a/.gitignore
-+++ b/.gitignore
-@@ -209,3 +209,4 @@ src/safefile/stamp-h1
- src/safefile/stamp-h2
- src/safefile/safe_id_range_list.h.in.tmp
- src/safefile/safe_id_range_list.h.tmp_out
-+src/condor_contrib/python-bindings/tests_tmp
-diff --git a/externals/bundles/boost/1.49.0/CMakeLists.txt b/externals/bundles/boost/1.49.0/CMakeLists.txt
-index 8608ee6..dcba24b 100644
---- a/externals/bundles/boost/1.49.0/CMakeLists.txt
-+++ b/externals/bundles/boost/1.49.0/CMakeLists.txt
-@@ -28,6 +28,9 @@ if (NOT WINDOWS)
- if (BUILD_TESTING)
- set (BOOST_COMPONENTS unit_test_framework ${BOOST_COMPONENTS})
- endif()
-+ if (WITH_PYTHON_BINDINGS)
-+ set (BOOST_COMPONENTS python ${BOOST_COMPONENTS})
-+ endif()
-
- endif()
-
-@@ -104,6 +107,9 @@ if (NOT PROPER) # AND (NOT Boost_FOUND OR SYSTEM_NOT_UP_TO_SNUFF) )
- condor_pre_external( BOOST ${BOOST_FILENAME}-p2 "lib;${INCLUDE_LOC}" "done")
-
- set(BOOST_MIN_BUILD_DEP --with-thread --with-test)
-+ if (WITH_PYTHON_BINDINGS)
-+ set(BOOST_MIN_BUILD_DEP --with-python)
-+ endif()
- set(BOOST_PATCH echo "nothing")
- set(BOOST_INSTALL echo "nothing")
- unset(BOOST_INCLUDE)
-diff --git a/src/condor_contrib/CMakeLists.txt b/src/condor_contrib/CMakeLists.txt
-index 52f14c0..41b9002 100644
---- a/src/condor_contrib/CMakeLists.txt
-+++ b/src/condor_contrib/CMakeLists.txt
-@@ -32,4 +32,5 @@ else(WANT_CONTRIB)
- add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/campus_factory")
- add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/bosco")
- add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/lark")
-+ add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/python-bindings")
- endif(WANT_CONTRIB)
-diff --git a/src/condor_contrib/python-bindings/CMakeLists.txt b/src/condor_contrib/python-bindings/CMakeLists.txt
-new file mode 100644
-index 0000000..50d8a29
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/CMakeLists.txt
-@@ -0,0 +1,26 @@
-+
-+option(WITH_PYTHON_BINDINGS "Support for HTCondor python bindings" OFF)
-+
-+if ( WITH_PYTHON_BINDINGS )
-+
-+ set ( CMAKE_LIBRARY_PATH_ORIG ${CMAKE_LIBRARY_PATH} )
-+ set ( CMAKE_LIBRARY_PATH ${CMAKE_LIBRARY_PATH} /usr/lib64 )
-+ find_package(PythonLibs REQUIRED)
-+ set ( CMAKE_LIBRARY_PATH CMAKE_LIBRARY_PATH_ORIG)
-+
-+ include_directories(${PYTHON_INCLUDE_DIRS})
-+
-+ condor_shared_lib( pyclassad classad.cpp classad_wrapper.h exprtree_wrapper.h )
-+ target_link_libraries( pyclassad classad ${PYTHON_LIBRARIES} -lboost_python )
-+
-+ condor_shared_lib( classad_module classad_module.cpp )
-+ target_link_libraries( classad_module pyclassad -lboost_python ${PYTHON_LIBRARIES} )
-+ set_target_properties(classad_module PROPERTIES PREFIX "" OUTPUT_NAME classad )
-+
-+ set_source_files_properties(dc_tool.cpp schedd.cpp PROPERTIES COMPILE_FLAGS -Wno-strict-aliasing)
-+ condor_shared_lib( condor condor.cpp collector.cpp config.cpp daemon_and_ad_types.cpp dc_tool.cpp export_headers.h old_boost.h schedd.cpp secman.cpp )
-+ target_link_libraries( condor pyclassad condor_utils -lboost_python ${PYTHON_LIBRARIES} )
-+ set_target_properties( condor PROPERTIES PREFIX "" )
-+
-+endif ( WITH_PYTHON_BINDINGS )
-+
-diff --git a/src/condor_contrib/python-bindings/classad.cpp b/src/condor_contrib/python-bindings/classad.cpp
-new file mode 100644
-index 0000000..4c2db18
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/classad.cpp
-@@ -0,0 +1,341 @@
-+
-+#include <string>
-+
-+#include <classad/source.h>
-+#include <classad/sink.h>
-+
-+#include "classad_wrapper.h"
-+#include "exprtree_wrapper.h"
-+
-+
-+ExprTreeHolder::ExprTreeHolder(const std::string &str)
-+ : m_expr(NULL), m_owns(true)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ExprTree *expr = NULL;
-+ if (!parser.ParseExpression(str, expr))
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse string into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ m_expr = expr;
-+}
-+
-+
-+ExprTreeHolder::ExprTreeHolder(classad::ExprTree *expr)
-+ : m_expr(expr), m_owns(false)
-+{}
-+
-+
-+ExprTreeHolder::~ExprTreeHolder()
-+{
-+ if (m_owns && m_expr) delete m_expr;
-+}
-+
-+
-+boost::python::object ExprTreeHolder::Evaluate() const
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ classad::Value value;
-+ if (!m_expr->Evaluate(value)) {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to evaluate expression");
-+ boost::python::throw_error_already_set();
-+ }
-+ boost::python::object result;
-+ std::string strvalue;
-+ long long intvalue;
-+ bool boolvalue;
-+ double realvalue;
-+ PyObject* obj;
-+ switch (value.GetType())
-+ {
-+ case classad::Value::BOOLEAN_VALUE:
-+ value.IsBooleanValue(boolvalue);
-+ obj = boolvalue ? Py_True : Py_False;
-+ result = boost::python::object(boost::python::handle<>(boost::python::borrowed(obj)));
-+ break;
-+ case classad::Value::STRING_VALUE:
-+ value.IsStringValue(strvalue);
-+ result = boost::python::str(strvalue);
-+ break;
-+ case classad::Value::ABSOLUTE_TIME_VALUE:
-+ case classad::Value::INTEGER_VALUE:
-+ value.IsIntegerValue(intvalue);
-+ result = boost::python::long_(intvalue);
-+ break;
-+ case classad::Value::RELATIVE_TIME_VALUE:
-+ case classad::Value::REAL_VALUE:
-+ value.IsRealValue(realvalue);
-+ result = boost::python::object(realvalue);
-+ break;
-+ case classad::Value::ERROR_VALUE:
-+ result = boost::python::object(classad::Value::ERROR_VALUE);
-+ break;
-+ case classad::Value::UNDEFINED_VALUE:
-+ result = boost::python::object(classad::Value::UNDEFINED_VALUE);
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_TypeError, "Unknown ClassAd value type.");
-+ boost::python::throw_error_already_set();
-+ }
-+ return result;
-+}
-+
-+
-+std::string ExprTreeHolder::toRepr()
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ classad::ClassAdUnParser up;
-+ std::string ad_str;
-+ up.Unparse(ad_str, m_expr);
-+ return ad_str;
-+}
-+
-+
-+std::string ExprTreeHolder::toString()
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ classad::PrettyPrint pp;
-+ std::string ad_str;
-+ pp.Unparse(ad_str, m_expr);
-+ return ad_str;
-+}
-+
-+
-+classad::ExprTree *ExprTreeHolder::get()
-+{
-+ if (!m_expr)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Cannot operate on an invalid ExprTree");
-+ boost::python::throw_error_already_set();
-+ }
-+ return m_expr->Copy();
-+}
-+
-+AttrPairToSecond::result_type AttrPairToSecond::operator()(AttrPairToSecond::argument_type p) const
-+{
-+ ExprTreeHolder holder(p.second);
-+ if (p.second->GetKind() == classad::ExprTree::LITERAL_NODE)
-+ {
-+ return holder.Evaluate();
-+ }
-+ boost::python::object result(holder);
-+ return result;
-+}
-+
-+
-+AttrPair::result_type AttrPair::operator()(AttrPair::argument_type p) const
-+{
-+ ExprTreeHolder holder(p.second);
-+ boost::python::object result(holder);
-+ if (p.second->GetKind() == classad::ExprTree::LITERAL_NODE)
-+ {
-+ result = holder.Evaluate();
-+ }
-+ return boost::python::make_tuple<std::string, boost::python::object>(p.first, result);
-+}
-+
-+
-+boost::python::object ClassAdWrapper::LookupWrap(const std::string &attr) const
-+{
-+ classad::ExprTree * expr = Lookup(attr);
-+ if (!expr)
-+ {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ if (expr->GetKind() == classad::ExprTree::LITERAL_NODE) return EvaluateAttrObject(attr);
-+ ExprTreeHolder holder(expr);
-+ boost::python::object result(holder);
-+ return result;
-+}
-+
-+boost::python::object ClassAdWrapper::LookupExpr(const std::string &attr) const
-+{
-+ classad::ExprTree * expr = Lookup(attr);
-+ if (!expr)
-+ {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ ExprTreeHolder holder(expr);
-+ boost::python::object result(holder);
-+ return result;
-+}
-+
-+boost::python::object ClassAdWrapper::EvaluateAttrObject(const std::string &attr) const
-+{
-+ classad::ExprTree *expr;
-+ if (!(expr = Lookup(attr))) {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ ExprTreeHolder holder(expr);
-+ return holder.Evaluate();
-+}
-+
-+
-+void ClassAdWrapper::InsertAttrObject( const std::string &attr, boost::python::object value)
-+{
-+ boost::python::extract<ExprTreeHolder&> expr_obj(value);
-+ if (expr_obj.check())
-+ {
-+ classad::ExprTree *expr = expr_obj().get();
-+ Insert(attr, expr);
-+ return;
-+ }
-+ boost::python::extract<classad::Value::ValueType> value_enum_obj(value);
-+ if (value_enum_obj.check())
-+ {
-+ classad::Value::ValueType value_enum = value_enum_obj();
-+ classad::Value classad_value;
-+ if (value_enum == classad::Value::ERROR_VALUE)
-+ {
-+ classad_value.SetErrorValue();
-+ classad::ExprTree *lit = classad::Literal::MakeLiteral(classad_value);
-+ Insert(attr, lit);
-+ }
-+ else if (value_enum == classad::Value::UNDEFINED_VALUE)
-+ {
-+ classad_value.SetUndefinedValue();
-+ classad::ExprTree *lit = classad::Literal::MakeLiteral(classad_value);
-+ if (!Insert(attr, lit))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ }
-+ return;
-+ }
-+ if (PyString_Check(value.ptr()))
-+ {
-+ std::string cppvalue = boost::python::extract<std::string>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ if (PyLong_Check(value.ptr()))
-+ {
-+ long long cppvalue = boost::python::extract<long long>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ if (PyInt_Check(value.ptr()))
-+ {
-+ long int cppvalue = boost::python::extract<long int>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ if (PyFloat_Check(value.ptr()))
-+ {
-+ double cppvalue = boost::python::extract<double>(value);
-+ if (!InsertAttr(attr, cppvalue))
-+ {
-+ PyErr_SetString(PyExc_AttributeError, attr.c_str());
-+ boost::python::throw_error_already_set();
-+ }
-+ return;
-+ }
-+ PyErr_SetString(PyExc_TypeError, "Unknown ClassAd value type.");
-+ boost::python::throw_error_already_set();
-+}
-+
-+
-+std::string ClassAdWrapper::toRepr()
-+{
-+ classad::ClassAdUnParser up;
-+ std::string ad_str;
-+ up.Unparse(ad_str, this);
-+ return ad_str;
-+}
-+
-+
-+std::string ClassAdWrapper::toString()
-+{
-+ classad::PrettyPrint pp;
-+ std::string ad_str;
-+ pp.Unparse(ad_str, this);
-+ return ad_str;
-+}
-+
-+std::string ClassAdWrapper::toOldString()
-+{
-+ classad::ClassAdUnParser pp;
-+ std::string ad_str;
-+ pp.SetOldClassAd(true);
-+ pp.Unparse(ad_str, this);
-+ return ad_str;
-+}
-+
-+AttrKeyIter ClassAdWrapper::beginKeys()
-+{
-+ return AttrKeyIter(begin());
-+}
-+
-+
-+AttrKeyIter ClassAdWrapper::endKeys()
-+{
-+ return AttrKeyIter(end());
-+}
-+
-+AttrValueIter ClassAdWrapper::beginValues()
-+{
-+ return AttrValueIter(begin());
-+}
-+
-+AttrValueIter ClassAdWrapper::endValues()
-+{
-+ return AttrValueIter(end());
-+}
-+
-+AttrItemIter ClassAdWrapper::beginItems()
-+{
-+ return AttrItemIter(begin());
-+}
-+
-+
-+AttrItemIter ClassAdWrapper::endItems()
-+{
-+ return AttrItemIter(end());
-+}
-+
-+
-+ClassAdWrapper::ClassAdWrapper() : classad::ClassAd() {}
-+
-+
-+ClassAdWrapper::ClassAdWrapper(const std::string &str)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ClassAd *result = parser.ParseClassAd(str);
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse string into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ CopyFrom(*result);
-+ result;
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/classad_module.cpp b/src/condor_contrib/python-bindings/classad_module.cpp
-new file mode 100644
-index 0000000..b3f1970
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/classad_module.cpp
-@@ -0,0 +1,145 @@
-+
-+#include <boost/python.hpp>
-+#include <classad/source.h>
-+
-+#include "classad_wrapper.h"
-+#include "exprtree_wrapper.h"
-+
-+using namespace boost::python;
-+
-+
-+Py_ssize_t py_len(boost::python::object const& obj)
-+{
-+ Py_ssize_t result = PyObject_Length(obj.ptr());
-+ if (PyErr_Occurred()) boost::python::throw_error_already_set();
-+ return result;
-+}
-+
-+
-+std::string ClassadLibraryVersion()
-+{
-+ std::string val;
-+ classad::ClassAdLibraryVersion(val);
-+ return val;
-+}
-+
-+
-+ClassAdWrapper *parseString(const std::string &str)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ClassAd *result = parser.ParseClassAd(str);
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse string into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ ClassAdWrapper * wrapper_result = new ClassAdWrapper();
-+ wrapper_result->CopyFrom(*result);
-+ delete result;
-+ return wrapper_result;
-+}
-+
-+
-+ClassAdWrapper *parseFile(FILE *stream)
-+{
-+ classad::ClassAdParser parser;
-+ classad::ClassAd *result = parser.ParseClassAd(stream);
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, "Unable to parse input stream into a ClassAd.");
-+ boost::python::throw_error_already_set();
-+ }
-+ ClassAdWrapper * wrapper_result = new ClassAdWrapper();
-+ wrapper_result->CopyFrom(*result);
-+ delete result;
-+ return wrapper_result;
-+}
-+
-+ClassAdWrapper *parseOld(object input)
-+{
-+ ClassAdWrapper * wrapper = new ClassAdWrapper();
-+ object input_list;
-+ extract<std::string> input_extract(input);
-+ if (input_extract.check())
-+ {
-+ input_list = input.attr("splitlines")();
-+ }
-+ else
-+ {
-+ input_list = input.attr("readlines")();
-+ }
-+ unsigned input_len = py_len(input_list);
-+ for (unsigned idx=0; idx<input_len; idx++)
-+ {
-+ object line = input_list[idx].attr("strip")();
-+ if (line.attr("startswith")("#"))
-+ {
-+ continue;
-+ }
-+ std::string line_str = extract<std::string>(line);
-+ if (!wrapper->Insert(line_str))
-+ {
-+ PyErr_SetString(PyExc_SyntaxError, line_str.c_str());
-+ throw_error_already_set();
-+ }
-+ }
-+ return wrapper;
-+}
-+
-+void *convert_to_FILEptr(PyObject* obj) {
-+ return PyFile_Check(obj) ? PyFile_AsFile(obj) : 0;
-+}
-+
-+BOOST_PYTHON_MODULE(classad)
-+{
-+ using namespace boost::python;
-+
-+ def("version", ClassadLibraryVersion, "Return the version of the linked ClassAd library.");
-+
-+ def("parse", parseString, return_value_policy<manage_new_object>());
-+ def("parse", parseFile, return_value_policy<manage_new_object>(),
-+ "Parse input into a ClassAd.\n"
-+ ":param input: A string or a file pointer.\n"
-+ ":return: A ClassAd object.");
-+ def("parseOld", parseOld, return_value_policy<manage_new_object>(),
-+ "Parse old ClassAd format input into a ClassAd.\n"
-+ ":param input: A string or a file pointer.\n"
-+ ":return: A ClassAd object.");
-+
-+ class_<ClassAdWrapper, boost::noncopyable>("ClassAd", "A classified advertisement.")
-+ .def(init<std::string>())
-+ .def("__delitem__", &ClassAdWrapper::Delete)
-+ .def("__getitem__", &ClassAdWrapper::LookupWrap)
-+ .def("eval", &ClassAdWrapper::EvaluateAttrObject, "Evaluate the ClassAd attribute to a python object.")
-+ .def("__setitem__", &ClassAdWrapper::InsertAttrObject)
-+ .def("__str__", &ClassAdWrapper::toString)
-+ .def("__repr__", &ClassAdWrapper::toRepr)
-+ // I see no way to use the SetParentScope interface safely.
-+ // Delay exposing it to python until we absolutely have to!
-+ //.def("setParentScope", &ClassAdWrapper::SetParentScope)
-+ .def("__iter__", boost::python::range(&ClassAdWrapper::beginKeys, &ClassAdWrapper::endKeys))
-+ .def("keys", boost::python::range(&ClassAdWrapper::beginKeys, &ClassAdWrapper::endKeys))
-+ .def("values", boost::python::range(&ClassAdWrapper::beginValues, &ClassAdWrapper::endValues))
-+ .def("items", boost::python::range(&ClassAdWrapper::beginItems, &ClassAdWrapper::endItems))
-+ .def("__len__", &ClassAdWrapper::size)
-+ .def("lookup", &ClassAdWrapper::LookupExpr, "Lookup an attribute and return a ClassAd expression. This method will not attempt to evaluate it to a python object.")
-+ .def("printOld", &ClassAdWrapper::toOldString, "Represent this ClassAd as a string in the \"old ClassAd\" format.")
-+ ;
-+
-+ class_<ExprTreeHolder>("ExprTree", "An expression in the ClassAd language", init<std::string>())
-+ .def("__str__", &ExprTreeHolder::toString)
-+ .def("__repr__", &ExprTreeHolder::toRepr)
-+ .def("eval", &ExprTreeHolder::Evaluate)
-+ ;
-+
-+ register_ptr_to_python< boost::shared_ptr<ClassAdWrapper> >();
-+
-+ boost::python::enum_<classad::Value::ValueType>("Value")
-+ .value("Error", classad::Value::ERROR_VALUE)
-+ .value("Undefined", classad::Value::UNDEFINED_VALUE)
-+ ;
-+
-+ boost::python::converter::registry::insert(convert_to_FILEptr,
-+ boost::python::type_id<FILE>());
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/classad_wrapper.h b/src/condor_contrib/python-bindings/classad_wrapper.h
-new file mode 100644
-index 0000000..96600c3
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/classad_wrapper.h
-@@ -0,0 +1,72 @@
-+
-+#ifndef __CLASSAD_WRAPPER_H_
-+#define __CLASSAD_WRAPPER_H_
-+
-+#include <classad/classad.h>
-+#include <boost/python.hpp>
-+#include <boost/iterator/transform_iterator.hpp>
-+
-+struct AttrPairToFirst :
-+ public std::unary_function<std::pair<std::string, classad::ExprTree*> const&, std::string>
-+{
-+ AttrPairToFirst::result_type operator()(AttrPairToFirst::argument_type p) const
-+ {
-+ return p.first;
-+ }
-+};
-+
-+typedef boost::transform_iterator<AttrPairToFirst, classad::AttrList::iterator> AttrKeyIter;
-+
-+class ExprTreeHolder;
-+
-+struct AttrPairToSecond :
-+ public std::unary_function<std::pair<std::string, classad::ExprTree*> const&, boost::python::object>
-+{
-+ AttrPairToSecond::result_type operator()(AttrPairToSecond::argument_type p) const;
-+};
-+
-+typedef boost::transform_iterator<AttrPairToSecond, classad::AttrList::iterator> AttrValueIter;
-+
-+struct AttrPair :
-+ public std::unary_function<std::pair<std::string, classad::ExprTree*> const&, boost::python::object>
-+{
-+ AttrPair::result_type operator()(AttrPair::argument_type p) const;
-+};
-+
-+typedef boost::transform_iterator<AttrPair, classad::AttrList::iterator> AttrItemIter;
-+
-+struct ClassAdWrapper : classad::ClassAd, boost::python::wrapper<classad::ClassAd>
-+{
-+ boost::python::object LookupWrap( const std::string &attr) const;
-+
-+ boost::python::object EvaluateAttrObject(const std::string &attr) const;
-+
-+ void InsertAttrObject( const std::string &attr, boost::python::object value);
-+
-+ boost::python::object LookupExpr(const std::string &attr) const;
-+
-+ std::string toRepr();
-+
-+ std::string toString();
-+
-+ std::string toOldString();
-+
-+ AttrKeyIter beginKeys();
-+
-+ AttrKeyIter endKeys();
-+
-+ AttrValueIter beginValues();
-+
-+ AttrValueIter endValues();
-+
-+ AttrItemIter beginItems();
-+
-+ AttrItemIter endItems();
-+
-+ ClassAdWrapper();
-+
-+ ClassAdWrapper(const std::string &str);
-+};
-+
-+#endif
-+
-diff --git a/src/condor_contrib/python-bindings/collector.cpp b/src/condor_contrib/python-bindings/collector.cpp
-new file mode 100644
-index 0000000..3c4fa39
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/collector.cpp
-@@ -0,0 +1,329 @@
-+
-+#include "condor_adtypes.h"
-+#include "dc_collector.h"
-+#include "condor_version.h"
-+
-+#include <memory>
-+#include <boost/python.hpp>
-+
-+#include "old_boost.h"
-+#include "classad_wrapper.h"
-+
-+using namespace boost::python;
-+
-+AdTypes convert_to_ad_type(daemon_t d_type)
-+{
-+ AdTypes ad_type = NO_AD;
-+ switch (d_type)
-+ {
-+ case DT_MASTER:
-+ ad_type = MASTER_AD;
-+ break;
-+ case DT_STARTD:
-+ ad_type = STARTD_AD;
-+ break;
-+ case DT_SCHEDD:
-+ ad_type = SCHEDD_AD;
-+ break;
-+ case DT_NEGOTIATOR:
-+ ad_type = NEGOTIATOR_AD;
-+ break;
-+ case DT_COLLECTOR:
-+ ad_type = COLLECTOR_AD;
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_ValueError, "Unknown daemon type.");
-+ throw_error_already_set();
-+ }
-+ return ad_type;
-+}
-+
-+struct Collector {
-+
-+ Collector(const std::string &pool="")
-+ : m_collectors(NULL)
-+ {
-+ if (pool.size())
-+ m_collectors = CollectorList::create(pool.c_str());
-+ else
-+ m_collectors = CollectorList::create();
-+ }
-+
-+ ~Collector()
-+ {
-+ if (m_collectors) delete m_collectors;
-+ }
-+
-+ object query(AdTypes ad_type, const std::string &constraint, list attrs)
-+ {
-+ CondorQuery query(ad_type);
-+ if (constraint.length())
-+ {
-+ query.addANDConstraint(constraint.c_str());
-+ }
-+ std::vector<const char *> attrs_char;
-+ std::vector<std::string> attrs_str;
-+ int len_attrs = py_len(attrs);
-+ if (len_attrs)
-+ {
-+ attrs_str.reserve(len_attrs);
-+ attrs_char.reserve(len_attrs+1);
-+ attrs_char[len_attrs] = NULL;
-+ for (int i=0; i<len_attrs; i++)
-+ {
-+ std::string str = extract<std::string>(attrs[i]);
-+ attrs_str.push_back(str);
-+ attrs_char[i] = attrs_str[i].c_str();
-+ }
-+ query.setDesiredAttrs(&attrs_char[0]);
-+ }
-+ ClassAdList adList;
-+
-+ QueryResult result = m_collectors->query(query, adList, NULL);
-+
-+ switch (result)
-+ {
-+ case Q_OK:
-+ break;
-+ case Q_INVALID_CATEGORY:
-+ PyErr_SetString(PyExc_RuntimeError, "Category not supported by query type.");
-+ boost::python::throw_error_already_set();
-+ case Q_MEMORY_ERROR:
-+ PyErr_SetString(PyExc_MemoryError, "Memory allocation error.");
-+ boost::python::throw_error_already_set();
-+ case Q_PARSE_ERROR:
-+ PyErr_SetString(PyExc_SyntaxError, "Query constraints could not be parsed.");
-+ boost::python::throw_error_already_set();
-+ case Q_COMMUNICATION_ERROR:
-+ PyErr_SetString(PyExc_IOError, "Failed communication with collector.");
-+ boost::python::throw_error_already_set();
-+ case Q_INVALID_QUERY:
-+ PyErr_SetString(PyExc_RuntimeError, "Invalid query.");
-+ boost::python::throw_error_already_set();
-+ case Q_NO_COLLECTOR_HOST:
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to determine collector host.");
-+ boost::python::throw_error_already_set();
-+ default:
-+ PyErr_SetString(PyExc_RuntimeError, "Unknown error from collector query.");
-+ boost::python::throw_error_already_set();
-+ }
-+
-+ list retval;
-+ ClassAd * ad;
-+ adList.Open();
-+ while ((ad = adList.Next()))
-+ {
-+ boost::shared_ptr<ClassAdWrapper> wrapper(new ClassAdWrapper());
-+ wrapper->CopyFrom(*ad);
-+ retval.append(wrapper);
-+ }
-+ return retval;
-+ }
-+
-+ object locateAll(daemon_t d_type)
-+ {
-+ AdTypes ad_type = convert_to_ad_type(d_type);
-+ return query(ad_type, "", list());
-+ }
-+
-+ object locate(daemon_t d_type, const std::string &name)
-+ {
-+ std::string constraint = ATTR_NAME " =?= \"" + name + "\"";
-+ object result = query(convert_to_ad_type(d_type), constraint, list());
-+ if (py_len(result) >= 1) {
-+ return result[0];
-+ }
-+ PyErr_SetString(PyExc_ValueError, "Unable to find daemon.");
-+ throw_error_already_set();
-+ return object();
-+ }
-+
-+ ClassAdWrapper *locateLocal(daemon_t d_type)
-+ {
-+ Daemon my_daemon( d_type, 0, 0 );
-+
-+ ClassAdWrapper *wrapper = new ClassAdWrapper();
-+ if (my_daemon.locate())
-+ {
-+ classad::ClassAd *daemonAd;
-+ if ((daemonAd = my_daemon.daemonAd()))
-+ {
-+ wrapper->CopyFrom(*daemonAd);
-+ }
-+ else
-+ {
-+ std::string addr = my_daemon.addr();
-+ if (!my_daemon.addr() || !wrapper->InsertAttr(ATTR_MY_ADDRESS, addr))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate daemon address.");
-+ throw_error_already_set();
-+ }
-+ std::string name = my_daemon.name() ? my_daemon.name() : "Unknown";
-+ if (!wrapper->InsertAttr(ATTR_NAME, name))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon name.");
-+ throw_error_already_set();
-+ }
-+ std::string hostname = my_daemon.fullHostname() ? my_daemon.fullHostname() : "Unknown";
-+ if (!wrapper->InsertAttr(ATTR_MACHINE, hostname))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon hostname.");
-+ throw_error_already_set();
-+ }
-+ std::string version = my_daemon.version() ? my_daemon.version() : "";
-+ if (!wrapper->InsertAttr(ATTR_VERSION, version))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon version.");
-+ throw_error_already_set();
-+ }
-+ const char * my_type = AdTypeToString(convert_to_ad_type(d_type));
-+ if (!my_type)
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Unable to determined daemon type.");
-+ throw_error_already_set();
-+ }
-+ std::string my_type_str = my_type;
-+ if (!wrapper->InsertAttr(ATTR_MY_TYPE, my_type_str))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert daemon type.");
-+ throw_error_already_set();
-+ }
-+ std::string cversion = CondorVersion(); std::string platform = CondorPlatform();
-+ if (!wrapper->InsertAttr(ATTR_VERSION, cversion) || !wrapper->InsertAttr(ATTR_PLATFORM, platform))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to insert HTCondor version.");
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+ else
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate local daemon");
-+ boost::python::throw_error_already_set();
-+ }
-+ return wrapper;
-+ }
-+
-+
-+ // Overloads for the Collector; can't be done in boost.python and provide
-+ // docstrings.
-+ object query0()
-+ {
-+ return query(ANY_AD, "", list());
-+ }
-+ object query1(AdTypes ad_type)
-+ {
-+ return query(ad_type, "", list());
-+ }
-+ object query2(AdTypes ad_type, const std::string &constraint)
-+ {
-+ return query(ad_type, constraint, list());
-+ }
-+
-+ // TODO: this has crappy error handling when there are multiple collectors.
-+ void advertise(list ads, const std::string &command_str="UPDATE_AD_GENERIC", bool use_tcp=false)
-+ {
-+ m_collectors->rewind();
-+ Daemon *collector;
-+ std::auto_ptr<Sock> sock;
-+
-+ int command = getCollectorCommandNum(command_str.c_str());
-+ if (command == -1)
-+ {
-+ PyErr_SetString(PyExc_ValueError, ("Invalid command " + command_str).c_str());
-+ throw_error_already_set();
-+ }
-+
-+ if (command == UPDATE_STARTD_AD_WITH_ACK)
-+ {
-+ PyErr_SetString(PyExc_NotImplementedError, "Startd-with-ack protocol is not implemented at this time.");
-+ }
-+
-+ int list_len = py_len(ads);
-+ if (!list_len)
-+ return;
-+
-+ compat_classad::ClassAd ad;
-+ while (m_collectors->next(collector))
-+ {
-+ if(!collector->locate()) {
-+ PyErr_SetString(PyExc_ValueError, "Unable to locate collector.");
-+ throw_error_already_set();
-+ }
-+ int list_len = py_len(ads);
-+ sock.reset();
-+ for (int i=0; i<list_len; i++)
-+ {
-+ ClassAdWrapper &wrapper = extract<ClassAdWrapper &>(ads[i]);
-+ ad.CopyFrom(wrapper);
-+ if (use_tcp)
-+ {
-+ if (!sock.get())
-+ sock.reset(collector->startCommand(command,Stream::reli_sock,20));
-+ else
-+ {
-+ sock->encode();
-+ sock->put(command);
-+ }
-+ }
-+ else
-+ {
-+ sock.reset(collector->startCommand(command,Stream::safe_sock,20));
-+ }
-+ int result = 0;
-+ if (sock.get()) {
-+ result += ad.put(*sock);
-+ result += sock->end_of_message();
-+ }
-+ if (result != 2) {
-+ PyErr_SetString(PyExc_ValueError, "Failed to advertise to collector");
-+ throw_error_already_set();
-+ }
-+ }
-+ sock->encode();
-+ sock->put(DC_NOP);
-+ sock->end_of_message();
-+ }
-+ }
-+
-+private:
-+
-+ CollectorList *m_collectors;
-+
-+};
-+
-+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(advertise_overloads, advertise, 1, 3);
-+
-+void export_collector()
-+{
-+ class_<Collector>("Collector", "Client-side operations for the HTCondor collector")
-+ .def(init<std::string>(":param pool: Name of collector to query; if not specified, uses the local one."))
-+ .def("query", &Collector::query0)
-+ .def("query", &Collector::query1)
-+ .def("query", &Collector::query2)
-+ .def("query", &Collector::query,
-+ "Query the contents of a collector.\n"
-+ ":param ad_type: Type of ad to return from the AdTypes enum; if not specified, uses ANY_AD.\n"
-+ ":param constraint: A constraint for the ad query; defaults to true.\n"
-+ ":param attrs: A list of attributes; if specified, the returned ads will be "
-+ "projected along these attributes.\n"
-+ ":return: A list of ads in the collector matching the constraint.")
-+ .def("locate", &Collector::locateLocal, return_value_policy<manage_new_object>())
-+ .def("locate", &Collector::locate,
-+ "Query the collector for a particular daemon.\n"
-+ ":param daemon_type: Type of daemon; must be from the DaemonTypes enum.\n"
-+ ":param name: Name of daemon to locate. If not specified, it searches for the local daemon.\n"
-+ ":return: The ad of the corresponding daemon.")
-+ .def("locateAll", &Collector::locateAll,
-+ "Query the collector for all ads of a particular type.\n"
-+ ":param daemon_type: Type of daemon; must be from the DaemonTypes enum.\n"
-+ ":return: A list of matching ads.")
-+ .def("advertise", &Collector::advertise, advertise_overloads(
-+ "Advertise a list of ClassAds into the collector.\n"
-+ ":param ad_list: A list of ClassAds.\n"
-+ ":param command: A command for the collector; defaults to UPDATE_AD_GENERIC;"
-+ " other commands, such as UPDATE_STARTD_AD, may require reduced authorization levels.\n"
-+ ":param use_tcp: When set to true, updates are sent via TCP."))
-+ ;
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/condor.cpp b/src/condor_contrib/python-bindings/condor.cpp
-new file mode 100644
-index 0000000..f4a4fd4
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/condor.cpp
-@@ -0,0 +1,25 @@
-+
-+#include <boost/python.hpp>
-+
-+#include "old_boost.h"
-+#include "export_headers.h"
-+
-+using namespace boost::python;
-+
-+
-+BOOST_PYTHON_MODULE(condor)
-+{
-+ scope().attr("__doc__") = "Utilities for interacting with the HTCondor system.";
-+
-+ py_import("classad");
-+
-+ // TODO: old boost doesn't have this; conditionally compile only one newer systems.
-+ //docstring_options local_docstring_options(true, false, false);
-+
-+ export_config();
-+ export_daemon_and_ad_types();
-+ export_collector();
-+ export_schedd();
-+ export_dc_tool();
-+ export_secman();
-+}
-diff --git a/src/condor_contrib/python-bindings/config.cpp b/src/condor_contrib/python-bindings/config.cpp
-new file mode 100644
-index 0000000..0afdfc4
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/config.cpp
-@@ -0,0 +1,60 @@
-+
-+#include "condor_common.h"
-+#include "condor_config.h"
-+#include "condor_version.h"
-+
-+#include <boost/python.hpp>
-+
-+using namespace boost::python;
-+
-+struct Param
-+{
-+ std::string getitem(const std::string &attr)
-+ {
-+ std::string result;
-+ if (!param(result, attr.c_str()))
-+ {
-+ PyErr_SetString(PyExc_KeyError, attr.c_str());
-+ throw_error_already_set();
-+ }
-+ return result;
-+ }
-+
-+ void setitem(const std::string &attr, const std::string &val)
-+ {
-+ param_insert(attr.c_str(), val.c_str());
-+ }
-+
-+ std::string setdefault(const std::string &attr, const std::string &def)
-+ {
-+ std::string result;
-+ if (!param(result, attr.c_str()))
-+ {
-+ param_insert(attr.c_str(), def.c_str());
-+ return def;
-+ }
-+ return result;
-+ }
-+};
-+
-+std::string CondorVersionWrapper() { return CondorVersion(); }
-+
-+std::string CondorPlatformWrapper() { return CondorPlatform(); }
-+
-+BOOST_PYTHON_FUNCTION_OVERLOADS(config_overloads, config, 0, 3);
-+
-+void export_config()
-+{
-+ config();
-+ def("version", CondorVersionWrapper, "Returns the version of HTCondor this module is linked against.");
-+ def("platform", CondorPlatformWrapper, "Returns the platform of HTCondor this module is running on.");
-+ def("reload_config", config, config_overloads("Reload the HTCondor configuration from disk."));
-+ class_<Param>("_Param")
-+ .def("__getitem__", &Param::getitem)
-+ .def("__setitem__", &Param::setitem)
-+ .def("setdefault", &Param::setdefault)
-+ ;
-+ object param = object(Param());
-+ param.attr("__doc__") = "A dictionary-like object containing the HTCondor configuration.";
-+ scope().attr("param") = param;
-+}
-diff --git a/src/condor_contrib/python-bindings/daemon_and_ad_types.cpp b/src/condor_contrib/python-bindings/daemon_and_ad_types.cpp
-new file mode 100644
-index 0000000..f2b0bab
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/daemon_and_ad_types.cpp
-@@ -0,0 +1,30 @@
-+
-+#include <condor_adtypes.h>
-+#include <daemon_types.h>
-+#include <boost/python.hpp>
-+
-+using namespace boost::python;
-+
-+void export_daemon_and_ad_types()
-+{
-+ enum_<daemon_t>("DaemonTypes")
-+ .value("None", DT_NONE)
-+ .value("Any", DT_ANY)
-+ .value("Master", DT_MASTER)
-+ .value("Schedd", DT_SCHEDD)
-+ .value("Startd", DT_STARTD)
-+ .value("Collector", DT_COLLECTOR)
-+ .value("Negotiator", DT_NEGOTIATOR)
-+ ;
-+
-+ enum_<AdTypes>("AdTypes")
-+ .value("None", NO_AD)
-+ .value("Any", ANY_AD)
-+ .value("Generic", GENERIC_AD)
-+ .value("Startd", STARTD_AD)
-+ .value("Schedd", SCHEDD_AD)
-+ .value("Master", MASTER_AD)
-+ .value("Collector", COLLECTOR_AD)
-+ .value("Negotiator", NEGOTIATOR_AD)
-+ ;
-+}
-diff --git a/src/condor_contrib/python-bindings/dc_tool.cpp b/src/condor_contrib/python-bindings/dc_tool.cpp
-new file mode 100644
-index 0000000..973c1e3
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/dc_tool.cpp
-@@ -0,0 +1,129 @@
-+
-+#include "condor_common.h"
-+
-+#include <boost/python.hpp>
-+
-+#include "daemon.h"
-+#include "daemon_types.h"
-+#include "condor_commands.h"
-+#include "condor_attributes.h"
-+#include "compat_classad.h"
-+
-+#include "classad_wrapper.h"
-+
-+using namespace boost::python;
-+
-+enum DaemonCommands {
-+ DDAEMONS_OFF = DAEMONS_OFF,
-+ DDAEMONS_OFF_FAST = DAEMONS_OFF_FAST,
-+ DDAEMONS_OFF_PEACEFUL = DAEMONS_OFF_PEACEFUL,
-+ DDAEMON_OFF = DAEMON_OFF,
-+ DDAEMON_OFF_FAST = DAEMON_OFF_FAST,
-+ DDAEMON_OFF_PEACEFUL = DAEMON_OFF_PEACEFUL,
-+ DDC_OFF_FAST = DC_OFF_FAST,
-+ DDC_OFF_PEACEFUL = DC_OFF_PEACEFUL,
-+ DDC_OFF_GRACEFUL = DC_OFF_GRACEFUL,
-+ DDC_SET_PEACEFUL_SHUTDOWN = DC_SET_PEACEFUL_SHUTDOWN,
-+ DDC_RECONFIG_FULL = DC_RECONFIG_FULL,
-+ DRESTART = RESTART,
-+ DRESTART_PEACEFUL = RESTART_PEACEFUL
-+};
-+
-+void send_command(const ClassAdWrapper & ad, DaemonCommands dc, const std::string &target="")
-+{
-+ std::string addr;
-+ if (!ad.EvaluateAttrString(ATTR_MY_ADDRESS, addr))
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Address not available in location ClassAd.");
-+ throw_error_already_set();
-+ }
-+ std::string ad_type_str;
-+ if (!ad.EvaluateAttrString(ATTR_MY_TYPE, ad_type_str))
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Daemon type not available in location ClassAd.");
-+ throw_error_already_set();
-+ }
-+ int ad_type = AdTypeFromString(ad_type_str.c_str());
-+ if (ad_type == NO_AD)
-+ {
-+ printf("ad type %s.\n", ad_type_str.c_str());
-+ PyErr_SetString(PyExc_ValueError, "Unknown ad type.");
-+ throw_error_already_set();
-+ }
-+ daemon_t d_type;
-+ switch (ad_type) {
-+ case MASTER_AD: d_type = DT_MASTER; break;
-+ case STARTD_AD: d_type = DT_STARTD; break;
-+ case SCHEDD_AD: d_type = DT_SCHEDD; break;
-+ case NEGOTIATOR_AD: d_type = DT_NEGOTIATOR; break;
-+ case COLLECTOR_AD: d_type = DT_COLLECTOR; break;
-+ default:
-+ d_type = DT_NONE;
-+ PyErr_SetString(PyExc_ValueError, "Unknown daemon type.");
-+ throw_error_already_set();
-+ }
-+
-+ ClassAd ad_copy; ad_copy.CopyFrom(ad);
-+ Daemon d(&ad_copy, d_type, NULL);
-+ if (!d.locate())
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate daemon.");
-+ throw_error_already_set();
-+ }
-+ ReliSock sock;
-+ if (!sock.connect(d.addr()))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to connect to the remote daemon");
-+ throw_error_already_set();
-+ }
-+ if (!d.startCommand(dc, &sock, 0, NULL))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to start command.");
-+ throw_error_already_set();
-+ }
-+ if (target.size())
-+ {
-+ std::vector<unsigned char> target_cstr; target_cstr.reserve(target.size()+1);
-+ memcpy(&target_cstr[0], target.c_str(), target.size()+1);
-+ if (!sock.code(&target_cstr[0]))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to send target.");
-+ throw_error_already_set();
-+ }
-+ if (!sock.end_of_message())
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to send end-of-message.");
-+ throw_error_already_set();
-+ }
-+ }
-+ sock.close();
-+}
-+
-+BOOST_PYTHON_FUNCTION_OVERLOADS(send_command_overloads, send_command, 2, 3);
-+
-+void
-+export_dc_tool()
-+{
-+ enum_<DaemonCommands>("DaemonCommands")
-+ .value("DaemonsOff", DDAEMONS_OFF)
-+ .value("DaemonsOffFast", DDAEMONS_OFF_FAST)
-+ .value("DaemonsOffPeaceful", DDAEMONS_OFF_PEACEFUL)
-+ .value("DaemonOff", DDAEMON_OFF)
-+ .value("DaemonOffFast", DDAEMON_OFF_FAST)
-+ .value("DaemonOffPeaceful", DDAEMON_OFF_PEACEFUL)
-+ .value("OffGraceful", DDC_OFF_GRACEFUL)
-+ .value("OffPeaceful", DDC_OFF_PEACEFUL)
-+ .value("OffFast", DDC_OFF_FAST)
-+ .value("SetPeacefulShutdown", DDC_SET_PEACEFUL_SHUTDOWN)
-+ .value("Reconfig", DDC_RECONFIG_FULL)
-+ .value("Restart", DRESTART)
-+ .value("RestartPeacful", DRESTART_PEACEFUL)
-+ ;
-+
-+ def("send_command", send_command, send_command_overloads("Send a command to a HTCondor daemon specified by a location ClassAd\n"
-+ ":param ad: An ad specifying the location of the daemon; typically, found by using Collector.locate(...).\n"
-+ ":param dc: A command type; must be a member of the enum DaemonCommands.\n"
-+ ":param target: Some commands require additional arguments; for example, sending DaemonOff to a master requires one to specify which subsystem to turn off."
-+ " If this parameter is given, the daemon is sent an additional argument."))
-+ ;
-+}
-diff --git a/src/condor_contrib/python-bindings/export_headers.h b/src/condor_contrib/python-bindings/export_headers.h
-new file mode 100644
-index 0000000..4480495
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/export_headers.h
-@@ -0,0 +1,8 @@
-+
-+void export_collector();
-+void export_schedd();
-+void export_dc_tool();
-+void export_daemon_and_ad_types();
-+void export_config();
-+void export_secman();
-+
-diff --git a/src/condor_contrib/python-bindings/exprtree_wrapper.h b/src/condor_contrib/python-bindings/exprtree_wrapper.h
-new file mode 100644
-index 0000000..e3d2bc0
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/exprtree_wrapper.h
-@@ -0,0 +1,30 @@
-+
-+#ifndef __EXPRTREE_WRAPPER_H_
-+#define __EXPRTREE_WRAPPER_H_
-+
-+#include <classad/exprTree.h>
-+#include <boost/python.hpp>
-+
-+struct ExprTreeHolder
-+{
-+ ExprTreeHolder(const std::string &str);
-+
-+ ExprTreeHolder(classad::ExprTree *expr);
-+
-+ ~ExprTreeHolder();
-+
-+ boost::python::object Evaluate() const;
-+
-+ std::string toRepr();
-+
-+ std::string toString();
-+
-+ classad::ExprTree *get();
-+
-+private:
-+ classad::ExprTree *m_expr;
-+ bool m_owns;
-+};
-+
-+#endif
-+
-diff --git a/src/condor_contrib/python-bindings/old_boost.h b/src/condor_contrib/python-bindings/old_boost.h
-new file mode 100644
-index 0000000..7d159bc
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/old_boost.h
-@@ -0,0 +1,25 @@
-+
-+#include <boost/python.hpp>
-+
-+/*
-+ * This header contains all boost.python constructs missing in
-+ * older versions of boost.
-+ *
-+ * We'll eventually not compile these if the version of boost
-+ * is sufficiently recent.
-+ */
-+
-+inline ssize_t py_len(boost::python::object const& obj)
-+{
-+ ssize_t result = PyObject_Length(obj.ptr());
-+ if (PyErr_Occurred()) boost::python::throw_error_already_set();
-+ return result;
-+}
-+
-+inline boost::python::object py_import(boost::python::str name)
-+{
-+ char * n = boost::python::extract<char *>(name);
-+ boost::python::handle<> module(PyImport_ImportModule(n));
-+ return boost::python::object(module);
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/schedd.cpp b/src/condor_contrib/python-bindings/schedd.cpp
-new file mode 100644
-index 0000000..9bbc830
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/schedd.cpp
-@@ -0,0 +1,402 @@
-+
-+#include "condor_attributes.h"
-+#include "condor_q.h"
-+#include "condor_qmgr.h"
-+#include "daemon.h"
-+#include "daemon_types.h"
-+#include "enum_utils.h"
-+#include "dc_schedd.h"
-+
-+#include <boost/python.hpp>
-+
-+#include "old_boost.h"
-+#include "classad_wrapper.h"
-+#include "exprtree_wrapper.h"
-+
-+using namespace boost::python;
-+
-+#define DO_ACTION(action_name) \
-+ reason_str = extract<std::string>(reason); \
-+ if (use_ids) \
-+ result = schedd. action_name (&ids, reason_str.c_str(), NULL, AR_TOTALS); \
-+ else \
-+ result = schedd. action_name (constraint.c_str(), reason_str.c_str(), NULL, AR_TOTALS);
-+
-+struct Schedd {
-+
-+ Schedd()
-+ {
-+ Daemon schedd( DT_SCHEDD, 0, 0 );
-+
-+ if (schedd.locate())
-+ {
-+ if (schedd.addr())
-+ {
-+ m_addr = schedd.addr();
-+ }
-+ else
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate schedd address.");
-+ throw_error_already_set();
-+ }
-+ m_name = schedd.name() ? schedd.name() : "Unknown";
-+ m_version = schedd.version() ? schedd.version() : "";
-+ }
-+ else
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to locate local daemon");
-+ boost::python::throw_error_already_set();
-+ }
-+ }
-+
-+ Schedd(const ClassAdWrapper &ad)
-+ : m_addr(), m_name("Unknown"), m_version("")
-+ {
-+ if (!ad.EvaluateAttrString(ATTR_SCHEDD_IP_ADDR, m_addr))
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Schedd address not specified.");
-+ throw_error_already_set();
-+ }
-+ ad.EvaluateAttrString(ATTR_NAME, m_name);
-+ ad.EvaluateAttrString(ATTR_VERSION, m_version);
-+ }
-+
-+ object query(const std::string &constraint="", list attrs=list())
-+ {
-+ CondorQ q;
-+
-+ if (constraint.size())
-+ q.addAND(constraint.c_str());
-+
-+ StringList attrs_list(NULL, "\n");
-+ // Must keep strings alive; StringList does not create an internal copy.
-+ int len_attrs = py_len(attrs);
-+ std::vector<std::string> attrs_str; attrs_str.reserve(len_attrs);
-+ for (int i=0; i<len_attrs; i++)
-+ {
-+ std::string attrName = extract<std::string>(attrs[i]);
-+ attrs_str.push_back(attrName);
-+ attrs_list.append(attrs_str[i].c_str());
-+ }
-+
-+ ClassAdList jobs;
-+
-+ int fetchResult = q.fetchQueueFromHost(jobs, attrs_list, m_addr.c_str(), m_version.c_str(), NULL);
-+ switch (fetchResult)
-+ {
-+ case Q_OK:
-+ break;
-+ case Q_PARSE_ERROR:
-+ case Q_INVALID_CATEGORY:
-+ PyErr_SetString(PyExc_RuntimeError, "Parse error in constraint.");
-+ throw_error_already_set();
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_IOError, "Failed to fetch ads from schedd.");
-+ throw_error_already_set();
-+ break;
-+ }
-+
-+ list retval;
-+ ClassAd *job;
-+ jobs.Open();
-+ while ((job = jobs.Next()))
-+ {
-+ boost::shared_ptr<ClassAdWrapper> wrapper(new ClassAdWrapper());
-+ wrapper->CopyFrom(*job);
-+ retval.append(wrapper);
-+ }
-+ return retval;
-+ }
-+
-+ object actOnJobs(JobAction action, object job_spec, object reason=object())
-+ {
-+ if (reason == object())
-+ {
-+ reason = object("Python-initiated action");
-+ }
-+ StringList ids;
-+ std::vector<std::string> ids_list;
-+ std::string constraint, reason_str, reason_code;
-+ bool use_ids = false;
-+ extract<std::string> constraint_extract(job_spec);
-+ if (constraint_extract.check())
-+ {
-+ constraint = constraint_extract();
-+ }
-+ else
-+ {
-+ int id_len = py_len(job_spec);
-+ ids_list.reserve(id_len);
-+ for (int i=0; i<id_len; i++)
-+ {
-+ std::string str = extract<std::string>(job_spec[i]);
-+ ids_list.push_back(str);
-+ ids.append(ids_list[i].c_str());
-+ }
-+ use_ids = true;
-+ }
-+ DCSchedd schedd(m_addr.c_str());
-+ ClassAd *result = NULL;
-+ VacateType vacate_type;
-+ tuple reason_tuple;
-+ const char *reason_char, *reason_code_char = NULL;
-+ extract<tuple> try_extract_tuple(reason);
-+ switch (action)
-+ {
-+ case JA_HOLD_JOBS:
-+ if (try_extract_tuple.check())
-+ {
-+ reason_tuple = extract<tuple>(reason);
-+ if (py_len(reason_tuple) != 2)
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Hold action requires (hold string, hold code) tuple as the reason.");
-+ throw_error_already_set();
-+ }
-+ reason_str = extract<std::string>(reason_tuple[0]); reason_char = reason_str.c_str();
-+ reason_code = extract<std::string>(reason_tuple[1]); reason_code_char = reason_code.c_str();
-+ }
-+ else
-+ {
-+ reason_str = extract<std::string>(reason);
-+ reason_char = reason_str.c_str();
-+ }
-+ if (use_ids)
-+ result = schedd.holdJobs(&ids, reason_char, reason_code_char, NULL, AR_TOTALS);
-+ else
-+ result = schedd.holdJobs(constraint.c_str(), reason_char, reason_code_char, NULL, AR_TOTALS);
-+ break;
-+ case JA_RELEASE_JOBS:
-+ DO_ACTION(releaseJobs)
-+ break;
-+ case JA_REMOVE_JOBS:
-+ DO_ACTION(removeJobs)
-+ break;
-+ case JA_REMOVE_X_JOBS:
-+ DO_ACTION(removeXJobs)
-+ break;
-+ case JA_VACATE_JOBS:
-+ case JA_VACATE_FAST_JOBS:
-+ vacate_type = action == JA_VACATE_JOBS ? VACATE_GRACEFUL : VACATE_FAST;
-+ if (use_ids)
-+ result = schedd.vacateJobs(&ids, vacate_type, NULL, AR_TOTALS);
-+ else
-+ result = schedd.vacateJobs(constraint.c_str(), vacate_type, NULL, AR_TOTALS);
-+ break;
-+ case JA_SUSPEND_JOBS:
-+ DO_ACTION(suspendJobs)
-+ break;
-+ case JA_CONTINUE_JOBS:
-+ DO_ACTION(continueJobs)
-+ break;
-+ default:
-+ PyErr_SetString(PyExc_NotImplementedError, "Job action not implemented.");
-+ throw_error_already_set();
-+ }
-+ if (!result)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Error when querying the schedd.");
-+ throw_error_already_set();
-+ }
-+
-+ boost::shared_ptr<ClassAdWrapper> wrapper(new ClassAdWrapper());
-+ wrapper->CopyFrom(*result);
-+ object wrapper_obj(wrapper);
-+
-+ boost::shared_ptr<ClassAdWrapper> result_ptr(new ClassAdWrapper());
-+ object result_obj(result_ptr);
-+
-+ result_obj["TotalError"] = wrapper_obj["result_total_0"];
-+ result_obj["TotalSuccess"] = wrapper_obj["result_total_1"];
-+ result_obj["TotalNotFound"] = wrapper_obj["result_total_2"];
-+ result_obj["TotalBadStatus"] = wrapper_obj["result_total_3"];
-+ result_obj["TotalAlreadyDone"] = wrapper_obj["result_total_4"];
-+ result_obj["TotalPermissionDenied"] = wrapper_obj["result_total_5"];
-+ result_obj["TotalJobAds"] = wrapper_obj["TotalJobAds"];
-+ result_obj["TotalChangedAds"] = wrapper_obj["ActionResult"];
-+ return result_obj;
-+ }
-+
-+ object actOnJobs2(JobAction action, object job_spec)
-+ {
-+ return actOnJobs(action, job_spec, object("Python-initiated action."));
-+ }
-+
-+ int submit(ClassAdWrapper &wrapper, int count=1)
-+ {
-+ ConnectionSentry sentry(*this); // Automatically connects / disconnects.
-+
-+ int cluster = NewCluster();
-+ if (cluster < 0)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to create new cluster.");
-+ throw_error_already_set();
-+ }
-+ ClassAd ad; ad.CopyFrom(wrapper);
-+ for (int idx=0; idx<count; idx++)
-+ {
-+ int procid = NewProc (cluster);
-+ if (procid < 0)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to create new proc id.");
-+ throw_error_already_set();
-+ }
-+ ad.InsertAttr(ATTR_CLUSTER_ID, cluster);
-+ ad.InsertAttr(ATTR_PROC_ID, procid);
-+
-+ classad::ClassAdUnParser unparser;
-+ unparser.SetOldClassAd( true );
-+ for (classad::ClassAd::const_iterator it = ad.begin(); it != ad.end(); it++)
-+ {
-+ std::string rhs;
-+ unparser.Unparse(rhs, it->second);
-+ if (-1 == SetAttribute(cluster, procid, it->first.c_str(), rhs.c_str(), SetAttribute_NoAck))
-+ {
-+ PyErr_SetString(PyExc_ValueError, it->first.c_str());
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+
-+ return cluster;
-+ }
-+
-+ void edit(object job_spec, std::string attr, object val)
-+ {
-+ std::vector<int> clusters;
-+ std::vector<int> procs;
-+ std::string constraint;
-+ bool use_ids = false;
-+ extract<std::string> constraint_extract(job_spec);
-+ if (constraint_extract.check())
-+ {
-+ constraint = constraint_extract();
-+ }
-+ else
-+ {
-+ int id_len = py_len(job_spec);
-+ clusters.reserve(id_len);
-+ procs.reserve(id_len);
-+ for (int i=0; i<id_len; i++)
-+ {
-+ object id_list = job_spec[i].attr("split")(".");
-+ if (py_len(id_list) != 2)
-+ {
-+ PyErr_SetString(PyExc_ValueError, "Invalid ID");
-+ throw_error_already_set();
-+ }
-+ clusters.push_back(extract<int>(long_(id_list[0])));
-+ procs.push_back(extract<int>(long_(id_list[1])));
-+ }
-+ use_ids = true;
-+ }
-+
-+ std::string val_str;
-+ extract<ExprTreeHolder &> exprtree_extract(val);
-+ if (exprtree_extract.check())
-+ {
-+ classad::ClassAdUnParser unparser;
-+ unparser.Unparse(val_str, exprtree_extract().get());
-+ }
-+ else
-+ {
-+ val_str = extract<std::string>(val);
-+ }
-+
-+ ConnectionSentry sentry(*this);
-+
-+ if (use_ids)
-+ {
-+ for (unsigned idx=0; idx<clusters.size(); idx++)
-+ {
-+ if (-1 == SetAttribute(clusters[idx], procs[idx], attr.c_str(), val_str.c_str()))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to edit job");
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+ else
-+ {
-+ if (-1 == SetAttributeByConstraint(constraint.c_str(), attr.c_str(), val_str.c_str()))
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Unable to edit jobs matching constraint");
-+ throw_error_already_set();
-+ }
-+ }
-+ }
-+
-+private:
-+ struct ConnectionSentry
-+ {
-+ public:
-+ ConnectionSentry(Schedd &schedd) : m_connected(false)
-+ {
-+ if (ConnectQ(schedd.m_addr.c_str(), 0, false, NULL, NULL, schedd.m_version.c_str()) == 0)
-+ {
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to connect to schedd.");
-+ throw_error_already_set();
-+ }
-+ m_connected = true;
-+ }
-+
-+ void disconnect()
-+ {
-+ if (m_connected && !DisconnectQ(NULL))
-+ {
-+ m_connected = false;
-+ PyErr_SetString(PyExc_RuntimeError, "Failed to commmit and disconnect from queue.");
-+ throw_error_already_set();
-+ }
-+ m_connected = false;
-+ }
-+
-+ ~ConnectionSentry()
-+ {
-+ disconnect();
-+ }
-+ private:
-+ bool m_connected;
-+ };
-+
-+ std::string m_addr, m_name, m_version;
-+};
-+
-+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(query_overloads, query, 0, 2);
-+BOOST_PYTHON_MEMBER_FUNCTION_OVERLOADS(submit_overloads, submit, 1, 2);
-+
-+void export_schedd()
-+{
-+ enum_<JobAction>("JobAction")
-+ .value("Hold", JA_HOLD_JOBS)
-+ .value("Release", JA_RELEASE_JOBS)
-+ .value("Remove", JA_REMOVE_JOBS)
-+ .value("RemoveX", JA_REMOVE_X_JOBS)
-+ .value("Vacate", JA_VACATE_JOBS)
-+ .value("VacateFast", JA_VACATE_FAST_JOBS)
-+ .value("Suspend", JA_SUSPEND_JOBS)
-+ .value("Continue", JA_CONTINUE_JOBS)
-+ ;
-+
-+ class_<Schedd>("Schedd", "A client class for the HTCondor schedd")
-+ .def(init<const ClassAdWrapper &>(":param ad: An ad containing the location of the schedd"))
-+ .def("query", &Schedd::query, query_overloads("Query the HTCondor schedd for jobs.\n"
-+ ":param constraint: An optional constraint for filtering out jobs; defaults to 'true'\n"
-+ ":param attr_list: A list of attributes for the schedd to project along. Defaults to having the schedd return all attributes.\n"
-+ ":return: A list of matching jobs, containing the requested attributes."))
-+ .def("act", &Schedd::actOnJobs2)
-+ .def("act", &Schedd::actOnJobs, "Change status of job(s) in the schedd.\n"
-+ ":param action: Action to perform; must be from enum JobAction.\n"
-+ ":param job_spec: Job specification; can either be a list of job IDs or a string specifying a constraint to match jobs.\n"
-+ ":return: Number of jobs changed.")
-+ .def("submit", &Schedd::submit, submit_overloads("Submit one or more jobs to the HTCondor schedd.\n"
-+ ":param ad: ClassAd describing job cluster.\n"
-+ ":param count: Number of jobs to submit to cluster.\n"
-+ ":return: Newly created cluster ID."))
-+ .def("edit", &Schedd::edit, "Edit one or more jobs in the queue.\n"
-+ ":param job_spec: Either a list of jobs (CLUSTER.PROC) or a string containing a constraint to match jobs against.\n"
-+ ":param attr: Attribute name to edit.\n"
-+ ":param value: The new value of the job attribute; should be a string (which will be converted to a ClassAds expression) or a ClassAds expression.");
-+ ;
-+}
-+
-diff --git a/src/condor_contrib/python-bindings/secman.cpp b/src/condor_contrib/python-bindings/secman.cpp
-new file mode 100644
-index 0000000..343fba8
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/secman.cpp
-@@ -0,0 +1,35 @@
-+
-+#include "condor_common.h"
-+
-+#include <boost/python.hpp>
-+
-+// Note - condor_secman.h can't be included directly. The following headers must
-+// be loaded first. Sigh.
-+#include "condor_ipverify.h"
-+#include "sock.h"
-+
-+#include "condor_secman.h"
-+
-+using namespace boost::python;
-+
-+struct SecManWrapper
-+{
-+public:
-+ SecManWrapper() : m_secman() {}
-+
-+ void
-+ invalidateAllCache()
-+ {
-+ m_secman.invalidateAllCache();
-+ }
-+
-+private:
-+ SecMan m_secman;
-+};
-+
-+void
-+export_secman()
-+{
-+ class_<SecManWrapper>("SecMan", "Access to the internal security state information.")
-+ .def("invalidateAllSessions", &SecManWrapper::invalidateAllCache, "Invalidate all security sessions.");
-+}
-diff --git a/src/condor_contrib/python-bindings/tests/classad_tests.py b/src/condor_contrib/python-bindings/tests/classad_tests.py
-new file mode 100644
-index 0000000..7641190
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/tests/classad_tests.py
-@@ -0,0 +1,79 @@
-+#!/usr/bin/python
-+
-+import re
-+import classad
-+import unittest
-+
-+class TestClassad(unittest.TestCase):
-+
-+ def test_load_classad_from_file(self):
-+ ad = classad.parse(open("tests/test.ad"))
-+ self.assertEqual(ad["foo"], "bar")
-+ self.assertEqual(ad["baz"], classad.Value.Undefined)
-+ self.assertRaises(KeyError, ad.__getitem__, "bar")
-+
-+ def test_old_classad(self):
-+ ad = classad.parseOld(open("tests/test.old.ad"))
-+ contents = open("tests/test.old.ad").read()
-+ self.assertEqual(ad.printOld(), contents)
-+
-+ def test_exprtree(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree("2+2")
-+ expr = ad["foo"]
-+ self.assertEqual(expr.__repr__(), "2 + 2")
-+ self.assertEqual(expr.eval(), 4)
-+
-+ def test_exprtree_func(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree('regexps("foo (bar)", "foo bar", "\\\\1")')
-+ self.assertEqual(ad.eval("foo"), "bar")
-+
-+ def test_ad_assignment(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = 2.1
-+ self.assertEqual(ad["foo"], 2.1)
-+ ad["foo"] = 2
-+ self.assertEqual(ad["foo"], 2)
-+ ad["foo"] = "bar"
-+ self.assertEqual(ad["foo"], "bar")
-+ self.assertRaises(TypeError, ad.__setitem__, {})
-+
-+ def test_ad_refs(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree("bar + baz")
-+ ad["bar"] = 2.1
-+ ad["baz"] = 4
-+ self.assertEqual(ad["foo"].__repr__(), "bar + baz")
-+ self.assertEqual(ad.eval("foo"), 6.1)
-+
-+ def test_ad_special_values(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.ExprTree('regexp(12, 34)')
-+ ad["bar"] = classad.Value.Undefined
-+ self.assertEqual(ad["foo"].eval(), classad.Value.Error)
-+ self.assertNotEqual(ad["foo"].eval(), ad["bar"])
-+ self.assertEqual(classad.Value.Undefined, ad["bar"])
-+
-+ def test_ad_iterator(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = 1
-+ ad["bar"] = 2
-+ self.assertEqual(len(ad), 2)
-+ self.assertEqual(len(list(ad)), 2)
-+ self.assertEqual(list(ad)[1], "foo")
-+ self.assertEqual(list(ad)[0], "bar")
-+ self.assertEqual(list(ad.items())[1][1], 1)
-+ self.assertEqual(list(ad.items())[0][1], 2)
-+ self.assertEqual(list(ad.values())[1], 1)
-+ self.assertEqual(list(ad.values())[0], 2)
-+
-+ def test_ad_lookup(self):
-+ ad = classad.ClassAd()
-+ ad["foo"] = classad.Value.Error
-+ self.assertTrue(isinstance(ad.lookup("foo"), classad.ExprTree))
-+ self.assertEquals(ad.lookup("foo").eval(), classad.Value.Error)
-+
-+if __name__ == '__main__':
-+ unittest.main()
-+
-diff --git a/src/condor_contrib/python-bindings/tests/condor_tests.py b/src/condor_contrib/python-bindings/tests/condor_tests.py
-new file mode 100644
-index 0000000..2293fc2
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/tests/condor_tests.py
-@@ -0,0 +1,173 @@
-+#!/usr/bin/python
-+
-+import os
-+import re
-+import time
-+import condor
-+import errno
-+import signal
-+import classad
-+import unittest
-+
-+class TestConfig(unittest.TestCase):
-+
-+ def setUp(self):
-+ os.environ["_condor_FOO"] = "BAR"
-+ condor.reload_config()
-+
-+ def test_config(self):
-+ self.assertEquals(condor.param["FOO"], "BAR")
-+
-+ def test_reconfig(self):
-+ condor.param["FOO"] = "BAZ"
-+ self.assertEquals(condor.param["FOO"], "BAZ")
-+ os.environ["_condor_FOO"] = "1"
-+ condor.reload_config()
-+ self.assertEquals(condor.param["FOO"], "1")
-+
-+class TestVersion(unittest.TestCase):
-+
-+ def setUp(self):
-+ fd = os.popen("condor_version")
-+ self.lines = []
-+ for line in fd.readlines():
-+ self.lines.append(line.strip())
-+ if fd.close():
-+ raise RuntimeError("Unable to invoke condor_version")
-+
-+ def test_version(self):
-+ self.assertEquals(condor.version(), self.lines[0])
-+
-+ def test_platform(self):
-+ self.assertEquals(condor.platform(), self.lines[1])
-+
-+def makedirs_ignore_exist(directory):
-+ try:
-+ os.makedirs(directory)
-+ except OSError, oe:
-+ if oe.errno != errno.EEXIST:
-+ raise
-+
-+def remove_ignore_missing(file):
-+ try:
-+ os.unlink(file)
-+ except OSError, oe:
-+ if oe.errno != errno.ENOENT:
-+ raise
-+
-+class TestWithDaemons(unittest.TestCase):
-+
-+ def setUp(self):
-+ self.pid = -1
-+ testdir = os.path.join(os.getcwd(), "tests_tmp")
-+ makedirs_ignore_exist(testdir)
-+ os.environ["_condor_LOCAL_DIR"] = testdir
-+ os.environ["_condor_LOG"] = '$(LOCAL_DIR)/log'
-+ os.environ["_condor_LOCK"] = '$(LOCAL_DIR)/lock'
-+ os.environ["_condor_RUN"] = '$(LOCAL_DIR)/run'
-+ os.environ["_condor_COLLECTOR_NAME"] = "python_classad_tests"
-+ os.environ["_condor_SCHEDD_NAME"] = "python_classad_tests"
-+ condor.reload_config()
-+ condor.SecMan().invalidateAllSessions()
-+
-+ def launch_daemons(self, daemons=["MASTER", "COLLECTOR"]):
-+ makedirs_ignore_exist(condor.param["LOG"])
-+ makedirs_ignore_exist(condor.param["LOCK"])
-+ makedirs_ignore_exist(condor.param["EXECUTE"])
-+ makedirs_ignore_exist(condor.param["SPOOL"])
-+ makedirs_ignore_exist(condor.param["RUN"])
-+ remove_ignore_missing(condor.param["MASTER_ADDRESS_FILE"])
-+ remove_ignore_missing(condor.param["COLLECTOR_ADDRESS_FILE"])
-+ remove_ignore_missing(condor.param["SCHEDD_ADDRESS_FILE"])
-+ if "COLLECTOR" in daemons:
-+ os.environ["_condor_PORT"] = "9622"
-+ os.environ["_condor_COLLECTOR_ARGS"] = "-port $(PORT)"
-+ os.environ["_condor_COLLECTOR_HOST"] = "$(CONDOR_HOST):$(PORT)"
-+ if 'MASTER' not in daemons:
-+ daemons.append('MASTER')
-+ os.environ["_condor_DAEMON_LIST"] = ", ".join(daemons)
-+ condor.reload_config()
-+ self.pid = os.fork()
-+ if not self.pid:
-+ try:
-+ try:
-+ os.execvp("condor_master", ["condor_master", "-f"])
-+ except Exception, e:
-+ print str(e)
-+ finally:
-+ os._exit(1)
-+ for daemon in daemons:
-+ self.waitLocalDaemon(daemon)
-+
-+ def tearDown(self):
-+ if self.pid > 1:
-+ os.kill(self.pid, signal.SIGQUIT)
-+ pid, exit_status = os.waitpid(self.pid, 0)
-+ self.assertTrue(os.WIFEXITED(exit_status))
-+ code = os.WEXITSTATUS(exit_status)
-+ self.assertEquals(code, 0)
-+
-+ def waitLocalDaemon(self, daemon, timeout=5):
-+ address_file = condor.param[daemon + "_ADDRESS_FILE"]
-+ for i in range(timeout):
-+ if os.path.exists(address_file):
-+ return
-+ time.sleep(1)
-+ if not os.path.exists(address_file):
-+ raise RuntimeError("Waiting for daemon %s timed out." % daemon)
-+
-+ def waitRemoteDaemon(self, dtype, dname, pool=None, timeout=5):
-+ if pool:
-+ coll = condor.Collector(pool)
-+ else:
-+ coll = condor.Collector()
-+ for i in range(timeout):
-+ try:
-+ return coll.locate(dtype, dname)
-+ except Exception:
-+ pass
-+ time.sleep(1)
-+ return coll.locate(dtype, dname)
-+
-+ def testDaemon(self):
-+ self.launch_daemons(["COLLECTOR"])
-+
-+ def testLocate(self):
-+ self.launch_daemons(["COLLECTOR"])
-+ coll = condor.Collector()
-+ coll_ad = coll.locate(condor.DaemonTypes.Collector)
-+ self.assertTrue("MyAddress" in coll_ad)
-+ self.assertEquals(coll_ad["Name"].split(":")[-1], os.environ["_condor_PORT"])
-+
-+ def testRemoteLocate(self):
-+ self.launch_daemons(["COLLECTOR"])
-+ coll = condor.Collector()
-+ coll_ad = coll.locate(condor.DaemonTypes.Collector)
-+ remote_ad = self.waitRemoteDaemon(condor.DaemonTypes.Collector, "%s@%s" % (condor.param["COLLECTOR_NAME"], condor.param["CONDOR_HOST"]))
-+ self.assertEquals(remote_ad["MyAddress"], coll_ad["MyAddress"])
-+
-+ def testScheddLocate(self):
-+ self.launch_daemons(["SCHEDD", "COLLECTOR"])
-+ coll = condor.Collector()
-+ name = "%s@%s" % (condor.param["SCHEDD_NAME"], condor.param["CONDOR_HOST"])
-+ schedd_ad = self.waitRemoteDaemon(condor.DaemonTypes.Schedd, name, timeout=10)
-+ self.assertEquals(schedd_ad["Name"], name)
-+
-+ def testCollectorAdvertise(self):
-+ self.launch_daemons(["COLLECTOR"])
-+ print condor.param["COLLECTOR_HOST"]
-+ coll = condor.Collector()
-+ now = time.time()
-+ ad = classad.ClassAd('[MyType="GenericAd"; Name="Foo"; Foo=1; Bar=%f; Baz="foo"]' % now)
-+ coll.advertise([ad])
-+ for i in range(5):
-+ ads = coll.query(condor.AdTypes.Any, 'Name =?= "Foo"', ["Bar"])
-+ if ads: break
-+ time.sleep
-+ self.assertEquals(len(ads), 1)
-+ self.assertEquals(ads[0]["Bar"], now)
-+ self.assertTrue("Foo" not in ads[0])
-+
-+if __name__ == '__main__':
-+ unittest.main()
-+
-diff --git a/src/condor_contrib/python-bindings/tests/test.ad b/src/condor_contrib/python-bindings/tests/test.ad
-new file mode 100644
-index 0000000..06eeeb5
---- /dev/null
-+++ b/src/condor_contrib/python-bindings/tests/test.ad
-@@ -0,0 +1,4 @@
-+[
-+foo = "bar";
-+baz = undefined;
-+]
diff --git a/python-boost.patch b/python-boost.patch
deleted file mode 100644
index 20e79ed..0000000
--- a/python-boost.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-diff --git a/src/python-bindings/CMakeLists.txt b/src/python-bindings/CMakeLists.txt
-index 781580d8f9..93d297ee92 100644
---- a/src/python-bindings/CMakeLists.txt
-+++ b/src/python-bindings/CMakeLists.txt
-@@ -224,7 +224,7 @@ else()
- if (${SYSTEM_NAME} MATCHES "Debian" OR ${SYSTEM_NAME} MATCHES "Ubuntu")
- set ( PYTHON_BOOST_LIB "boost_python-py${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}" )
- endif()
-- if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10")
-+ if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "fc3[0-9]" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10")
- set ( PYTHON_BOOST_LIB "boost_python${PYTHON_VERSION_MAJOR}${PYTHON_VERSION_MINOR}" )
- endif()
- if (${SYSTEM_NAME} MATCHES "rhel8" OR ${SYSTEM_NAME} MATCHES "centos8" )
-@@ -309,7 +309,7 @@ else()
- if (${SYSTEM_NAME} MATCHES "Debian" OR ${SYSTEM_NAME} MATCHES "Ubuntu")
- set ( PYTHON3_BOOST_LIB "boost_python-py${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR}" )
- endif()
-- if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10" OR ${SYSTEM_NAME} MATCHES "Ubuntu.*20")
-+ if (${SYSTEM_NAME} MATCHES "rhel7" OR ${SYSTEM_NAME} MATCHES "centos7" OR ${SYSTEM_NAME} MATCHES "sl7" OR ${SYSTEM_NAME} MATCHES "fc3[0-9]" OR ${SYSTEM_NAME} MATCHES "amzn2" OR ${SYSTEM_NAME} MATCHES "Debian.*10" OR ${SYSTEM_NAME} MATCHES "Ubuntu.*20")
- set ( PYTHON3_BOOST_LIB "boost_python${PYTHON3_VERSION_MAJOR}${PYTHON3_VERSION_MINOR}" )
- endif()
- if (${SYSTEM_NAME} MATCHES "rhel8" OR ${SYSTEM_NAME} MATCHES "centos8" )
-
diff --git a/python-executable.patch b/python-executable.patch
deleted file mode 100644
index 2533a50..0000000
--- a/python-executable.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/build/cmake/CondorConfigure.cmake b/build/cmake/CondorConfigure.cmake
-index 83e68eb..2ee07bc 100644
---- a/build/cmake/CondorConfigure.cmake
-+++ b/build/cmake/CondorConfigure.cmake
-@@ -146,7 +146,7 @@ if(NOT WINDOWS)
- message(STATUS "PYTHON_INCLUDE_PATH = ${PYTHON_INCLUDE_PATH}")
- message(STATUS "PYTHON_VERSION_STRING = ${PYTHON_VERSION_STRING}")
- endif()
-- find_program(PYTHON3_EXECUTABLE python3)
-+ find_program(PYTHON3_EXECUTABLE python)
- if (PYTHON3_EXECUTABLE)
- set(PYTHON3INTERP_FOUND TRUE)
- set(PYTHON_QUERY_PART_01 "from distutils import sysconfig;")
diff --git a/python-scripts.patch b/python-scripts.patch
deleted file mode 100644
index 8c7773f..0000000
--- a/python-scripts.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-Patch scripts to use the python3.
-
-diff --git a/src/azure_gahp/AzureGAHPServer.py b/src/azure_gahp/AzureGAHPServer.py
-index ec89591c38..274a8d3929 100644
---- a/src/azure_gahp/AzureGAHPServer.py
-+++ b/src/azure_gahp/AzureGAHPServer.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python -u
-+#!/usr/bin/env python3 -u
-
- from __future__ import print_function
-
-diff --git a/src/condor_gridmanager/slurm_status.py b/src/condor_gridmanager/slurm_status.py
-index 61e68682ff..9ca5bd3cee 100755
---- a/src/condor_gridmanager/slurm_status.py
-+++ b/src/condor_gridmanager/slurm_status.py
-@@ -1,4 +1,4 @@
--#!/usr/bin/python
-+#!/usr/bin/python3
-
- # File: slurm_status.py
- #
-diff --git a/src/condor_job_router/condor_router_history b/src/condor_job_router/condor_router_history
-index 8120fde566..452f0b6aca 100755
---- a/src/condor_job_router/condor_router_history
-+++ b/src/condor_job_router/condor_router_history
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
- ##**************************************************************
- ##
- ## Copyright (C) 1990-2007, Condor Team, Computer Sciences Department,
-diff --git a/src/condor_scripts/condor_top b/src/condor_scripts/condor_top
-index c31f14d6a7..bd647c2d00 100755
---- a/src/condor_scripts/condor_top
-+++ b/src/condor_scripts/condor_top
-@@ -1,4 +1,4 @@
--#!/usr/bin/env python
-+#!/usr/bin/env python3
-
- import htcondor
- import classad
diff --git a/python3.patch b/python3.patch
deleted file mode 100644
index 9fadbf2..0000000
--- a/python3.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/src/python-bindings/CMakeLists.txt b/src/python-bindings/CMakeLists.txt
-index 5c2b104363..b610a3ac4e 100644
---- a/src/python-bindings/CMakeLists.txt
-+++ b/src/python-bindings/CMakeLists.txt
-@@ -204,7 +204,7 @@ if(WINDOWS)
- endif(NOT (MSVC_VERSION LESS 1700))
- endif()
- else()
-- if ( WITH_PYTHON_BINDINGS AND PYTHONLIBS_FOUND AND Boost_PYTHON_LIBRARY AND NOT SOLARIS )
-+ if ( WITH_PYTHON_BINDINGS AND PYTHON3LIBS_FOUND )
- configure_file (
- "${PROJECT_SOURCE_DIR}/src/python-bindings/test_driver.in"
- "${CMAKE_CURRENT_BINARY_DIR}/test_driver"
diff --git a/sources b/sources
index a5e7b88..54537c9 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-SHA512 (htcondor-8.8.15.tar.gz) = 47aa05c48138bebd911c10da834736c0cae5540db3a0a4f48960fffc31c5d09e2ab99560bb3b775b986a8d56496e9d8b59ab6a72ee957b833ffde6607808c61a
+SHA512 (htcondor-23.0.0.tar.gz) = 767b1769e81f2a9aced274877330999b25c182d7cfe8f27b6d841d501ae50a9c388083e25f710527db62e402b63d82f2525ed993fe4a78d33cc9e6226d6ad233
7 months, 1 week