dirsrvtests/tests
by Simon Pichugin
dirsrvtests/tests/tickets/ticket47536_test.py | 8 ++++----
dirsrvtests/tests/tickets/ticket48013_test.py | 11 ++++++++++-
dirsrvtests/tests/tickets/ticket48194_test.py | 8 ++++----
dirsrvtests/tests/tickets/ticket48212_test.py | 24 +++++-------------------
dirsrvtests/tests/tickets/ticket48228_test.py | 24 ++++++++++++------------
dirsrvtests/tests/tickets/ticket48383_test.py | 11 +++++++----
dirsrvtests/tests/tickets/ticket48665_test.py | 8 ++++++--
7 files changed, 48 insertions(+), 46 deletions(-)
New commits:
commit 735ccc6d2929143e4ae7fce9c0c07f0a0cedd3a5
Author: Simon Pichugin <spichugi(a)redhat.com>
Date: Tue May 24 11:03:55 2016 +0200
Ticket 48832 - CI test - fix ticket failures
Description:
ticket47536_test.py
- Get prefix from variable, not from env
ticket48013_test.py
- Change hardcoded host:port to variables
ticket48194_test.py
- Change security port to 636 to avoid SELinux denial
ticket48212_test.py
- Increase sleeping time and change the checks to smart assertion
ticket48383_test.py
ticket48665_test.py
- Change test case to use simple ldap modify operation for setting
backend property
ticket48228_test.py
- Refactor assertions and add time.sleep(1) between operations
https://fedorahosted.org/389/ticket/48832
Review by: nhosoi (Thanks!)
diff --git a/dirsrvtests/tests/tickets/ticket47536_test.py b/dirsrvtests/tests/tickets/ticket47536_test.py
index 1712e7c..1bcbb14 100644
--- a/dirsrvtests/tests/tickets/ticket47536_test.py
+++ b/dirsrvtests/tests/tickets/ticket47536_test.py
@@ -25,7 +25,7 @@ from lib389.utils import *
logging.getLogger(__name__).setLevel(logging.DEBUG)
log = logging.getLogger(__name__)
-installation1_prefix = None
+installation1_prefix = ''
CONFIG_DN = 'cn=config'
ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN
@@ -70,7 +70,7 @@ def topology(request):
master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
# Creating master 2...
- master2 = DirSrv(verbose=True)
+ master2 = DirSrv(verbose=False)
if installation1_prefix:
args_instance[SER_DEPLOYED_DIR] = installation1_prefix
args_instance[SER_HOST] = HOST_MASTER_2
@@ -489,7 +489,7 @@ def test_ticket47536(topology):
add_entry(topology.master2, 'master2', 'uid=m2user', 0, 5)
time.sleep(1)
-
+
log.info('##### Searching for entries on master1...')
entries = topology.master1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 10 == len(entries)
@@ -513,7 +513,7 @@ def test_ticket47536(topology):
entries = topology.master2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(uid=*)')
assert 20 == len(entries)
- db2ldifpl = '%s/sbin/db2ldif.pl' % os.getenv('PREFIX')
+ db2ldifpl = '%s/sbin/db2ldif.pl' % installation1_prefix
cmdline = [db2ldifpl, '-n', 'userRoot', '-Z', SERVERID_MASTER_1, '-D', DN_DM, '-w', PASSWORD]
log.info("##### db2ldif.pl -- %s" % (cmdline))
doAndPrintIt(cmdline)
diff --git a/dirsrvtests/tests/tickets/ticket48013_test.py b/dirsrvtests/tests/tickets/ticket48013_test.py
index 0ccdeba..730f929 100644
--- a/dirsrvtests/tests/tickets/ticket48013_test.py
+++ b/dirsrvtests/tests/tickets/ticket48013_test.py
@@ -1,3 +1,11 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
import os
import sys
import time
@@ -89,7 +97,8 @@ def test_ticket48013(topology):
topology.standalone.plugins.enable(name=PLUGIN_REPL_SYNC)
# Set everything up
- ldap_url = ldapurl.LDAPUrl('ldap://localhost:31389')
+ ldap_url = ldapurl.LDAPUrl('ldap://%s:%s' % (HOST_STANDALONE,
+ PORT_STANDALONE))
ldap_connection = SyncObject(ldap_url.initializeUrl())
# Authenticate
diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py b/dirsrvtests/tests/tickets/ticket48194_test.py
index f69d013..fa4fe72 100644
--- a/dirsrvtests/tests/tickets/ticket48194_test.py
+++ b/dirsrvtests/tests/tickets/ticket48194_test.py
@@ -1,9 +1,9 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
-# See LICENSE for details.
+# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
import os
@@ -26,7 +26,7 @@ CONFIG_DN = 'cn=config'
ENCRYPTION_DN = 'cn=encryption,%s' % CONFIG_DN
RSA = 'RSA'
RSA_DN = 'cn=%s,%s' % (RSA, ENCRYPTION_DN)
-LDAPSPORT = '10636'
+LDAPSPORT = '636'
SERVERCERT = 'Server-Cert'
plus_all_ecount = 0
plus_all_dcount = 0
@@ -326,7 +326,7 @@ def my_test_run_5(topology):
def my_test_run_6(topology):
"""
- Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
+ Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
All ciphers are disabled.
default allowWeakCipher
"""
diff --git a/dirsrvtests/tests/tickets/ticket48212_test.py b/dirsrvtests/tests/tickets/ticket48212_test.py
index 4da6939..82ec102 100644
--- a/dirsrvtests/tests/tickets/ticket48212_test.py
+++ b/dirsrvtests/tests/tickets/ticket48212_test.py
@@ -82,7 +82,7 @@ def runDbVerify(topology):
assert False
else:
topology.standalone.log.info("dbverify passed")
-
+
def reindexUidNumber(topology):
topology.standalone.log.info("\n\n +++++ reindex uidnumber +++++\n")
sbin_dir = get_sbin_dir(prefix=topology.standalone.prefix)
@@ -91,27 +91,13 @@ def reindexUidNumber(topology):
indexOUT = os.popen(indexCMD, "r")
topology.standalone.log.info("Running %s" % indexCMD)
- time.sleep(10)
+ time.sleep(15)
tailCMD = "tail -n 3 " + topology.standalone.errlog
tailOUT = os.popen(tailCMD, "r")
- running = True
- done = False
- while running:
- l = tailOUT.readline()
- if l == "":
- running = False
- elif "Finished indexing" in l:
- running = False
- done = True
- topology.standalone.log.info("%s" % l)
-
- if done:
- topology.standalone.log.info("%s done" % indexCMD)
- else:
- topology.standalone.log.fatal("%s did not finish" % indexCMD)
- assert False
-
+ assert 'Finished indexing' in tailOUT.read()
+
+
def test_ticket48212(topology):
"""
Import posixAccount entries.
diff --git a/dirsrvtests/tests/tickets/ticket48228_test.py b/dirsrvtests/tests/tickets/ticket48228_test.py
index bb20620..f3657c4 100644
--- a/dirsrvtests/tests/tickets/ticket48228_test.py
+++ b/dirsrvtests/tests/tickets/ticket48228_test.py
@@ -162,22 +162,22 @@ def check_passwd_inhistory(topology, user, cpw, passwd):
except ldap.LDAPError as e:
log.info(' The password ' + passwd + ' of user' + USER1_DN + ' in history: error ' + e.message['desc'])
inhistory = 1
+ time.sleep(1)
return inhistory
def update_passwd(topology, user, passwd, times):
cpw = passwd
- loop = 0
- while loop < times:
+ for i in range(times):
log.info(" Bind as {%s,%s}" % (user, cpw))
topology.standalone.simple_bind_s(user, cpw)
- cpw = 'password%d' % loop
+ cpw = 'password%d' % i
try:
topology.standalone.modify_s(user, [(ldap.MOD_REPLACE, 'userpassword', cpw)])
except ldap.LDAPError as e:
log.fatal('test_ticket48228: Failed to update the password ' + cpw + ' of user ' + user + ': error ' + e.message['desc'])
assert False
- loop += 1
+ time.sleep(1)
# checking the first password, which is supposed to be in history
inhistory = check_passwd_inhistory(topology, user, cpw, passwd)
@@ -227,15 +227,15 @@ def test_ticket48228_test_global_policy(topology):
inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
assert inhistory == 0
- log.info(' checking the second password, which is supposed NOT to be in history any more')
+ log.info(' checking the third password, which is supposed NOT to be in history any more')
cpw = tpw
tpw = 'password%d' % 1
inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
assert inhistory == 0
- log.info(' checking the third password, which is supposed to be in history')
+ log.info(' checking the sixth password, which is supposed to be in history')
cpw = tpw
- tpw = 'password%d' % 2
+ tpw = 'password%d' % 5
inhistory = check_passwd_inhistory(topology, USER1_DN, cpw, tpw)
assert inhistory == 1
@@ -286,19 +286,19 @@ def test_ticket48228_test_subtree_policy(topology):
log.info(' checking the second password, which is supposed NOT to be in history any more')
cpw = tpw
- tpw = 'password%d' % 0
+ tpw = 'password%d' % 1
inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
assert inhistory == 0
- log.info(' checking the second password, which is supposed NOT to be in history any more')
+ log.info(' checking the third password, which is supposed NOT to be in history any more')
cpw = tpw
- tpw = 'password%d' % 1
+ tpw = 'password%d' % 2
inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
assert inhistory == 0
- log.info(' checking the third password, which is supposed to be in history')
+ log.info(' checking the six password, which is supposed to be in history')
cpw = tpw
- tpw = 'password%d' % 2
+ tpw = 'password%d' % 5
inhistory = check_passwd_inhistory(topology, USER2_DN, cpw, tpw)
assert inhistory == 1
diff --git a/dirsrvtests/tests/tickets/ticket48383_test.py b/dirsrvtests/tests/tickets/ticket48383_test.py
index fc11cee..d05c7c2 100644
--- a/dirsrvtests/tests/tickets/ticket48383_test.py
+++ b/dirsrvtests/tests/tickets/ticket48383_test.py
@@ -26,7 +26,7 @@ class TopologyStandalone(object):
@pytest.fixture(scope="module")
def topology(request):
# Creating standalone instance ...
- standalone = DirSrv(verbose=True)
+ standalone = DirSrv(verbose=False)
args_instance[SER_HOST] = HOST_STANDALONE
args_instance[SER_PORT] = PORT_STANDALONE
args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
@@ -98,10 +98,13 @@ def test_ticket48383(topology):
except ldap.LDAPError as e:
log.fatal('test 48383: Failed to user%s: error %s ' % (i, e.message['desc']))
assert False
- # Set the dbsize really low.
- topology.standalone.backend.setProperties(bename=DEFAULT_BENAME,
- prop='nsslapd-cachememsize', values='1')
+ # Set the dbsize really low.
+ try:
+ topology.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE,
+ 'nsslapd-cachememsize', '1')])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to change nsslapd-cachememsize ' + e.message['desc'])
## Does ds try and set a minimum possible value for this?
## Yes: [16/Feb/2016:16:39:18 +1000] - WARNING: cache too small, increasing to 500K bytes
diff --git a/dirsrvtests/tests/tickets/ticket48665_test.py b/dirsrvtests/tests/tickets/ticket48665_test.py
index 702319c..9396b8a 100644
--- a/dirsrvtests/tests/tickets/ticket48665_test.py
+++ b/dirsrvtests/tests/tickets/ticket48665_test.py
@@ -76,8 +76,12 @@ def test_ticket48665(topology):
assert(DN_DM.lower() in result.lower())
# This has a magic hack to determine if we are in cn=config.
- topology.standalone.backend.setProperties(bename=DEFAULT_BENAME,
- prop='nsslapd-cachememsize', values='1')
+ try:
+ topology.standalone.modify_s(DEFAULT_BENAME, [(ldap.MOD_REPLACE,
+ 'nsslapd-cachememsize', '1')])
+ except ldap.LDAPError as e:
+ log.fatal('Failed to change nsslapd-cachememsize ' + e.message['desc'])
+
# Check the server has not commited seppuku.
result = topology.standalone.whoami_s()
assert(DN_DM.lower() in result.lower())
7 years, 10 months
ldap/servers
by Noriko Hosoi
ldap/servers/slapd/libglobs.c | 40 +++++++++++++------
ldap/servers/slapd/proto-slap.h | 6 +-
ldap/servers/slapd/pw.c | 59 ++++++++++++++++++-----------
ldap/servers/slapd/slap.h | 8 ++-
ldap/servers/slapd/slapi-plugin.h | 18 ++++++++
ldap/servers/slapd/slapi-private.h | 1
ldap/servers/slapd/time.c | 75 ++++++++++++++++++++++++++++++++++++-
ldap/servers/slapd/value.c | 16 +++++++
8 files changed, 183 insertions(+), 40 deletions(-)
New commits:
commit 78f730ac363761c159423543767e7cc4bc34d4ed
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Fri Jun 3 16:22:32 2016 -0700
Ticket #48833 - 389 showing inconsistent values for shadowMax and shadowWarning in 1.3.5.1
Description: Current passwordMaxAge, passwordMinAge, and passwordWarning
internally have the integer type which is too small to hold the maximum
shadowMax value 99999 days (== 8,639,913,600 secs > INT_MAX 2,147,483,647).
To allow shadowMax and its friends in sync with the values in the password
policy, this patch changes the type of the pw_maxage, pw_minage, and
pw_warning from long to long long.
If password policy is enabled, and the value of passwordMaxAge is greater
than 8639913600:
passwordMaxAge: 8639913600
then the search returns shadowMax 99999.
$ ldapsearch [...] -b "uid=tuser,ou=People,dc=example,dc=com" shadowMax
dn: uid=tuser,ou=People,dc=example,dc=com
shadowMax: 99999
Note: This patch is setting the initial value of passwordMaxAge to 99999 days.
cfg->pw_policy.pw_maxage = 8639913600; /* 99999 days */
Requires: This change requires the Doc and the DS Console updates.
https://fedorahosted.org/389/ticket/48833
Reviewed by mreynolds(a)redhat.com (Thank you soooooo much, Mark!)
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index a9334e4..aa77783 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -1546,7 +1546,11 @@ FrontendConfig_init () {
cfg->pw_policy.pw_maxrepeats = 0;
cfg->pw_policy.pw_mincategories = 3;
cfg->pw_policy.pw_mintokenlength = 3;
+#if defined(CPU_x86_64)
+ cfg->pw_policy.pw_maxage = 8639913600; /* 99999 days */
+#else
cfg->pw_policy.pw_maxage = 8640000; /* 100 days */
+#endif
cfg->pw_policy.pw_minage = 0;
cfg->pw_policy.pw_warning = _SEC_PER_DAY; /* 1 day */
init_pw_history = cfg->pw_policy.pw_history = LDAP_OFF;
@@ -4360,7 +4364,7 @@ config_set_auditfaillog( const char *attrname, char *value, char *errorbuf, int
int
config_set_pw_maxage( const char *attrname, char *value, char *errorbuf, int apply ) {
int retVal = LDAP_SUCCESS;
- long age;
+ long long age;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -4370,9 +4374,15 @@ config_set_pw_maxage( const char *attrname, char *value, char *errorbuf, int app
errno = 0;
/* age in seconds */
- age = parse_duration(value);
+ age = slapi_parse_duration_longlong(value);
- if ( age <= 0 || age > (MAX_ALLOWED_TIME_IN_SECS - current_time()) ) {
+ if (age <= 0 ||
+#if defined(CPU_x86_64)
+ age > (MAX_ALLOWED_TIME_IN_SECS_64 - current_time())
+#else
+ age > (MAX_ALLOWED_TIME_IN_SECS - current_time())
+#endif
+ ) {
slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: password maximum age \"%s\" is invalid.", attrname, value);
retVal = LDAP_OPERATIONS_ERROR;
return retVal;
@@ -4387,7 +4397,7 @@ config_set_pw_maxage( const char *attrname, char *value, char *errorbuf, int app
int
config_set_pw_minage( const char *attrname, char *value, char *errorbuf, int apply ) {
int retVal = LDAP_SUCCESS;
- long age;
+ long long age;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
if ( config_value_is_null( attrname, value, errorbuf, 1 )) {
@@ -4396,8 +4406,14 @@ config_set_pw_minage( const char *attrname, char *value, char *errorbuf, int app
errno = 0;
/* age in seconds */
- age = parse_duration(value);
- if ( age < 0 || age > (MAX_ALLOWED_TIME_IN_SECS - current_time()) ) {
+ age = slapi_parse_duration_longlong(value);
+ if (age < 0 ||
+#if defined(CPU_x86_64)
+ age > (MAX_ALLOWED_TIME_IN_SECS_64 - current_time())
+#else
+ age > (MAX_ALLOWED_TIME_IN_SECS - current_time())
+#endif
+ ) {
slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "%s: password minimum age \"%s\" is invalid.", attrname, value);
retVal = LDAP_OPERATIONS_ERROR;
return retVal;
@@ -4412,7 +4428,7 @@ config_set_pw_minage( const char *attrname, char *value, char *errorbuf, int app
int
config_set_pw_warning( const char *attrname, char *value, char *errorbuf, int apply ) {
int retVal = LDAP_SUCCESS;
- long sec;
+ long long sec;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -4422,7 +4438,7 @@ config_set_pw_warning( const char *attrname, char *value, char *errorbuf, int ap
errno = 0;
/* in seconds */
- sec = parse_duration(value);
+ sec = slapi_parse_duration_longlong(value);
if (errno == ERANGE || sec < 0) {
slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE,
@@ -5699,10 +5715,10 @@ config_get_auditfaillog( ){
return retVal;
}
-long
+long long
config_get_pw_maxage() {
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
- long retVal;
+ long long retVal;
CFG_LOCK_READ(slapdFrontendConfig);
retVal = slapdFrontendConfig->pw_policy.pw_maxage;
@@ -5710,7 +5726,7 @@ config_get_pw_maxage() {
return retVal;
}
-long
+long long
config_get_pw_minage(){
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -5723,7 +5739,7 @@ config_get_pw_minage(){
return retVal;
}
-long
+long long
config_get_pw_warning() {
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
long retVal;
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 1de1e38..7e0e632 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -490,9 +490,9 @@ char *config_get_accesslog();
char *config_get_errorlog();
char *config_get_auditlog();
char *config_get_auditfaillog();
-long config_get_pw_maxage();
-long config_get_pw_minage();
-long config_get_pw_warning();
+long long config_get_pw_maxage();
+long long config_get_pw_minage();
+long long config_get_pw_warning();
int config_get_errorlog_level();
int config_get_accesslog_level();
int config_get_auditlog_logging_enabled();
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index c52fa9b..498afd4 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -1821,7 +1821,7 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
slapi_attr_get_type(attr, &attr_name);
if (!strcasecmp(attr_name, "passwordminage")) {
if ((sval = attr_get_present_values(attr))) {
- pwdpolicy->pw_minage = slapi_value_get_timelong(*sval);
+ pwdpolicy->pw_minage = slapi_value_get_timelonglong(*sval);
if (-1 == pwdpolicy->pw_minage) {
LDAPDebug2Args(LDAP_DEBUG_ANY,
"Password Policy Entry%s: Invalid passwordMinAge: %s\n",
@@ -1833,7 +1833,7 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
else
if (!strcasecmp(attr_name, "passwordmaxage")) {
if ((sval = attr_get_present_values(attr))) {
- pwdpolicy->pw_maxage = slapi_value_get_timelong(*sval);
+ pwdpolicy->pw_maxage = slapi_value_get_timelonglong(*sval);
if (-1 == pwdpolicy->pw_maxage) {
LDAPDebug2Args(LDAP_DEBUG_ANY,
"Password Policy Entry%s: Invalid passwordMaxAge: %s\n",
@@ -1845,7 +1845,7 @@ new_passwdPolicy(Slapi_PBlock *pb, const char *dn)
else
if (!strcasecmp(attr_name, "passwordwarning")) {
if ((sval = attr_get_present_values(attr))) {
- pwdpolicy->pw_warning = slapi_value_get_timelong(*sval);
+ pwdpolicy->pw_warning = slapi_value_get_timelonglong(*sval);
if (-1 == pwdpolicy->pw_warning) {
LDAPDebug2Args(LDAP_DEBUG_ANY,
"Password Policy Entry%s: Invalid passwordWarning: %s\n",
@@ -2201,15 +2201,19 @@ check_pw_duration_value(const char *attr_name, char *value,
long minval, long maxval, char *errorbuf, size_t ebuflen)
{
int retVal = LDAP_SUCCESS;
- long age;
+ long long age;
- age = parse_duration(value);
+ age = slapi_parse_duration_longlong(value);
if (-1 == age) {
slapi_create_errormsg(errorbuf, ebuflen, "password minimum age \"%s\" is invalid. ", value);
retVal = LDAP_CONSTRAINT_VIOLATION;
} else if (0 == strcasecmp(CONFIG_PW_LOCKDURATION_ATTRIBUTE, attr_name)) {
if ( (age <= 0) ||
+#if defined(CPU_x86_64)
+ (age > (MAX_ALLOWED_TIME_IN_SECS_64 - current_time())) ||
+#else
(age > (MAX_ALLOWED_TIME_IN_SECS - current_time())) ||
+#endif
((-1 != minval) && (age < minval)) ||
((-1 != maxval) && (age > maxval))) {
slapi_create_errormsg(errorbuf, ebuflen, "%s: \"%s\" seconds is invalid. ", attr_name, value);
@@ -2217,7 +2221,11 @@ check_pw_duration_value(const char *attr_name, char *value,
}
} else {
if ( (age < 0) ||
+#if defined(CPU_x86_64)
+ (age > (MAX_ALLOWED_TIME_IN_SECS_64 - current_time())) ||
+#else
(age > (MAX_ALLOWED_TIME_IN_SECS - current_time())) ||
+#endif
((-1 != minval) && (age < minval)) ||
((-1 != maxval) && (age > maxval))) {
slapi_create_errormsg(errorbuf, ebuflen, "%s: \"%s\" seconds is invalid. ", attr_name, value);
@@ -2867,11 +2875,11 @@ add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e)
{
const char *dn = NULL;
passwdPolicy *pwpolicy = NULL;
- time_t shadowval = 0;
- time_t exptime = 0;
+ long long shadowval = 0;
+ long long exptime = 0;
Slapi_Mods *smods = NULL;
LDAPMod **mods;
- long sval;
+ long long sval;
int mod_num = 0;
char *shmin = NULL;
char *shmax = NULL;
@@ -2904,59 +2912,68 @@ add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e)
/* shadowMin - the minimum number of days required between password changes. */
if (pwpolicy->pw_minage > 0) {
shadowval = pwpolicy->pw_minage / _SEC_PER_DAY;
+ if (shadowval > _MAX_SHADOW) {
+ shadowval = _MAX_SHADOW;
+ }
} else {
shadowval = 0;
}
shmin = slapi_entry_attr_get_charptr(*e, "shadowMin");
if (shmin) {
- sval = strtol(shmin, NULL, 0);
+ sval = strtoll(shmin, NULL, 0);
if (sval != shadowval) {
slapi_ch_free_string(&shmin);
- shmin = slapi_ch_smprintf("%ld", shadowval);
+ shmin = slapi_ch_smprintf("%lld", shadowval);
mod_num++;
}
} else {
mod_num++;
- shmin = slapi_ch_smprintf("%ld", shadowval);
+ shmin = slapi_ch_smprintf("%lld", shadowval);
}
/* shadowMax - the maximum number of days for which the user password remains valid. */
if (pwpolicy->pw_maxage > 0) {
shadowval = pwpolicy->pw_maxage / _SEC_PER_DAY;
exptime = time_plus_sec(current_time(), pwpolicy->pw_maxage);
+ if (shadowval > _MAX_SHADOW) {
+ shadowval = _MAX_SHADOW;
+ }
} else {
- shadowval = 99999;
+ shadowval = _MAX_SHADOW;
}
shmax = slapi_entry_attr_get_charptr(*e, "shadowMax");
if (shmax) {
- sval = strtol(shmax, NULL, 0);
+ sval = strtoll(shmax, NULL, 0);
if (sval != shadowval) {
slapi_ch_free_string(&shmax);
- shmax = slapi_ch_smprintf("%ld", shadowval);
+ shmax = slapi_ch_smprintf("%lld", shadowval);
mod_num++;
}
} else {
mod_num++;
- shmax = slapi_ch_smprintf("%ld", shadowval);
+ shmax = slapi_ch_smprintf("%lld", shadowval);
}
/* shadowWarning - the number of days of advance warning given to the user before the user password expires. */
if (pwpolicy->pw_warning > 0) {
shadowval = pwpolicy->pw_warning / _SEC_PER_DAY;
+ if (shadowval > _MAX_SHADOW) {
+ shadowval = _MAX_SHADOW;
+ }
} else {
shadowval = 0;
}
shwarn = slapi_entry_attr_get_charptr(*e, "shadowWarning");
if (shwarn) {
- sval = strtol(shwarn, NULL, 0);
+ sval = strtoll(shwarn, NULL, 0);
if (sval != shadowval) {
slapi_ch_free_string(&shwarn);
- shwarn = slapi_ch_smprintf("%ld", shadowval);
+ shwarn = slapi_ch_smprintf("%lld", shadowval);
mod_num++;
}
} else {
mod_num++;
- shwarn = slapi_ch_smprintf("%ld", shadowval);
+ shwarn = slapi_ch_smprintf("%lld", shadowval);
}
/* shadowExpire - the date on which the user login will be disabled. */
@@ -2964,15 +2981,15 @@ add_shadow_ext_password_attrs(Slapi_PBlock *pb, Slapi_Entry **e)
shexp = slapi_entry_attr_get_charptr(*e, "shadowExpire");
exptime /= _SEC_PER_DAY;
if (shexp) {
- sval = strtol(shexp, NULL, 0);
+ sval = strtoll(shexp, NULL, 0);
if (sval != exptime) {
slapi_ch_free_string(&shexp);
- shexp = slapi_ch_smprintf("%ld", shadowval);
+ shexp = slapi_ch_smprintf("%lld", exptime);
mod_num++;
}
} else {
mod_num++;
- shexp = slapi_ch_smprintf("%ld", exptime);
+ shexp = slapi_ch_smprintf("%lld", exptime);
}
}
smods = slapi_mods_new();
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 52bdc8b..b74f9bd 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1573,9 +1573,9 @@ typedef struct passwordpolicyarray {
int pw_mintokenlength;
slapi_onoff_t pw_exp;
slapi_onoff_t pw_send_expiring;
- long pw_maxage;
- long pw_minage;
- long pw_warning;
+ long long pw_maxage;
+ long long pw_minage;
+ long long pw_warning;
slapi_onoff_t pw_history;
int pw_inhistory;
slapi_onoff_t pw_lockout;
@@ -2200,6 +2200,7 @@ typedef struct _slapdEntryPoints {
#define REFER_MODE_ON 1
#define MAX_ALLOWED_TIME_IN_SECS 2147483647
+#define MAX_ALLOWED_TIME_IN_SECS_64 9223372036854775807
typedef struct _slapdFrontendConfig {
#if SLAPI_CFG_USE_RWLOCK == 1
@@ -2589,5 +2590,6 @@ extern char *attr_dataversion;
#define RUV_STORAGE_ENTRY_UNIQUEID "ffffffff-ffffffff-ffffffff-ffffffff"
#define _SEC_PER_DAY 86400
+#define _MAX_SHADOW 99999
#endif /* _slap_h_ */
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 32c0db1..a7e544a 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -4836,6 +4836,24 @@ unsigned long long slapi_value_get_ulonglong(const Slapi_Value *value);
long slapi_value_get_timelong(const Slapi_Value *value);
/**
+ * Retrieves the value of a \c Slapi_Value structure as a long long integer.
+ *
+ * \param value Pointer to the value you wish to get as a long long integer.
+ * The value could end with D or d for days, H or h for hours,
+ * M or m for minutes, S or s for seconds, or no extension.
+ * \return A long long integer that corresponds to the value stored in the
+ * \c Slapi_Value structure.
+ * \return \c 0 if there is no value.
+ * \return \c -1 if the given value is invalid.
+ * \see slapi_value_get_int()
+ * \see slapi_value_get_uint()
+ * \see slapi_value_get_ulong()
+ * \see slapi_value_get_longlong()
+ * \see slapi_value_get_ulonglong()
+ */
+long long slapi_value_get_timelonglong(const Slapi_Value *value);
+
+/**
* Gets the length of a value contained in a \c Slapi_Value structure.
*
* \param value Pointer to the value of which you wish to get the length.
diff --git a/ldap/servers/slapd/slapi-private.h b/ldap/servers/slapd/slapi-private.h
index a5efdda..7636559 100644
--- a/ldap/servers/slapd/slapi-private.h
+++ b/ldap/servers/slapd/slapi-private.h
@@ -1335,6 +1335,7 @@ char *slapi_getSSLVersion_str(PRUint16 vnum, char *buf, size_t bufsize);
* Failure: -1
*/
time_t slapi_parse_duration(const char *value);
+long long slapi_parse_duration_longlong(const char *value);
int slapi_is_duration_valid(const char *value);
/**
diff --git a/ldap/servers/slapd/time.c b/ldap/servers/slapd/time.c
index d98b94b..4471a6f 100644
--- a/ldap/servers/slapd/time.c
+++ b/ldap/servers/slapd/time.c
@@ -517,7 +517,74 @@ parse_duration(char *value)
duration *= times;
bail:
if (duration == -1) {
- LDAPDebug1Arg(LDAP_DEBUG_ANY, "slapi_parse_duration: invalid duration (%s)\n", value?value:"null");
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "parse_duration: invalid duration (%s)\n", value?value:"null");
+ }
+ slapi_ch_free_string(&input);
+ return duration;
+}
+
+long long
+parse_duration_longlong(char *value)
+{
+ char *input = NULL;
+ char *endp;
+ long long duration = -1;
+ int times = 1;
+
+ if (NULL == value || '\0' == *value) {
+ goto bail;
+ }
+ input = slapi_ch_strdup(value);
+ endp = input + strlen(input) - 1;
+ while ((' ' == *endp || '\t' == *endp) && endp > input) {
+ endp--;
+ }
+ if ((endp == input) && !isdigit(*input)) {
+ goto bail;
+ }
+ switch ( *endp ) {
+ case 'w':
+ case 'W':
+ times = 60 * 60 * 24 * 7;
+ *endp = '\0';
+ break;
+ case 'd':
+ case 'D':
+ times = 60 * 60 * 24;
+ *endp = '\0';
+ break;
+ case 'h':
+ case 'H':
+ times = 60 * 60;
+ *endp = '\0';
+ break;
+ case 'm':
+ case 'M':
+ times = 60;
+ *endp = '\0';
+ break;
+ case 's':
+ case 'S':
+ times = 1;
+ *endp = '\0';
+ break;
+ default:
+ if (isdigit(*endp)) {
+ times = 1;
+ break;
+ } else {
+ goto bail;
+ }
+ }
+ duration = strtoll(input, &endp, 10);
+ if ( *endp != '\0' || errno == ERANGE ) {
+ duration = -1;
+ goto bail;
+ }
+ duration *= times;
+bail:
+ if (duration == -1) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "parse_duration_longlong: invalid duration (%s)\n", value?value:"null");
}
slapi_ch_free_string(&input);
return duration;
@@ -529,6 +596,12 @@ slapi_parse_duration(const char *value)
return (time_t)parse_duration((char *)value);
}
+long long
+slapi_parse_duration_longlong(const char *value)
+{
+ return parse_duration_longlong((char *)value);
+}
+
static int
is_valid_duration_unit(const char value)
{
diff --git a/ldap/servers/slapd/value.c b/ldap/servers/slapd/value.c
index 51beff7..6eaa4cf 100644
--- a/ldap/servers/slapd/value.c
+++ b/ldap/servers/slapd/value.c
@@ -509,6 +509,22 @@ slapi_value_get_timelong(const Slapi_Value *value)
return r;
}
+long long
+slapi_value_get_timelonglong(const Slapi_Value *value)
+{
+ long long r = 0;
+ if(value)
+ {
+ char *p;
+ p = slapi_ch_malloc(value->bv.bv_len + 1);
+ memcpy(p, value->bv.bv_val, value->bv.bv_len);
+ p[value->bv.bv_len] = '\0';
+ r = slapi_parse_duration_longlong(p);
+ slapi_ch_free_string(&p);
+ }
+ return r;
+}
+
int
slapi_value_compare(const Slapi_Attr *a,const Slapi_Value *v1,const Slapi_Value *v2)
{
7 years, 10 months
ldap/servers
by William Brown
ldap/servers/slapd/back-ldbm/ldbm_config.c | 29 +++++------------------------
ldap/servers/slapd/back-ldbm/start.c | 6 ++++--
ldap/servers/slapd/util.c | 9 ++++++---
3 files changed, 15 insertions(+), 29 deletions(-)
New commits:
commit 1b8baa83d9fdbecec5b6ce7182524eb7198cc578
Author: William Brown <firstyear(a)redhat.com>
Date: Thu Jun 9 12:56:36 2016 +1000
Ticket 48873 - Backend should accept the reduced cache allocation when issane == 1
Bug Description: We introduced a stricter cache checking mode, and corrected
our cache check behaviours. Sadly, at some sites, this will cause pain as
the admins may not be ready for this.
Fix Description: Make the error messages clearer. We also DO NOT prevent
server start up on an invalid config, but we do WARN that this behaviour
will change.
https://fedorahosted.org/389/ticket/48873
Author: wibrown
Review by: tbordaz (Thanks!)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index 37ce02d..11cc373 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -425,8 +425,8 @@ static int ldbm_config_dbcachesize_set(void *arg, void *value, char *errorbuf, i
} else if (val > li->li_dbcachesize) {
delta = val - li->li_dbcachesize;
if (!util_is_cachesize_sane(&delta)){
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: dbcachememsize value is too large.");
- LDAPDebug0Args(LDAP_DEBUG_ANY,"Error: dbcachememsize value is too large.\n");
+ slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: nsslapd-dbcachesize value is too large.");
+ LDAPDebug0Args(LDAP_DEBUG_ANY,"Error: nsslapd-dbcachesize value is too large.\n");
return LDAP_UNWILLING_TO_PERFORM;
}
}
@@ -481,36 +481,17 @@ static int ldbm_config_dbncache_set(void *arg, void *value, char *errorbuf, int
struct ldbminfo *li = (struct ldbminfo *) arg;
int retval = LDAP_SUCCESS;
size_t val = (size_t) ((uintptr_t)value);
- size_t delta = 0;
- /* There is an error here. We check the new val against our current mem-alloc
- * Issue is that we already are using system pages, so while our value *might*
- * be valid, we may reject it here due to the current procs page usage.
- *
- * So how do we solve this? If we are setting a SMALLER value than we
- * currently have ALLOW it, because we already passed the cache sanity.
- * If we are setting a LARGER value, we check the delta of the two, and make
- * sure that it is sane.
- */
-
if (apply) {
- if (val > li->li_dbncache) {
- delta = val - li->li_dbncache;
- if (!util_is_cachesize_sane(&delta)){
- slapi_create_errormsg(errorbuf, SLAPI_DSE_RETURNTEXT_SIZE, "Error: dbncache size value is too large.");
- LDAPDebug1Arg(LDAP_DEBUG_ANY,"Error: dbncache size value is too large.\n", val);
- return LDAP_UNWILLING_TO_PERFORM;
- }
- }
-
+
if (CONFIG_PHASE_RUNNING == phase) {
li->li_new_dbncache = val;
- LDAPDebug(LDAP_DEBUG_ANY, "New db ncache will not take affect until the server is restarted\n", 0, 0, 0);
+ LDAPDebug(LDAP_DEBUG_ANY, "New nsslapd-dbncache will not take affect until the server is restarted\n", 0, 0, 0);
} else {
li->li_new_dbncache = val;
li->li_dbncache = val;
}
-
+
}
return retval;
diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c
index acac2aa..299e5f0 100644
--- a/ldap/servers/slapd/back-ldbm/start.c
+++ b/ldap/servers/slapd/back-ldbm/start.c
@@ -266,7 +266,8 @@ ldbm_back_start( Slapi_PBlock *pb )
issane = util_is_cachesize_sane(&total_size);
if (!issane) {
/* Right, it's time to panic */
- LDAPDebug( LDAP_DEBUG_ANY, "CRITICAL: It is highly likely your memory configuration will EXCEED your systems memory.\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ANY, "CRITICAL: It is highly likely your memory configuration of all backends will EXCEED your systems memory.\n", 0, 0, 0 );
+ LDAPDebug( LDAP_DEBUG_ANY, "CRITICAL: In a future release this WILL prevent server start up. You MUST alter your configuration.\n", 0, 0, 0 );
LDAPDebug(LDAP_DEBUG_ANY,
"Total entry cache size: %llu B; "
"dbcache size: %llu B; "
@@ -278,7 +279,8 @@ ldbm_back_start( Slapi_PBlock *pb )
#endif
);
LDAPDebug(LDAP_DEBUG_ANY, msg, 0,0,0);
- return SLAPI_FAIL_GENERAL;
+ /* WB 2016 - This should be UNCOMMENTED in a future release */
+ /* return SLAPI_FAIL_GENERAL; */
}
diff --git a/ldap/servers/slapd/util.c b/ldap/servers/slapd/util.c
index 3f17461..e96b32b 100644
--- a/ldap/servers/slapd/util.c
+++ b/ldap/servers/slapd/util.c
@@ -1787,8 +1787,11 @@ int util_is_cachesize_sane(size_t *cachesize)
* the remaining system mem to the cachesize instead, and log a warning
*/
*cachesize = (size_t)((availpages * 0.75 ) * pagesize);
- slapi_log_error(SLAPI_LOG_FATAL, "util_is_cachesize_sane", "Available pages %lu, requested pages %lu, pagesize %lu\n", (unsigned long)availpages, (unsigned long)cachepages, (unsigned long)pagesize);
- slapi_log_error(SLAPI_LOG_FATAL, "util_is_cachesize_sane", "WARNING adjusted cachesize to %lu\n", (unsigned long)*cachesize);
+ /* These are now trace warnings, because it was to confusing to log this *then* kill the request anyway.
+ * Instead, we will let the caller worry about the notification, and we'll just use this in debugging and tracing.
+ */
+ slapi_log_error(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "Available pages %lu, requested pages %lu, pagesize %lu\n", (unsigned long)availpages, (unsigned long)cachepages, (unsigned long)pagesize);
+ slapi_log_error(SLAPI_LOG_TRACE, "util_is_cachesize_sane", "WARNING adjusted cachesize to %lu\n", (unsigned long)*cachesize);
}
#else
size_t freepages = 0;
@@ -1808,7 +1811,7 @@ int util_is_cachesize_sane(size_t *cachesize)
#endif
out:
if (!issane) {
- slapi_log_error(SLAPI_LOG_FATAL,"util_is_cachesize_sane", "WARNING: Cachesize not sane \n");
+ slapi_log_error(SLAPI_LOG_TRACE,"util_is_cachesize_sane", "WARNING: Cachesize not sane \n");
}
return issane;
7 years, 10 months
ldap/admin
by Mark Reynolds
ldap/admin/src/scripts/ns-accountstatus.pl.in | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
New commits:
commit 886a1ff0ae83b10bc8b0b45a803354fdc1d86e56
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Fri Jun 10 10:07:51 2016 -0400
Ticket 48815 - ns-accountstatus.pl - fix DN normalization
Bug Description: When processing an entry DN the script breaks the entry
up into separate parts which are used in a filter.
These parts were not being normalized which leads to
the script failing.
Fix Description: First improve the DN normalize function to strip both
starting and trailing spaces, then normalize the new
DN filter when searching for the backend.
https://fedorahosted.org/389/ticket/48815
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/admin/src/scripts/ns-accountstatus.pl.in b/ldap/admin/src/scripts/ns-accountstatus.pl.in
index 0d52ec9..37fc7fa 100644
--- a/ldap/admin/src/scripts/ns-accountstatus.pl.in
+++ b/ldap/admin/src/scripts/ns-accountstatus.pl.in
@@ -620,7 +620,7 @@ sub normalizeDN
@suffix=split /([,])/,$entry;
$result="";
foreach $part (@suffix){
- $part=~s/^ +//;
+ $part =~ s/^\s+|\s+$//g;
$part=~ tr/A-Z/a-z/;
$result="$result$part";
}
@@ -641,9 +641,11 @@ sub getSuffix
# Look if suffix is the suffix of the entry
# ldapsearch -s one -b "cn=mapping tree,cn=config" "cn=\"uid=jvedder,ou=People,dc=example,dc=com\""
#
+ my $filter = normalizeDN("@suffix");
+
debug("\tSuffix from the entry: #@suffixN#\n");
$info{base} = "cn=mapping tree, cn=config";
- $info{filter} = "cn=@suffix";
+ $info{filter} = "cn=$filter";
$info{scope} = "one";
$info{attrs} = "cn";
@mapping = DSUtil::ldapsrch_ext(%info);
7 years, 10 months
ldap/servers
by thierry bordaz
ldap/servers/slapd/extendop.c | 9 +++++++++
ldap/servers/slapd/pblock.c | 25 ++++++++++++++++++++++++-
ldap/servers/slapd/plugin.c | 14 +++++++++++++-
ldap/servers/slapd/slap.h | 8 +++++++-
ldap/servers/slapd/slapi-plugin.h | 8 ++++++--
5 files changed, 59 insertions(+), 5 deletions(-)
New commits:
commit 2860907fb86badac9151dd5d5c368c18c8e84b73
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Thu Jun 9 16:09:12 2016 +0200
Ticket 48880 - adding pre/post extop ability
Bug Description:
In some cases it is useful to allow a plugin to change the target_dn before processing the extop.
The extop does not support PRE/POST operation.
Fix Description:
extended operation would call SLAPI_PLUGIN_PRE_EXTOP_FN/SLAPI_PLUGIN_POST_EXTOP_FN callbacks.
The plugins register those callbacks as SLAPI_PLUGIN_PRE_EXTOP_FN/SLAPI_PLUGIN_POST_EXTOP_FN in their
init function. The callbacks are then stored into dedicated plugins lists SLAPI_PLUGIN_PREEXTOPERATION and
SLAPI_PLUGIN_POSTEXTOPERATION
http://fedorahosted.org/389/ticket/48880
Reviewed by: William Brown (thanks)
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/extendop.c b/ldap/servers/slapd/extendop.c
index 0a6a739..5459f9e 100644
--- a/ldap/servers/slapd/extendop.c
+++ b/ldap/servers/slapd/extendop.c
@@ -337,6 +337,11 @@ do_extended( Slapi_PBlock *pb )
rc = plugin_determine_exop_plugins( extoid, &p );
slapi_log_error(SLAPI_LOG_TRACE, NULL, "exendop.c plugin_determine_exop_plugins rc %d\n", rc);
+
+ if (plugin_call_plugins(pb, SLAPI_PLUGIN_PRE_EXTOP_FN) != SLAPI_PLUGIN_SUCCESS) {
+ goto free_and_return;
+ }
+
if (rc == SLAPI_PLUGIN_EXTENDEDOP && p != NULL) {
slapi_log_error(SLAPI_LOG_TRACE, NULL, "extendop.c calling plugin ... \n");
rc = plugin_call_exop_plugins( pb, p);
@@ -385,6 +390,10 @@ do_extended( Slapi_PBlock *pb )
} /* if be */
}
+ if (plugin_call_plugins(pb, SLAPI_PLUGIN_POST_EXTOP_FN) != SLAPI_PLUGIN_SUCCESS) {
+ goto free_and_return;
+ }
+
if ( SLAPI_PLUGIN_EXTENDED_SENT_RESULT != rc ) {
if ( SLAPI_PLUGIN_EXTENDED_NOT_HANDLED == rc ) {
lderr = LDAP_PROTOCOL_ERROR; /* no plugin handled the op */
diff --git a/ldap/servers/slapd/pblock.c b/ldap/servers/slapd/pblock.c
index f6d3af0..7205337 100644
--- a/ldap/servers/slapd/pblock.c
+++ b/ldap/servers/slapd/pblock.c
@@ -828,6 +828,12 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
}
(*(IFP *)value) = pblock->pb_plugin->plg_preresult;
break;
+ case SLAPI_PLUGIN_PRE_EXTOP_FN:
+ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREEXTOPERATION) {
+ return( -1 );
+ }
+ (*(IFP *)value) = pblock->pb_plugin->plg_preextop;
+ break;
/* postoperation plugin functions */
case SLAPI_PLUGIN_POST_BIND_FN:
@@ -908,7 +914,12 @@ slapi_pblock_get( Slapi_PBlock *pblock, int arg, void *value )
}
(*(IFP *)value) = pblock->pb_plugin->plg_postresult;
break;
-
+ case SLAPI_PLUGIN_POST_EXTOP_FN:
+ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTEXTOPERATION) {
+ return( -1 );
+ }
+ (*(IFP *)value) = pblock->pb_plugin->plg_postextop;
+ break;
case SLAPI_ENTRY_PRE_OP:
(*(Slapi_Entry **)value) = pblock->pb_pre_op_entry;
break;
@@ -2474,6 +2485,12 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
}
pblock->pb_plugin->plg_preresult = (IFP) value;
break;
+ case SLAPI_PLUGIN_PRE_EXTOP_FN:
+ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_PREEXTOPERATION) {
+ return( -1 );
+ }
+ pblock->pb_plugin->plg_preextop = (IFP) value;
+ break;
/* postoperation plugin functions */
case SLAPI_PLUGIN_POST_BIND_FN:
@@ -2554,6 +2571,12 @@ slapi_pblock_set( Slapi_PBlock *pblock, int arg, void *value )
}
pblock->pb_plugin->plg_postresult = (IFP) value;
break;
+ case SLAPI_PLUGIN_POST_EXTOP_FN:
+ if (pblock->pb_plugin->plg_type != SLAPI_PLUGIN_POSTEXTOPERATION) {
+ return( -1 );
+ }
+ pblock->pb_plugin->plg_postextop = (IFP) value;
+ break;
/* backend preoperation plugin */
case SLAPI_PLUGIN_BE_PRE_MODIFY_FN:
diff --git a/ldap/servers/slapd/plugin.c b/ldap/servers/slapd/plugin.c
index 440be98..de907d8 100644
--- a/ldap/servers/slapd/plugin.c
+++ b/ldap/servers/slapd/plugin.c
@@ -420,6 +420,12 @@ plugin_call_plugins( Slapi_PBlock *pb, int whichfunction )
plugin_list_number= PLUGIN_LIST_BETXNPOSTOPERATION;
do_op = 1; /* always allow backend callbacks (even during startup) */
break;
+ case SLAPI_PLUGIN_PRE_EXTOP_FN:
+ plugin_list_number= PLUGIN_LIST_PREEXTENDED_OPERATION;
+ break;
+ case SLAPI_PLUGIN_POST_EXTOP_FN:
+ plugin_list_number= PLUGIN_LIST_POSTEXTENDED_OPERATION;
+ break;
}
if(plugin_list_number!=-1 && do_op)
@@ -2311,7 +2317,13 @@ plugin_get_type_and_list(
} else if ( strcasecmp( plugintype, "betxnextendedop" ) == 0 ) {
*type = SLAPI_PLUGIN_BETXNEXTENDEDOP;
plugin_list_index= PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION;
- } else {
+ } else if ( strcasecmp( plugintype, "preextendedop" ) == 0 ) {
+ *type = SLAPI_PLUGIN_PREEXTOPERATION;
+ plugin_list_index= PLUGIN_LIST_PREEXTENDED_OPERATION;
+ } else if ( strcasecmp( plugintype, "postextendedop" ) == 0 ) {
+ *type = SLAPI_PLUGIN_POSTEXTOPERATION;
+ plugin_list_index= PLUGIN_LIST_POSTEXTENDED_OPERATION;
+ } else {
return( 1 ); /* unknown plugin type - pass to backend */
}
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index ea834f1..52bdc8b 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -688,7 +688,9 @@ struct matchingRuleList {
#define PLUGIN_LIST_INTERNAL_POSTOPERATION 6
#define PLUGIN_LIST_EXTENDED_OPERATION 7
#define PLUGIN_LIST_BE_TXN_EXTENDED_OPERATION 8
-#define PLUGIN_LIST_BACKEND_MAX 9
+#define PLUGIN_LIST_PREEXTENDED_OPERATION 9
+#define PLUGIN_LIST_POSTEXTENDED_OPERATION 10
+#define PLUGIN_LIST_BACKEND_MAX 11
/* Global Plugins */
#define PLUGIN_LIST_ACL 10
@@ -908,11 +910,15 @@ struct slapdplugin {
char **plg_un_pe_exoids; /* exop oids */
char **plg_un_pe_exnames; /* exop names (may be NULL) */
IFP plg_un_pe_exhandler; /* handler */
+ IFP plg_un_pe_pre_exhandler; /* pre extop */
+ IFP plg_un_pe_post_exhandler; /* post extop */
IFP plg_un_pe_be_exhandler; /* handler to retrieve the be name for the operation */
} plg_un_pe;
#define plg_exoids plg_un.plg_un_pe.plg_un_pe_exoids
#define plg_exnames plg_un.plg_un_pe.plg_un_pe_exnames
#define plg_exhandler plg_un.plg_un_pe.plg_un_pe_exhandler
+#define plg_preextop plg_un.plg_un_pe.plg_un_pe_pre_exhandler
+#define plg_postextop plg_un.plg_un_pe.plg_un_pe_post_exhandler
#define plg_be_exhandler plg_un.plg_un_pe.plg_un_pe_be_exhandler
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 856432e..32c0db1 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -6762,7 +6762,9 @@ time_t slapi_current_time( void );
#define SLAPI_PLUGIN_INDEX 18
#define SLAPI_PLUGIN_BETXNPREOPERATION 19
#define SLAPI_PLUGIN_BETXNPOSTOPERATION 20
-#define SLAPI_PLUGIN_BETXNEXTENDEDOP 21
+#define SLAPI_PLUGIN_BETXNEXTENDEDOP 21
+#define SLAPI_PLUGIN_PREEXTOPERATION 22
+#define SLAPI_PLUGIN_POSTEXTOPERATION 23
/*
* special return values for extended operation plugins (zero or positive
@@ -6910,6 +6912,7 @@ typedef struct slapi_plugindesc {
#define SLAPI_PLUGIN_PRE_ENTRY_FN 410
#define SLAPI_PLUGIN_PRE_REFERRAL_FN 411
#define SLAPI_PLUGIN_PRE_RESULT_FN 412
+#define SLAPI_PLUGIN_PRE_EXTOP_FN 413
/* internal preoperation plugin functions */
#define SLAPI_PLUGIN_INTERNAL_PRE_ADD_FN 420
@@ -6946,7 +6949,8 @@ typedef struct slapi_plugindesc {
#define SLAPI_PLUGIN_POST_ENTRY_FN 510
#define SLAPI_PLUGIN_POST_REFERRAL_FN 511
#define SLAPI_PLUGIN_POST_RESULT_FN 512
-#define SLAPI_PLUGIN_POST_SEARCH_FAIL_FN 513
+#define SLAPI_PLUGIN_POST_SEARCH_FAIL_FN 513
+#define SLAPI_PLUGIN_POST_EXTOP_FN 514
/* internal preoperation plugin functions */
#define SLAPI_PLUGIN_INTERNAL_POST_ADD_FN 520
7 years, 10 months
aclocal.m4 config.h.in configure ldap/admin Makefile.am Makefile.in man/man1
by William Brown
Makefile.am | 3
Makefile.in | 74 +++++--
aclocal.m4 | 194 ++++++++++++-------
config.h.in | 3
configure | 342 ++++++++++++++++------------------
ldap/admin/src/scripts/readnsstate.in | 100 +++++++++
man/man1/readnsstate.1 | 50 ++++
7 files changed, 502 insertions(+), 264 deletions(-)
New commits:
commit f650df3e8a92d227dae87b7545fa2c78c60350d6
Author: William Brown <firstyear(a)redhat.com>
Date: Fri May 13 10:39:42 2016 +1000
Ticket 48449 - Import readNSState from richm's repo
Bug Description: We reference readnsstate in a number of documents. We should
include it in the repo
Fix Description: import and add readnsstate.py
https://fedorahosted.org/389/ticket/48449
Author: wibrown
Review by: nhosoi, vashirov (Thanks!)
diff --git a/Makefile.am b/Makefile.am
index cad1b61..2468aa7 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -155,7 +155,7 @@ CLEANFILES = dberrstrs.h ns-slapd.properties \
ldap/admin/src/scripts/schema-reload.pl ldap/admin/src/scripts/syntax-validate.pl \
ldap/admin/src/scripts/usn-tombstone-cleanup.pl ldap/admin/src/scripts/verify-db.pl \
ldap/admin/src/scripts/ds_selinux_port_query ldap/admin/src/scripts/ds_selinux_enabled \
- ldap/admin/src/scripts/dbverify \
+ ldap/admin/src/scripts/dbverify ldap/admin/src/scripts/readnsstate \
$(NULL)
clean-local:
@@ -637,6 +637,7 @@ bin_SCRIPTS = ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl \
ldap/admin/src/scripts/cl-dump.pl \
wrappers/repl-monitor \
ldap/admin/src/scripts/repl-monitor.pl \
+ ldap/admin/src/scripts/readnsstate \
ldap/admin/src/scripts/ds-logpipe.py
# SCRIPTS makes them executables - these are perl modules
diff --git a/Makefile.in b/Makefile.in
index a45a0d6..200aed3 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1,7 +1,7 @@
-# Makefile.in generated by automake 1.13.4 from Makefile.am.
+# Makefile.in generated by automake 1.15 from Makefile.am.
# @configure_input@
-# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+# Copyright (C) 1994-2014 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -20,7 +20,17 @@
VPATH = @srcdir@
-am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__is_gnu_make = { \
+ if test -z '$(MAKELEVEL)'; then \
+ false; \
+ elif test -n '$(MAKE_HOST)'; then \
+ true; \
+ elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
+ true; \
+ else \
+ false; \
+ fi; \
+}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
@@ -93,12 +103,6 @@ noinst_PROGRAMS = makstrdb$(EXEEXT)
@SOLARIS_TRUE@am__append_2 = -lrt
@SOLARIS_TRUE@am__append_3 = ldap/servers/slapd/tools/ldclt/opCheck.c
subdir = .
-DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
- $(top_srcdir)/configure $(am__configure_deps) \
- $(srcdir)/config.h.in $(top_srcdir)/rpm/389-ds-base.spec.in \
- depcomp $(dist_man_MANS) $(dist_noinst_DATA) \
- $(dist_noinst_HEADERS) $(serverinc_HEADERS) README compile \
- config.guess config.sub install-sh missing ltmain.sh
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \
@@ -113,6 +117,9 @@ am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/systemd.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
+DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
+ $(am__configure_deps) $(dist_noinst_DATA) \
+ $(dist_noinst_HEADERS) $(serverinc_HEADERS) $(am__DIST_COMMON)
am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
configure.lineno config.status.lineno
mkinstalldirs = $(install_sh) -d
@@ -1255,6 +1262,10 @@ ETAGS = etags
CTAGS = ctags
CSCOPE = cscope
AM_RECURSIVE_TARGETS = cscope
+am__DIST_COMMON = $(dist_man_MANS) $(srcdir)/Makefile.in \
+ $(srcdir)/config.h.in $(top_srcdir)/rpm/389-ds-base.spec.in \
+ README compile config.guess config.sub depcomp install-sh \
+ ltmain.sh missing
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
distdir = $(PACKAGE)-$(VERSION)
top_distdir = $(distdir)
@@ -1465,6 +1476,7 @@ program_transform_name = @program_transform_name@
propertydir = $(datadir)@propertydir@
psdir = @psdir@
pythondir = $(libdir)@pythondir@
+pythonexec = @pythonexec@
sampledatadir = $(datadir)@sampledatadir@
sasl_inc = @sasl_inc@
sasl_lib = @sasl_lib@
@@ -1523,6 +1535,8 @@ DS_DEFINES = -DBUILD_NUM=$(BUILDNUM) -DVENDOR="\"$(vendor)\"" -DBRAND="\"$(brand
@enable_nunc_stans_FALSE@NUNC_STANS_ON = 0
@enable_nunc_stans_TRUE@NUNC_STANS_ON = 1
DS_INCLUDES = -I$(srcdir)/ldap/include -I$(srcdir)/ldap/servers/slapd -I$(srcdir)/include -I. $(NUNC_STANS_INCLUDES)
+@enable_asan_FALSE@ASAN_ON = 0
+@enable_asan_TRUE@ASAN_ON = 1
# these paths are dependent on the settings of prefix and exec_prefix which may be specified
# at make time. So we cannot use AC_DEFINE in the configure.ac because that would set the
@@ -1628,7 +1642,8 @@ CLEANFILES = dberrstrs.h ns-slapd.properties \
ldap/admin/src/scripts/ns-inactivate.pl ldap/admin/src/scripts/ns-newpwpolicy.pl \
ldap/admin/src/scripts/schema-reload.pl ldap/admin/src/scripts/syntax-validate.pl \
ldap/admin/src/scripts/usn-tombstone-cleanup.pl ldap/admin/src/scripts/verify-db.pl \
- ldap/admin/src/scripts/dbverify \
+ ldap/admin/src/scripts/ds_selinux_port_query ldap/admin/src/scripts/ds_selinux_enabled \
+ ldap/admin/src/scripts/dbverify ldap/admin/src/scripts/readnsstate \
$(NULL)
taskdir = $(datadir)@scripttemplatedir@
@@ -2027,6 +2042,8 @@ sbin_SCRIPTS = ldap/admin/src/scripts/setup-ds.pl \
ldap/admin/src/scripts/dbverify \
ldap/admin/src/scripts/upgradedb \
ldap/admin/src/scripts/dbmon.sh \
+ ldap/admin/src/scripts/ds_selinux_enabled \
+ ldap/admin/src/scripts/ds_selinux_port_query \
wrappers/ldap-agent
bin_SCRIPTS = ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl \
@@ -2044,6 +2061,7 @@ bin_SCRIPTS = ldap/servers/slapd/tools/rsearch/scripts/dbgen.pl \
ldap/admin/src/scripts/cl-dump.pl \
wrappers/repl-monitor \
ldap/admin/src/scripts/repl-monitor.pl \
+ ldap/admin/src/scripts/readnsstate \
ldap/admin/src/scripts/ds-logpipe.py
@@ -3109,6 +3127,7 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS
@BUNDLE_FALSE@ -e 's,@enable_autobind\@,$(enable_autobind),g' \
@BUNDLE_FALSE@ -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \
@BUNDLE_FALSE@ -e 's,@enable_presence\@,$(enable_presence),g' \
+@BUNDLE_FALSE@ -e 's,@enable_asan\@,$(ASAN_ON),g' \
@BUNDLE_FALSE@ -e 's,@ECHO_N\@,$(ECHO_N),g' \
@BUNDLE_FALSE@ -e 's,@ECHO_C\@,$(ECHO_C),g' \
@BUNDLE_FALSE@ -e 's,@brand\@,$(brand),g' \
@@ -3127,6 +3146,7 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS
@BUNDLE_FALSE@ -e 's,@with_selinux\@,@with_selinux@,g' \
@BUNDLE_FALSE@ -e 's,@with_tmpfiles_d\@,@with_tmpfiles_d@,g' \
@BUNDLE_FALSE@ -e 's,@perlexec\@,@perlexec@,g' \
+@BUNDLE_FALSE@ -e 's,@pythonexec\@,@pythonexec@,g' \
@BUNDLE_FALSE@ -e 's,@sttyexec\@,@sttyexec@,g' \
@BUNDLE_FALSE@ -e 's,@initconfigdir\@,$(initconfigdir),g' \
@BUNDLE_FALSE@ -e 's,@updatedir\@,$(updatedir),g' \
@@ -3186,6 +3206,7 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS
@BUNDLE_TRUE@ -e 's,@enable_autobind\@,$(enable_autobind),g' \
@BUNDLE_TRUE@ -e 's,@enable_auto_dn_suffix\@,$(enable_auto_dn_suffix),g' \
@BUNDLE_TRUE@ -e 's,@enable_presence\@,$(enable_presence),g' \
+@BUNDLE_TRUE@ -e 's,@enable_asan\@,$(ASAN_ON),g' \
@BUNDLE_TRUE@ -e 's,@ECHO_N\@,$(ECHO_N),g' \
@BUNDLE_TRUE@ -e 's,@ECHO_C\@,$(ECHO_C),g' \
@BUNDLE_TRUE@ -e 's,@brand\@,$(brand),g' \
@@ -3204,6 +3225,7 @@ rsearch_bin_LDADD = $(NSPR_LINK) $(NSS_LINK) $(LDAPSDK_LINK) $(SASL_LINK) $(LIBS
@BUNDLE_TRUE@ -e 's,@with_selinux\@,@with_selinux@,g' \
@BUNDLE_TRUE@ -e 's,@with_tmpfiles_d\@,@with_tmpfiles_d@,g' \
@BUNDLE_TRUE@ -e 's,@perlexec\@,@perlexec@,g' \
+@BUNDLE_TRUE@ -e 's,@pythonexec\@,@pythonexec@,g' \
@BUNDLE_TRUE@ -e 's,@sttyexec\@,@sttyexec@,g' \
@BUNDLE_TRUE@ -e 's,@initconfigdir\@,$(initconfigdir),g'\
@BUNDLE_TRUE@ -e 's,@updatedir\@,$(updatedir),g' \
@@ -3233,7 +3255,6 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign Makefile
-.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
@@ -3254,8 +3275,8 @@ $(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
$(am__aclocal_m4_deps):
config.h: stamp-h1
- @if test ! -f $@; then rm -f stamp-h1; else :; fi
- @if test ! -f $@; then $(MAKE) $(AM_MAKEFLAGS) stamp-h1; else :; fi
+ @test -f $@ || rm -f stamp-h1
+ @test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
@rm -f stamp-h1
@@ -10352,10 +10373,16 @@ dist-xz: distdir
$(am__post_remove_distdir)
dist-tarZ: distdir
+ @echo WARNING: "Support for distribution archives compressed with" \
+ "legacy program 'compress' is deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z
$(am__post_remove_distdir)
dist-shar: distdir
+ @echo WARNING: "Support for shar distribution archives is" \
+ "deprecated." >&2
+ @echo WARNING: "It will be removed altogether in Automake 2.0" >&2
shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz
$(am__post_remove_distdir)
@@ -10390,16 +10417,17 @@ distcheck: dist
esac
chmod -R a-w $(distdir)
chmod u+w $(distdir)
- mkdir $(distdir)/_build $(distdir)/_inst
+ mkdir $(distdir)/_build $(distdir)/_build/sub $(distdir)/_inst
chmod a-w $(distdir)
test -d $(distdir)/_build || exit 0; \
dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
&& dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
&& am__cwd=`pwd` \
- && $(am__cd) $(distdir)/_build \
- && ../configure --srcdir=.. --prefix="$$dc_install_base" \
+ && $(am__cd) $(distdir)/_build/sub \
+ && ../../configure \
$(AM_DISTCHECK_CONFIGURE_FLAGS) \
$(DISTCHECK_CONFIGURE_FLAGS) \
+ --srcdir=../.. --prefix="$$dc_install_base" \
&& $(MAKE) $(AM_MAKEFLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) dvi \
&& $(MAKE) $(AM_MAKEFLAGS) check \
@@ -10727,6 +10755,8 @@ uninstall-man: uninstall-man1 uninstall-man8
uninstall-systemdsystemunitDATA uninstall-taskSCRIPTS \
uninstall-updateDATA uninstall-updateSCRIPTS
+.PRECIOUS: Makefile
+
clean-local:
-rm -rf dist
@@ -10775,9 +10805,13 @@ ns-slapd.properties: makstrdb
$(fixupcmd) $^ > $@
# yes, that is an @ in the filename . . .
-%/$(PACKAGE_NAME)@.service: %/systemd.template.service.in
- if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
- $(fixupcmd) $^ > $@
+@enable_asan_TRUE@%/$(PACKAGE_NAME)@.service: %/systemd.template.asan.service.in
+@enable_asan_TRUE@ if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
+@enable_asan_TRUE@ $(fixupcmd) $^ > $@
+# yes, that is an @ in the filename . . .
+@enable_asan_FALSE@%/$(PACKAGE_NAME)@.service: %/systemd.template.service.in
+@enable_asan_FALSE@ if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
+@enable_asan_FALSE@ $(fixupcmd) $^ > $@
%/$(PACKAGE_NAME).systemd: %/systemd.template.sysconfig
if [ ! -d $(dir $@) ] ; then mkdir -p $(dir $@) ; fi
@@ -10822,7 +10856,7 @@ rpmbrprep: dist-bzip2 rpmroot
cp $(distdir).tar.bz2 $(RPMBUILD)/SOURCES
cp $(srcdir)/rpm/389-ds-base-git.sh $(RPMBUILD)/SOURCES
cp $(srcdir)/rpm/389-ds-base-devel.README $(RPMBUILD)/SOURCES
- sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec
+ sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec
# Requires rpmdevtools. Consider making this a dependancy of rpms.
rpmsources: rpmbrprep
diff --git a/aclocal.m4 b/aclocal.m4
index 58e04a0..8d926b8 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -1,6 +1,6 @@
-# generated automatically by aclocal 1.13.4 -*- Autoconf -*-
+# generated automatically by aclocal 1.15 -*- Autoconf -*-
-# Copyright (C) 1996-2013 Free Software Foundation, Inc.
+# Copyright (C) 1996-2014 Free Software Foundation, Inc.
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -20,7 +20,7 @@ You have another version of autoconf. It may work, but is not guaranteed to.
If you have problems, you may need to regenerate the build system entirely.
To do so, use the procedure documented by the package, typically 'autoreconf'.])])
-# Copyright (C) 2002-2013 Free Software Foundation, Inc.
+# Copyright (C) 2002-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -32,10 +32,10 @@ To do so, use the procedure documented by the package, typically 'autoreconf'.])
# generated from the m4 files accompanying Automake X.Y.
# (This private macro should not be called outside this file.)
AC_DEFUN([AM_AUTOMAKE_VERSION],
-[am__api_version='1.13'
+[am__api_version='1.15'
dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
dnl require some minimum version. Point them to the right macro.
-m4_if([$1], [1.13.4], [],
+m4_if([$1], [1.15], [],
[AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
])
@@ -51,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])
# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
-[AM_AUTOMAKE_VERSION([1.13.4])dnl
+[AM_AUTOMAKE_VERSION([1.15])dnl
m4_ifndef([AC_AUTOCONF_VERSION],
[m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
# Figure out how to run the assembler. -*- Autoconf -*-
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -78,7 +78,7 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
# AM_AUX_DIR_EXPAND -*- Autoconf -*-
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -123,15 +123,14 @@ _AM_IF_OPTION([no-dependencies],, [_AM_DEPENDENCIES([CCAS])])dnl
# configured tree to be moved without reconfiguration.
AC_DEFUN([AM_AUX_DIR_EXPAND],
-[dnl Rely on autoconf to set up CDPATH properly.
-AC_PREREQ([2.50])dnl
-# expand $ac_aux_dir to an absolute path
-am_aux_dir=`cd $ac_aux_dir && pwd`
+[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
])
# AM_COND_IF -*- Autoconf -*-
-# Copyright (C) 2008-2013 Free Software Foundation, Inc.
+# Copyright (C) 2008-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -168,7 +167,7 @@ fi[]dnl
# AM_CONDITIONAL -*- Autoconf -*-
-# Copyright (C) 1997-2013 Free Software Foundation, Inc.
+# Copyright (C) 1997-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -199,7 +198,7 @@ AC_CONFIG_COMMANDS_PRE(
Usually this means the macro was only invoked conditionally.]])
fi])])
-# Copyright (C) 1999-2013 Free Software Foundation, Inc.
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -390,7 +389,7 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
# Generate code to set up dependency tracking. -*- Autoconf -*-
-# Copyright (C) 1999-2013 Free Software Foundation, Inc.
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -466,7 +465,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# Do all the work for Automake. -*- Autoconf -*-
-# Copyright (C) 1996-2013 Free Software Foundation, Inc.
+# Copyright (C) 1996-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -475,6 +474,12 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
# This macro actually does too much. Some checks are only needed if
# your package does certain things. But this isn't really a big deal.
+dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
+m4_define([AC_PROG_CC],
+m4_defn([AC_PROG_CC])
+[_AM_PROG_CC_C_O
+])
+
# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
# AM_INIT_AUTOMAKE([OPTIONS])
# -----------------------------------------------
@@ -550,8 +555,8 @@ AC_REQUIRE([AC_PROG_MKDIR_P])dnl
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00001.html>
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
-# We need awk for the "check" target. The system "awk" is bad on
-# some platforms.
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
AC_REQUIRE([AC_PROG_AWK])dnl
AC_REQUIRE([AC_PROG_MAKE_SET])dnl
AC_REQUIRE([AM_SET_LEADING_DOT])dnl
@@ -583,6 +588,51 @@ dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
AC_CONFIG_COMMANDS_PRE(dnl
[m4_provide_if([_AM_COMPILER_EXEEXT],
[AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
+
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake(a)gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
+ fi
+fi
+dnl The trailing newline in this macro's definition is deliberate, for
+dnl backward compatibility and to allow trailing 'dnl'-style comments
+dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841.
])
dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not
@@ -591,7 +641,6 @@ dnl mangled by Autoconf and run in a shell conditional statement.
m4_define([_AC_COMPILER_EXEEXT],
m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
-
# When config.status generates a header, we must update the stamp-h file.
# This file resides in the same directory as the config header
# that is generated. The stamp files are numbered to have different names.
@@ -613,7 +662,7 @@ for _am_header in $config_headers :; do
done
echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -624,7 +673,7 @@ echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_co
# Define $install_sh.
AC_DEFUN([AM_PROG_INSTALL_SH],
[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
-if test x"${install_sh}" != xset; then
+if test x"${install_sh+set}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
@@ -634,7 +683,7 @@ if test x"${install_sh}" != xset; then
fi
AC_SUBST([install_sh])])
-# Copyright (C) 2003-2013 Free Software Foundation, Inc.
+# Copyright (C) 2003-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -656,7 +705,7 @@ AC_SUBST([am__leading_dot])])
# Add --enable-maintainer-mode option to configure. -*- Autoconf -*-
# From Jim Meyering
-# Copyright (C) 1996-2013 Free Software Foundation, Inc.
+# Copyright (C) 1996-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -691,7 +740,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
# Check to see how 'make' treats includes. -*- Autoconf -*-
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -739,41 +788,9 @@ AC_MSG_RESULT([$_am_result])
rm -f confinc confmf
])
-# Copyright (C) 1999-2013 Free Software Foundation, Inc.
-#
-# This file is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# AM_PROG_CC_C_O
-# --------------
-# Like AC_PROG_CC_C_O, but changed for automake.
-AC_DEFUN([AM_PROG_CC_C_O],
-[AC_REQUIRE([AC_PROG_CC_C_O])dnl
-AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
-AC_REQUIRE_AUX_FILE([compile])dnl
-# FIXME: we rely on the cache variable name because
-# there is no other way.
-set dummy $CC
-am_cc=`echo $[2] | sed ['s/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/']`
-eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o
-if test "$am_t" != yes; then
- # Losing compiler, so override with the script.
- # FIXME: It is wrong to rewrite CC.
- # But if we don't then we get into trouble of one sort or another.
- # A longer-term fix would be to have automake use am__CC in this case,
- # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
- CC="$am_aux_dir/compile $CC"
-fi
-dnl Make sure AC_PROG_CC is never called again, or it will override our
-dnl setting of CC.
-m4_define([AC_PROG_CC],
- [m4_fatal([AC_PROG_CC cannot be called after AM_PROG_CC_C_O])])
-])
-
# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*-
-# Copyright (C) 1997-2013 Free Software Foundation, Inc.
+# Copyright (C) 1997-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -812,7 +829,7 @@ fi
# Helper functions for option handling. -*- Autoconf -*-
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -841,7 +858,54 @@ AC_DEFUN([_AM_SET_OPTIONS],
AC_DEFUN([_AM_IF_OPTION],
[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 1999-2014 Free Software Foundation, Inc.
+#
+# This file is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# _AM_PROG_CC_C_O
+# ---------------
+# Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC
+# to automatically call this.
+AC_DEFUN([_AM_PROG_CC_C_O],
+[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+AC_REQUIRE_AUX_FILE([compile])dnl
+AC_LANG_PUSH([C])dnl
+AC_CACHE_CHECK(
+ [whether $CC understands -c and -o together],
+ [am_cv_prog_cc_c_o],
+ [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i])
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+AC_LANG_POP([C])])
+
+# For backward compatibility.
+AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
+
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -860,7 +924,7 @@ AC_DEFUN([AM_RUN_LOG],
# Check to make sure that the build environment is sane. -*- Autoconf -*-
-# Copyright (C) 1996-2013 Free Software Foundation, Inc.
+# Copyright (C) 1996-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -941,7 +1005,7 @@ AC_CONFIG_COMMANDS_PRE(
rm -f conftest.file
])
-# Copyright (C) 2009-2013 Free Software Foundation, Inc.
+# Copyright (C) 2009-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1001,7 +1065,7 @@ AC_SUBST([AM_BACKSLASH])dnl
_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
])
-# Copyright (C) 2001-2013 Free Software Foundation, Inc.
+# Copyright (C) 2001-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1029,7 +1093,7 @@ fi
INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
AC_SUBST([INSTALL_STRIP_PROGRAM])])
-# Copyright (C) 2006-2013 Free Software Foundation, Inc.
+# Copyright (C) 2006-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
@@ -1048,7 +1112,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
# Check how to create a tarball. -*- Autoconf -*-
-# Copyright (C) 2004-2013 Free Software Foundation, Inc.
+# Copyright (C) 2004-2014 Free Software Foundation, Inc.
#
# This file is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
diff --git a/config.h.in b/config.h.in
index ea9369b..1b549f7 100644
--- a/config.h.in
+++ b/config.h.in
@@ -357,9 +357,6 @@
/* no getdomainname */
#undef NO_DOMAINNAME
-/* Define to 1 if your C compiler doesn't accept -c and -o together. */
-#undef NO_MINUS_C_MINUS_O
-
/* OS version */
#undef OSVERSION
diff --git a/configure b/configure
index 7dc8c3e..3f26631 100755
--- a/configure
+++ b/configure
@@ -715,6 +715,7 @@ HPUX_FALSE
HPUX_TRUE
initconfigdir
sttyexec
+pythonexec
perlexec
initdir
LIBCRUN
@@ -771,6 +772,8 @@ BUNDLE_TRUE
gccsec_defs
RPM_HARDEND_CC_FALSE
RPM_HARDEND_CC_TRUE
+enable_asan_FALSE
+enable_asan_TRUE
asan_defs
debug_defs
LIBOBJS
@@ -935,7 +938,7 @@ with_fhs
with_fhs_opt
with_tmpfiles_d
with_perldir
-with_pythondir
+with_pythonexec
with_instconfigdir
with_initddir
with_nspr
@@ -1667,7 +1670,7 @@ Optional Packages:
--with-perldir=PATH Directory for perl)
- --with-pythondir=PATH Directory for python)
+ --with-pythonexec=PATH Path to executable for python)
--with-instconfigdir=/path
Base directory for instance specific writable
@@ -2802,7 +2805,7 @@ cat >>confdefs.h <<_ACEOF
#define DS_PACKAGE_STRING "$PACKAGE_STRING"
_ACEOF
-am__api_version='1.13'
+am__api_version='1.15'
ac_aux_dir=
for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
@@ -3003,8 +3006,8 @@ test "$program_suffix" != NONE &&
ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
-# expand $ac_aux_dir to an absolute path
-am_aux_dir=`cd $ac_aux_dir && pwd`
+# Expand $ac_aux_dir to an absolute path.
+am_aux_dir=`cd "$ac_aux_dir" && pwd`
if test x"${MISSING+set}" != xset; then
case $am_aux_dir in
@@ -3023,7 +3026,7 @@ else
$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
fi
-if test x"${install_sh}" != xset; then
+if test x"${install_sh+set}" != xset; then
case $am_aux_dir in
*\ * | *\ *)
install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
@@ -3342,8 +3345,8 @@ MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
# <http://lists.gnu.org/archive/html/automake/2012-07/msg00014.html>
mkdir_p='$(MKDIR_P)'
-# We need awk for the "check" target. The system "awk" is bad on
-# some platforms.
+# We need awk for the "check" target (and possibly the TAP driver). The
+# system "awk" is bad on some platforms.
# Always define AMTAR for backward compatibility. Yes, it's still used
# in the wild :-( We should find a proper way to deprecate it ...
AMTAR='$${TAR-tar}'
@@ -3444,6 +3447,48 @@ $as_echo "$am_cv_prog_tar_pax" >&6; }
+# POSIX will say in a future version that running "rm -f" with no argument
+# is OK; and we want to be able to make that assumption in our Makefile
+# recipes. So use an aggressive probe to check that the usage we want is
+# actually supported "in the wild" to an acceptable degree.
+# See automake bug#10828.
+# To make any issue more visible, cause the running configure to be aborted
+# by default if the 'rm' program in use doesn't match our expectations; the
+# user can still override this though.
+if rm -f && rm -fr && rm -rf; then : OK; else
+ cat >&2 <<'END'
+Oops!
+
+Your 'rm' program seems unable to run without file operands specified
+on the command line, even when the '-f' option is present. This is contrary
+to the behaviour of most rm programs out there, and not conforming with
+the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
+
+Please tell bug-automake(a)gnu.org about your system, including the value
+of your $PATH and any error possibly output before this message. This
+can help us improve future automake versions.
+
+END
+ if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
+ echo 'Configuration will proceed anyway, since you have set the' >&2
+ echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
+ echo >&2
+ else
+ cat >&2 <<'END'
+Aborting the configuration process, to ensure you take notice of the issue.
+
+You can download and install GNU coreutils to get an 'rm' implementation
+that behaves properly: <http://www.gnu.org/software/coreutils/>.
+
+If you want to complete the configuration process using your problematic
+'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
+to "yes", and re-run configure.
+
+END
+ as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
+ fi
+fi
+
# define these for automake distdir
VERSION=$PACKAGE_VERSION
PACKAGE=$PACKAGE_TARNAME
@@ -4793,6 +4838,65 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
+if ${am_cv_prog_cc_c_o+:} false; then :
+ $as_echo_n "(cached) " >&6
+else
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h. */
+
+int
+main ()
+{
+
+ ;
+ return 0;
+}
+_ACEOF
+ # Make sure it works both with $CC and with simple cc.
+ # Following AC_PROG_CC_C_O, we do the test twice because some
+ # compilers refuse to overwrite an existing .o file with -o,
+ # though they will create one.
+ am_cv_prog_cc_c_o=yes
+ for am_i in 1 2; do
+ if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
+ ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
+ ac_status=$?
+ echo "$as_me:$LINENO: \$? = $ac_status" >&5
+ (exit $ac_status); } \
+ && test -f conftest2.$ac_objext; then
+ : OK
+ else
+ am_cv_prog_cc_c_o=no
+ break
+ fi
+ done
+ rm -f core conftest*
+ unset am_i
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+$as_echo "$am_cv_prog_cc_c_o" >&6; }
+if test "$am_cv_prog_cc_c_o" != yes; then
+ # Losing compiler, so override with the script.
+ # FIXME: It is wrong to rewrite CC.
+ # But if we don't then we get into trouble of one sort or another.
+ # A longer-term fix would be to have automake use am__CC in this case,
+ # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
+ CC="$am_aux_dir/compile $CC"
+fi
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
depcc="$CC" am_compiler_list=
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
@@ -4921,131 +5025,6 @@ else
fi
-if test "x$CC" != xcc; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC and cc understand -c and -o together" >&5
-$as_echo_n "checking whether $CC and cc understand -c and -o together... " >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether cc understands -c and -o together" >&5
-$as_echo_n "checking whether cc understands -c and -o together... " >&6; }
-fi
-set dummy $CC; ac_cc=`$as_echo "$2" |
- sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
-if eval \${ac_cv_prog_cc_${ac_cc}_c_o+:} false; then :
- $as_echo_n "(cached) " >&6
-else
- cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h. */
-
-int
-main ()
-{
-
- ;
- return 0;
-}
-_ACEOF
-# Make sure it works both with $CC and with simple cc.
-# We do the test twice because some compilers refuse to overwrite an
-# existing .o file with -o, though they will create one.
-ac_try='$CC -c conftest.$ac_ext -o conftest2.$ac_objext >&5'
-rm -f conftest2.*
-if { { case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_try") 2>&5
- ac_status=$?
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } &&
- test -f conftest2.$ac_objext && { { case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_try") 2>&5
- ac_status=$?
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; };
-then
- eval ac_cv_prog_cc_${ac_cc}_c_o=yes
- if test "x$CC" != xcc; then
- # Test first that cc exists at all.
- if { ac_try='cc -c conftest.$ac_ext >&5'
- { { case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_try") 2>&5
- ac_status=$?
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; }; }; then
- ac_try='cc -c conftest.$ac_ext -o conftest2.$ac_objext >&5'
- rm -f conftest2.*
- if { { case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_try") 2>&5
- ac_status=$?
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; } &&
- test -f conftest2.$ac_objext && { { case "(($ac_try" in
- *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
- *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
- (eval "$ac_try") 2>&5
- ac_status=$?
- $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
- test $ac_status = 0; };
- then
- # cc works too.
- :
- else
- # cc exists but doesn't like -o.
- eval ac_cv_prog_cc_${ac_cc}_c_o=no
- fi
- fi
- fi
-else
- eval ac_cv_prog_cc_${ac_cc}_c_o=no
-fi
-rm -f core conftest*
-
-fi
-if eval test \$ac_cv_prog_cc_${ac_cc}_c_o = yes; then
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-
-$as_echo "#define NO_MINUS_C_MINUS_O 1" >>confdefs.h
-
-fi
-
-# FIXME: we rely on the cache variable name because
-# there is no other way.
-set dummy $CC
-am_cc=`echo $2 | sed 's/[^a-zA-Z0-9_]/_/g;s/^[0-9]/_/'`
-eval am_t=\$ac_cv_prog_cc_${am_cc}_c_o
-if test "$am_t" != yes; then
- # Losing compiler, so override with the script.
- # FIXME: It is wrong to rewrite CC.
- # But if we don't then we get into trouble of one sort or another.
- # A longer-term fix would be to have automake use am__CC in this case,
- # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
- CC="$am_aux_dir/compile $CC"
-fi
-
# By default we simply use the C compiler to build assembly code.
@@ -17694,7 +17673,7 @@ if test "${enable_debug+set}" = set; then :
enableval=$enable_debug;
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
$as_echo "yes" >&6; }
- debug_defs="-g3 -DDEBUG -DMCC_DEBUG"
+ debug_defs="-g3 -DDEBUG -DMCC_DEBUG -O0"
else
@@ -17724,6 +17703,14 @@ $as_echo "no" >&6; }
fi
+ if test "$enable_asan" = "yes"; then
+ enable_asan_TRUE=
+ enable_asan_FALSE='#'
+else
+ enable_asan_TRUE='#'
+ enable_asan_FALSE=
+fi
+
if test -f /usr/lib/rpm/redhat/redhat-hardened-cc1; then
RPM_HARDEND_CC_TRUE=
@@ -18275,25 +18262,25 @@ else
with_perldir=
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-pythondir" >&5
-$as_echo_n "checking for --with-pythondir... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-pythonexec" >&5
+$as_echo_n "checking for --with-pythonexec... " >&6; }
-# Check whether --with-pythondir was given.
-if test "${with_pythondir+set}" = set; then :
- withval=$with_pythondir;
+# Check whether --with-pythonexec was given.
+if test "${with_pythonexec+set}" = set; then :
+ withval=$with_pythonexec;
fi
-if test -n "$with_pythondir"; then
- if test "$with_pythondir" = yes ; then
- as_fn_error $? "You must specify --with-pythondir=/full/path/to/python" "$LINENO" 5
- elif test "$with_pythondir" = no ; then
- with_pythondir=
+if test -n "$with_pythonexec"; then
+ if test "$with_pythonexec" = yes ; then
+ as_fn_error $? "You must specify --with-pythonexec=/full/path/to/python" "$LINENO" 5
+ elif test "$with_pythonexec" = no ; then
+ with_pythonexec=/usr/bin/python2
else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_pythondir" >&5
-$as_echo "$with_pythondir" >&6; }
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_pythonexec" >&5
+$as_echo "$with_pythonexec" >&6; }
fi
else
- with_pythondir=
+ with_pythonexec=/usr/bin/python2
fi
@@ -18391,6 +18378,14 @@ if test -n "$with_perldir"; then
else
perlexec='/usr/bin/env perl'
fi
+
+# This will let us change over the python version easier in the future.
+if test -n "$with_pythonexec"; then
+ pythonexec="$with_pythonexec"
+else
+ pythonexec='/usr/bin/env python2'
+fi
+
# we use stty in perl scripts to disable password echo
# this doesn't work unless the full absolute path of the
# stty command is used e.g. system("stty -echo") does not
@@ -18663,6 +18658,7 @@ fi
+
# set default initconfigdir if not already set
# value will be set so as to be relative to $(sysconfdir)
if test -z "$initconfigdir" ; then
@@ -21259,24 +21255,18 @@ fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for Systemd with pkg-config" >&5
$as_echo_n "checking for Systemd with pkg-config... " >&6; }
- if test "$with_journald" = yes; then
- if test -n "$PKG_CONFIG" && $PKG_CONFIG --exists systemd libsystemd-journal libsystemd-daemon ; then
- systemd_inc=`$PKG_CONFIG --cflags-only-I systemd libsystemd-journal libsystemd-daemon`
- systemd_lib=`$PKG_CONFIG --libs-only-l systemd libsystemd-journal libsystemd-daemon`
- systemd_defs="-DWITH_SYSTEMD -DHAVE_JOURNALD"
- else
- as_fn_error $? "no Systemd / Journald pkg-config files" "$LINENO" 5
- fi
+ if test -n "$PKG_CONFIG" && $PKG_CONFIG --exists libsystemd ; then
+ systemd_inc=`$PKG_CONFIG --cflags-only-I libsystemd`
+ systemd_lib=`$PKG_CONFIG --libs-only-l libsystemd`
else
+ as_fn_error $? "no Systemd pkg-config files" "$LINENO" 5
+ fi
- if test -n "$PKG_CONFIG" && $PKG_CONFIG --exists systemd libsystemd-daemon ; then
- systemd_inc=`$PKG_CONFIG --cflags-only-I systemd libsystemd-daemon`
- systemd_lib=`$PKG_CONFIG --libs-only-l systemd libsystemd-daemon`
- systemd_defs="-DWITH_SYSTEMD"
- else
- as_fn_error $? "no Systemd pkg-config files" "$LINENO" 5
- fi
+ if test "$with_journald" = yes; then
+ systemd_defs="-DWITH_SYSTEMD -DHAVE_JOURNALD"
+ else
+ systemd_defs="-DWITH_SYSTEMD"
fi
# Check for the pkg config provided unit paths
@@ -21333,11 +21323,10 @@ $as_echo "$with_systemdsystemconfdir" >&6; }
fi
- if test -n "$with_systemdsystemunitdir" -o -n "$with_systemdsystemconfdir" ; then
- if test -z "$with_systemdgroupname" ; then
- with_systemdgroupname=$PACKAGE_NAME.target
- fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-systemdgroupname" >&5
+ if test -z "$with_systemdgroupname" ; then
+ with_systemdgroupname=$PACKAGE_NAME.target
+ fi
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for --with-systemdgroupname" >&5
$as_echo_n "checking for --with-systemdgroupname... " >&6; }
# Check whether --with-systemdgroupname was given.
@@ -21345,18 +21334,17 @@ if test "${with_systemdgroupname+set}" = set; then :
withval=$with_systemdgroupname;
fi
- if test "$with_systemdgroupname" = yes ; then
- as_fn_error $? "You must specify --with-systemdgroupname=name.of.group" "$LINENO" 5
- elif test "$with_systemdgroupname" = no ; then
- as_fn_error $? "You must specify --with-systemdgroupname=name.of.group" "$LINENO" 5
- else
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_systemdgroupname" >&5
+ if test "$with_systemdgroupname" = yes ; then
+ as_fn_error $? "You must specify --with-systemdgroupname=name.of.group" "$LINENO" 5
+ elif test "$with_systemdgroupname" = no ; then
+ as_fn_error $? "You must specify --with-systemdgroupname=name.of.group" "$LINENO" 5
+ else
+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_systemdgroupname" >&5
$as_echo "$with_systemdgroupname" >&6; }
- fi
-
fi
+
fi
# End of with_systemd
@@ -21653,6 +21641,10 @@ if test -z "${am__fastdepCCAS_TRUE}" && test -z "${am__fastdepCCAS_FALSE}"; then
as_fn_error $? "conditional \"am__fastdepCCAS\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
fi
+if test -z "${enable_asan_TRUE}" && test -z "${enable_asan_FALSE}"; then
+ as_fn_error $? "conditional \"enable_asan\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
if test -z "${RPM_HARDEND_CC_TRUE}" && test -z "${RPM_HARDEND_CC_FALSE}"; then
as_fn_error $? "conditional \"RPM_HARDEND_CC\" was never defined.
Usually this means the macro was only invoked conditionally." "$LINENO" 5
diff --git a/ldap/admin/src/scripts/readnsstate.in b/ldap/admin/src/scripts/readnsstate.in
new file mode 100644
index 0000000..2b3c464
--- /dev/null
+++ b/ldap/admin/src/scripts/readnsstate.in
@@ -0,0 +1,100 @@
+#!@pythonexec@
+
+import sys
+from struct import pack, unpack, calcsize
+import base64
+import time
+from datetime import timedelta
+
+def flipend(end):
+ if end == '<':
+ return '>'
+ if end == '>':
+ return '<'
+
+def printGenState(dn, nsstate, flip):
+ if pack('<h', 1) == pack('=h',1):
+ print("Little Endian")
+ end = '<'
+ if flip:
+ end = flipend(end)
+ elif pack('>h', 1) == pack('=h',1):
+ print("Big Endian")
+ end = '>'
+ if flip:
+ end = flipend(end)
+ else:
+ print("Unknown Endian")
+ sys.exit(-1) # blow up
+ print("For replica", dn)
+ thelen = len(nsstate)
+ if thelen <= 20:
+ pad = 2 # padding for short H values
+ timefmt = 'I' # timevals are unsigned 32-bit int
+ else:
+ pad = 6 # padding for short H values
+ timefmt = 'Q' # timevals are unsigned 64-bit int
+
+ base_fmtstr = "H%dx3%sH%dx" % (pad, timefmt, pad)
+ print(" fmtstr=[%s]" % base_fmtstr)
+ print(" size=%d" % calcsize(base_fmtstr))
+ print(" len of nsstate is", thelen)
+ fmtstr = end + base_fmtstr
+ (rid, sampled_time, local_offset, remote_offset, seq_num) = unpack(fmtstr, nsstate)
+ now = int(time.time())
+ tdiff = now-sampled_time
+ wrongendian = False
+ try:
+ tdelta = timedelta(seconds=tdiff)
+ wrongendian = tdelta.days > 10*365
+ except OverflowError: # int overflow
+ wrongendian = True
+ # if the sampled time is more than 20 years off, this is
+ # probably the wrong endianness
+ if wrongendian:
+ print("The difference in days is", tdiff/86400)
+ print("This is probably the wrong bit-endianness - flipping")
+ end = flipend(end)
+ fmtstr = end + base_fmtstr
+ (rid, sampled_time, local_offset, remote_offset, seq_num) = unpack(fmtstr, nsstate)
+ tdiff = now-sampled_time
+ tdelta = timedelta(seconds=tdiff)
+ print(""" CSN generator state:
+ Replica ID : %d
+ Sampled Time : %d
+ Gen as csn : %08x%04d%04d0000
+ Time as str : %s
+ Local Offset : %d
+ Remote Offset : %d
+ Seq. num : %d
+ System time : %s
+ Diff in sec. : %d
+ Day:sec diff : %d:%d
+""" % (rid, sampled_time, sampled_time, seq_num, rid, time.ctime(sampled_time), local_offset,
+ remote_offset, seq_num, time.ctime(now), tdiff, tdelta.days, tdelta.seconds))
+
+def main():
+ dn = ''
+ nsstate = ''
+ if len(sys.argv) < 2:
+ print("Usage: readnsstate.py /path/to/dse.ldif")
+ sys.exit(1)
+ if len(sys.argv) > 2:
+ flip = True
+ else:
+ flip = False
+ for line in open(sys.argv[1]):
+ if line.startswith("dn: "):
+ dn = line[4:].strip()
+ if line.lower().startswith("nsstate:: ") and dn.startswith("cn=replica"):
+ b64val = line[10:].strip()
+ print("nsState is", b64val)
+ nsstate = base64.decodestring(b64val.encode())
+ printGenState(dn, nsstate, flip)
+ if not nsstate:
+ print("Error: nsstate not found in file for cn=replica", sys.argv[1])
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
+
diff --git a/man/man1/readnsstate.1 b/man/man1/readnsstate.1
new file mode 100644
index 0000000..0edf352
--- /dev/null
+++ b/man/man1/readnsstate.1
@@ -0,0 +1,50 @@
+.TH READNSSTATE 1 "May 13 2016"
+.SH NAME
+readnsstate \- interpret the contents of cn=replica's nsState value
+.B readnsstate
+/etc/dirsrv/slapd-INSTANCE/dse.ldif
+.PP
+.SH DESCRIPTION
+Decode and display the content of the nsState attribute from a directory server's cn=replica object.
+.PP
+.SH EXAMPLES
+.PP
+.nf
+.RS
+sudo /usr/bin/readnsstate /etc/dirsrv/slapd-localhost/dse.ldif
+.RE
+.fi
+.PP
+.nf
+.RS
+For replica cn=replica,cn=dc\3Dexample\2Cdc\3Dcom,cn=mapping tree,cn=config
+ fmtstr=[H6x3QH6x]
+ size=40
+ len of nsstate is 40
+ CSN generator state:
+ Replica ID : 5
+ Sampled Time : 1463100468
+ Gen as csn : 57352434000000050000
+ Time as str : Fri May 13 10:47:48 2016
+ Local Offset : 0
+ Remote Offset : 276
+ Seq. num : 0
+ System time : Fri May 13 14:01:25 2016
+ Diff in sec. : 11617
+ Day:sec diff : 0:11617
+.RE
+.fi
+.PP
+.SH AUTHOR
+readnsstate was written by the 389 Project by richm.
+.SH "REPORTING BUGS"
+Report bugs to https://fedorahosted.org/389/newticket.
+.SH COPYRIGHT
+Copyright \(co 2016 Red Hat, Inc.
+.br
+This manual page was written by William Brown <wibrown(a)redhat.com>,
+for the 389 Directory Server Project.
+.br
+This is free software. You may redistribute copies of it under the terms of
+the Directory Server license found in the LICENSE file of this
+software distribution.
7 years, 10 months
rpm/389-ds-base.spec.in
by William Brown
rpm/389-ds-base.spec.in | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
New commits:
commit c2bc6707ba784c7cd673517519cbc8ac8611f922
Author: William Brown <firstyear(a)redhat.com>
Date: Thu Jun 9 11:06:23 2016 +1000
Ticket 48877 - Fixes for RPM spec with spectool
Bug Description: spectool expects a valid rpm file during parsing, but instead
it trims it after the %description ..... So instead, because we wrapped this
with an %if for asan, this now creates an incomplete and broken spec. This in
turn causes make rpms to fail in some cases.
Fix Description: Move the if condition inside the %description, to prevent the
parser error.
https://fedorahosted.org/389/ticket/48877
Author: wibrown
Review by: nhosoi (Thanks!)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 4169388..c0ceb98 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -143,17 +143,13 @@ Source2: %{name}-devel.README
Source3: https://git.fedorahosted.org/cgit/nunc-stans.git/snapshot/nunc-stans-%{nu...
%endif
-%if %{use_asan}
%description
389 Directory Server is an LDAPv3 compliant server. The base package includes
the LDAP server and command line utilities for server administration.
+%if %{use_asan}
WARNING! This build is linked to Address Sanitisation libraries. This probably
isn't what you want. Please contact support immediately.
Please see http://seclists.org/oss-sec/2016/q1/363 for more information.
-%else
-%description
-389 Directory Server is an LDAPv3 compliant server. The base package includes
-the LDAP server and command line utilities for server administration.
%endif
# upgrade path from monolithic %{name} (including -libs & -devel) to %{name} + %{name}-snmp
7 years, 10 months
rpm/389-ds-base.spec.in
by William Brown
rpm/389-ds-base.spec.in | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
New commits:
commit db0f124e55b6e74bda35b6d33f5ef0412e105528
Author: William Brown <firstyear(a)redhat.com>
Date: Thu Jun 9 13:30:58 2016 +1000
Ticket 48404 - libslapd owned by libs and devel
Bug Description: our .so files were not handled consistently, some in -libs
some in -devel.
Fix Description: move all versioned .so files to -libs, and unversioned to
-devel. Given that the unversioned file is a symlink to the versioned .so, this
is a development convenience.
https://fedorahosted.org/389/ticket/48404
Author: wibrown
Review by: nhosoi (thanks!)
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 6cf5fda..4169388 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -458,6 +458,7 @@ fi
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
%{_includedir}/%{pkgname}
%{_libdir}/%{pkgname}/libslapd.so
+%{_libdir}/%{pkgname}/libns-dshttpd.so
%if %{use_nunc_stans}
%{_libdir}/%{pkgname}/libnunc-stans.so
%endif
@@ -468,9 +469,9 @@ fi
%doc LICENSE LICENSE.GPLv3+ LICENSE.openssl README.devel
%dir %{_libdir}/%{pkgname}
%{_libdir}/%{pkgname}/libslapd.so.*
-%{_libdir}/%{pkgname}/libns-dshttpd.so*
+%{_libdir}/%{pkgname}/libns-dshttpd.so.*
%if %{use_nunc_stans}
-%{_libdir}/%{pkgname}/libnunc-stans.so*
+%{_libdir}/%{pkgname}/libnunc-stans.so.*
%endif
%files snmp
7 years, 10 months
dirsrvtests/tests
by Simon Pichugin
dirsrvtests/tests/suites/config/config_test.py | 403 +++++++++++++++++--------
dirsrvtests/tests/tickets/ticket48326_test.py | 230 --------------
2 files changed, 283 insertions(+), 350 deletions(-)
New commits:
commit db116a434eb7b7bab94769a42144b339e7310aa1
Author: Simon Pichugin <spichugi(a)redhat.com>
Date: Fri Jul 3 15:52:23 2015 +0200
Ticket 48326 - Move CI test to config test suite and refactor
Description: Refactor config test suite to make addition of new test
case possible. Move test case from tickets/ticket48326.py to
suites/config/config_test.py and refactor it afterwards.
Use a big value for an attribute instead of an ldif file.
https://fedorahosted.org/389/ticket/48326
Reviewed by: nhosoi (Thanks!)
diff --git a/dirsrvtests/tests/suites/config/config_test.py b/dirsrvtests/tests/suites/config/config_test.py
index d3631e3..4670bb1 100644
--- a/dirsrvtests/tests/suites/config/config_test.py
+++ b/dirsrvtests/tests/suites/config/config_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
+# Copyright (C) 2016 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -18,181 +18,344 @@ from lib389._constants import *
from lib389.properties import *
from lib389.tasks import *
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
+DEBUGGING = False
+USER_DN = 'uid=test_user,%s' % DEFAULT_SUFFIX
+
+if DEBUGGING:
+ logging.getLogger(__name__).setLevel(logging.DEBUG)
+else:
+ logging.getLogger(__name__).setLevel(logging.INFO)
-installation1_prefix = None
+log = logging.getLogger(__name__)
-class TopologyStandalone(object):
- def __init__(self, standalone):
- standalone.open()
- self.standalone = standalone
+class TopologyReplication(object):
+ """The Replication Topology Class"""
+ def __init__(self, master1, master2):
+ """Init"""
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
@pytest.fixture(scope="module")
def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating standalone instance ...
- standalone = DirSrv(verbose=False)
- args_instance[SER_HOST] = HOST_STANDALONE
- args_instance[SER_PORT] = PORT_STANDALONE
- args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ """Create Replication Deployment"""
+
+ # Creating master 1...
+ if DEBUGGING:
+ master1 = DirSrv(verbose=True)
+ else:
+ master1 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ if DEBUGGING:
+ master2 = DirSrv(verbose=True)
+ else:
+ master2 = DirSrv(verbose=False)
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_standalone = args_instance.copy()
- standalone.allocate(args_standalone)
- instance_standalone = standalone.exists()
- if instance_standalone:
- standalone.delete()
- standalone.create()
- standalone.open()
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+ #
+ # Create all the agreements
+ #
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: 'meTo_' + master2.host + ':' + str(master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: 'meTo_' + master1.host + ':' + str(master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master2):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ raise
+
+ def fin():
+ """If we are debugging just stop the instances, otherwise remove
+ them
+ """
+ if DEBUGGING:
+ master1.stop()
+ master2.stop()
+ else:
+ master1.delete()
+ master2.delete()
+
+ request.addfinalizer(fin)
# Clear out the tmp dir
- standalone.clearTmpDir(__file__)
+ master1.clearTmpDir(__file__)
+
+ return TopologyReplication(master1, master2)
+
+
+(a)pytest.fixture(scope="module")
+def big_file():
+ TEMP_BIG_FILE = ''
+ # 1024*1024=1048576
+ # B for 1 MiB
+ # Big for 3 MiB
+ for x in range(1048576):
+ TEMP_BIG_FILE += '+'
+
+ return TEMP_BIG_FILE
+
+
+(a)pytest.fixture
+def test_user(topology):
+ """Add and remove test user"""
+
+ try:
+ topology.master1.add_s(Entry((USER_DN, {
+ 'uid': 'test_user',
+ 'givenName': 'test_user',
+ 'objectclass': ['top', 'person',
+ 'organizationalPerson',
+ 'inetorgperson'],
+ 'cn': 'test_user',
+ 'sn': 'test_user'})))
+ time.sleep(1)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to add user (%s): error (%s)' % (USER_DN,
+ e.message['desc']))
+ raise
+
+ def fin():
+ try:
+ topology.master1.delete_s(USER_DN)
+ time.sleep(1)
+ except ldap.LDAPError as e:
+ log.fatal('Failed to delete user (%s): error (%s)' % (
+ USER_DN,
+ e.message['desc']))
+ raise
+
+
+def test_maxbersize_repl(topology, test_user, big_file):
+ """maxbersize is ignored in the replicated operations.
+
+ :Feature: Config
+
+ :Setup: MMR with two masters, test user,
+ 1 MiB big value for attribute
+
+ :Steps: 1. Set 20KiB small maxbersize on master2
+ 2. Add big value to master2
+ 3. Add big value to master1
+
+ :Assert: Adding the big value to master2 is failed,
+ adding the big value to master1 is succeed,
+ the big value is successfully replicated to master2
+ """
+ log.info("Set nsslapd-maxbersize: 20K to master2")
+ try:
+ topology.master2.modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-maxbersize', '20480')])
+ except ldap.LDAPError as e:
+ log.error('Failed to set nsslapd-maxbersize == 20480: error ' +
+ e.message['desc'])
+ raise
+
+ topology.master2.restart(20)
- return TopologyStandalone(standalone)
+ log.info('Try to add attribute with a big value to master2 - expect to FAIL')
+ with pytest.raises(ldap.SERVER_DOWN):
+ topology.master2.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'jpegphoto', big_file)])
+ topology.master2.restart(20)
+ topology.master1.restart(20)
-def test_config_init(topology):
- '''
- Initialization function
- '''
- return
+ log.info('Try to add attribute with a big value to master1 - expect to PASS')
+ try:
+ topology.master1.modify_s(USER_DN, [(ldap.MOD_REPLACE,
+ 'jpegphoto', big_file)])
+ except ldap.SERVER_DOWN as e:
+ log.fatal('Failed to add a big attribute, error: ' + e.message['desc'])
+ raise
+
+ time.sleep(1)
+
+ log.info('Check if a big value was successfully added to master1')
+ try:
+ entries = topology.master1.search_s(USER_DN, ldap.SCOPE_BASE,
+ '(cn=*)',
+ ['jpegphoto'])
+ assert entries[0].data['jpegphoto']
+ except ldap.LDAPError as e:
+ log.fatal('Search failed, error: ' + e.message['desc'])
+ raise
+
+ log.info('Check if a big value was successfully replicated to master2')
+ try:
+ entries = topology.master2.search_s(USER_DN, ldap.SCOPE_BASE,
+ '(cn=*)',
+ ['jpegphoto'])
+ assert entries[0].data['jpegphoto']
+ except ldap.LDAPError as e:
+ log.fatal('Search failed, error: ' + e.message['desc'])
+ raise
+
+ log.info("Set nsslapd-maxbersize: 2097152 (default) to master2")
+ try:
+ topology.master2.modify_s("cn=config", [(ldap.MOD_REPLACE,
+ 'nsslapd-maxbersize', '2097152')])
+ except ldap.LDAPError as e:
+ log.error('Failed to set nsslapd-maxbersize == 2097152 error ' +
+ e.message['desc'])
+ raise
def test_config_listen_backport_size(topology):
- '''
- We need to check that we can search on nsslapd-listen-backlog-size,
+ """We need to check that we can search on nsslapd-listen-backlog-size,
and change its value: to a psoitive number and a negative number.
Verify invalid value is rejected.
- '''
-
- log.info('Running test_config_listen_backport_size...')
+ """
try:
- entry = topology.standalone.search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top',
- ['nsslapd-listen-backlog-size'])
- default_val = entry[0].getValue('nsslapd-listen-backlog-size')
- if not default_val:
- log.fatal('test_config_listen_backport_size: Failed to get nsslapd-listen-backlog-size from config')
- assert False
+ entry = topology.master1.search_s(DN_CONFIG, ldap.SCOPE_BASE, 'objectclass=top',
+ ['nsslapd-listen-backlog-size'])
+ default_val = entry[0].data['nsslapd-listen-backlog-size'][0]
+ assert default_val, 'Failed to get nsslapd-listen-backlog-size from config'
except ldap.LDAPError as e:
- log.fatal('test_config_listen_backport_size: Failed to search config, error: ' + e.message('desc'))
- assert False
+ log.fatal('Failed to search config, error: ' + e.message('desc'))
+ raise
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '256')])
+ topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ '256')])
except ldap.LDAPError as e:
- log.fatal('test_config_listen_backport_size: Failed to modify config, error: ' + e.message('desc'))
- assert False
+ log.fatal('Failed to modify config, error: ' + e.message('desc'))
+ raise
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', '-1')])
+ topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ '-1')])
except ldap.LDAPError as e:
- log.fatal('test_config_listen_backport_size: Failed to modify config(negative value), error: ' +
+ log.fatal('Failed to modify config(negative value), error: ' +
e.message('desc'))
- assert False
+ raise
- try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', 'ZZ')])
- log.fatal('test_config_listen_backport_size: Invalid value was successfully added')
- assert False
- except ldap.LDAPError as e:
- pass
+ with pytest.raises(ldap.LDAPError):
+ topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ 'ZZ')])
+ log.fatal('Invalid value was successfully added')
- #
# Cleanup - undo what we've done
- #
try:
- topology.standalone.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE, 'nsslapd-listen-backlog-size', default_val)])
+ topology.master1.modify_s(DN_CONFIG, [(ldap.MOD_REPLACE,
+ 'nsslapd-listen-backlog-size',
+ default_val)])
except ldap.LDAPError as e:
- log.fatal('test_config_listen_backport_size: Failed to reset config, error: ' + e.message('desc'))
- assert False
-
- log.info('test_config_listen_backport_size: PASSED')
+ log.fatal('Failed to reset config, error: ' + e.message('desc'))
+ raise
def test_config_deadlock_policy(topology):
- '''
- We need to check that nsslapd-db-deadlock-policy exists, that we can
+ """We need to check that nsslapd-db-deadlock-policy exists, that we can
change the value, and invalid values are rejected
- '''
-
- log.info('Running test_config_deadlock_policy...')
+ """
LDBM_DN = 'cn=config,cn=ldbm database,cn=plugins,cn=config'
default_val = '9'
try:
- entry = topology.standalone.search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top',
- ['nsslapd-db-deadlock-policy'])
- val = entry[0].getValue('nsslapd-db-deadlock-policy')
- if not val:
- log.fatal('test_config_deadlock_policy: Failed to get nsslapd-db-deadlock-policy from config')
- assert False
- if val != default_val:
- log.fatal('test_config_deadlock_policy: The wrong derfualt value was present: (%s) but expected (%s)' %
- (val, default_val))
- assert False
+ entry = topology.master1.search_s(LDBM_DN, ldap.SCOPE_BASE, 'objectclass=top',
+ ['nsslapd-db-deadlock-policy'])
+ val = entry[0].data['nsslapd-db-deadlock-policy'][0]
+ assert val, 'Failed to get nsslapd-db-deadlock-policy from config'
+ assert val == default_val, 'The wrong derfualt value was present'
except ldap.LDAPError as e:
- log.fatal('test_config_deadlock_policy: Failed to search config, error: ' + e.message('desc'))
- assert False
+ log.fatal('Failed to search config, error: ' + e.message('desc'))
+ raise
# Try a range of valid values
for val in ('0', '5', '9'):
try:
- topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)])
+ topology.master1.modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-db-deadlock-policy',
+ val)])
except ldap.LDAPError as e:
- log.fatal('test_config_deadlock_policy: Failed to modify config: nsslapd-db-deadlock-policy to (%s), error: %s' %
+ log.fatal('Failed to modify config: nsslapd-db-deadlock-policy to (%s), error: %s' %
(val, e.message('desc')))
- assert False
+ raise
# Try a range of invalid values
for val in ('-1', '10'):
- try:
- topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', val)])
- log.fatal('test_config_deadlock_policy: Able to add invalid value to nsslapd-db-deadlock-policy(%s)' % (val))
- assert False
- except ldap.LDAPError as e:
- pass
- #
+ with pytest.raises(ldap.LDAPError):
+ topology.master1.modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-db-deadlock-policy',
+ val)])
+ log.fatal('Able to add invalid value to nsslapd-db-deadlock-policy(%s)' % (val))
+
# Cleanup - undo what we've done
- #
try:
- topology.standalone.modify_s(LDBM_DN, [(ldap.MOD_REPLACE, 'nsslapd-db-deadlock-policy', default_val)])
+ topology.master1.modify_s(LDBM_DN, [(ldap.MOD_REPLACE,
+ 'nsslapd-db-deadlock-policy',
+ default_val)])
except ldap.LDAPError as e:
- log.fatal('test_config_deadlock_policy: Failed to reset nsslapd-db-deadlock-policy to the default value(%s), error: %s' %
+ log.fatal('Failed to reset nsslapd-db-deadlock-policy to the default value(%s), error: %s' %
(default_val, e.message('desc')))
-
- log.info('test_config_deadlock_policy: PASSED')
-
-
-def test_config_final(topology):
- topology.standalone.delete()
- log.info('Testcase PASSED')
-
-
-def run_isolated():
- '''
- This test suite is designed to test all things cn=config Like, the core cn=config settings,
- or the ldbm database settings, etc. This suite shoud not test individual plugins - there
- should be individual suites for each plugin.
- '''
- global installation1_prefix
- installation1_prefix = None
-
- topo = topology(True)
- test_config_init(topo)
-
- test_config_listen_backport_size(topo)
- test_config_deadlock_policy(topo)
-
- test_config_final(topo)
+ raise
if __name__ == '__main__':
- run_isolated()
-
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
diff --git a/dirsrvtests/tests/tickets/ticket48326_test.py b/dirsrvtests/tests/tickets/ticket48326_test.py
deleted file mode 100644
index 1434f2d..0000000
--- a/dirsrvtests/tests/tickets/ticket48326_test.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2015 Red Hat, Inc.
-# All rights reserved.
-#
-# License: GPL (version 3 or any later version).
-# See LICENSE for details.
-# --- END COPYRIGHT BLOCK ---
-#
-import os
-import sys
-import time
-import shlex
-import subprocess
-import ldap
-import logging
-import pytest
-from lib389 import DirSrv, Entry, tools, tasks
-from lib389.tools import DirSrvTools
-from lib389._constants import *
-from lib389.properties import *
-from lib389.tasks import *
-from lib389.utils import *
-
-logging.getLogger(__name__).setLevel(logging.DEBUG)
-log = logging.getLogger(__name__)
-
-installation1_prefix = None
-
-MYDN = 'uid=tuser1M,dc=example,dc=com'
-MYLDIF = 'ticket48326.ldif'
-
-class TopologyReplication(object):
- def __init__(self, master1, master2):
- master1.open()
- self.master1 = master1
- master2.open()
- self.master2 = master2
-
-
-(a)pytest.fixture(scope="module")
-def topology(request):
- global installation1_prefix
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
-
- # Creating master 1...
- master1 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_1
- args_instance[SER_PORT] = PORT_MASTER_1
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master1.allocate(args_master)
- instance_master1 = master1.exists()
- if instance_master1:
- master1.delete()
- master1.create()
- master1.open()
- master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
-
- # Creating master 2...
- master2 = DirSrv(verbose=False)
- if installation1_prefix:
- args_instance[SER_DEPLOYED_DIR] = installation1_prefix
- args_instance[SER_HOST] = HOST_MASTER_2
- args_instance[SER_PORT] = PORT_MASTER_2
- args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
- args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
- args_master = args_instance.copy()
- master2.allocate(args_master)
- instance_master2 = master2.exists()
- if instance_master2:
- master2.delete()
- master2.create()
- master2.open()
- master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
-
- #
- # Create all the agreements
- #
- # Creating agreement from master 1 to master 2
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
- if not m1_m2_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m1_m2_agmt)
-
- # Creating agreement from master 2 to master 1
- properties = {RA_NAME: r'meTo_$host:$port',
- RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
- RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
- RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
- RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
- m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
- if not m2_m1_agmt:
- log.fatal("Fail to create a master -> master replica agreement")
- sys.exit(1)
- log.debug("%s created" % m2_m1_agmt)
-
- # Allow the replicas to get situated with the new agreements...
- time.sleep(5)
-
- #
- # Initialize all the agreements
- #
- master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
- master1.waitForReplInit(m1_m2_agmt)
-
- # Check replication is working...
- if master1.testReplication(DEFAULT_SUFFIX, master2):
- log.info('Replication is working.')
- else:
- log.fatal('Replication is not working.')
- assert False
-
- # Delete each instance in the end
- def fin():
- master1.delete()
- master2.delete()
- request.addfinalizer(fin)
-
- # Clear out the tmp dir
- master1.clearTmpDir(__file__)
-
- return TopologyReplication(master1, master2)
-
-
-(a)pytest.fixture(scope="module")
-
-
-def add_entry(topology, server, serverPort, expectToFail):
- """
- Adding 1Mentry to the given server
- Check the add result based upon the expectToFail info.
- """
- if expectToFail:
- log.info("Adding 1M entry to %s expecting to fail." % server)
- else:
- log.info("Adding 1M entry to %s expecting to succeed." % server)
-
- data_dir_path = topology.master1.getDir(__file__, DATA_DIR)
- ldif_file = data_dir_path + MYLDIF
-
- #strcmdline = '/usr/bin/ldapmodify -x -h localhost -p' + str(serverPort) + '-D' + DN_DM + '-w' + PW_DM + '-af' + ldif_file
- #cmdline = shlex.split(strcmdline)
- cmdline = ['/usr/bin/ldapmodify', '-x', '-h', 'localhost', '-p', str(serverPort),
- '-D', DN_DM, '-w', PW_DM, '-af', ldif_file]
- log.info("Running cmdline (%s): %s" % (server, cmdline))
-
- try:
- proc = subprocess.Popen(cmdline, stderr=subprocess.PIPE)
- except Exception as e:
- log.info("%s caught in exception: %s" % (cmdline, e))
- assert False
-
- Found = False
- Expected = "ldap_result: Can't contact LDAP server"
- while True:
- l = proc.stderr.readline()
- if l == "":
- break
- if Expected in l:
- Found = True
- break
-
- if expectToFail:
- if Found:
- log.info("Adding 1M entry to %s failed as expected: %s" % (server, l))
- else:
- log.fatal("Expected error message %s was not returned: %s" % Expected)
- assert False
- else:
- if Found:
- log.fatal("%s failed although expecting to succeed: %s" % (cmdline, l))
- assert False
- else:
- log.info("Adding 1M entry to %s succeeded as expected" % server)
-
-
-def test_ticket48326(topology):
- """
- maxbersize is ignored in the replicated operations.
- [settings]
- master1 has default size maxbersize (2MB).
- master2 has much saller size maxbersize (20KB).
- [test case]
- Adding an entry which size is larger than 20KB to master2 fails.
- But adding an entry which size is larger than 20KB and less than 2MB to master1 succeeds
- and the entry is successfully replicated to master2.
- """
- log.info("Ticket 48326 - it could be nice to have nsslapd-maxbersize default to bigger than 2Mb")
- log.info("Set nsslapd-maxbersize: 20K to master2")
- try:
- topology.master2.modify_s("cn=config", [(ldap.MOD_REPLACE, 'nsslapd-maxbersize', '20480')])
- except ldap.LDAPError as e:
- log.error('Failed to set nsslapd-maxbersize == 20480: error ' + e.message['desc'])
- assert False
-
- add_entry(topology, "master2", PORT_MASTER_2, True)
-
- add_entry(topology, "master1", PORT_MASTER_1, False)
-
- time.sleep(1)
-
- log.info('Searching for %s on master2...', MYDN)
- try:
- entries = topology.master2.search_s(MYDN, ldap.SCOPE_BASE, '(objectclass=*)')
- if not entries:
- log.fatal('Entry %s failed to repliate to master2.' % MYDN)
- assert False
- else:
- log.info('SUCCESS: Entry %s is successfully replicated to master2.' % MYDN)
- except ldap.LDAPError as e:
- log.fatal('Search failed: ' + e.message['desc'])
- assert False
-
-
-if __name__ == '__main__':
- # Run isolated
- # -s for DEBUG mode
-
- CURRENT_FILE = os.path.realpath(__file__)
- pytest.main("-s %s" % CURRENT_FILE)
7 years, 10 months
Branch '389-ds-base-1.3.4' - 2 commits - dirsrvtests/tests ldap/admin ldap/ldif ldap/servers Makefile.am Makefile.in
by Noriko Hosoi
Makefile.am | 3
Makefile.in | 3
dirsrvtests/tests/tickets/ticket48755_test.py | 222 ++++++++++++++++
ldap/admin/src/scripts/91reindex.pl.in | 103 +++++++
ldap/admin/src/scripts/91subtreereindex.pl | 8
ldap/admin/src/scripts/setup-ds.res.in | 1
ldap/ldif/template-dse.ldif.in | 1
ldap/servers/plugins/replication/repl5_replica_config.c | 7
ldap/servers/plugins/replication/repl5_tot_protocol.c | 169 ++++++++----
ldap/servers/slapd/back-ldbm/back-ldbm.h | 6
ldap/servers/slapd/back-ldbm/dblayer.c | 5
ldap/servers/slapd/back-ldbm/filterindex.c | 18 -
ldap/servers/slapd/back-ldbm/idl_new.c | 87 +++++-
ldap/servers/slapd/back-ldbm/index.c | 22 +
ldap/servers/slapd/back-ldbm/init.c | 2
ldap/servers/slapd/back-ldbm/misc.c | 1
ldap/servers/slapd/entry.c | 4
ldap/servers/slapd/slap.h | 4
ldap/servers/slapd/slapi-plugin.h | 8
19 files changed, 588 insertions(+), 86 deletions(-)
New commits:
commit 80893ff5f6158ee5b5fdefde250fe22acc5d5c30
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Thu May 26 14:40:52 2016 -0700
Ticket #48755 - CI test: test case for ticket 48755
Description: moving an entry could make the online init fail
(cherry picked from commit e218a187678133455c4138481c825852e099298a)
diff --git a/dirsrvtests/tests/tickets/ticket48755_test.py b/dirsrvtests/tests/tickets/ticket48755_test.py
new file mode 100644
index 0000000..e3b7b61
--- /dev/null
+++ b/dirsrvtests/tests/tickets/ticket48755_test.py
@@ -0,0 +1,222 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2016 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import os
+import sys
+import time
+import shlex
+import subprocess
+import ldap
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from lib389.utils import *
+
+logging.getLogger(__name__).setLevel(logging.DEBUG)
+log = logging.getLogger(__name__)
+
+installation1_prefix = None
+
+m1_m2_agmt = None
+
+class TopologyReplication(object):
+ def __init__(self, master1, master2):
+ master1.open()
+ self.master1 = master1
+ master2.open()
+ self.master2 = master2
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ global installation1_prefix
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+
+ # Creating master 1...
+ master1 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_1
+ args_instance[SER_PORT] = PORT_MASTER_1
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_1
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master1.allocate(args_master)
+ instance_master1 = master1.exists()
+ if instance_master1:
+ master1.delete()
+ master1.create()
+ master1.open()
+ master1.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_1)
+
+ # Creating master 2...
+ master2 = DirSrv(verbose=False)
+ if installation1_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation1_prefix
+ args_instance[SER_HOST] = HOST_MASTER_2
+ args_instance[SER_PORT] = PORT_MASTER_2
+ args_instance[SER_SERVERID_PROP] = SERVERID_MASTER_2
+ args_instance[SER_CREATION_SUFFIX] = DEFAULT_SUFFIX
+ args_master = args_instance.copy()
+ master2.allocate(args_master)
+ instance_master2 = master2.exists()
+ if instance_master2:
+ master2.delete()
+ master2.create()
+ master2.open()
+ master2.replica.enableReplication(suffix=SUFFIX, role=REPLICAROLE_MASTER, replicaId=REPLICAID_MASTER_2)
+
+ #
+ # Create all the agreements
+ #
+ # Creating agreement from master 1 to master 2
+ properties = {RA_NAME: r'meTo_%s:%s' % (master2.host, master2.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ global m1_m2_agmt
+ m1_m2_agmt = master1.agreement.create(suffix=SUFFIX, host=master2.host, port=master2.port, properties=properties)
+ if not m1_m2_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m1_m2_agmt)
+
+ # Creating agreement from master 2 to master 1
+ properties = {RA_NAME: r'meTo_%s:%s' % (master1.host, master1.port),
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ m2_m1_agmt = master2.agreement.create(suffix=SUFFIX, host=master1.host, port=master1.port, properties=properties)
+ if not m2_m1_agmt:
+ log.fatal("Fail to create a master -> master replica agreement")
+ sys.exit(1)
+ log.debug("%s created" % m2_m1_agmt)
+
+ # Allow the replicas to get situated with the new agreements...
+ time.sleep(5)
+
+ #
+ # Initialize all the agreements
+ #
+ master1.agreement.init(SUFFIX, HOST_MASTER_2, PORT_MASTER_2)
+ master1.waitForReplInit(m1_m2_agmt)
+
+ # Check replication is working...
+ if master1.testReplication(DEFAULT_SUFFIX, master2):
+ log.info('Replication is working.')
+ else:
+ log.fatal('Replication is not working.')
+ assert False
+
+ # Delete each instance in the end
+ def fin():
+ master1.delete()
+ master2.delete()
+ request.addfinalizer(fin)
+
+ # Clear out the tmp dir
+ master1.clearTmpDir(__file__)
+
+ return TopologyReplication(master1, master2)
+
+
+(a)pytest.fixture(scope="module")
+
+def add_ou_entry(server, idx, myparent):
+ name = 'OU%d' % idx
+ dn = 'ou=%s,%s' % (name, myparent)
+ server.add_s(Entry((dn, {'objectclass': ['top', 'organizationalunit'],
+ 'ou': name})))
+
+def add_user_entry(server, idx, myparent):
+ name = 'tuser%d' % idx
+ dn = 'uid=%s,%s' % (name, myparent)
+ server.add_s(Entry((dn, {'objectclass': ['top', 'person', 'organizationalPerson', 'inetorgperson'],
+ 'givenname': 'test',
+ 'sn': 'user%d' % idx,
+ 'cn': 'Test User%d' % idx,
+ 'userpassword': 'password'})))
+
+def test_ticket48755(topology):
+ log.info("Ticket 48755 - moving an entry could make the online init fail")
+
+ M1 = topology.master1
+ M2 = topology.master2
+
+ log.info("Generating DIT_0")
+ idx = 0
+ add_ou_entry(M1, idx, DEFAULT_SUFFIX)
+
+ ou0 = 'ou=OU%d' % idx
+ parent0 = '%s,%s' % (ou0, DEFAULT_SUFFIX)
+ add_ou_entry(M1, idx, parent0)
+
+ parent00 = 'ou=OU%d,%s' % (idx, parent0)
+ for idx in range(0, 9):
+ add_user_entry(M1, idx, parent00)
+
+ log.info('%s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, parent0, parent00))
+
+ log.info("Generating DIT_1")
+ idx = 1
+ add_ou_entry(M1, idx, DEFAULT_SUFFIX)
+
+ parent1 = 'ou=OU%d,%s' % (idx, DEFAULT_SUFFIX)
+ add_ou_entry(M1, idx, parent1)
+
+ log.info("Moving %s to DIT_1" % parent00)
+ M1.rename_s(parent00, ou0, newsuperior=parent1, delold=1)
+
+ log.info("Moving %s to DIT_1" % parent0)
+ parent01 = '%s,%s' % (ou0, parent1)
+ M1.rename_s(parent0, ou0, newsuperior=parent01, delold=1)
+
+ parent001 = '%s,%s' % (ou0, parent01)
+ log.info("Moving USERS to %s" % parent0)
+ for idx in range(0, 9):
+ name = 'tuser%d' % idx
+ rdn = 'uid=%s' % name
+ dn = 'uid=%s,%s' % (name, parent01)
+ M1.rename_s(dn, rdn, newsuperior=parent001, delold=1)
+
+ log.info('%s => %s => %s => %s => 10 USERS' % (DEFAULT_SUFFIX, parent1, parent01, parent001))
+
+ log.info("Deleting 5 USERS to turn them into a tombstone entries")
+ for idx in range(5, 9):
+ name = 'tuser%d' % idx
+ rdn = 'uid=%s' % name
+ dn = 'uid=%s,%s' % (name, parent001)
+ M1.delete_s(dn)
+
+ log.info("Run Consumer Initialization.")
+ global m1_m2_agmt
+ M1.startReplication_async(m1_m2_agmt)
+ M1.waitForReplInit(m1_m2_agmt)
+
+ m1entries = M1.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
+ m2entries = M2.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, '(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))')
+
+ log.info("m1entry count - %d", len(m1entries))
+ log.info("m2entry count - %d", len(m2entries))
+
+ assert len(m1entries) == len(m2entries)
+ log.info('PASSED')
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
commit 4e70ab58808cae57642f459ff00a298e69265e08
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Thu May 26 15:14:06 2016 -0700
Ticket #48755 - moving an entry could make the online init fail
Bug Description: Online init (aka Total update, bulk import) scans the
primary id2entry db in the order of ID. If Entry A is moved under a
new superior Entry B which was generated after Entry A, when Entry A
is sent to a consumer using online init, its parent entry does not
exist on the consumer and the online init fails.
Fix Description:
- Added a command BACK_INFO_IS_ENTRYRDN to slapi_back_get_info, which
returns the status of entryrdn switch maintained in the backend.
- If slapi_backend_get_info(BACK_INFO_IS_ENTRYRDN) returns true for
the replicated backend, repl5_tot_run searches the entry with the
filter:
(|(parentid>=1)(objectclass=ldapsubentry)(objectclass=nstombstone))
instead of:
(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))".
- In addition, idl_new_range_fetch had to be modified so that ...
* A range search for parentid ignores nsslapd-idlistscanlimit by
setting SLAPI_OP_RANGE_NO_ALLIDS as well as it skips sorting the
IDlist by ID by setting SLAPI_OP_RANGE_NO_IDL_SORT.
* In case SLAPI_OP_RANGE_NO_IDL_SORT is set, idl_new_range_fetch
checks whether the key (in this case parentid) is in the IDlist.
If it exists, the ID is appended. If it does not, the ID is in
the leftover list and appended when the parent ID is found in the
IDlist.
- Increased the version of rdn-format-# in DBVERSION to 3.
- Upgrade script 91reindex.pl.in is added which reindex the parentid
index file in the integer order if the version of rdn-format-# in
DBVERSION is less than 3.
https://fedorahosted.org/389/ticket/48755
Reviewed by wibrown(a)redhat.com and lkrispen(a)redhat.com (Thanks, William and Ludwig!)
(cherry picked from commit 3606b78bacce984ab2226755c5921dffac9552c2)
diff --git a/Makefile.am b/Makefile.am
index 8dcdb36..2d19a74 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -589,7 +589,8 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \
ldap/admin/src/scripts/50AES-pbe-plugin.ldif\
ldap/admin/src/scripts/50updateconfig.ldif \
ldap/admin/src/scripts/52updateAESplugin.pl \
- ldap/admin/src/scripts/dnaplugindepends.ldif
+ ldap/admin/src/scripts/dnaplugindepends.ldif \
+ ldap/admin/src/scripts/91reindex.pl
update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh
diff --git a/Makefile.in b/Makefile.in
index a29509d..4298c57 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1997,7 +1997,8 @@ update_DATA = ldap/admin/src/scripts/exampleupdate.pl \
ldap/admin/src/scripts/50AES-pbe-plugin.ldif\
ldap/admin/src/scripts/50updateconfig.ldif \
ldap/admin/src/scripts/52updateAESplugin.pl \
- ldap/admin/src/scripts/dnaplugindepends.ldif
+ ldap/admin/src/scripts/dnaplugindepends.ldif \
+ ldap/admin/src/scripts/91reindex.pl
update_SCRIPTS = ldap/admin/src/scripts/exampleupdate.sh
diff --git a/ldap/admin/src/scripts/91reindex.pl.in b/ldap/admin/src/scripts/91reindex.pl.in
new file mode 100644
index 0000000..c861f64
--- /dev/null
+++ b/ldap/admin/src/scripts/91reindex.pl.in
@@ -0,0 +1,103 @@
+use Mozilla::LDAP::Conn;
+use Mozilla::LDAP::Utils qw(normalizeDN);
+use Mozilla::LDAP::API qw(:constant ldap_url_parse ldap_explode_dn);
+use DSUpdate qw(isOffline);
+
+sub runinst {
+ my ($inf, $inst, $dseldif, $conn) = @_;
+ my $rc, @errs;
+
+ # List of index to be reindexed
+ my @toreindex = qw(parentid);
+ # rdn-format value. See $rdn_format set below.
+ # If equal to or greater than this value, no need to reindex.
+ # If it needs to be unconditionally reindexed, set 0.
+ my @rdnconditions = (4)
+
+ my $config = $conn->search("cn=config", "base", "(objectclass=*)");
+ if (!$config) {
+ push @errs, ['error_finding_config_entry', 'cn=config',
+ $conn->getErrorString()];
+ return @errs;
+ }
+
+ ($rc, @errs) = isOffline($inf, $inst, $conn);
+ if (!$rc) {
+ return @errs;
+ }
+
+ my $reindex = "@sbindir@/db2index -Z $inst";
+ my @errs;
+ my $instconf = $conn->search("cn=ldbm database,cn=plugins,cn=config", "onelevel", "(objectclass=*)");
+ if (!$instconf) {
+ push @errs, ['error_finding_config_entry', 'cn=*,cn=ldbm database,cn=plugins,cn=config', $conn->getErrorString()];
+ return @errs;
+ }
+
+ my $dbconf = $conn->search("cn=config,cn=ldbm database,cn=plugins,cn=config", "base", "(objectclass=*)");
+ if (!$dbconf) {
+ push @errs, ['error_finding_config_entry',
+ 'cn=config,cn=ldbm database,cn=plugins,cn=config',
+ $conn->getErrorString()];
+ return @errs;
+ }
+
+ # Get the value of nsslapd-subtree-rename-switch.
+ my $switch = $dbconf->getValues('nsslapd-subtree-rename-switch');
+ if ("" eq $switch) {
+ return (); # subtree-rename-switch does not exist; do nothing.
+ } elsif ("off" eq $switch || "OFF" eq $switch) {
+ return (); # subtree-rename-switch is OFF; do nothing.
+ }
+
+ my $dbdir = $dbconf->getValues('nsslapd-directory');
+ my $dbversion0 = $dbdir . "/DBVERSION";
+ my $rdn_format = 0;
+ my $dbversionstr = "";
+ if (!open(DBVERSION, "$dbversion0")) {
+ push @errs, ['error_opening_file', $dbversion0, $!];
+ return @errs;
+ } else {
+ while (<DBVERSION>) {
+ if ($_ =~ /rdn-format/) {
+ $rdn_format = 1;
+ $dbversionstr = $_;
+ if ($_ =~ /rdn-format-1/) {
+ $rdn_format = 2;
+ } elsif ($_ =~ /rdn-format-2/) {
+ $rdn_format = 3;
+ } elsif ($_ =~ /rdn-format-3/) {
+ $rdn_format = 4;
+ } elsif ($_ =~ /rdn-format-4/) {
+ $rdn_format = 5;
+ } elsif ($_ =~ /rdn-format-5/) {
+ $rdn_format = 6;
+ } elsif ($_ =~ /rdn-format-/) {
+ # assume greater than -5
+ $rdn_format = 7;
+ }
+ }
+ }
+ close DBVERSION;
+ }
+
+ while ($instconf) {
+ my $backend= $instconf->getValues('cn');
+ if (($backend eq "config") || ($backend eq "monitor")) {
+ goto NEXT;
+ }
+
+ for (my $idx = 0; $ <= $#toreindex; $idx++) {
+ if (0 == $rdnconditions[$idx] || $rdnconditions[$idx] > $rdn_format) {
+ my $rc = system("$reindex -n $backend -t $idx");
+ if ($rc) {
+ push @errs, ["error_reindexng", $idx, $backend, $rc];
+ }
+ }
+ }
+NEXT:
+ $instconf = $conn->nextEntry();
+ }
+
+ return @errs;
+}
diff --git a/ldap/admin/src/scripts/91subtreereindex.pl b/ldap/admin/src/scripts/91subtreereindex.pl
index a031cc1..c4b40a3 100644
--- a/ldap/admin/src/scripts/91subtreereindex.pl
+++ b/ldap/admin/src/scripts/91subtreereindex.pl
@@ -51,14 +51,18 @@ sub runinst {
if ($_ =~ /rdn-format-1/) {
$is_rdn_format = 2;
}
- if ($_ =~ /rdn-format-2/) {
+ elsif ($_ =~ /rdn-format-2/) {
$is_rdn_format = 3;
}
+ elsif ($_ =~ /rdn-format-/) {
+ # assume greater than -2
+ $is_rdn_format = 4;
+ }
}
}
close DBVERSION;
- if (3 == $is_rdn_format) {
+ if (3 <= $is_rdn_format) {
return (); # DB already has the new rdn format.
}
diff --git a/ldap/admin/src/scripts/setup-ds.res.in b/ldap/admin/src/scripts/setup-ds.res.in
index 7134e25..760db6f 100644
--- a/ldap/admin/src/scripts/setup-ds.res.in
+++ b/ldap/admin/src/scripts/setup-ds.res.in
@@ -208,3 +208,4 @@ error_opening_file = Opening file '%s' failed. Error: %s\n
error_format_error = '%s' has invalid format.\n
error_update_not_offline = Error: offline mode selected but the server [%s] is still running.\n
error_update_all = Failed to update all the Directory Server instances.\n
+error_reindexing = Failed to reindex '%s' in backend '%s'. Error: %s\n
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 6acbfae..2988cb9 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -927,6 +927,7 @@ objectclass: nsIndex
cn: parentid
nssystemindex: true
nsindextype: eq
+nsmatchingrule: integerOrderingMatch
dn: cn=seeAlso,cn=default indexes, cn=config,cn=ldbm database,cn=plugins,cn=config
objectclass: top
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 4d7135c..fa436ac 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -405,7 +405,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
{
if (apply_mods)
replica_set_precise_purging(r, 0);
- }
+ }
else
{
*returncode = LDAP_UNWILLING_TO_PERFORM;
@@ -567,8 +567,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
{
if (apply_mods)
{
- if (apply_mods && config_attr_value[0])
- {
+ if (config_attr_value[0]) {
PRUint64 on_off = 0;
if (strcasecmp(config_attr_value, "on") == 0){
@@ -587,7 +586,7 @@ replica_config_modify (Slapi_PBlock *pb, Slapi_Entry* entryBefore, Slapi_Entry*
break;
}
replica_set_precise_purging(r, on_off);
- } else if (apply_mods) {
+ } else {
replica_set_precise_purging(r, 0);
}
}
diff --git a/ldap/servers/plugins/replication/repl5_tot_protocol.c b/ldap/servers/plugins/replication/repl5_tot_protocol.c
index d0c4402..03d0c3e 100644
--- a/ldap/servers/plugins/replication/repl5_tot_protocol.c
+++ b/ldap/servers/plugins/replication/repl5_tot_protocol.c
@@ -323,6 +323,10 @@ repl5_tot_run(Private_Repl_Protocol *prp)
int init_retry = 0;
Replica *replica;
ReplicaId rid = 0; /* Used to create the replica keep alive subentry */
+ Slapi_Entry *suffix = NULL;
+ char **instances = NULL;
+ Slapi_Backend *be = NULL;
+ int is_entryrdn = 0;
PR_ASSERT(NULL != prp);
@@ -354,21 +358,21 @@ retry:
*/
if (rc != ACQUIRE_SUCCESS)
{
- int optype, ldaprc, wait_retry;
- conn_get_error(prp->conn, &optype, &ldaprc);
- if (rc == ACQUIRE_TRANSIENT_ERROR && INIT_RETRY_MAX > init_retry++) {
- wait_retry = init_retry * INIT_RETRY_INT;
- slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
- "acquire replica for total update, error: %d,"
+ int optype, ldaprc, wait_retry;
+ conn_get_error(prp->conn, &optype, &ldaprc);
+ if (rc == ACQUIRE_TRANSIENT_ERROR && INIT_RETRY_MAX > init_retry++) {
+ wait_retry = init_retry * INIT_RETRY_INT;
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "acquire replica for total update, error: %d,"
" retrying in %d seconds.\n",
- ldaprc, wait_retry);
- DS_Sleep(PR_SecondsToInterval(wait_retry));
- goto retry;
- } else {
- agmt_set_last_init_status(prp->agmt, ldaprc,
- prp->last_acquire_response_code, 0, NULL);
- goto done;
- }
+ ldaprc, wait_retry);
+ DS_Sleep(PR_SecondsToInterval(wait_retry));
+ goto retry;
+ } else {
+ agmt_set_last_init_status(prp->agmt, ldaprc,
+ prp->last_acquire_response_code, 0, NULL);
+ goto done;
+ }
}
else if (prp->terminate)
{
@@ -405,48 +409,121 @@ retry:
and that the order implies that perent entry is always ahead of the
child entry in the list. Otherwise, the consumer would not be
properly updated because bulk import at the moment skips orphand entries. */
- /* XXXggood above assumption may not be valid if orphaned entry moved???? */
+ /* XXXggood above assumption may not be valid if orphaned entry moved???? */
agmt_set_last_init_status(prp->agmt, 0, 0, 0, "Total update in progress");
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Beginning total update of replica "
- "\"%s\".\n", agmt_get_long_name(prp->agmt));
+ "\"%s\".\n", agmt_get_long_name(prp->agmt));
/* RMREPL - need to send schema here */
pb = slapi_pblock_new ();
- /* we need to provide managedsait control so that referral entries can
- be replicated */
- ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
- ctrls[0] = create_managedsait_control ();
- ctrls[1] = create_backend_control(area_sdn);
+ replica = (Replica*) object_get_data(prp->replica_object);
+ /*
+ * Get the info about the entryrdn vs. entrydn from the backend.
+ * If NOT is_entryrdn, its ancestor entries are always found prior to an entry.
+ */
+ rc = slapi_lookup_instance_name_by_suffix((char *)slapi_sdn_get_dn(area_sdn), NULL, &instances, 1);
+ if (rc || !instances) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "get the instance name for the suffix \"%s\".\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
+ be = slapi_be_select_by_instance_name(instances[0]);
+ if (!be) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "get the instance for the suffix \"%s\".\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
+ rc = slapi_back_get_info(be, BACK_INFO_IS_ENTRYRDN, (void **)&is_entryrdn);
+ if (is_entryrdn) {
+ /*
+ * Supporting entries out of order -- parent could have a larger id than its children.
+ * Entires are retireved sorted by parentid without the allid threshold.
+ */
+ /* Get suffix */
+ rc = slapi_search_internal_get_entry(area_sdn, NULL, &suffix, repl_get_plugin_identity(PLUGIN_MULTIMASTER_REPLICATION));
+ if (rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "get the suffix entry \"%s\".\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
- /* Time to make sure it exists a keep alive subentry for that replica */
- replica = (Replica*) object_get_data(prp->replica_object);
- if (replica)
- {
- rid = replica_get_rid(replica);
- }
- replica_subentry_check(area_sdn, rid);
-
- slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
- LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
- repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
-
- cb_data.prp = prp;
- cb_data.rc = 0;
- cb_data.num_entries = 0UL;
- cb_data.sleep_on_busy = 0UL;
- cb_data.last_busy = current_time ();
- cb_data.flowcontrol_detection = 0;
- cb_data.lock = PR_NewLock();
-
- /* This allows during perform_operation to check the callback data
- * especially to do flow contol on delta send msgid / recv msgid
- */
- conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+ cb_data.prp = prp;
+ cb_data.rc = 0;
+ cb_data.num_entries = 1UL;
+ cb_data.sleep_on_busy = 0UL;
+ cb_data.last_busy = current_time ();
+ cb_data.flowcontrol_detection = 0;
+ cb_data.lock = PR_NewLock();
+
+ /* This allows during perform_operation to check the callback data
+ * especially to do flow contol on delta send msgid / recv msgid
+ */
+ conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+
+ /* Send suffix first. */
+ rc = send_entry(suffix, (void *)&cb_data);
+ if (rc) {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "Warning: unable to "
+ "send the suffix entry \"%s\" to the consumer.\n", slapi_sdn_get_dn(area_sdn));
+ goto done;
+ }
+
+ /* we need to provide managedsait control so that referral entries can
+ be replicated */
+ ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
+ ctrls[0] = create_managedsait_control ();
+ ctrls[1] = create_backend_control(area_sdn);
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);
+ /* Send the subtree of the suffix in the order of parentid index plus ldapsubentry and nstombstone. */
+ slapi_search_internal_set_pb(pb, slapi_sdn_get_dn (area_sdn),
+ LDAP_SCOPE_SUBTREE, "(|(parentid>=1)(objectclass=ldapsubentry)(objectclass=nstombstone))", NULL, 0, ctrls, NULL,
+ repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
+ cb_data.num_entries = 0UL;
+ } else {
+ /* Original total update */
+ /* we need to provide managedsait control so that referral entries can
+ be replicated */
+ ctrls = (LDAPControl **)slapi_ch_calloc (3, sizeof (LDAPControl *));
+ ctrls[0] = create_managedsait_control ();
+ ctrls[1] = create_backend_control(area_sdn);
+
+ /* Time to make sure it exists a keep alive subentry for that replica */
+ replica = (Replica*) object_get_data(prp->replica_object);
+ if (replica)
+ {
+ rid = replica_get_rid(replica);
+ }
+ replica_subentry_check(area_sdn, rid);
+
+ slapi_search_internal_set_pb (pb, slapi_sdn_get_dn (area_sdn),
+ LDAP_SCOPE_SUBTREE, "(|(objectclass=ldapsubentry)(objectclass=nstombstone)(nsuniqueid=*))", NULL, 0, ctrls, NULL,
+ repl_get_plugin_identity (PLUGIN_MULTIMASTER_REPLICATION), 0);
+
+ cb_data.prp = prp;
+ cb_data.rc = 0;
+ cb_data.num_entries = 0UL;
+ cb_data.sleep_on_busy = 0UL;
+ cb_data.last_busy = current_time ();
+ cb_data.flowcontrol_detection = 0;
+ cb_data.lock = PR_NewLock();
+
+ /* This allows during perform_operation to check the callback data
+ * especially to do flow contol on delta send msgid / recv msgid
+ */
+ conn_set_tot_update_cb(prp->conn, (void *) &cb_data);
+ }
+
/* Before we get started on sending entries to the replica, we need to
* setup things for async propagation:
* 1. Create a thread that will read the LDAP results from the connection.
@@ -470,7 +547,7 @@ retry:
slapi_search_internal_callback_pb (pb, &cb_data /* callback data */,
get_result /* result callback */,
send_entry /* entry callback */,
- NULL /* referral callback*/);
+ NULL /* referral callback*/);
/*
* After completing the sending operation (or optionally failing), we need to clean up
diff --git a/ldap/servers/slapd/back-ldbm/back-ldbm.h b/ldap/servers/slapd/back-ldbm/back-ldbm.h
index 9499292..2d77a8a 100644
--- a/ldap/servers/slapd/back-ldbm/back-ldbm.h
+++ b/ldap/servers/slapd/back-ldbm/back-ldbm.h
@@ -132,7 +132,7 @@ typedef unsigned short u_int16_t;
#define BDB_BACKEND "libback-ldbm" /* This backend plugin */
#define BDB_NEWIDL "newidl" /* new idl format */
#define BDB_RDNFORMAT "rdn-format" /* Subtree rename enabled */
-#define BDB_RDNFORMAT_VERSION "2" /* rdn-format version (by default, 0) */
+#define BDB_RDNFORMAT_VERSION "3" /* rdn-format version (by default, 0) */
#define BDB_DNFORMAT "dn-4514" /* DN format RFC 4514 compliant */
#define BDB_DNFORMAT_VERSION "1" /* DN format version */
@@ -808,11 +808,11 @@ typedef struct _back_search_result_set
/* #define LDBM_ENTRYRDN_OID "2.16.840.1.113730.3.1.2097" */
#define LDBM_ANCESTORID_STR "ancestorid"
-#define LDBM_ENTRYDN_STR "entrydn"
+#define LDBM_ENTRYDN_STR SLAPI_ATTR_ENTRYDN
#define LDBM_ENTRYRDN_STR "entryrdn"
#define LDBM_NUMSUBORDINATES_STR "numsubordinates"
#define LDBM_TOMBSTONE_NUMSUBORDINATES_STR "tombstonenumsubordinates"
-#define LDBM_PARENTID_STR "parentid"
+#define LDBM_PARENTID_STR SLAPI_ATTR_PARENTID
/* Name of psuedo attribute used to track default indexes */
#define LDBM_PSEUDO_ATTR_DEFAULT ".default"
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 33506f4..9e74d9b 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -7485,6 +7485,11 @@ ldbm_back_get_info(Slapi_Backend *be, int cmd, void **info)
}
break;
}
+ case BACK_INFO_IS_ENTRYRDN:
+ {
+ *(int *)info = entryrdn_get_switch();
+ break;
+ }
default:
break;
}
diff --git a/ldap/servers/slapd/back-ldbm/filterindex.c b/ldap/servers/slapd/back-ldbm/filterindex.c
index 9c14de4..9a7e7be 100644
--- a/ldap/servers/slapd/back-ldbm/filterindex.c
+++ b/ldap/servers/slapd/back-ldbm/filterindex.c
@@ -552,6 +552,7 @@ range_candidates(
struct berval *low = NULL, *high = NULL;
struct berval **lows = NULL, **highs = NULL;
back_txn txn = {NULL};
+ int operator = 0;
LDAPDebug(LDAP_DEBUG_TRACE, "=> range_candidates attr=%s\n", type, 0, 0);
@@ -578,18 +579,21 @@ range_candidates(
}
high = attr_value_lowest(highs, slapi_berval_cmp);
}
-
+ if (entryrdn_get_switch() && !strcasecmp(type, LDBM_PARENTID_STR)) {
+ /* parentid is treated specially that is needed for the bulk import. (See #48755) */
+ operator = SLAPI_OP_RANGE_NO_IDL_SORT|SLAPI_OP_RANGE_NO_ALLIDS;
+ }
if (low == NULL) {
- idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY,
- SLAPI_OP_LESS_OR_EQUAL,
+ operator |= SLAPI_OP_LESS_OR_EQUAL;
+ idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator,
high, NULL, 0, &txn, err, allidslimit);
} else if (high == NULL) {
- idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY,
- SLAPI_OP_GREATER_OR_EQUAL,
+ operator |= SLAPI_OP_GREATER_OR_EQUAL;
+ idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator,
low, NULL, 0, &txn, err, allidslimit);
} else {
- idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY,
- SLAPI_OP_LESS_OR_EQUAL,
+ operator |= SLAPI_OP_LESS_OR_EQUAL;
+ idl = index_range_read_ext(pb, be, type, (char*)indextype_EQUALITY, operator,
low, high, 1, &txn, err, allidslimit);
}
diff --git a/ldap/servers/slapd/back-ldbm/idl_new.c b/ldap/servers/slapd/back-ldbm/idl_new.c
index 25b3bfa..6ca6c96 100644
--- a/ldap/servers/slapd/back-ldbm/idl_new.c
+++ b/ldap/servers/slapd/back-ldbm/idl_new.c
@@ -350,17 +350,31 @@ error:
return idl;
}
+typedef struct _range_id_pair {
+ ID key;
+ ID id;
+} idl_range_id_pair;
/*
* Perform the range search in the idl layer instead of the index layer
* to improve the performance.
*/
+/*
+ * NOTE:
+ * In the total update (bulk import), an entry requires its ancestors already added.
+ * To guarantee it, the range search with parentid is used with setting the flag
+ * SLAPI_OP_RANGE_NO_IDL_SORT in operator.
+ *
+ * If the flag is set,
+ * 1. the IDList is not sorted by the ID.
+ * 2. holding to add an ID to the IDList unless the key is found in the IDList.
+ */
IDList *
idl_new_range_fetch(
- backend *be,
- DB* db,
- DBT *lowerkey,
+ backend *be,
+ DB* db,
+ DBT *lowerkey,
DBT *upperkey,
- DB_TXN *txn,
+ DB_TXN *txn,
struct attrinfo *ai,
int *flag_err,
int allidslimit,
@@ -380,7 +394,7 @@ idl_new_range_fetch(
size_t count = 0;
#ifdef DB_USE_BULK_FETCH
/* beware that a large buffer on the stack might cause a stack overflow on some platforms */
- char buffer[BULK_FETCH_BUFFER_SIZE];
+ char buffer[BULK_FETCH_BUFFER_SIZE];
void *ptr;
DBT dataret;
#endif
@@ -388,15 +402,21 @@ idl_new_range_fetch(
struct ldbminfo *li = (struct ldbminfo *)be->be_database->plg_private;
time_t curtime;
void *saved_key = NULL;
+ int coreop = operator & SLAPI_OP_RANGE;
+ ID key;
+ ID suffix;
+ idl_range_id_pair *leftover = NULL;
+ size_t leftoverlen = 32;
+ int leftovercnt = 0;
if (NULL == flag_err) {
return NULL;
}
- *flag_err = 0;
if (NEW_IDL_NOOP == *flag_err) {
return NULL;
}
+
dblayer_txn_init(li, &s_txn);
if (txn) {
dblayer_read_txn_begin(be, txn, &s_txn);
@@ -460,7 +480,7 @@ idl_new_range_fetch(
#ifdef DB_USE_BULK_FETCH
while (cur_key.data &&
(upperkey && upperkey->data ?
- ((operator == SLAPI_OP_LESS) ?
+ ((coreop == SLAPI_OP_LESS) ?
DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 :
DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) :
PR_TRUE /* e.g., (x > a) */)) {
@@ -496,6 +516,9 @@ idl_new_range_fetch(
goto error;
}
}
+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) {
+ key = (ID)strtol((char *)cur_key.data+1 , (char **)NULL, 10);
+ }
while (PR_TRUE) {
DB_MULTIPLE_NEXT(ptr, &data, dataret.data, dataret.size);
if (dataret.data == NULL) break;
@@ -524,7 +547,29 @@ idl_new_range_fetch(
/* note the last id read to check for dups */
lastid = id;
/* we got another ID, add it to our IDL */
- idl_rc = idl_append_extend(&idl, id);
+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) {
+ if (!idl) {
+ /* First time. Keep the suffix ID. */
+ suffix = key;
+ idl_rc = idl_append_extend(&idl, id);
+ } else if ((key == suffix) || idl_id_is_in_idlist(idl, key)) {
+ /* the parent is the suffix or already in idl. */
+ idl_rc = idl_append_extend(&idl, id);
+ } else {
+ /* Otherwise, keep the {key,id} in leftover array */
+ if (!leftover) {
+ leftover = (idl_range_id_pair *)slapi_ch_calloc(leftoverlen, sizeof(idl_range_id_pair));
+ } else if (leftovercnt == leftoverlen) {
+ leftover = (idl_range_id_pair *)slapi_ch_realloc((char *)leftover, 2 * leftoverlen * sizeof(idl_range_id_pair));
+ memset(leftover + leftovercnt, 0, leftoverlen);
+ leftoverlen *= 2;
+ }
+ leftover[leftovercnt].key = key;
+ leftover[leftovercnt++].id = id;
+ }
+ } else {
+ idl_rc = idl_append_extend(&idl, id);
+ }
if (idl_rc) {
LDAPDebug1Arg(LDAP_DEBUG_ANY,
"unable to extend id list (err=%d)\n", idl_rc);
@@ -581,7 +626,7 @@ idl_new_range_fetch(
}
#else
while (upperkey && upperkey->data ?
- ((operator == SLAPI_OP_LESS) ?
+ ((coreop == SLAPI_OP_LESS) ?
DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) < 0 :
DBTcmp(&cur_key, upperkey, ai->ai_key_cmp_fn) <= 0) :
PR_TRUE /* e.g., (x > a) */) {
@@ -698,9 +743,27 @@ error:
*flag_err = ret;
/* sort idl */
- if (idl && !ALLIDS(idl)) {
- qsort((void *)&idl->b_ids[0], idl->b_nids,
- (size_t)sizeof(ID), idl_sort_cmp);
+ if (idl && !ALLIDS(idl) && !(operator & SLAPI_OP_RANGE_NO_IDL_SORT)) {
+ qsort((void *)&idl->b_ids[0], idl->b_nids, (size_t)sizeof(ID), idl_sort_cmp);
+ }
+ if (operator & SLAPI_OP_RANGE_NO_IDL_SORT) {
+ int i;
+ int left = leftovercnt;
+ while (left) {
+ for (i = 0; i < leftovercnt; i++) {
+ if (leftover[i].key && idl_id_is_in_idlist(idl, leftover[i].key)) {
+ idl_rc = idl_append_extend(&idl, leftover[i].id);
+ if (idl_rc) {
+ LDAPDebug1Arg(LDAP_DEBUG_ANY, "unable to extend id list (err=%d)\n", idl_rc);
+ idl_free(&idl);
+ return NULL;
+ }
+ leftover[i].key = 0;
+ left--;
+ }
+ }
+ }
+ slapi_ch_free((void **)&leftover);
}
return idl;
}
diff --git a/ldap/servers/slapd/back-ldbm/index.c b/ldap/servers/slapd/back-ldbm/index.c
index 00e78a7..81d6621 100644
--- a/ldap/servers/slapd/back-ldbm/index.c
+++ b/ldap/servers/slapd/back-ldbm/index.c
@@ -1232,6 +1232,7 @@ index_range_read_ext(
int timelimit = -1;
back_search_result_set *sr = NULL;
int isroot = 0;
+ int coreop = operator & SLAPI_OP_RANGE;
if (!pb) {
LDAPDebug(LDAP_DEBUG_ANY, "index_range_read: NULL pblock\n",
@@ -1278,7 +1279,7 @@ index_range_read_ext(
LDAPDebug1Arg(LDAP_DEBUG_TRACE, "index_range_read lookthrough_limit=%d\n",
lookthrough_limit);
- switch( operator ) {
+ switch( coreop ) {
case SLAPI_OP_LESS:
case SLAPI_OP_LESS_OR_EQUAL:
case SLAPI_OP_GREATER_OR_EQUAL:
@@ -1287,7 +1288,7 @@ index_range_read_ext(
default:
LDAPDebug( LDAP_DEBUG_ANY,
"<= index_range_read(%s,%s) NULL (operator %i)\n",
- type, prefix, operator );
+ type, prefix, coreop );
index_free_prefix(prefix);
return( NULL );
}
@@ -1343,7 +1344,7 @@ index_range_read_ext(
if (range != 1) { /* open range search */
char *tmpbuf = NULL;
/* this is a search with only one boundary value */
- switch( operator ) {
+ switch( coreop ) {
case SLAPI_OP_LESS:
case SLAPI_OP_LESS_OR_EQUAL:
lowerkey.dptr = slapi_ch_strdup(prefix);
@@ -1451,8 +1452,17 @@ index_range_read_ext(
cur_key.data = lowerkey.data;
cur_key.size = lowerkey.size;
lowerkey.data = NULL; /* Don't need this any more, since the memory will be freed from cur_key */
- if (operator == SLAPI_OP_GREATER) {
- *err = index_range_next_key(db,&cur_key,db_txn);
+ *err = 0;
+ if (coreop == SLAPI_OP_GREATER) {
+ *err = index_range_next_key(db, &cur_key, db_txn);
+ if (*err) {
+ LDAPDebug(LDAP_DEBUG_ANY, "<= index_range_read(%s,%s) op==GREATER, no next key: %i)\n",
+ type, prefix, *err );
+ goto error;
+ }
+ }
+ if (operator & SLAPI_OP_RANGE_NO_ALLIDS) {
+ *err = NEW_IDL_NO_ALLID;
}
if (idl_get_idl_new()) { /* new idl */
idl = idl_new_range_fetch(be, db, &cur_key, &upperkey, db_txn,
@@ -1462,7 +1472,7 @@ index_range_read_ext(
int retry_count = 0;
while (*err == 0 &&
(upperkey.data &&
- (operator == SLAPI_OP_LESS) ?
+ (coreop == SLAPI_OP_LESS) ?
DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) < 0 :
DBTcmp(&cur_key, &upperkey, ai->ai_key_cmp_fn) <= 0)) {
/* exit the loop when we either run off the end of the table,
diff --git a/ldap/servers/slapd/back-ldbm/init.c b/ldap/servers/slapd/back-ldbm/init.c
index a531abb..04cc936 100644
--- a/ldap/servers/slapd/back-ldbm/init.c
+++ b/ldap/servers/slapd/back-ldbm/init.c
@@ -36,7 +36,7 @@ ldbm_back_add_schema( Slapi_PBlock *pb )
SLAPI_ATTR_FLAG_NOUSERMOD );
rc |= slapi_add_internal_attr_syntax( LDBM_PARENTID_STR,
- LDBM_PARENTID_OID, DIRSTRING_SYNTAX_OID, CASEIGNOREMATCH_NAME,
+ LDBM_PARENTID_OID, INTEGER_SYNTAX_OID, INTEGERMATCH_NAME,
SLAPI_ATTR_FLAG_SINGLE|SLAPI_ATTR_FLAG_NOUSERMOD );
rc |= slapi_add_internal_attr_syntax( "entryid",
diff --git a/ldap/servers/slapd/back-ldbm/misc.c b/ldap/servers/slapd/back-ldbm/misc.c
index fe3d01b..77c1e70 100644
--- a/ldap/servers/slapd/back-ldbm/misc.c
+++ b/ldap/servers/slapd/back-ldbm/misc.c
@@ -79,6 +79,7 @@ static const char *systemIndexes[] = {
SLAPI_ATTR_NSCP_ENTRYDN,
ATTR_NSDS5_REPLCONFLICT,
SLAPI_ATTR_ENTRYUSN,
+ SLAPI_ATTR_PARENTID,
NULL
};
diff --git a/ldap/servers/slapd/entry.c b/ldap/servers/slapd/entry.c
index 3124ff6..d38f970 100644
--- a/ldap/servers/slapd/entry.c
+++ b/ldap/servers/slapd/entry.c
@@ -3429,8 +3429,8 @@ slapi_entry_rename(Slapi_Entry *e, const char *newrdn, int deleteoldrdn, Slapi_D
/* We remove the parentid and entrydn since the backend will change these.
* We don't want to give the caller an inconsistent entry. */
- slapi_entry_attr_delete(e, "parentid");
- slapi_entry_attr_delete(e, "entrydn");
+ slapi_entry_attr_delete(e, SLAPI_ATTR_PARENTID);
+ slapi_entry_attr_delete(e, SLAPI_ATTR_ENTRYDN);
/* Build new DN. If newsuperior is set, just use "newrdn,newsuperior". If
* newsuperior is not set, need to add newrdn to old superior. */
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 6615868..4ce6a3d 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -529,12 +529,16 @@ typedef int (*SyntaxEnumFunc)(char **names, Slapi_PluginDesc *plugindesc,
/* OIDs for some commonly used matching rules */
#define DNMATCH_OID "2.5.13.1" /* distinguishedNameMatch */
#define CASEIGNOREMATCH_OID "2.5.13.2" /* caseIgnoreMatch */
+#define INTEGERMATCH_OID "2.5.13.14" /* integerMatch */
+#define INTEGERORDERINGMATCH_OID "2.5.13.15" /* integerOrderingMatch */
#define INTFIRSTCOMPMATCH_OID "2.5.13.29" /* integerFirstComponentMatch */
#define OIDFIRSTCOMPMATCH_OID "2.5.13.30" /* objectIdentifierFirstComponentMatch */
/* Names for some commonly used matching rules */
#define DNMATCH_NAME "distinguishedNameMatch"
#define CASEIGNOREMATCH_NAME "caseIgnoreMatch"
+#define INTEGERMATCH_NAME "integerMatch"
+#define INTEGERORDERINGMATCH_NAME "integerOrderingMatch"
#define INTFIRSTCOMPMATCH_NAME "integerFirstComponentMatch"
#define OIDFIRSTCOMPMATCH_NAME "objectIdentifierFirstComponentMatch"
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index a193aad..b296060 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -483,6 +483,7 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...)
#define SLAPI_ATTR_ENTRYDN "entrydn"
#define SLAPI_ATTR_DN "dn"
#define SLAPI_ATTR_RDN "rdn"
+#define SLAPI_ATTR_PARENTID "parentid"
#define SLAPI_ATTR_UNIQUEID_LENGTH 10
#define SLAPI_ATTR_OBJECTCLASS_LENGTH 11
#define SLAPI_ATTR_VALUE_TOMBSTONE_LENGTH 11
@@ -494,6 +495,7 @@ NSPR_API(PRUint32) PR_fprintf(struct PRFileDesc* fd, const char *fmt, ...)
#define SLAPI_ATTR_ENTRYDN_LENGTH 7
#define SLAPI_ATTR_DN_LENGTH 2
#define SLAPI_ATTR_RDN_LENGTH 3
+#define SLAPI_ATTR_PARENTID_LENGTH 8
/* plugin shared config area */
#define SLAPI_PLUGIN_SHARED_CONFIG_AREA "nsslapd-pluginConfigArea"
@@ -6975,6 +6977,9 @@ typedef struct slapi_plugindesc {
#define SLAPI_OP_GREATER_OR_EQUAL 4
#define SLAPI_OP_GREATER 5
#define SLAPI_OP_SUBSTRING 6
+#define SLAPI_OP_RANGE 0xff
+#define SLAPI_OP_RANGE_NO_IDL_SORT 0x100
+#define SLAPI_OP_RANGE_NO_ALLIDS 0x200
/* Defined values of SLAPI_PLUGIN_MR_USAGE: */
#define SLAPI_PLUGIN_MR_USAGE_INDEX 0
@@ -7552,7 +7557,8 @@ enum
BACK_INFO_CRYPT_ENCRYPT_VALUE, /* Ctrl: clcrypt_encrypt_value */
BACK_INFO_CRYPT_DECRYPT_VALUE, /* Ctrl: clcrypt_decrypt_value */
BACK_INFO_DIRECTORY, /* Get the directory path */
- BACK_INFO_LOG_DIRECTORY /* Get the txn log directory */
+ BACK_INFO_LOG_DIRECTORY, /* Get the txn log directory */
+ BACK_INFO_IS_ENTRYRDN /* Get the flag for entryrdn */
};
struct _back_info_crypt_init {
7 years, 10 months