Branch '389-ds-base-1.2.11' - ldap/servers
by Richard Allen Megginson
ldap/servers/slapd/entrywsi.c | 2 ++
1 file changed, 2 insertions(+)
New commits:
commit 1dbf87ae7e9203ffe5968994701bfc8b71605c8f
Author: Rich Megginson <rmeggins(a)redhat.com>
Date: Thu Jul 10 14:18:06 2014 -0600
Ticket #47692 single valued attribute replicated ADD does not work
https://fedorahosted.org/389/ticket/47692
Reviewed by: nhosoi (Thanks!)
Branch: 389-ds-base-1.2.11
Fix Description: Previous fix was incomplete - needed to move the
deleted attribute to the present attribute list.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
(cherry picked from commit 4b54366593e50d71239588343f532eab39ef56e0)
(cherry picked from commit 60d8bf9f77c298197e4c8ecee585f8a2e6c0457c)
(cherry picked from commit 45dcda2425a865bf529ad0c8423664091392b865)
(cherry picked from commit aead2be2a9cc5392713c7849c18a9bf218beb533)
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index 8cee986..919ff91 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -1184,6 +1184,8 @@ resolve_attribute_state_single_valued(Slapi_Entry *e, Slapi_Attr *a, int attribu
*/
/* just remove the deleted value */
entry_deleted_value_to_zapped_value(a,pending_value);
+ /* move the attribute to the present attributes list */
+ entry_deleted_attribute_to_present_attribute(e,a);
pending_value = NULL;
attr_set_deletion_csn(a,NULL);
return; /* we are done - we are keeping the present value */
9 years, 9 months
ldap/servers
by Richard Allen Megginson
ldap/servers/slapd/entrywsi.c | 2 ++
1 file changed, 2 insertions(+)
New commits:
commit 4b54366593e50d71239588343f532eab39ef56e0
Author: Rich Megginson <rmeggins(a)redhat.com>
Date: Thu Jul 10 14:18:06 2014 -0600
Ticket #47692 single valued attribute replicated ADD does not work
https://fedorahosted.org/389/ticket/47692
Reviewed by: nhosoi (Thanks!)
Branch: master
Fix Description: Previous fix was incomplete - needed to move the
deleted attribute to the present attribute list.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index 5059457..5512b5b 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -1402,6 +1402,8 @@ resolve_attribute_state_single_valued(Slapi_Entry *e, Slapi_Attr *a, int attribu
*/
/* just remove the deleted value */
entry_deleted_value_to_zapped_value(a,pending_value);
+ /* move the attribute to the present attributes list */
+ entry_deleted_attribute_to_present_attribute(e,a);
pending_value = NULL;
attr_set_deletion_csn(a,NULL);
return; /* we are done - we are keeping the present value */
9 years, 9 months
Branch '389-ds-base-1.3.1' - ldap/servers
by Richard Allen Megginson
ldap/servers/slapd/entrywsi.c | 2 ++
1 file changed, 2 insertions(+)
New commits:
commit 45dcda2425a865bf529ad0c8423664091392b865
Author: Rich Megginson <rmeggins(a)redhat.com>
Date: Thu Jul 10 14:18:06 2014 -0600
Ticket #47692 single valued attribute replicated ADD does not work
https://fedorahosted.org/389/ticket/47692
Reviewed by: nhosoi (Thanks!)
Branch: 389-ds-base-1.3.1
Fix Description: Previous fix was incomplete - needed to move the
deleted attribute to the present attribute list.
Platforms tested: RHEL6 x86_64
Flag Day: no
Doc impact: no
(cherry picked from commit 4b54366593e50d71239588343f532eab39ef56e0)
(cherry picked from commit 60d8bf9f77c298197e4c8ecee585f8a2e6c0457c)
diff --git a/ldap/servers/slapd/entrywsi.c b/ldap/servers/slapd/entrywsi.c
index ba844c6..f3b9f1a 100644
--- a/ldap/servers/slapd/entrywsi.c
+++ b/ldap/servers/slapd/entrywsi.c
@@ -1218,6 +1218,8 @@ resolve_attribute_state_single_valued(Slapi_Entry *e, Slapi_Attr *a, int attribu
*/
/* just remove the deleted value */
entry_deleted_value_to_zapped_value(a,pending_value);
+ /* move the attribute to the present attributes list */
+ entry_deleted_attribute_to_present_attribute(e,a);
pending_value = NULL;
attr_set_deletion_csn(a,NULL);
return; /* we are done - we are keeping the present value */
9 years, 9 months
ldap/servers
by Noriko Hosoi
ldap/servers/plugins/replication/windows_inc_protocol.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
New commits:
commit e2c55059a28207a0d288bcfbac4e2f8f07aa4ace
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Thu Jul 10 12:17:17 2014 -0700
Ticket #47852 - Updating winsync one-way sync does not affect the behaviour dynamically
Description: The value of the oneWaySync attribute is retrieved from
the Windows Sync Agreement outside of the incremental loop. That is,
if the value is modified after the incremental loop is started, the
change is not applied to the behaviour. This patch picks the value
inside the loop and makes the change affect the sync behaviour
dynamically.
https://fedorahosted.org/389/ticket/47852
Reviewed by rmeggins(a)redhat.com (Thank you, Rich!!)
diff --git a/ldap/servers/plugins/replication/windows_inc_protocol.c b/ldap/servers/plugins/replication/windows_inc_protocol.c
index f9bc409..d62deec 100644
--- a/ldap/servers/plugins/replication/windows_inc_protocol.c
+++ b/ldap/servers/plugins/replication/windows_inc_protocol.c
@@ -312,11 +312,10 @@ windows_inc_run(Private_Repl_Protocol *prp)
windows_private_load_dirsync_cookie(prp->agmt);
- one_way = windows_private_get_one_way(prp->agmt);
-
do {
int rc = 0;
+ one_way = windows_private_get_one_way(prp->agmt);
/* Take action, based on current state, and compute new state. */
switch (current_state)
{
9 years, 9 months
ldap/servers
by Ludwig Krispenz
ldap/servers/plugins/replication/repl5_agmt.c | 5 +++++
1 file changed, 5 insertions(+)
New commits:
commit a9b0d43102ee5ad3de2889cfb01bc452cd254848
Author: Ludwig Krispenz <lkrispen(a)redhat.com>
Date: Thu Jul 10 14:47:09 2014 +0200
Ticket 47846 - server crashes deleting a replication agreement
Bug Description: when an agreement is deleted, the RUV in
the repl agreement object is cleaned.
But if the agreement is disabled, the
object doesn't exist and an attempt to access
it crashes the server
Fix Description: remove maxcsn only if protocol is started
https://fedorahosted.org/389/ticket/47846
Reviewed by: Rich, thanks
diff --git a/ldap/servers/plugins/replication/repl5_agmt.c b/ldap/servers/plugins/replication/repl5_agmt.c
index 9d617e7..7c5c37c 100644
--- a/ldap/servers/plugins/replication/repl5_agmt.c
+++ b/ldap/servers/plugins/replication/repl5_agmt.c
@@ -3032,6 +3032,11 @@ agmt_remove_maxcsn(Repl_Agmt *ra)
char *attrs[2];
int rc;
+ if (ra->protocol == NULL) {
+ /* nothing to do, agmt is not started */
+ return;
+ }
+
pb = slapi_pblock_new();
if (!pb) {
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "agmt_set_maxcsn: Out of memory\n");
9 years, 9 months
dirsrvtests/tickets ldap/ldif ldap/servers
by thierry bordaz
dirsrvtests/tickets/ticket47823_test.py | 1046 ++++++++++++++++++++++++++++++++
ldap/ldif/template-dse.ldif.in | 5
ldap/servers/plugins/uiduniq/uid.c | 457 ++++++++++---
3 files changed, 1406 insertions(+), 102 deletions(-)
New commits:
commit c66b5e9f83a81d75d8137e86b5e7631507592099
Author: Thierry bordaz (tbordaz) <tbordaz(a)redhat.com>
Date: Mon Jul 7 15:29:58 2014 +0200
Ticket 47823 - attribute uniqueness enforced on all subtrees
Bug Description:
The attribute uniqueness plugin enforces uniqueness on
each defined subtree where the modified/added entry is located.
We need the ability to check uniqueness across all the defined subtrees.
It requires a new configuration attribute for the plugin.
The name of the new configuration attribute is more explicit ('uniqueness-across-all-subtrees')
than the old style: nsslapd-pluginarg0, nsslapd-pluginarg1,...
The new attribute is only supported in new configuration style
* uniqueness-attribute-name: uid
* uniqueness-subtrees: dc=people,dc=example,dc=com
* uniqueness-subtrees: dc=sales, dc=example,dc=com
* uniqueness-across-all-subtrees: on
Fix Description:
The fix support new configuration style but still support the old one:
* nsslapd-pluginarg0: uid
* nsslapd-pluginarg1: dc=people,dc=example,dc=com
* nsslapd-pluginarg2: dc=sales, dc=example,dc=com
A mix of configuration styles likely results in an invalid configuration, which
prevents the plugin from starting -> prevents the server from starting
https://fedorahosted.org/389/ticket/47823
Reviewed by: Rich Megginson (thanks Rich for reviews and tips !!)
Platforms tested: F17/F20
Flag Day: no
Doc impact: yes
diff --git a/dirsrvtests/tickets/ticket47823_test.py b/dirsrvtests/tickets/ticket47823_test.py
new file mode 100644
index 0000000..2322e71
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47823_test.py
@@ -0,0 +1,1046 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import socket
+import pytest
+import re
+import shutil
+from lib389 import DirSrv, Entry, tools
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from constants import *
+
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+PROVISIONING_CN = "provisioning"
+PROVISIONING_DN = "cn=%s,%s" % (PROVISIONING_CN, SUFFIX)
+
+ACTIVE_CN = "accounts"
+STAGE_CN = "staged users"
+DELETE_CN = "deleted users"
+ACTIVE_DN = "cn=%s,%s" % (ACTIVE_CN, SUFFIX)
+STAGE_DN = "cn=%s,%s" % (STAGE_CN, PROVISIONING_DN)
+DELETE_DN = "cn=%s,%s" % (DELETE_CN, PROVISIONING_DN)
+
+STAGE_USER_CN = "stage guy"
+STAGE_USER_DN = "cn=%s,%s" % (STAGE_USER_CN, STAGE_DN)
+
+ACTIVE_USER_CN = "active guy"
+ACTIVE_USER_DN = "cn=%s,%s" % (ACTIVE_USER_CN, ACTIVE_DN)
+
+ACTIVE_USER_1_CN = "test_1"
+ACTIVE_USER_1_DN = "cn=%s,%s" % (ACTIVE_USER_1_CN, ACTIVE_DN)
+ACTIVE_USER_2_CN = "test_2"
+ACTIVE_USER_2_DN = "cn=%s,%s" % (ACTIVE_USER_2_CN, ACTIVE_DN)
+
+STAGE_USER_1_CN = ACTIVE_USER_1_CN
+STAGE_USER_1_DN = "cn=%s,%s" % (STAGE_USER_1_CN, STAGE_DN)
+STAGE_USER_2_CN = ACTIVE_USER_2_CN
+STAGE_USER_2_DN = "cn=%s,%s" % (STAGE_USER_2_CN, STAGE_DN)
+
+ALL_CONFIG_ATTRS = ['nsslapd-pluginarg0', 'nsslapd-pluginarg1', 'nsslapd-pluginarg2',
+ 'uniqueness-attribute-name', 'uniqueness-subtrees', 'uniqueness-across-all-subtrees']
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ try:
+ standalone.start(timeout=10)
+ except ldap.SERVER_DOWN:
+ pass
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+def _header(topology, label):
+ topology.standalone.log.info("\n\n###############################################")
+ topology.standalone.log.info("#######")
+ topology.standalone.log.info("####### %s" % label)
+ topology.standalone.log.info("#######")
+ topology.standalone.log.info("###############################################")
+
+def _uniqueness_config_entry(topology, name=None):
+ if not name:
+ return None
+
+ ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
+ "(objectclass=nsSlapdPlugin)",
+ ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
+ 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type',
+ 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
+ 'nsslapd-pluginDescription'])
+ ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN)
+ return ent
+
+def _build_config(topology, attr_name='cn', subtree_1=None, subtree_2=None, type_config='old', across_subtrees=False):
+ assert topology
+ assert attr_name
+ assert subtree_1
+
+ if type_config == 'old':
+ # enable the 'cn' uniqueness on Active
+ config = _uniqueness_config_entry(topology, attr_name)
+ config.setValue('nsslapd-pluginarg0', attr_name)
+ config.setValue('nsslapd-pluginarg1', subtree_1)
+ if subtree_2:
+ config.setValue('nsslapd-pluginarg2', subtree_2)
+ else:
+ # prepare the config entry
+ config = _uniqueness_config_entry(topology, attr_name)
+ config.setValue('uniqueness-attribute-name', attr_name)
+ config.setValue('uniqueness-subtrees', subtree_1)
+ if subtree_2:
+ config.setValue('uniqueness-subtrees', subtree_2)
+ if across_subtrees:
+ config.setValue('uniqueness-across-all-subtrees', 'on')
+ return config
+
+def _active_container_invalid_cfg_add(topology):
+ '''
+ Check uniqueness is not enforced with ADD (invalid config)
+ '''
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
+
+ topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology.standalone.delete_s(ACTIVE_USER_2_DN)
+
+def _active_container_add(topology, type_config='old'):
+ '''
+ Check uniqueness in a single container (Active)
+ Add an entry with a given 'cn', then check we can not add an entry with the same 'cn' value
+
+ '''
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+
+ # remove the 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.restart(timeout=120)
+
+ topology.standalone.log.info('Uniqueness not enforced: create the entries')
+
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
+
+ topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology.standalone.delete_s(ACTIVE_USER_2_DN)
+
+
+ topology.standalone.log.info('Uniqueness enforced: checks second entry is rejected')
+
+ # enable the 'cn' uniqueness on Active
+ topology.standalone.add_s(config)
+ topology.standalone.restart(timeout=120)
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
+
+ try:
+ topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN]})))
+ except ldap.CONSTRAINT_VIOLATION:
+ # yes it is expected
+ pass
+
+ # cleanup the stuff now
+ topology.standalone.delete_s(config.dn)
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+
+
+
+def _active_container_mod(topology, type_config='old'):
+ '''
+ Check uniqueness in a single container (active)
+ Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value
+
+ '''
+
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+
+ # enable the 'cn' uniqueness on Active
+ topology.standalone.add_s(config)
+ topology.standalone.restart(timeout=120)
+
+ topology.standalone.log.info('Uniqueness enforced: checks MOD ADD entry is rejected')
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
+
+ topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': ACTIVE_USER_2_CN})))
+
+ try:
+ topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_ADD, 'cn', ACTIVE_USER_1_CN)])
+ except ldap.CONSTRAINT_VIOLATION:
+ # yes it is expected
+ pass
+
+ topology.standalone.log.info('Uniqueness enforced: checks MOD REPLACE entry is rejected')
+ try:
+ topology.standalone.modify_s(ACTIVE_USER_2_DN, [(ldap.MOD_REPLACE, 'cn', [ACTIVE_USER_1_CN, ACTIVE_USER_2_CN])])
+ except ldap.CONSTRAINT_VIOLATION:
+ # yes it is expected
+ pass
+
+ # cleanup the stuff now
+ topology.standalone.delete_s(config.dn)
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology.standalone.delete_s(ACTIVE_USER_2_DN)
+
+def _active_container_modrdn(topology, type_config='old'):
+ '''
+ Check uniqueness in a single container
+ Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
+
+ '''
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config=type_config, across_subtrees=False)
+
+ # enable the 'cn' uniqueness on Active
+ topology.standalone.add_s(config)
+ topology.standalone.restart(timeout=120)
+
+ topology.standalone.log.info('Uniqueness enforced: checks MODRDN entry is rejected')
+
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': [ACTIVE_USER_1_CN, 'dummy']})))
+
+ topology.standalone.add_s(Entry((ACTIVE_USER_2_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_2_CN,
+ 'cn': ACTIVE_USER_2_CN})))
+
+ try:
+ topology.standalone.rename_s(ACTIVE_USER_2_DN, 'cn=dummy', delold=0)
+ except ldap.CONSTRAINT_VIOLATION:
+ # yes it is expected
+ pass
+
+
+ # cleanup the stuff now
+ topology.standalone.delete_s(config.dn)
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology.standalone.delete_s(ACTIVE_USER_2_DN)
+
+def _active_stage_containers_add(topology, type_config='old', across_subtrees=False):
+ '''
+ Check uniqueness in several containers
+ Add an entry on a container with a given 'cn'
+ with across_subtrees=False check we CAN add an entry with the same 'cn' value on the other container
+ with across_subtrees=True check we CAN NOT add an entry with the same 'cn' value on the other container
+
+ '''
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+
+ topology.standalone.add_s(config)
+ topology.standalone.restart(timeout=120)
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
+ try:
+
+ # adding an entry on a separated contains with the same 'cn'
+ topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': ACTIVE_USER_1_CN})))
+ except ldap.CONSTRAINT_VIOLATION:
+ assert across_subtrees
+
+ # cleanup the stuff now
+ topology.standalone.delete_s(config.dn)
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology.standalone.delete_s(STAGE_USER_1_DN)
+
+def _active_stage_containers_mod(topology, type_config='old', across_subtrees=False):
+ '''
+ Check uniqueness in a several containers
+ Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container
+
+ '''
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+
+ topology.standalone.add_s(config)
+ topology.standalone.restart(timeout=120)
+ # adding an entry on active with a different 'cn'
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': ACTIVE_USER_2_CN})))
+
+ # adding an entry on a stage with a different 'cn'
+ topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': STAGE_USER_1_CN})))
+
+ try:
+
+ # modify add same value
+ topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_ADD, 'cn', [ACTIVE_USER_2_CN])])
+ except ldap.CONSTRAINT_VIOLATION:
+ assert across_subtrees
+
+ topology.standalone.delete_s(STAGE_USER_1_DN)
+ topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': STAGE_USER_2_CN})))
+ try:
+ # modify replace same value
+ topology.standalone.modify_s(STAGE_USER_1_DN, [(ldap.MOD_REPLACE, 'cn', [STAGE_USER_2_CN, ACTIVE_USER_1_CN])])
+ except ldap.CONSTRAINT_VIOLATION:
+ assert across_subtrees
+
+ # cleanup the stuff now
+ topology.standalone.delete_s(config.dn)
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+ topology.standalone.delete_s(STAGE_USER_1_DN)
+
+def _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False):
+ '''
+ Check uniqueness in a several containers
+ Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container
+
+ '''
+
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=STAGE_DN, type_config=type_config, across_subtrees=False)
+
+ # enable the 'cn' uniqueness on Active and Stage
+ topology.standalone.add_s(config)
+ topology.standalone.restart(timeout=120)
+ topology.standalone.add_s(Entry((ACTIVE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': ACTIVE_USER_1_CN,
+ 'cn': [ACTIVE_USER_1_CN, 'dummy']})))
+
+ topology.standalone.add_s(Entry((STAGE_USER_1_DN, {
+ 'objectclass': "top person".split(),
+ 'sn': STAGE_USER_1_CN,
+ 'cn': STAGE_USER_1_CN})))
+
+
+ try:
+
+ topology.standalone.rename_s(STAGE_USER_1_DN, 'cn=dummy', delold=0)
+
+ # check stage entry has 'cn=dummy'
+ stage_ent = topology.standalone.getEntry("cn=dummy,%s" % (STAGE_DN), ldap.SCOPE_BASE, "objectclass=*", ['cn'])
+ assert stage_ent.hasAttr('cn')
+ found = False
+ for value in stage_ent.getValues('cn'):
+ if value == 'dummy':
+ found = True
+ assert found
+
+ # check active entry has 'cn=dummy'
+ active_ent = topology.standalone.getEntry(ACTIVE_USER_1_DN, ldap.SCOPE_BASE, "objectclass=*", ['cn'])
+ assert active_ent.hasAttr('cn')
+ found = False
+ for value in stage_ent.getValues('cn'):
+ if value == 'dummy':
+ found = True
+ assert found
+
+ topology.standalone.delete_s("cn=dummy,%s" % (STAGE_DN))
+ except ldap.CONSTRAINT_VIOLATION:
+ assert across_subtrees
+ topology.standalone.delete_s(STAGE_USER_1_DN)
+
+
+
+ # cleanup the stuff now
+ topology.standalone.delete_s(config.dn)
+ topology.standalone.delete_s(ACTIVE_USER_1_DN)
+
+def _config_file(topology, action='save'):
+ dse_ldif = topology.standalone.confdir + '/dse.ldif'
+ sav_file = topology.standalone.confdir + '/dse.ldif.ticket47823'
+ if action == 'save':
+ shutil.copy(dse_ldif, sav_file)
+ else:
+ shutil.copy(sav_file, dse_ldif)
+
+def _pattern_errorlog(file, log_pattern):
+ try:
+ _pattern_errorlog.last_pos += 1
+ except AttributeError:
+ _pattern_errorlog.last_pos = 0
+
+ found = None
+ log.debug("_pattern_errorlog: start at offset %d" % _pattern_errorlog.last_pos)
+ file.seek(_pattern_errorlog.last_pos)
+
+ # Use a while true iteration because 'for line in file: hit a
+ # python bug that break file.tell()
+ while True:
+ line = file.readline()
+ log.debug("_pattern_errorlog: [%d] %s" % (file.tell(), line))
+ found = log_pattern.search(line)
+ if ((line == '') or (found)):
+ break
+
+ log.debug("_pattern_errorlog: end at offset %d" % file.tell())
+ _pattern_errorlog.last_pos = file.tell()
+ return found
+
+def test_ticket47823_init(topology):
+ """
+
+ """
+
+ # Enabled the plugins
+ topology.standalone.plugins.enable(name=PLUGIN_ATTR_UNIQUENESS)
+ topology.standalone.restart(timeout=120)
+
+ topology.standalone.add_s(Entry((PROVISIONING_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': PROVISIONING_CN})))
+ topology.standalone.add_s(Entry((ACTIVE_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': ACTIVE_CN})))
+ topology.standalone.add_s(Entry((STAGE_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': STAGE_CN})))
+ topology.standalone.add_s(Entry((DELETE_DN, {'objectclass': "top nscontainer".split(),
+ 'cn': DELETE_CN})))
+ topology.standalone.errorlog_file = open(topology.standalone.errlog, "r")
+
+ topology.standalone.stop(timeout=120)
+ time.sleep(1)
+ topology.standalone.start(timeout=120)
+ time.sleep(3)
+
+
+def test_ticket47823_one_container_add(topology):
+ '''
+ Check uniqueness in a single container
+ Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
+
+ _active_container_add(topology, type_config='old')
+
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
+
+ _active_container_add(topology, type_config='new')
+
+def test_ticket47823_one_container_mod(topology):
+ '''
+ Check uniqueness in a single container
+ Add and entry with a given 'cn', then check we can not modify an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD)")
+
+ _active_container_mod(topology, type_config='old')
+
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD)")
+
+ _active_container_mod(topology, type_config='new')
+
+
+
+def test_ticket47823_one_container_modrdn(topology):
+ '''
+ Check uniqueness in a single container
+ Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
+
+ _active_container_modrdn(topology, type_config='old')
+
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN)")
+
+ _active_container_modrdn(topology, type_config='new')
+
+def test_ticket47823_multi_containers_add(topology):
+ '''
+ Check uniqueness in a several containers
+ Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (ADD) ")
+
+ _active_stage_containers_add(topology, type_config='old', across_subtrees=False)
+
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) ")
+
+ _active_stage_containers_add(topology, type_config='new', across_subtrees=False)
+
+def test_ticket47823_multi_containers_mod(topology):
+ '''
+ Check uniqueness in a several containers
+ Add an entry on a container with a given 'cn', then check we CAN mod an entry with the same 'cn' value on the other container
+
+ '''
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
+
+
+ topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
+ _active_stage_containers_mod(topology, type_config='old', across_subtrees=False)
+
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) on separated container")
+
+
+ topology.standalone.log.info('Uniqueness not enforced: if same \'cn\' modified (add/replace) on separated containers')
+ _active_stage_containers_mod(topology, type_config='new', across_subtrees=False)
+
+def test_ticket47823_multi_containers_modrdn(topology):
+ '''
+ Check uniqueness in a several containers
+ Add and entry with a given 'cn', then check we CAN modrdn an entry with the same 'cn' value on the other container
+
+ '''
+ _header(topology, "With former config (args), check attribute uniqueness with 'cn' (MODRDN) on separated containers")
+
+ topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
+ _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=False)
+
+ topology.standalone.log.info('Uniqueness not enforced: checks MODRDN entry is accepted on separated containers')
+ _active_stage_containers_modrdn(topology, type_config='old')
+
+def test_ticket47823_across_multi_containers_add(topology):
+ '''
+ Check uniqueness across several containers, uniquely with the new configuration
+ Add and entry with a given 'cn', then check we can not add an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (ADD) across several containers")
+
+ _active_stage_containers_add(topology, type_config='old', across_subtrees=True)
+
+def test_ticket47823_across_multi_containers_mod(topology):
+ '''
+ Check uniqueness across several containers, uniquely with the new configuration
+ Add and entry with a given 'cn', then check we can not modifiy an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MOD) across several containers")
+
+ _active_stage_containers_mod(topology, type_config='old', across_subtrees=True)
+
+def test_ticket47823_across_multi_containers_modrdn(topology):
+ '''
+ Check uniqueness across several containers, uniquely with the new configuration
+ Add and entry with a given 'cn', then check we can not modrdn an entry with the same 'cn' value
+
+ '''
+ _header(topology, "With new config (args), check attribute uniqueness with 'cn' (MODRDN) across several containers")
+
+ _active_stage_containers_modrdn(topology, type_config='old', across_subtrees=True)
+
+def test_ticket47823_invalid_config_1(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using old config: arg0 is missing
+ '''
+ _header(topology, "Invalid config (old): arg0 is missing")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+
+ del config.data['nsslapd-pluginarg0']
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: attribute name not defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+def test_ticket47823_invalid_config_2(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using old config: arg1 is missing
+ '''
+ _header(topology, "Invalid config (old): arg1 is missing")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+
+ del config.data['nsslapd-pluginarg1']
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: No valid subtree is defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+def test_ticket47823_invalid_config_3(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using old config: arg0 is missing
+ '''
+ _header(topology, "Invalid config (old): arg0 is missing but new config attrname exists")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+
+ del config.data['nsslapd-pluginarg0']
+ config.data['uniqueness-attribute-name'] = 'cn'
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: objectclass for subtree entries is not defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+def test_ticket47823_invalid_config_4(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using old config: arg1 is missing
+ '''
+ _header(topology, "Invalid config (old): arg1 is missing but new config exist")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='old', across_subtrees=False)
+
+ del config.data['nsslapd-pluginarg1']
+ config.data['uniqueness-subtrees'] = ACTIVE_DN
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: No valid subtree is defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+def test_ticket47823_invalid_config_5(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using new config: uniqueness-attribute-name is missing
+ '''
+ _header(topology, "Invalid config (new): uniqueness-attribute-name is missing")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+
+ del config.data['uniqueness-attribute-name']
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: attribute name not defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+def test_ticket47823_invalid_config_6(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using new config: uniqueness-subtrees is missing
+ '''
+ _header(topology, "Invalid config (new): uniqueness-subtrees is missing")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1=ACTIVE_DN, subtree_2=None, type_config='new', across_subtrees=False)
+
+ del config.data['uniqueness-subtrees']
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: objectclass for subtree entries is not defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+
+def test_ticket47823_invalid_config_7(topology):
+ '''
+ Check that an invalid config is detected. No uniqueness enforced
+ Using new config: uniqueness-subtrees is missing
+ '''
+ _header(topology, "Invalid config (new): uniqueness-subtrees are invalid")
+
+ _config_file(topology, action='save')
+
+ # create an invalid config without arg0
+ config = _build_config(topology, attr_name='cn', subtree_1="this_is dummy DN", subtree_2="an other=dummy DN", type_config='new', across_subtrees=False)
+
+ # replace 'cn' uniqueness entry
+ try:
+ topology.standalone.delete_s(config.dn)
+
+ except ldap.NO_SUCH_OBJECT:
+ pass
+ topology.standalone.add_s(config)
+
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+
+ # Check the server did not restart
+ try:
+ topology.standalone.restart(timeout=5)
+ ent = topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ if ent:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert not ent
+ except ldap.SERVER_DOWN:
+ pass
+
+ # Check the expected error message
+ regex = re.compile("Config info: No valid subtree is defined")
+ res =_pattern_errorlog(topology.standalone.errorlog_file, regex)
+ if not res:
+ # be sure to restore a valid config before assert
+ _config_file(topology, action='restore')
+ assert res
+
+ # Check we can restart the server
+ _config_file(topology, action='restore')
+ topology.standalone.start(timeout=5)
+ try:
+ topology.standalone.getEntry(config.dn, ldap.SCOPE_BASE, "(objectclass=nsSlapdPlugin)", ALL_CONFIG_ATTRS)
+ except ldap.NO_SUCH_OBJECT:
+ pass
+def test_ticket47823_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47823_init(topo)
+
+ # run old/new config style that makes uniqueness checking on one subtree
+ test_ticket47823_one_container_add(topo)
+ test_ticket47823_one_container_mod(topo)
+ test_ticket47823_one_container_modrdn(topo)
+
+ # run old config style that makes uniqueness checking on each defined subtrees
+ test_ticket47823_multi_containers_add(topo)
+ test_ticket47823_multi_containers_mod(topo)
+ test_ticket47823_multi_containers_modrdn(topo)
+ test_ticket47823_across_multi_containers_add(topo)
+ test_ticket47823_across_multi_containers_mod(topo)
+ test_ticket47823_across_multi_containers_modrdn(topo)
+
+ test_ticket47823_invalid_config_1(topo)
+ test_ticket47823_invalid_config_2(topo)
+ test_ticket47823_invalid_config_3(topo)
+ test_ticket47823_invalid_config_4(topo)
+ test_ticket47823_invalid_config_5(topo)
+ test_ticket47823_invalid_config_6(topo)
+ test_ticket47823_invalid_config_7(topo)
+
+ test_ticket47823_final(topo)
+
+
+if __name__ == '__main__':
+ run_isolated()
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 85662a3..c613c23 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -626,8 +626,9 @@ nsslapd-pluginpath: libattr-unique-plugin
nsslapd-plugininitfunc: NSUniqueAttr_Init
nsslapd-plugintype: betxnpreoperation
nsslapd-pluginenabled: off
-nsslapd-pluginarg0: uid
-nsslapd-pluginarg1: %ds_suffix%
+uniqueness-attribute-name: uid
+uniqueness-subtrees: %ds_suffix%
+uniqueness-across-all-subtrees: off
nsslapd-plugin-depends-on-type: database
dn: cn=7-bit check,cn=plugins,cn=config
diff --git a/ldap/servers/plugins/uiduniq/uid.c b/ldap/servers/plugins/uiduniq/uid.c
index d4f0c84..f4a9a8d 100644
--- a/ldap/servers/plugins/uiduniq/uid.c
+++ b/ldap/servers/plugins/uiduniq/uid.c
@@ -99,7 +99,23 @@ pluginDesc = {
"Enforce unique attribute values"
};
static void* plugin_identity = NULL;
-
+typedef struct attr_uniqueness_config {
+ char *attr;
+ Slapi_DN **subtrees;
+ PRBool unique_in_all_subtrees;
+ char *top_entry_oc;
+ char *subtree_entries_oc;
+ struct attr_uniqueness_config *next;
+} attr_uniqueness_config_t;
+
+#define ATTR_UNIQUENESS_ATTRIBUTE_NAME "uniqueness-attribute-name"
+#define ATTR_UNIQUENESS_SUBTREES "uniqueness-subtrees"
+#define ATTR_UNIQUENESS_ACROSS_ALL_SUBTREES "uniqueness-across-all-subtrees"
+#define ATTR_UNIQUENESS_TOP_ENTRY_OC "uniqueness-top-entry-oc"
+#define ATTR_UNIQUENESS_SUBTREE_ENTRIES_OC "uniqueness-subtree-entries-oc"
+
+static int getArguments(Slapi_PBlock *pb, char **attrName, char **markerObjectClass, char **requiredObjectClass);
+static struct attr_uniqueness_config *uniqueness_entry_to_config(Slapi_PBlock *pb, Slapi_Entry *config_entry);
/*
* More information about constraint failure
@@ -108,6 +124,262 @@ static char *moreInfo =
"Another entry with the same attribute value already exists (attribute: \"%s\")";
static void
+free_uniqueness_config(struct attr_uniqueness_config *config)
+{
+ int i;
+
+ slapi_ch_free_string((char **) &config->attr);
+ for (i = 0; config->subtrees && config->subtrees[i]; i++) {
+ slapi_sdn_free(&config->subtrees[i]);
+ }
+ slapi_ch_free((void **) &config->subtrees);
+ slapi_ch_free_string((char **) &config->top_entry_oc);
+ slapi_ch_free_string((char **) &config->subtree_entries_oc);
+}
+
+/*
+ * New styles:
+ * ----------
+ *
+ * uniqueness-attribute-name: uid
+ * uniqueness-subtrees: dc=people,dc=example,dc=com
+ * uniqueness-subtrees: dc=sales, dc=example,dc=com
+ * uniqueness-across-all-subtrees: on
+ *
+ * or
+ *
+ * uniqueness-attribute-name: uid
+ * uniqueness-top-entry-oc: organizationalUnit
+ * uniqueness-subtree-entries-oc: person
+ *
+ * If both are present:
+ * - uniqueness-subtrees
+ * - uniqueness-top-entry-oc/uniqueness-subtree-entries-oc
+ * Then uniqueness-subtrees has the priority
+ *
+ * Old styles:
+ * ----------
+ *
+ * nsslapd-pluginarg0: uid
+ * nsslapd-pluginarg1: dc=people,dc=example,dc=com
+ * nsslapd-pluginarg2: dc=sales, dc=example,dc=com
+ *
+ * or
+ *
+ * nsslapd-pluginarg0: attribute=uid
+ * nsslapd-pluginarg1: markerobjectclass=organizationalUnit
+ * nsslapd-pluginarg2: requiredobjectclass=person
+ *
+ * From a Slapi_Entry of the config entry, it creates a attr_uniqueness_config.
+ * It returns a (attr_uniqueness_config *) if the configuration is valid
+ * Else it returns NULL
+ */
+static struct attr_uniqueness_config *
+uniqueness_entry_to_config(Slapi_PBlock *pb, Slapi_Entry *config_entry)
+{
+ attr_uniqueness_config_t *tmp_config = NULL;
+ char **values = NULL;
+ int argc;
+ char **argv = NULL;
+ int rc = SLAPI_PLUGIN_SUCCESS;
+ int i;
+ int nb_subtrees = 0;
+
+ if (config_entry == NULL) {
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+
+
+ /* We are going to fill tmp_config in a first phase */
+ if ((tmp_config = (attr_uniqueness_config_t *) slapi_ch_calloc(1, sizeof (attr_uniqueness_config_t))) == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "load_config failed to allocate configuration\n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ } else {
+ /* set these to -1 for config validation */
+
+ }
+
+ /* Check if this is new/old config style */
+ slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
+ if (argc == 0) {
+ /* This is new config style
+ * uniqueness-attribute-name: uid
+ * uniqueness-subtrees: dc=people,dc=example,dc=com
+ * uniqueness-subtrees: dc=sales, dc=example,dc=com
+ * uniqueness-across-all-subtrees: on
+ *
+ * or
+ *
+ * uniqueness-attribute-name: uid
+ * uniqueness-top-entry-oc: organizationalUnit
+ * uniqueness-subtree-entries-oc: person
+ */
+
+ /* Attribute name of the attribute we are going to check value uniqueness */
+ tmp_config->attr = slapi_entry_attr_get_charptr(config_entry, ATTR_UNIQUENESS_ATTRIBUTE_NAME);
+
+ /* Subtrees where uniqueness is tested */
+ values = slapi_entry_attr_get_charray(config_entry, ATTR_UNIQUENESS_SUBTREES);
+ if (values) {
+
+
+ for (i = 0; values && values[i]; i++);
+ if ((tmp_config->subtrees = (Slapi_DN **) slapi_ch_calloc(i + 1, sizeof (Slapi_DN *))) == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Fail to allocate subtree array \n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+
+ /* copy the valid subtree DN into the config */
+ for (i = 0, nb_subtrees = 0; values && values[i]; i++) {
+ if (slapi_dn_syntax_check(pb, values[i], 1)) { /* syntax check failed */
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Invalid DN (skipped): %s\n", values[i]);
+ continue;
+ }
+ tmp_config->subtrees[nb_subtrees] = slapi_sdn_new_dn_byval(values[i]);
+ nb_subtrees++;
+
+ }
+
+ slapi_ch_array_free(values);
+ values = NULL;
+ }
+
+ /* Uniqueness may be enforced accross all the subtrees, by default it is not */
+ tmp_config->unique_in_all_subtrees = slapi_entry_attr_get_bool(config_entry, ATTR_UNIQUENESS_ACROSS_ALL_SUBTREES);
+
+ /* enforce uniqueness only if the modified entry has this objectclass */
+ tmp_config->top_entry_oc = slapi_entry_attr_get_charptr(config_entry, ATTR_UNIQUENESS_TOP_ENTRY_OC);
+
+ /* enforce uniqueness, in the modified entry subtree, only to entries having this objectclass */
+ tmp_config->subtree_entries_oc = slapi_entry_attr_get_charptr(config_entry, ATTR_UNIQUENESS_SUBTREE_ENTRIES_OC);
+
+ } else {
+ int result;
+ char *attrName = NULL;
+ char *markerObjectClass = NULL;
+ char *requiredObjectClass = NULL;
+
+ /* using the old style of configuration */
+ result = getArguments(pb, &attrName, &markerObjectClass, &requiredObjectClass);
+ if (LDAP_OPERATIONS_ERROR == result) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "Config fail: unable to parse old style\n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+
+ }
+ if (UNTAGGED_PARAMETER == result) {
+ /* This is
+ * nsslapd-pluginarg0: uid
+ * nsslapd-pluginarg1: dc=people,dc=example,dc=com
+ * nsslapd-pluginarg2: dc=sales, dc=example,dc=com
+ *
+ * config attribute are in argc/argv
+ *
+ * attrName is set
+ * markerObjectClass/requiredObjectClass are NOT set
+ */
+
+ if (slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc) || slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv)) {
+ slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name, "Config fail: Only attribute name is valid\n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+
+ /* Store attrName in the config */
+ tmp_config->attr = slapi_ch_strdup(attrName);
+ argc--;
+ argv++; /* First argument was attribute name and remaining are subtrees */
+
+ /* Store the subtrees */
+ nb_subtrees = 0;
+ if ((tmp_config->subtrees = (Slapi_DN **) slapi_ch_calloc(argc + 1, sizeof (Slapi_DN *))) == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Fail to allocate subtree array\n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+
+
+ for (; argc > 0; argc--, argv++) {
+ if (slapi_dn_syntax_check(pb, *argv, 1)) { /* syntax check failed */
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: Invalid DN (skipped): %s\n", *argv);
+ continue;
+ }
+ tmp_config->subtrees[nb_subtrees] = slapi_sdn_new_dn_byval(*argv);
+ nb_subtrees++;
+ }
+
+ /* this interface does not configure accross subtree uniqueness*/
+ tmp_config->unique_in_all_subtrees = PR_FALSE;
+
+ /* Not really usefull, but it clarifies the config */
+ tmp_config->subtree_entries_oc = NULL;
+ tmp_config->top_entry_oc = NULL;
+ } else {
+ /* This is
+ * nsslapd-pluginarg0: attribute=uid
+ * nsslapd-pluginarg1: markerobjectclass=organizationalUnit
+ * nsslapd-pluginarg2: requiredobjectclass=person
+ *
+ * config attributes are in
+ * - attrName
+ * - markerObjectClass
+ * - requiredObjectClass
+ */
+ /* Store attrName in the config */
+ tmp_config->attr = slapi_ch_strdup(attrName);
+
+ /* There is no subtrees */
+ tmp_config->subtrees = NULL;
+
+ /* this interface does not configure accross subtree uniqueness*/
+ tmp_config->unique_in_all_subtrees = PR_FALSE;
+
+ /* set the objectclasses retrieved by getArgument */
+ tmp_config->subtree_entries_oc = slapi_ch_strdup(requiredObjectClass);
+ tmp_config->top_entry_oc = slapi_ch_strdup(markerObjectClass);
+
+ }
+
+ }
+
+ /* Time to check that the new configuration is valid */
+ if (tmp_config->attr == NULL) {
+ slapi_log_error( SLAPI_LOG_FATAL, plugin_name, "Config info: attribute name not defined \n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+
+ if (tmp_config->subtrees == NULL) {
+ /* Uniqueness is enforced on entries matching objectclass */
+ if (tmp_config->subtree_entries_oc == NULL) {
+ slapi_log_error( SLAPI_LOG_FATAL, plugin_name, "Config info: objectclass for subtree entries is not defined\n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+ } else if (tmp_config->subtrees[0] == NULL) {
+ /* Uniqueness is enforced on subtrees but none are defined */
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "Config info: No valid subtree is defined \n");
+ rc = SLAPI_PLUGIN_FAILURE;
+ goto done;
+ }
+
+done:
+ if (rc != SLAPI_PLUGIN_SUCCESS) {
+ if (tmp_config) {
+ free_uniqueness_config(tmp_config);
+ slapi_ch_free((void **) &tmp_config);
+ }
+ return NULL;
+ } else {
+
+ return tmp_config;
+ }
+}
+
+static void
freePblock( Slapi_PBlock *spb ) {
if ( spb )
{
@@ -390,29 +662,49 @@ search_one_berval(Slapi_DN *baseDN, const char *attrName,
* LDAP_OPERATIONS_ERROR - a server failure.
*/
static int
-searchAllSubtrees(int argc, char *argv[], const char *attrName,
+searchAllSubtrees(Slapi_DN **subtrees, const char *attrName,
Slapi_Attr *attr, struct berval **values, const char *requiredObjectClass,
- Slapi_DN *dn)
+ Slapi_DN *dn, PRBool unique_in_all_subtrees)
{
int result = LDAP_SUCCESS;
+ int i;
+ if (unique_in_all_subtrees) {
+ PRBool in_a_subtree = PR_FALSE;
+
+ /* we need to check that the added values of this attribute
+ * are unique in all the monitored subtrees
+ */
+
+ /* First check the target entry is in one of
+ * the monitored subtree, so adding 'values' would
+ * violate constraint
+ */
+ for (i = 0;subtrees && subtrees[i]; i++) {
+ if (slapi_sdn_issuffix(dn, subtrees[i])) {
+ in_a_subtree = PR_TRUE;
+ break;
+ }
+ }
+ if (! in_a_subtree) {
+ return result;
+ }
+ }
+
/*
* For each DN in the managed list, do uniqueness checking if
* the target DN is a subnode in the tree.
*/
- for(;argc > 0;argc--,argv++)
+ for(i = 0;subtrees && subtrees[i]; i++)
{
- Slapi_DN *sufdn = slapi_sdn_new_dn_byref(*argv);
+ Slapi_DN *sufdn = subtrees[i];
/*
* The DN should already be normalized, so we don't have to
* worry about that here.
*/
- if (slapi_sdn_issuffix(dn, sufdn)) {
+ if (unique_in_all_subtrees || slapi_sdn_issuffix(dn, sufdn)) {
result = search(sufdn, attrName, attr, values, requiredObjectClass, dn);
- slapi_sdn_free(&sufdn);
if (result) break;
- } else {
- slapi_sdn_free(&sufdn);
}
}
return result;
@@ -561,8 +853,7 @@ preop_add(Slapi_PBlock *pb)
int isupdatedn;
Slapi_Entry *e;
Slapi_Attr *attr;
- int argc;
- char **argv = NULL;
+ struct attr_uniqueness_config *config = NULL;
/*
* If this is a replication update, just be a noop.
@@ -573,28 +864,19 @@ preop_add(Slapi_PBlock *pb)
{
break;
}
-
- /*
- * Get the arguments
- */
- result = getArguments(pb, &attrName, &markerObjectClass,
- &requiredObjectClass);
- if (UNTAGGED_PARAMETER == result)
- {
- slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name,
- "ADD parameter untagged: %s\n", attrName);
- result = LDAP_SUCCESS;
- /* Statically defined subtrees to monitor */
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
- if (err) { result = uid_op_error(53); break; }
-
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
- if (err) { result = uid_op_error(54); break; }
- argc--; argv++; /* First argument was attribute name */
- } else if (0 != result)
- {
- break;
- }
+ slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &config);
+ if (config == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "preop_modrdn fail to retrieve the config\n");
+ result = LDAP_OPERATIONS_ERROR;
+ break;
+ }
+
+ /*
+ * Get the arguments
+ */
+ attrName = config->attr;
+ markerObjectClass = config->top_entry_oc;
+ requiredObjectClass = config->subtree_entries_oc;
/*
* Get the target DN for this add operation
@@ -642,8 +924,8 @@ preop_add(Slapi_PBlock *pb)
} else
{
/* Subtrees listed on invocation line */
- result = searchAllSubtrees(argc, argv, attrName, attr, NULL,
- requiredObjectClass, sdn);
+ result = searchAllSubtrees(config->subtrees, attrName, attr, NULL,
+ requiredObjectClass, sdn, config->unique_in_all_subtrees);
}
END
@@ -696,6 +978,7 @@ preop_modify(Slapi_PBlock *pb)
int checkmodsCapacity = 0;
char *errtext = NULL;
char *attrName = NULL;
+ struct attr_uniqueness_config *config = NULL;
#ifdef DEBUG
slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name,
@@ -712,8 +995,6 @@ preop_modify(Slapi_PBlock *pb)
LDAPMod *mod;
Slapi_DN *sdn = NULL;
int isupdatedn;
- int argc;
- char **argv = NULL;
/*
* If this is a replication update, just be a noop.
@@ -725,27 +1006,20 @@ preop_modify(Slapi_PBlock *pb)
break;
}
+ slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &config);
+ if (config == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "preop_modrdn fail to retrieve the config\n");
+ result = LDAP_OPERATIONS_ERROR;
+ break;
+ }
/*
* Get the arguments
*/
- result = getArguments(pb, &attrName, &markerObjectClass,
- &requiredObjectClass);
- if (UNTAGGED_PARAMETER == result)
- {
- result = LDAP_SUCCESS;
- /* Statically defined subtrees to monitor */
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
- if (err) { result = uid_op_error(53); break; }
-
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
- if (err) { result = uid_op_error(54); break; }
- argc--; /* First argument was attribute name */
- argv++;
- } else if (0 != result)
- {
- break;
- }
+ attrName = config->attr;
+ markerObjectClass = config->top_entry_oc;
+ requiredObjectClass = config->subtree_entries_oc;
+
err = slapi_pblock_get(pb, SLAPI_MODIFY_MODS, &mods);
if (err) { result = uid_op_error(61); break; }
@@ -809,8 +1083,8 @@ preop_modify(Slapi_PBlock *pb)
} else
{
/* Subtrees listed on invocation line */
- result = searchAllSubtrees(argc, argv, attrName, NULL,
- mod->mod_bvalues, requiredObjectClass, sdn);
+ result = searchAllSubtrees(config->subtrees, attrName, NULL,
+ mod->mod_bvalues, requiredObjectClass, sdn, config->unique_in_all_subtrees);
}
}
END
@@ -852,6 +1126,7 @@ preop_modrdn(Slapi_PBlock *pb)
Slapi_Value *sv_requiredObjectClass = NULL;
char *errtext = NULL;
char *attrName = NULL;
+ struct attr_uniqueness_config *config = NULL;
#ifdef DEBUG
slapi_log_error(SLAPI_LOG_PLUGIN, plugin_name,
@@ -868,8 +1143,6 @@ preop_modrdn(Slapi_PBlock *pb)
int deloldrdn = 0;
int isupdatedn;
Slapi_Attr *attr;
- int argc;
- char **argv = NULL;
/*
* If this is a replication update, just be a noop.
@@ -881,26 +1154,18 @@ preop_modrdn(Slapi_PBlock *pb)
break;
}
- /*
- * Get the arguments
- */
- result = getArguments(pb, &attrName, &markerObjectClass,
- &requiredObjectClass);
- if (UNTAGGED_PARAMETER == result)
- {
- result = LDAP_SUCCESS;
- /* Statically defined subtrees to monitor */
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
- if (err) { result = uid_op_error(53); break; }
-
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
- if (err) { result = uid_op_error(54); break; }
- argc--; /* First argument was attribute name */
- argv++;
- } else if (0 != result)
- {
- break;
- }
+ slapi_pblock_get(pb, SLAPI_PLUGIN_PRIVATE, &config);
+ if (config == NULL) {
+ slapi_log_error(SLAPI_LOG_FATAL, plugin_name, "preop_modrdn fail to retrieve the config\n");
+ result = LDAP_OPERATIONS_ERROR;
+ break;
+ }
+ /*
+ * Get the arguments
+ */
+ attrName = config->attr;
+ markerObjectClass = config->top_entry_oc;
+ requiredObjectClass = config->subtree_entries_oc;
/* Create a Slapi_Value for the requiredObjectClass to use
* for checking the entry. */
@@ -978,8 +1243,8 @@ preop_modrdn(Slapi_PBlock *pb)
} else
{
/* Subtrees listed on invocation line */
- result = searchAllSubtrees(argc, argv, attrName, attr, NULL,
- requiredObjectClass, sdn);
+ result = searchAllSubtrees(config->subtrees, attrName, attr, NULL,
+ requiredObjectClass, sdn, config->unique_in_all_subtrees);
}
END
/* Clean-up */
@@ -1021,16 +1286,15 @@ NSUniqueAttr_Init(Slapi_PBlock *pb)
int preadd = SLAPI_PLUGIN_PRE_ADD_FN;
int premod = SLAPI_PLUGIN_PRE_MODIFY_FN;
int premdn = SLAPI_PLUGIN_PRE_MODRDN_FN;
+ struct attr_uniqueness_config *config = NULL;
BEGIN
- int argc;
- char **argv;
/* Declare plugin version */
err = slapi_pblock_set(pb, SLAPI_PLUGIN_VERSION,
SLAPI_PLUGIN_VERSION_01);
if (err) break;
-
+
/*
* Get plugin identity and store it for later use
* Used for internal operations
@@ -1049,24 +1313,12 @@ NSUniqueAttr_Init(Slapi_PBlock *pb)
}
slapi_ch_free_string(&plugin_type);
- /*
- * Get and normalize arguments
- */
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGC, &argc);
- if (err) break;
-
- err = slapi_pblock_get(pb, SLAPI_PLUGIN_ARGV, &argv);
- if (err) break;
-
- /* First argument is the unique attribute name */
- if (argc < 1) { err = -1; break; }
- argv++; argc--;
-
- for(;argc > 0;argc--, argv++) {
- char *normdn = slapi_create_dn_string_case("%s", *argv);
- slapi_ch_free_string(argv);
- *argv = normdn;
+ /* load the config into the config list */
+ if ((config = uniqueness_entry_to_config(pb, plugin_entry)) == NULL) {
+ err = SLAPI_PLUGIN_FAILURE;
+ break;
}
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, (void*) config);
/* Provide descriptive information */
err = slapi_pblock_set(pb, SLAPI_PLUGIN_DESCRIPTION,
@@ -1088,6 +1340,11 @@ NSUniqueAttr_Init(Slapi_PBlock *pb)
if (err) {
slapi_log_error(SLAPI_LOG_PLUGIN, "NSUniqueAttr_Init",
"Error: %d\n", err);
+ if (config) {
+ slapi_pblock_set(pb, SLAPI_PLUGIN_PRIVATE, NULL);
+ free_uniqueness_config(config);
+ slapi_ch_free((void **) &config);
+ }
err = -1;
}
else
9 years, 9 months
ldap/servers
by Mark Reynolds
ldap/servers/slapd/back-ldbm/dblayer.c | 15 +++++++++++----
1 file changed, 11 insertions(+), 4 deletions(-)
New commits:
commit 16e5ce768bc70a3d30dca139ec8fc9330d071168
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jul 9 17:39:38 2014 -0400
Ticket 47654 - Fix regression (deadlock/crash)
Bug Description: dblayer_close() is called for shutdowns, and when the backend
is being disabled for certain tasks like db2bak/bak2db. The
original fix assumed this function was only called during shutdowns.
Fix Description: Only free certain resources when the server is actually shutting
down.
https://fedorahosted.org/389/ticket/47654
Reviewed by: rmeggins(Thanks!)
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 0fda6d3..4f38845 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -2896,6 +2896,7 @@ int dblayer_post_close(struct ldbminfo *li, int dbmode)
dblayer_private *priv = 0;
int return_value = 0;
dblayer_private_env *pEnv;
+ int shutdown = g_get_shutdown();
PR_ASSERT(NULL != li);
priv = (dblayer_private*)li->li_dblayer_private;
@@ -2928,14 +2929,17 @@ int dblayer_post_close(struct ldbminfo *li, int dbmode)
charray_free(priv->dblayer_data_directories);
priv->dblayer_data_directories = NULL;
}
- slapi_ch_free_string(&priv->dblayer_dbhome_directory);
- slapi_ch_free_string(&priv->dblayer_home_directory);
+ if(shutdown){
+ slapi_ch_free_string(&priv->dblayer_dbhome_directory);
+ slapi_ch_free_string(&priv->dblayer_home_directory);
+ }
return return_value;
}
/*
- * This function is called when the server is shutting down.
+ * This function is called when the server is shutting down, or when the
+ * backend is being disabled (e.g. backup/restore).
* This is not safe to call while other threads are calling into the open
* databases !!! So: DON'T !
*/
@@ -2945,6 +2949,7 @@ int dblayer_close(struct ldbminfo *li, int dbmode)
ldbm_instance *inst;
Object *inst_obj;
int return_value = 0;
+ int shutdown = g_get_shutdown();
dblayer_pre_close(li);
@@ -2957,7 +2962,9 @@ int dblayer_close(struct ldbminfo *li, int dbmode)
for (inst_obj = objset_first_obj(li->li_instance_set); inst_obj;
inst_obj = objset_next_obj(li->li_instance_set, inst_obj)) {
inst = (ldbm_instance *)object_get_data(inst_obj);
- vlv_close(inst);
+ if(shutdown){
+ vlv_close(inst);
+ }
be = inst->inst_be;
if (NULL != be->be_instance_info) {
return_value |= dblayer_instance_close(be);
9 years, 9 months
7 commits - ldap/servers
by Noriko Hosoi
ldap/servers/slapd/back-ldbm/dblayer.c | 3 --
ldap/servers/slapd/ch_malloc.c | 37 ---------------------------------
ldap/servers/slapd/localhost.c | 3 --
ldap/servers/slapd/main.c | 13 +----------
ldap/servers/slapd/slapi-plugin.h | 1
ldap/servers/slapd/task.c | 10 +++-----
ldap/servers/slapd/tools/dbscan.c | 14 ++++--------
ldap/servers/snmp/main.c | 7 +-----
8 files changed, 15 insertions(+), 73 deletions(-)
New commits:
commit 3a66ec7ca61ee273eb016fe234a3da8202e58e83
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:08:21 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit 43c6ff2e7801ff6bbc03961b3161dd60aebf707a.
diff --git a/ldap/servers/slapd/tools/dbscan.c b/ldap/servers/slapd/tools/dbscan.c
index bbfcd0e..023fade 100644
--- a/ldap/servers/slapd/tools/dbscan.c
+++ b/ldap/servers/slapd/tools/dbscan.c
@@ -1077,17 +1077,16 @@ is_changelog(char *filename)
static void usage(char *argv0)
{
- long arg_max = sysconf(_SC_ARG_MAX);
- char *copy = strndup(argv0, arg_max);
+ char *copy = strdup(argv0);
char *p0 = NULL, *p1 = NULL;
- if (copy && (strlen(copy) < arg_max)) {
+ if (NULL != copy) {
/* the full path is not needed in the usages */
- p0 = strrchr(copy, '/');
- if (p0) {
+ p0 = strrchr(argv0, '/');
+ if (NULL != p0) {
*p0 = '\0';
p0++;
} else {
- p0 = copy;
+ p0 = argv0;
}
p1 = strrchr(p0, '-'); /* get rid of -bin from the usage */
if (NULL != p1) {
@@ -1125,9 +1124,6 @@ static void usage(char *argv0)
printf(" # display summary of objectclass.db4\n");
printf(" %s -f objectclass.db4\n", p0);
printf("\n");
- if (copy) {
- free(copy);
- }
exit(1);
}
commit dc4527b6f6ebe7a2c42eb57942af535c31c5cd59
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:08:11 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit 0a546bcb3d4625d6db1dcbb342922b4ddb3bee37.
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 1726e67..d577514 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -650,8 +650,6 @@ main( int argc, char **argv)
int return_value = 0;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
daemon_ports_t ports_info = {0};
- long arg_max = 0;
-
#ifndef __LP64__
#if defined(__hpux) && !defined(__ia64)
/* for static constructors */
@@ -724,16 +722,9 @@ main( int argc, char **argv)
#endif /* _WIN32 */
if ( (myname = strrchr( argv[0], '/' )) == NULL ) {
- arg_max = sysconf(_SC_ARG_MAX);
- myname = slapi_ch_strndup( argv[0], arg_max );
+ myname = slapi_ch_strdup( argv[0] );
} else {
- myname = slapi_ch_strndup( myname + 1, arg_max );
- }
- if (strlen(myname) > arg_max) {
- LDAPDebug(LDAP_DEBUG_ANY,
- "proc name \"%s\" is longer than the allowed max size: %dB\n",
- myname, arg_max, 0);
- exit(1);
+ myname = slapi_ch_strdup( myname + 1 );
}
#if defined( XP_WIN32 )
commit f5014300dcdf46d9e432e1a8cee640595b862d6f
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:07:59 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit 48f2ea020bca3ee750885a6e5f423fae5e3606ca.
diff --git a/ldap/servers/slapd/back-ldbm/dblayer.c b/ldap/servers/slapd/back-ldbm/dblayer.c
index 9e6c072..0fda6d3 100644
--- a/ldap/servers/slapd/back-ldbm/dblayer.c
+++ b/ldap/servers/slapd/back-ldbm/dblayer.c
@@ -4140,7 +4140,6 @@ print_ttilist(txn_test_iter **ttilist, size_t tticnt)
}
#define TXN_TEST_IDX_OK_IF_NULL "nscpEntryDN"
-#define TXN_TEST_MAX_INDEX_LIST_LEN 4096
static void
txn_test_init_cfg(txn_test_cfg *cfg)
@@ -4153,7 +4152,7 @@ txn_test_init_cfg(txn_test_cfg *cfg)
cfg->flags = getenv(TXN_TEST_USE_RMW) ? DB_RMW : 0;
cfg->use_txn = getenv(TXN_TEST_USE_TXN) ? 1 : 0;
if (getenv(TXN_TEST_INDEXES)) {
- indexlist_copy = slapi_ch_strndup(getenv(TXN_TEST_INDEXES), TXN_TEST_MAX_INDEX_LIST_LEN);
+ indexlist_copy = slapi_ch_strdup(getenv(TXN_TEST_INDEXES));
} else {
indexlist_copy = slapi_ch_strdup(indexlist);
}
commit 50654969bcfeb0aa379c10ceda3cfd72993a527c
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:07:48 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit 162604a620ba75f2a5eed3095930a2aaa823a645.
diff --git a/ldap/servers/slapd/ch_malloc.c b/ldap/servers/slapd/ch_malloc.c
index 46aecc0..b2afbe0 100644
--- a/ldap/servers/slapd/ch_malloc.c
+++ b/ldap/servers/slapd/ch_malloc.c
@@ -301,43 +301,6 @@ slapi_ch_strdup ( const char* s1)
#endif
return newmem;
}
-
-char*
-slapi_ch_strndup ( const char* s1, size_t n)
-{
- char* newmem;
-
- /* strdup pukes on NULL strings...bail out now */
- if ((NULL == s1) || (0 == n)) {
- return NULL;
- }
- newmem = strndup (s1, n);
- if (newmem == NULL) {
- int oserr = errno;
- oom_occurred();
-
- slapi_log_error( SLAPI_LOG_FATAL, SLAPD_MODULE,
- "strdup of %lu characters failed; OS error %d (%s)%s\n",
- (unsigned long)n, oserr, slapd_system_strerror( oserr ),
- oom_advice );
- exit (1);
- }
- if(!counters_created)
- {
- create_counters();
- counters_created= 1;
- }
- PR_INCREMENT_COUNTER(slapi_ch_counter_strdup);
- PR_INCREMENT_COUNTER(slapi_ch_counter_created);
- PR_INCREMENT_COUNTER(slapi_ch_counter_exist);
-#if defined(_WIN32) && defined(DEBUG)
- if(recording)
- {
- add_memory_record(newmem,strlen(s1)+1);
- }
-#endif
- return newmem;
-}
#endif /* !MEMPOOL_EXPERIMENTAL */
struct berval*
diff --git a/ldap/servers/slapd/localhost.c b/ldap/servers/slapd/localhost.c
index fc7de23..c946e8d 100644
--- a/ldap/servers/slapd/localhost.c
+++ b/ldap/servers/slapd/localhost.c
@@ -119,9 +119,8 @@ find_localhost_DNS()
return NULL;
}
if (strchr (hp->h_name, '.') != NULL) {
- long host_name_max = sysconf(_SC_HOST_NAME_MAX);
LDAPDebug (LDAP_DEBUG_CONFIG, "h_name == %s\n", hp->h_name, 0, 0);
- return slapi_ch_strndup (hp->h_name, host_name_max);
+ return slapi_ch_strdup (hp->h_name);
} else if (hp->h_aliases != NULL) {
for (alias = hp->h_aliases; *alias != NULL; ++alias) {
if (strchr (*alias, '.') != NULL &&
diff --git a/ldap/servers/slapd/slapi-plugin.h b/ldap/servers/slapd/slapi-plugin.h
index 126200c..b83b08a 100644
--- a/ldap/servers/slapd/slapi-plugin.h
+++ b/ldap/servers/slapd/slapi-plugin.h
@@ -5799,7 +5799,6 @@ char * slapi_ch_malloc( unsigned long size );
char * slapi_ch_realloc( char *block, unsigned long size );
char * slapi_ch_calloc( unsigned long nelem, unsigned long size );
char * slapi_ch_strdup( const char *s );
-char * slapi_ch_strndup( const char *s, size_t size );
void slapi_ch_free( void **ptr );
void slapi_ch_free_string( char **s );
struct berval* slapi_ch_bvdup(const struct berval*);
commit cef82090cc3ece1a72c2ec1b62205b1f68b1074f
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:07:34 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit f25c7f1f988783d620171f7b648f946dc6704c81.
diff --git a/ldap/servers/snmp/main.c b/ldap/servers/snmp/main.c
index fd06dd4..0373877 100644
--- a/ldap/servers/snmp/main.c
+++ b/ldap/servers/snmp/main.c
@@ -75,7 +75,6 @@ main (int argc, char *argv[]) {
struct stat logdir_s;
pid_t child_pid;
FILE *pid_fp;
- long arg_max = 0;
/* Load options */
while ((--argc > 0) && ((*++argv)[0] == '-')) {
@@ -91,13 +90,11 @@ main (int argc, char *argv[]) {
}
}
- if ((argc != 1) || (NULL == *argv)) {
+ if (argc != 1)
exit_usage();
- }
/* load config file */
- arg_max = sysconf(_SC_ARG_MAX);
- if ((config_file = strndup(*argv, arg_max)) == NULL) {
+ if ((config_file = strdup(*argv)) == NULL) {
printf("ldap-agent: Memory error loading config file\n");
exit(1);
}
commit 8247976f25c22799a31be08074cc150e07f5dcce
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:07:09 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit 8dc3806d75b6e3d4722047e230db68ac20ab3e69.
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 1243492..6340db8 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -1949,8 +1949,6 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
if ( file != NULL ){
char line[4096];
char *s = NULL;
- /* fgets() reads in at most one less than size characters */
- char *end_of_line = line + sizeof(line) - 1;
if(logchanges){
LDAPDebug(LDAP_DEBUG_ANY, "sysconfig reload task: processing file (%s)\n",
@@ -1962,8 +1960,8 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
/* skip comments */
continue;
} else {
- char env_value[sizeof(line)];
- char env_var[sizeof(line)];
+ char env_value[4096];
+ char env_var[4096];
int using_setenv = 0;
int value_index = 0;
int start_value = 0;
@@ -1999,7 +1997,7 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
using_setenv = 1;
}
if(strncmp(s, "export ", 7) == 0){
- /* strip off "export " */
+ /* strip off "export " */
s = s + 7;
} else if(strncmp(s, "set ", 4) == 0){
/* strip off "set " */
@@ -2023,7 +2021,7 @@ task_sysconfig_reload_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
/*
* Start parsing the names and values
*/
- for (; s && (s < end_of_line) && *s; s++){
+ for (; s && *s; s++){
/*
* If using "setenv", allow the first space/tab only, and start on the env value
*/
commit 4b66c032996c7a313202f315683aa75e83d5f361
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 9 16:06:42 2014 -0700
Revert "Ticket #47835 - Coverity: 12687..12692"
This reverts commit 6e175e3308f04de528254871f6b554fe992dd3df.
diff --git a/ldap/servers/slapd/main.c b/ldap/servers/slapd/main.c
index 6e806d4..1726e67 100644
--- a/ldap/servers/slapd/main.c
+++ b/ldap/servers/slapd/main.c
@@ -723,8 +723,8 @@ main( int argc, char **argv)
#endif /* _WIN32 */
- arg_max = sysconf(_SC_ARG_MAX);
if ( (myname = strrchr( argv[0], '/' )) == NULL ) {
+ arg_max = sysconf(_SC_ARG_MAX);
myname = slapi_ch_strndup( argv[0], arg_max );
} else {
myname = slapi_ch_strndup( myname + 1, arg_max );
9 years, 9 months
dirsrvtests/tickets
by Mark Reynolds
dirsrvtests/tickets/ticket47781_test.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
New commits:
commit daf4b42389b4cd191752c073d97d4df80b5176c3
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Jul 8 09:13:13 2014 -0400
Ticket 47781 - CI test - use predefined property name variables
https://fedorahosted.org/389/ticket/47781
Reviewed by: tbordaz(Thanks!)
diff --git a/dirsrvtests/tickets/ticket47781_test.py b/dirsrvtests/tickets/ticket47781_test.py
index f7bf881..8eb68ba 100644
--- a/dirsrvtests/tickets/ticket47781_test.py
+++ b/dirsrvtests/tickets/ticket47781_test.py
@@ -167,7 +167,7 @@ def test_ticket47781(topology):
# export the replication ldif
#
log.info('Exporting replication ldif...')
- args = {'repl-info': True}
+ args = {EXPORT_REPL_INFO: True}
exportTask = Tasks(topology.standalone)
try:
exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
@@ -186,7 +186,7 @@ def test_ticket47781(topology):
#
log.info('Import replication LDIF file...')
importTask = Tasks(topology.standalone)
- args = {'wait': True}
+ args = {TASK_WAIT: True}
try:
importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
os.remove("/tmp/export.ldif")
9 years, 9 months
dirsrvtests/tickets
by Mark Reynolds
dirsrvtests/tickets/ticket47781_test.py | 235 ++++++++++++++++++++++++++++++++
1 file changed, 235 insertions(+)
New commits:
commit 2f2d95b3e3bdbbc73049133ce1766f4873e81ae2
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 7 19:12:34 2014 -0400
Ticket 47781 - Add CI test
Description: Adding test script to verify ticket 47781.
Reviewed by: rmeggins(Thanks!)
diff --git a/dirsrvtests/tickets/ticket47781_test.py b/dirsrvtests/tickets/ticket47781_test.py
new file mode 100644
index 0000000..f7bf881
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47781_test.py
@@ -0,0 +1,235 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47781(topology):
+ """
+ Testing for a deadlock after doing an online import of an LDIF with
+ replication data. The replication agreement should be invalid.
+ """
+
+ log.info('Testing Ticket 47781 - Testing for deadlock after importing LDIF with replication data')
+
+ #
+ # Setup Replication
+ #
+ log.info('Setting up replication...')
+ topology.standalone.replica.enableReplication(suffix=DEFAULT_SUFFIX, role=REPLICAROLE_MASTER,
+ replicaId=REPLICAID_MASTER_1)
+
+ properties = {RA_NAME: r'meTo_$host:$port',
+ RA_BINDDN: defaultProperties[REPLICATION_BIND_DN],
+ RA_BINDPW: defaultProperties[REPLICATION_BIND_PW],
+ RA_METHOD: defaultProperties[REPLICATION_BIND_METHOD],
+ RA_TRANSPORT_PROT: defaultProperties[REPLICATION_TRANSPORT]}
+ # The agreement should point to a server that does NOT exist (invalid port)
+ repl_agreement = topology.standalone.agreement.create(suffix=DEFAULT_SUFFIX,
+ host=topology.standalone.host,
+ port=5555,
+ properties=properties)
+
+ #
+ # add two entries
+ #
+ log.info('Adding two entries...')
+ try:
+ topology.standalone.add_s(Entry(('cn=entry1,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry1'})))
+ except ldap.LDAPError, e:
+ log.error('Failed to add entry 1: ' + e.message['desc'])
+ assert False
+
+ try:
+ topology.standalone.add_s(Entry(('cn=entry2,dc=example,dc=com', {
+ 'objectclass': 'top person'.split(),
+ 'sn': 'user',
+ 'cn': 'entry2'})))
+ except ldap.LDAPError, e:
+ log.error('Failed to add entry 2: ' + e.message['desc'])
+ assert False
+
+ #
+ # export the replication ldif
+ #
+ log.info('Exporting replication ldif...')
+ args = {'repl-info': True}
+ exportTask = Tasks(topology.standalone)
+ try:
+ exportTask.exportLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
+ except ValueError:
+ assert False
+
+ #
+ # Restart the server
+ #
+ log.info('Restarting server...')
+ topology.standalone.stop(timeout=5)
+ topology.standalone.start(timeout=5)
+
+ #
+ # Import the ldif
+ #
+ log.info('Import replication LDIF file...')
+ importTask = Tasks(topology.standalone)
+ args = {'wait': True}
+ try:
+ importTask.importLDIF(DEFAULT_SUFFIX, None, "/tmp/export.ldif", args)
+ os.remove("/tmp/export.ldif")
+ except ValueError:
+ os.remove("/tmp/export.ldif")
+ assert False
+
+ #
+ # Search for tombstones - we should not hang/timeout
+ #
+ log.info('Search for tombstone entries(should find one and not hang)...')
+ topology.standalone.set_option(ldap.OPT_NETWORK_TIMEOUT, 5);
+ topology.standalone.set_option(ldap.OPT_TIMEOUT, 5);
+ try:
+ entries = topology.standalone.search_s(DEFAULT_SUFFIX, ldap.SCOPE_SUBTREE, 'objectclass=nsTombstone')
+ if not entries:
+ log.fatal('Search failed to find any entries.')
+ assert PR_False
+ except ldap.LDAPError, e:
+ log.fatal('Search failed: ' + e.message['desc'])
+ assert PR_False
+
+ # If we got here we passed!
+ log.info('Ticket47781 Test - Passed')
+
+
+def test_ticket47781_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47781(topo)
+
+if __name__ == '__main__':
+ run_isolated()
9 years, 9 months