Branch '389-ds-base-1.3.1' - dirsrvtests/tickets
by Mark Reynolds
dirsrvtests/tickets/ticket47970_test.py | 206 ++++++++++++++++++++++++++++++++
1 file changed, 206 insertions(+)
New commits:
commit 5c804282eac5cefb0490c3ccbaf85387b10cf64b
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 4 15:57:40 2014 -0500
Ticket 47970 - add lib389 testcase
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit f6929c9b7c24a43b019e966b1fc37d33b21274a1)
diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py
new file mode 100644
index 0000000..49d505a
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47970_test.py
@@ -0,0 +1,206 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
+USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47970(topology):
+ """
+ Testing that a failed SASL bind does not trigger account lockout -
+ which would attempt to update the passwordRetryCount on the root dse entry
+ """
+
+ log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout')
+
+ #
+ # Enable account lockout
+ #
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
+ log.info('account lockout enabled.')
+ except ldap.LDAPError, e:
+ log.error('Failed to enable account lockout: ' + e.message['desc'])
+ assert False
+
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
+ log.info('passwordMaxFailure set.')
+ except ldap.LDAPError, e:
+ log.error('Failed to to set passwordMaxFailure: ' + e.message['desc'])
+ assert False
+
+ #
+ # Perform SASL bind that should fail
+ #
+ failed_as_expected = False
+ try:
+ user_name = "mark"
+ pw = "secret"
+ auth_tokens = ldap.sasl.digest_md5(user_name, pw)
+ topology.standalone.sasl_interactive_bind_s("", auth_tokens)
+ except ldap.INVALID_CREDENTIALS, e:
+ log.info("SASL Bind failed as expected")
+ failed_as_expected = True
+
+ if not failed_as_expected:
+ log.error("SASL bind unexpectedly succeeded!")
+ assert False
+
+ #
+ # Check that passwordRetryCount was not set on the root dse entry
+ #
+ try:
+ entry = topology.standalone.search_s("", ldap.SCOPE_BASE,
+ "passwordRetryCount=*",
+ ['passwordRetryCount'])
+ except ldap.LDAPError, e:
+ log.error('Failed to search Root DSE entry: ' + e.message['desc'])
+ assert False
+
+ if entry:
+ log.error('Root DSE was incorrectly updated')
+ assert False
+
+ # We passed
+ log.info('Root DSE was correctly not updated')
+ log.info("Test Passed.")
+
+
+def test_ticket47970_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47970(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
9 years, 4 months
Branch '389-ds-base-1.3.2' - dirsrvtests/tickets
by Mark Reynolds
dirsrvtests/tickets/ticket47970_test.py | 206 ++++++++++++++++++++++++++++++++
1 file changed, 206 insertions(+)
New commits:
commit 80c34a5653ee2179d09ed6174912d7465455e463
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 4 15:57:40 2014 -0500
Ticket 47970 - add lib389 testcase
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit f6929c9b7c24a43b019e966b1fc37d33b21274a1)
diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py
new file mode 100644
index 0000000..49d505a
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47970_test.py
@@ -0,0 +1,206 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
+USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47970(topology):
+ """
+ Testing that a failed SASL bind does not trigger account lockout -
+ which would attempt to update the passwordRetryCount on the root dse entry
+ """
+
+ log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout')
+
+ #
+ # Enable account lockout
+ #
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
+ log.info('account lockout enabled.')
+ except ldap.LDAPError, e:
+ log.error('Failed to enable account lockout: ' + e.message['desc'])
+ assert False
+
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
+ log.info('passwordMaxFailure set.')
+ except ldap.LDAPError, e:
+ log.error('Failed to to set passwordMaxFailure: ' + e.message['desc'])
+ assert False
+
+ #
+ # Perform SASL bind that should fail
+ #
+ failed_as_expected = False
+ try:
+ user_name = "mark"
+ pw = "secret"
+ auth_tokens = ldap.sasl.digest_md5(user_name, pw)
+ topology.standalone.sasl_interactive_bind_s("", auth_tokens)
+ except ldap.INVALID_CREDENTIALS, e:
+ log.info("SASL Bind failed as expected")
+ failed_as_expected = True
+
+ if not failed_as_expected:
+ log.error("SASL bind unexpectedly succeeded!")
+ assert False
+
+ #
+ # Check that passwordRetryCount was not set on the root dse entry
+ #
+ try:
+ entry = topology.standalone.search_s("", ldap.SCOPE_BASE,
+ "passwordRetryCount=*",
+ ['passwordRetryCount'])
+ except ldap.LDAPError, e:
+ log.error('Failed to search Root DSE entry: ' + e.message['desc'])
+ assert False
+
+ if entry:
+ log.error('Root DSE was incorrectly updated')
+ assert False
+
+ # We passed
+ log.info('Root DSE was correctly not updated')
+ log.info("Test Passed.")
+
+
+def test_ticket47970_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47970(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
9 years, 4 months
Branch '389-ds-base-1.3.3' - dirsrvtests/tickets
by Mark Reynolds
dirsrvtests/tickets/ticket47970_test.py | 206 ++++++++++++++++++++++++++++++++
1 file changed, 206 insertions(+)
New commits:
commit b6cd13f49713fecf6b2c94a31e25cd726e216c65
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 4 15:57:40 2014 -0500
Ticket 47970 - add lib389 testcase
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit f6929c9b7c24a43b019e966b1fc37d33b21274a1)
diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py
new file mode 100644
index 0000000..49d505a
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47970_test.py
@@ -0,0 +1,206 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
+USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47970(topology):
+ """
+ Testing that a failed SASL bind does not trigger account lockout -
+ which would attempt to update the passwordRetryCount on the root dse entry
+ """
+
+ log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout')
+
+ #
+ # Enable account lockout
+ #
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
+ log.info('account lockout enabled.')
+ except ldap.LDAPError, e:
+ log.error('Failed to enable account lockout: ' + e.message['desc'])
+ assert False
+
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
+ log.info('passwordMaxFailure set.')
+ except ldap.LDAPError, e:
+ log.error('Failed to to set passwordMaxFailure: ' + e.message['desc'])
+ assert False
+
+ #
+ # Perform SASL bind that should fail
+ #
+ failed_as_expected = False
+ try:
+ user_name = "mark"
+ pw = "secret"
+ auth_tokens = ldap.sasl.digest_md5(user_name, pw)
+ topology.standalone.sasl_interactive_bind_s("", auth_tokens)
+ except ldap.INVALID_CREDENTIALS, e:
+ log.info("SASL Bind failed as expected")
+ failed_as_expected = True
+
+ if not failed_as_expected:
+ log.error("SASL bind unexpectedly succeeded!")
+ assert False
+
+ #
+ # Check that passwordRetryCount was not set on the root dse entry
+ #
+ try:
+ entry = topology.standalone.search_s("", ldap.SCOPE_BASE,
+ "passwordRetryCount=*",
+ ['passwordRetryCount'])
+ except ldap.LDAPError, e:
+ log.error('Failed to search Root DSE entry: ' + e.message['desc'])
+ assert False
+
+ if entry:
+ log.error('Root DSE was incorrectly updated')
+ assert False
+
+ # We passed
+ log.info('Root DSE was correctly not updated')
+ log.info("Test Passed.")
+
+
+def test_ticket47970_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47970(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
9 years, 4 months
dirsrvtests/tickets
by Mark Reynolds
dirsrvtests/tickets/ticket47970_test.py | 206 ++++++++++++++++++++++++++++++++
1 file changed, 206 insertions(+)
New commits:
commit f6929c9b7c24a43b019e966b1fc37d33b21274a1
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Thu Dec 4 15:57:40 2014 -0500
Ticket 47970 - add lib389 testcase
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
diff --git a/dirsrvtests/tickets/ticket47970_test.py b/dirsrvtests/tickets/ticket47970_test.py
new file mode 100644
index 0000000..49d505a
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47970_test.py
@@ -0,0 +1,206 @@
+import os
+import sys
+import time
+import ldap
+import ldap.sasl
+import logging
+import socket
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+USER1_DN = "uid=user1,%s" % DEFAULT_SUFFIX
+USER2_DN = "uid=user2,%s" % DEFAULT_SUFFIX
+
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+@pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47970(topology):
+ """
+ Testing that a failed SASL bind does not trigger account lockout -
+ which would attempt to update the passwordRetryCount on the root dse entry
+ """
+
+ log.info('Testing Ticket 47970 - Testing that a failed SASL bind does not trigger account lockout')
+
+ #
+ # Enable account lockout
+ #
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordLockout', 'on')])
+ log.info('account lockout enabled.')
+ except ldap.LDAPError, e:
+ log.error('Failed to enable account lockout: ' + e.message['desc'])
+ assert False
+
+ try:
+ topology.standalone.modify_s("cn=config", [(ldap.MOD_REPLACE, 'passwordMaxFailure', '5')])
+ log.info('passwordMaxFailure set.')
+ except ldap.LDAPError, e:
+ log.error('Failed to to set passwordMaxFailure: ' + e.message['desc'])
+ assert False
+
+ #
+ # Perform SASL bind that should fail
+ #
+ failed_as_expected = False
+ try:
+ user_name = "mark"
+ pw = "secret"
+ auth_tokens = ldap.sasl.digest_md5(user_name, pw)
+ topology.standalone.sasl_interactive_bind_s("", auth_tokens)
+ except ldap.INVALID_CREDENTIALS, e:
+ log.info("SASL Bind failed as expected")
+ failed_as_expected = True
+
+ if not failed_as_expected:
+ log.error("SASL bind unexpectedly succeeded!")
+ assert False
+
+ #
+ # Check that passwordRetryCount was not set on the root dse entry
+ #
+ try:
+ entry = topology.standalone.search_s("", ldap.SCOPE_BASE,
+ "passwordRetryCount=*",
+ ['passwordRetryCount'])
+ except ldap.LDAPError, e:
+ log.error('Failed to search Root DSE entry: ' + e.message['desc'])
+ assert False
+
+ if entry:
+ log.error('Root DSE was incorrectly updated')
+ assert False
+
+ # We passed
+ log.info('Root DSE was correctly not updated')
+ log.info("Test Passed.")
+
+
+def test_ticket47970_final(topology):
+ topology.standalone.stop(timeout=10)
+
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47970(topo)
+
+if __name__ == '__main__':
+ run_isolated()
\ No newline at end of file
9 years, 4 months
Branch '389-ds-base-1.2.11' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/result.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
New commits:
commit 90ab84c6240dff835210dfff7d2804cac77a27b3
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 2 14:10:46 2014 -0500
Ticket 47970 - Account lockout attributes incorrectly updated after failed SASL Bind
Bug Description: When a SASL bind fails, the target DN is not set. If password policy
account lockout is configured, it attempts to update the password retry
count on the dn ("") - which is the Root DSE entry, not a user entry.
This also confuses the COS plugin, and it incorrectly triggers a COS
cache rebuild after the failed login.
Fix Description: Do not update the password retry counters if it is a failed SASL bind.
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 17e79688e05908f7fff319bdeb5167cbeaaf922c)
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 993dc9e..caf3014 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -285,16 +285,18 @@ send_ldap_result_ext(
BerElement *ber
)
{
- Connection *conn = pb->pb_conn;
- int i, rc, logit = 0;
- ber_tag_t tag;
- int flush_ber_element = 1;
Slapi_Operation *operation;
- const char *dn = NULL;
+ passwdPolicy *pwpolicy = NULL;
+ Connection *conn = pb->pb_conn;
Slapi_DN *sdn = NULL;
+ const char *dn = NULL;
+ ber_tag_t tag;
+ int flush_ber_element = 1;
+ int bind_method = 0;
int internal_op;
- passwdPolicy *pwpolicy = NULL;
-
+ int i, rc, logit = 0;
+
+ slapi_pblock_get (pb, SLAPI_BIND_METHOD, &bind_method);
slapi_pblock_get (pb, SLAPI_OPERATION, &operation);
if (operation->o_status == SLAPI_OP_STATUS_RESULT_SENT) {
@@ -372,7 +374,7 @@ send_ldap_result_ext(
/* invalid password. Update the password retry here */
/* put this here for now. It could be a send_result pre-op plugin. */
- if (err == LDAP_INVALID_CREDENTIALS) {
+ if (err == LDAP_INVALID_CREDENTIALS && bind_method != LDAP_AUTH_SASL ) {
slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn );
dn = slapi_sdn_get_dn(sdn);
pwpolicy = new_passwdPolicy(pb, dn);
9 years, 4 months
Branch '389-ds-base-1.3.1' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/result.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
New commits:
commit 5b45bd7dfcb44c3574faf470a8a49590d6c9f455
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 2 14:10:46 2014 -0500
Ticket 47970 - Account lockout attributes incorrectly updated after failed SASL Bind
Bug Description: When a SASL bind fails, the target DN is not set. If password policy
account lockout is configured, it attempts to update the password retry
count on the dn ("") - which is the Root DSE entry, not a user entry.
This also confuses the COS plugin, and it incorrectly triggers a COS
cache rebuild after the failed login.
Fix Description: Do not update the password retry counters if it is a failed SASL bind.
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 17e79688e05908f7fff319bdeb5167cbeaaf922c)
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 0fc3349..82f127c 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -285,16 +285,18 @@ send_ldap_result_ext(
BerElement *ber
)
{
- Connection *conn = pb->pb_conn;
- int i, rc, logit = 0;
- ber_tag_t tag;
- int flush_ber_element = 1;
Slapi_Operation *operation;
- const char *dn = NULL;
+ passwdPolicy *pwpolicy = NULL;
+ Connection *conn = pb->pb_conn;
Slapi_DN *sdn = NULL;
+ const char *dn = NULL;
+ ber_tag_t tag;
+ int flush_ber_element = 1;
+ int bind_method = 0;
int internal_op;
- passwdPolicy *pwpolicy = NULL;
-
+ int i, rc, logit = 0;
+
+ slapi_pblock_get (pb, SLAPI_BIND_METHOD, &bind_method);
slapi_pblock_get (pb, SLAPI_OPERATION, &operation);
if (operation->o_status == SLAPI_OP_STATUS_RESULT_SENT) {
@@ -372,7 +374,7 @@ send_ldap_result_ext(
/* invalid password. Update the password retry here */
/* put this here for now. It could be a send_result pre-op plugin. */
- if (err == LDAP_INVALID_CREDENTIALS) {
+ if (err == LDAP_INVALID_CREDENTIALS && bind_method != LDAP_AUTH_SASL ) {
slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn );
dn = slapi_sdn_get_dn(sdn);
pwpolicy = new_passwdPolicy(pb, dn);
9 years, 4 months
Branch '389-ds-base-1.3.2' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/result.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
New commits:
commit 534f5d27706c8d56ae6c1c4f34fbf5507b9cbaaf
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Dec 2 14:10:46 2014 -0500
Ticket 47970 - Account lockout attributes incorrectly updated after failed SASL Bind
Bug Description: When a SASL bind fails, the target DN is not set. If password policy
account lockout is configured, it attempts to update the password retry
count on the dn ("") - which is the Root DSE entry, not a user entry.
This also confuses the COS plugin, and it incorrectly triggers a COS
cache rebuild after the failed login.
Fix Description: Do not update the password retry counters if it is a failed SASL bind.
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit 17e79688e05908f7fff319bdeb5167cbeaaf922c)
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index 6795ea1..d2f1199 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -364,16 +364,18 @@ send_ldap_result_ext(
BerElement *ber
)
{
- Connection *conn = pb->pb_conn;
- int i, rc, logit = 0;
- ber_tag_t tag;
- int flush_ber_element = 1;
Slapi_Operation *operation;
- const char *dn = NULL;
+ passwdPolicy *pwpolicy = NULL;
+ Connection *conn = pb->pb_conn;
Slapi_DN *sdn = NULL;
+ const char *dn = NULL;
+ ber_tag_t tag;
+ int flush_ber_element = 1;
+ int bind_method = 0;
int internal_op;
- passwdPolicy *pwpolicy = NULL;
-
+ int i, rc, logit = 0;
+
+ slapi_pblock_get (pb, SLAPI_BIND_METHOD, &bind_method);
slapi_pblock_get (pb, SLAPI_OPERATION, &operation);
if (operation->o_status == SLAPI_OP_STATUS_RESULT_SENT) {
@@ -451,7 +453,7 @@ send_ldap_result_ext(
/* invalid password. Update the password retry here */
/* put this here for now. It could be a send_result pre-op plugin. */
- if (err == LDAP_INVALID_CREDENTIALS) {
+ if (err == LDAP_INVALID_CREDENTIALS && bind_method != LDAP_AUTH_SASL ) {
slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn );
dn = slapi_sdn_get_dn(sdn);
pwpolicy = new_passwdPolicy(pb, dn);
9 years, 4 months
Branch '389-ds-base-1.3.3' - ldap/servers
by Mark Reynolds
ldap/servers/slapd/result.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
New commits:
commit 36f0d05b15a8e984c64631fb7ed070358dd8c68f
Author: Mark Reynolds <mreynolds@redhat.com>
Date: Tue Dec 2 14:10:46 2014 -0500
Ticket 47970 - Account lockout attributes incorrectly updated after failed SASL Bind
Bug Description: When a SASL bind fails, the target DN is not set. If password policy
account lockout is configured, it attempts to update the password retry
count on the dn ("") - which is the Root DSE entry, not a user entry.
This also confuses the COS plugin, and it incorrectly triggers a COS
cache rebuild after the failed login.
Fix Description: Do not update the password retry counters if it is a failed SASL bind.
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi (Thanks!)
(cherry picked from commit 17e79688e05908f7fff319bdeb5167cbeaaf922c)
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index ca2fa43..2198337 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -364,16 +364,18 @@ send_ldap_result_ext(
BerElement *ber
)
{
- Connection *conn = pb->pb_conn;
- int i, rc, logit = 0;
- ber_tag_t tag;
- int flush_ber_element = 1;
Slapi_Operation *operation;
- const char *dn = NULL;
+ passwdPolicy *pwpolicy = NULL;
+ Connection *conn = pb->pb_conn;
Slapi_DN *sdn = NULL;
+ const char *dn = NULL;
+ ber_tag_t tag;
+ int flush_ber_element = 1;
+ int bind_method = 0;
int internal_op;
- passwdPolicy *pwpolicy = NULL;
-
+ int i, rc, logit = 0;
+
+ slapi_pblock_get (pb, SLAPI_BIND_METHOD, &bind_method);
slapi_pblock_get (pb, SLAPI_OPERATION, &operation);
if (operation->o_status == SLAPI_OP_STATUS_RESULT_SENT) {
@@ -451,7 +453,7 @@ send_ldap_result_ext(
/* invalid password. Update the password retry here */
/* put this here for now. It could be a send_result pre-op plugin. */
- if (err == LDAP_INVALID_CREDENTIALS) {
+ if (err == LDAP_INVALID_CREDENTIALS && bind_method != LDAP_AUTH_SASL ) {
slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn );
dn = slapi_sdn_get_dn(sdn);
pwpolicy = new_passwdPolicy(pb, dn);
9 years, 4 months
ldap/servers
by Mark Reynolds
ldap/servers/slapd/result.c | 18 ++++++++++--------
1 file changed, 10 insertions(+), 8 deletions(-)
New commits:
commit 17e79688e05908f7fff319bdeb5167cbeaaf922c
Author: Mark Reynolds <mreynolds@redhat.com>
Date: Tue Dec 2 14:10:46 2014 -0500
Ticket 47970 - Account lockout attributes incorrectly updated after failed SASL Bind
Bug Description: When a SASL bind fails, the target DN is not set. If password policy
account lockout is configured, it attempts to update the password retry
count on the dn ("") - which is the Root DSE entry, not a user entry.
This also confuses the COS plugin, and it incorrectly triggers a COS
cache rebuild after the failed login.
Fix Description: Do not update the password retry counters if it is a failed SASL bind.
https://fedorahosted.org/389/ticket/47970
Reviewed by: nhosoi (Thanks!)
diff --git a/ldap/servers/slapd/result.c b/ldap/servers/slapd/result.c
index ca2fa43..2198337 100644
--- a/ldap/servers/slapd/result.c
+++ b/ldap/servers/slapd/result.c
@@ -364,16 +364,18 @@ send_ldap_result_ext(
BerElement *ber
)
{
- Connection *conn = pb->pb_conn;
- int i, rc, logit = 0;
- ber_tag_t tag;
- int flush_ber_element = 1;
Slapi_Operation *operation;
- const char *dn = NULL;
+ passwdPolicy *pwpolicy = NULL;
+ Connection *conn = pb->pb_conn;
Slapi_DN *sdn = NULL;
+ const char *dn = NULL;
+ ber_tag_t tag;
+ int flush_ber_element = 1;
+ int bind_method = 0;
int internal_op;
- passwdPolicy *pwpolicy = NULL;
-
+ int i, rc, logit = 0;
+
+ slapi_pblock_get (pb, SLAPI_BIND_METHOD, &bind_method);
slapi_pblock_get (pb, SLAPI_OPERATION, &operation);
if (operation->o_status == SLAPI_OP_STATUS_RESULT_SENT) {
@@ -451,7 +453,7 @@ send_ldap_result_ext(
/* invalid password. Update the password retry here */
/* put this here for now. It could be a send_result pre-op plugin. */
- if (err == LDAP_INVALID_CREDENTIALS) {
+ if (err == LDAP_INVALID_CREDENTIALS && bind_method != LDAP_AUTH_SASL ) {
slapi_pblock_get( pb, SLAPI_TARGET_SDN, &sdn );
dn = slapi_sdn_get_dn(sdn);
pwpolicy = new_passwdPolicy(pb, dn);
9 years, 4 months
Branch '389-ds-base-1.2.11' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/cos/cos_cache.c | 2 ++
1 file changed, 2 insertions(+)
New commits:
commit 2444e0c4362615c4c65ffceeeb30d9380fb61e49
Author: Mark Reynolds <mreynolds@redhat.com>
Date: Tue Dec 2 13:38:06 2014 -0500
Ticket 47969 - COS memory leak when rebuilding the cache
Bug Description: When the COS cache is released, not all of the schema
objectclasses are freed. So every time we rebuild the
COS cache we leak memory.
Fix Description: After we free the schema attributes, the very first
attribute still needs to be freed. It is not freed
initially because of the duplicate checking logic, so
it is now done after the loop.
https://fedorahosted.org/389/ticket/47969
Reviewed by: nhosoi (Thanks!)
(cherry picked from commit d2dfda95c543f106443f898436151b00c68e4270)
diff --git a/ldap/servers/plugins/cos/cos_cache.c b/ldap/servers/plugins/cos/cos_cache.c
index 9c49c12..db34d15 100644
--- a/ldap/servers/plugins/cos/cos_cache.c
+++ b/ldap/servers/plugins/cos/cos_cache.c
@@ -1907,6 +1907,8 @@ static void cos_cache_del_schema(cosCache *pCache)
}
}
}
+ /* Finally, remove the first attribute's objectclass list */
+ cos_cache_del_attrval_list(&(pCache->ppAttrIndex[0]->pObjectclasses));
LDAPDebug( LDAP_DEBUG_TRACE, "<-- cos_cache_del_schema\n",0,0,0);
}
9 years, 4 months