Branch '389-ds-base-1.3.2' - ldap/ldif ldap/schema ldap/servers
by Noriko Hosoi
ldap/ldif/template-dse.ldif.in | 1 +
ldap/schema/01core389.ldif | 1 +
ldap/servers/slapd/libglobs.c | 32 ++++++++++++++++++++++++++++++++
ldap/servers/slapd/proto-slap.h | 2 ++
ldap/servers/slapd/pw.c | 5 +++--
ldap/servers/slapd/slap.h | 2 ++
6 files changed, 41 insertions(+), 2 deletions(-)
New commits:
commit 749897299831fd5efeea7d299f9ab9c449017f23
Author: Nathan Kinder <nkinder(a)redhat.com>
Date: Mon Mar 17 17:35:24 2014 -0700
Ticket 47753 - Add switch to disable pre-hashed password checking
By default, 389 DS doesn't allow pre-hashed passwords to be set by
anyone other than Directory Manager. This privilege can be delegated
to other users by adding them to the Password Administrators group.
This works fine for most cases, but there are cases where one might
want to allow anyone to set pre-hashed passwords. An example is the
FreeIPA project, who has their own SLAPI plug-in that controls
pre-hashed password checking. We should add a switch to completely
disable pre-hashed password checking to support this case.
https://fedorahosted.org/389/ticket/47753
Reviewed by mreynolds(a)redhat.com
(cherry picked from commit ab6438901fd1481ceceb40d6dff8935ac656dc04)
diff --git a/ldap/ldif/template-dse.ldif.in b/ldap/ldif/template-dse.ldif.in
index 9a52bc5..17555a3 100644
--- a/ldap/ldif/template-dse.ldif.in
+++ b/ldap/ldif/template-dse.ldif.in
@@ -58,6 +58,7 @@ nsslapd-maxdescriptors: 1024
nsslapd-max-filter-nest-level: 40
nsslapd-ndn-cache-enabled: off
nsslapd-sasl-mapping-fallback: off
+nsslapd-allow-hashed-passwords: off
dn: cn=features,cn=config
objectclass: top
diff --git a/ldap/schema/01core389.ldif b/ldap/schema/01core389.ldif
index 41cdd8b..07a673b 100644
--- a/ldap/schema/01core389.ldif
+++ b/ldap/schema/01core389.ldif
@@ -158,6 +158,7 @@ attributeTypes: ( 2.16.840.1.113730.3.1.2165 NAME 'schemaUpdateObjectclassAccept
attributeTypes: ( 2.16.840.1.113730.3.1.2166 NAME 'schemaUpdateObjectclassReject' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2167 NAME 'schemaUpdateAttributeAccept' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
attributeTypes: ( 2.16.840.1.113730.3.1.2168 NAME 'schemaUpdateAttributeReject' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 X-ORIGIN 'Netscape Directory Server' )
+attributeTypes: ( 2.16.840.1.113730.3.1.2307 NAME 'nsslapd-allow-hashed-passwords' DESC 'Netscape defined attribute type' SYNTAX 1.3.6.1.4.1.1466.115.121.1.15 SINGLE-VALUE X-ORIGIN 'Netscape Directory Server' )
#
# objectclasses
#
diff --git a/ldap/servers/slapd/libglobs.c b/ldap/servers/slapd/libglobs.c
index 2a8e05d..94144c8 100644
--- a/ldap/servers/slapd/libglobs.c
+++ b/ldap/servers/slapd/libglobs.c
@@ -228,6 +228,7 @@ slapi_onoff_t init_pw_is_legacy;
slapi_onoff_t init_pw_track_update_time;
slapi_onoff_t init_pw_change;
slapi_onoff_t init_pw_exp;
+slapi_onoff_t init_allow_hashed_pw;
slapi_onoff_t init_pw_syntax;
slapi_onoff_t init_schemacheck;
slapi_onoff_t init_schemamod;
@@ -742,6 +743,10 @@ static struct config_get_and_set {
log_set_expirationtimeunit, SLAPD_AUDIT_LOG,
(void**)&global_slapdFrontendConfig.auditlog_exptimeunit,
CONFIG_STRING_OR_UNKNOWN, NULL, INIT_AUDITLOG_EXPTIMEUNIT},
+ {CONFIG_ALLOW_HASHED_PW_ATTRIBUTE, config_set_allow_hashed_pw,
+ NULL, 0,
+ (void**)&global_slapdFrontendConfig.allow_hashed_pw,
+ CONFIG_ON_OFF, NULL, &init_allow_hashed_pw},
{CONFIG_PW_SYNTAX_ATTRIBUTE, config_set_pw_syntax,
NULL, 0,
(void**)&global_slapdFrontendConfig.pw_policy.pw_syntax,
@@ -1446,6 +1451,7 @@ FrontendConfig_init () {
init_pwpolicy_local = cfg->pwpolicy_local = LDAP_OFF;
init_pw_change = cfg->pw_policy.pw_change = LDAP_ON;
init_pw_must_change = cfg->pw_policy.pw_must_change = LDAP_OFF;
+ init_allow_hashed_pw = cfg->allow_hashed_pw = LDAP_OFF;
init_pw_syntax = cfg->pw_policy.pw_syntax = LDAP_OFF;
init_pw_exp = cfg->pw_policy.pw_exp = LDAP_OFF;
cfg->pw_policy.pw_minlength = 8;
@@ -2514,6 +2520,20 @@ config_set_pwpolicy_local( const char *attrname, char *value, char *errorbuf, in
}
int
+config_set_allow_hashed_pw( const char *attrname, char *value, char *errorbuf, int apply ) {
+ int retVal = LDAP_SUCCESS;
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+
+ retVal = config_set_onoff ( attrname,
+ value,
+ &(slapdFrontendConfig->allow_hashed_pw),
+ errorbuf,
+ apply);
+
+ return retVal;
+}
+
+int
config_set_pw_syntax( const char *attrname, char *value, char *errorbuf, int apply ) {
int retVal = LDAP_SUCCESS;
slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
@@ -4767,6 +4787,18 @@ config_get_pw_must_change() {
return retVal;
}
+int
+config_get_allow_hashed_pw()
+{
+ slapdFrontendConfig_t *slapdFrontendConfig = getFrontendConfig();
+ int retVal;
+
+ CFG_ONOFF_LOCK_READ(slapdFrontendConfig);
+ retVal = (int)slapdFrontendConfig->allow_hashed_pw;
+ CFG_ONOFF_UNLOCK_READ(slapdFrontendConfig);
+
+ return retVal;
+}
int
config_get_pw_syntax() {
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index a0b9b12..b8d563d 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -322,6 +322,7 @@ int config_set_errorlog(const char *attrname, char *value, char *errorbuf, int a
int config_set_pw_change(const char *attrname, char *value, char *errorbuf, int apply );
int config_set_pw_must_change(const char *attrname, char *value, char *errorbuf, int apply );
int config_set_pwpolicy_local(const char *attrname, char *value, char *errorbuf, int apply );
+int config_set_allow_hashed_pw( const char *attrname, char *value, char *errorbuf, int apply );
int config_set_pw_syntax(const char *attrname, char *value, char *errorbuf, int apply );
int config_set_pw_minlength(const char *attrname, char *value, char *errorbuf, int apply );
int config_set_pw_mindigits(const char *attrname, char *value, char *errorbuf, int apply );
@@ -446,6 +447,7 @@ char *config_get_pw_storagescheme();
int config_get_pw_change();
int config_get_pw_history();
int config_get_pw_must_change();
+int config_get_allow_hashed_pw();
int config_get_pw_syntax();
int config_get_pw_minlength();
int config_get_pw_mindigits();
diff --git a/ldap/servers/slapd/pw.c b/ldap/servers/slapd/pw.c
index b4a3295..a4d2dc6 100644
--- a/ldap/servers/slapd/pw.c
+++ b/ldap/servers/slapd/pw.c
@@ -820,8 +820,9 @@ check_pw_syntax_ext ( Slapi_PBlock *pb, const Slapi_DN *sdn, Slapi_Value **vals,
*/
for ( i = 0; vals[ i ] != NULL; ++i ){
if (slapi_is_encoded((char *)slapi_value_get_string(vals[i]))) {
- if ((!is_replication && ((internal_op && pb->pb_conn && !slapi_dn_isroot(pb->pb_conn->c_dn)) ||
- (!internal_op && !pw_is_pwp_admin(pb, pwpolicy))))) {
+ if (!is_replication && !config_get_allow_hashed_pw() &&
+ ((internal_op && pb->pb_conn && !slapi_dn_isroot(pb->pb_conn->c_dn)) ||
+ (!internal_op && !pw_is_pwp_admin(pb, pwpolicy)))) {
PR_snprintf( errormsg, BUFSIZ,
"invalid password syntax - passwords with storage scheme are not allowed");
if ( pwresponse_req == 1 ) {
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 5a53113..45635d5 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -2056,6 +2056,7 @@ typedef struct _slapdEntryPoints {
#define CONFIG_GROUPEVALNESTLEVEL_ATTRIBUTE "nsslapd-groupevalnestlevel"
#define CONFIG_NAGLE_ATTRIBUTE "nsslapd-nagle"
#define CONFIG_PWPOLICY_LOCAL_ATTRIBUTE "nsslapd-pwpolicy-local"
+#define CONFIG_ALLOW_HASHED_PW_ATTRIBUTE "nsslapd-allow-hashed-passwords"
#define CONFIG_PW_CHANGE_ATTRIBUTE "passwordChange"
#define CONFIG_PW_MUSTCHANGE_ATTRIBUTE "passwordMustChange"
#define CONFIG_PW_SYNTAX_ATTRIBUTE "passwordCheckSyntax"
@@ -2264,6 +2265,7 @@ typedef struct _slapdFrontendConfig {
slapi_onoff_t pwpolicy_local;
slapi_onoff_t pw_is_global_policy;
+ slapi_onoff_t allow_hashed_pw;
passwdPolicy pw_policy;
/* ACCESS LOG */
9 years, 8 months
ldap/servers
by Mark Reynolds
ldap/servers/slapd/task.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
New commits:
commit ca3d08a6772e36987286354bd178ca913d3d6da8
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 21 10:14:14 2014 -0400
Ticket 47819 - Fix memory leak
Description: Coverity resource leak 12720, also did a little code
cleanup around slapi_ch_free() -> slapi_ch_free_string()
https://fedorahosted.org/389/ticket/47819
Reviewed by: rmeggins(Thanks!)
diff --git a/ldap/servers/slapd/task.c b/ldap/servers/slapd/task.c
index 1089353..a4d85a8 100644
--- a/ldap/servers/slapd/task.c
+++ b/ldap/servers/slapd/task.c
@@ -1439,7 +1439,6 @@ static int task_backup_add(Slapi_PBlock *pb, Slapi_Entry *e,
database_type = my_database_type;
/* get backend that has db2archive and the database type matches. */
- cookie = NULL;
be = slapi_get_first_backend(&cookie);
while (be) {
if (NULL != be->be_database->plg_db2archive &&
@@ -1448,7 +1447,7 @@ static int task_backup_add(Slapi_PBlock *pb, Slapi_Entry *e,
be = (backend *)slapi_get_next_backend (cookie);
}
- slapi_ch_free((void **)&cookie);
+ slapi_ch_free_string(&cookie);
if (NULL == be || NULL == be->be_database->plg_db2archive) {
LDAPDebug(LDAP_DEBUG_ANY,
"ERROR: no db2archive function defined.\n", 0, 0, 0);
@@ -1578,7 +1577,6 @@ static int task_restore_add(Slapi_PBlock *pb, Slapi_Entry *e,
instance_name = fetch_attr(e, "nsInstance", NULL);
/* get backend that has archive2db and the database type matches. */
- cookie = NULL;
be = slapi_get_first_backend (&cookie);
while (be) {
if (NULL != be->be_database->plg_archive2db &&
@@ -1587,7 +1585,7 @@ static int task_restore_add(Slapi_PBlock *pb, Slapi_Entry *e,
be = (backend *)slapi_get_next_backend (cookie);
}
- slapi_ch_free((void **)&cookie);
+ slapi_ch_free_string(&cookie);
if (NULL == be || NULL == be->be_database->plg_archive2db) {
LDAPDebug(LDAP_DEBUG_ANY,
"ERROR: no archive2db function defined.\n", 0, 0, 0);
@@ -1837,7 +1835,6 @@ task_upgradedb_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
force = fetch_attr(e, "nsForceToReindex", NULL);
/* get backend that has db2archive and the database type matches. */
- cookie = NULL;
be = slapi_get_first_backend(&cookie);
while (be) {
if (NULL != be->be_database->plg_upgradedb)
@@ -1845,7 +1842,7 @@ task_upgradedb_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
be = (backend *)slapi_get_next_backend (cookie);
}
- slapi_ch_free((void **)&cookie);
+ slapi_ch_free_string(&cookie);
if (NULL == be) {
LDAPDebug(LDAP_DEBUG_ANY,
"ERROR: no upgradedb is defined.\n", 0, 0, 0);
@@ -2367,6 +2364,7 @@ task_fixup_tombstones_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter,
}
be = slapi_get_next_backend(cookie);
}
+ slapi_ch_free_string(&cookie);
}
task = slapi_new_task(slapi_entry_get_ndn(e));
9 years, 8 months
Branch '389-ds-base-1.2.11' - ldap/admin
by Mark Reynolds
ldap/admin/src/scripts/60upgradeschemafiles.pl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit f1551fef32f40b3264a4b536e17347aaf0ee410f
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 21 12:01:37 2014 -0400
Ticket 47861 - Certain schema files are not replaced during upgrade
Bug Description: Several schema files have been updated, but the upgrade
process does not replace all the schema files.
Fix Description: Add the updated schema files to the schema upgrade script.
https://fedorahosted.org/389/ticket/47861
Reviewed by: lkrispenz(Thanks!)
(cherry picked from commit fe59338764daf081c1e84684943e3e6acae16483)
Conflicts:
ldap/admin/src/scripts/60upgradeschemafiles.pl
(cherry picked from commit ba604880431bc610c5dff92356b7c897d495f0a6)
diff --git a/ldap/admin/src/scripts/60upgradeschemafiles.pl b/ldap/admin/src/scripts/60upgradeschemafiles.pl
index 1208f4d..5a2c019 100644
--- a/ldap/admin/src/scripts/60upgradeschemafiles.pl
+++ b/ldap/admin/src/scripts/60upgradeschemafiles.pl
@@ -11,7 +11,7 @@ sub runinst {
# these schema files are obsolete, or we want to replace
# them with newer versions
- my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 28pilot.ldif 30ns-common.ldif 50ns-directory.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif);
+ my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 10dna-plugin.ldif 28pilot.ldif 30ns-common.ldif 50ns-mail.ldif 50ns-directory.ldif 60qmail.ldif 60radius.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif 60rfc3712.ldif 60samba3.ldif 60posix-winsync-plugin.ldif 60sabayon.ldif 60nis.ldif 60sendmail.ldif);
# these hashes will be used to check for obsolete schema
# in 99user.ldif
9 years, 8 months
Branch '389-ds-base-1.3.1' - ldap/admin
by Mark Reynolds
ldap/admin/src/scripts/60upgradeschemafiles.pl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit ba604880431bc610c5dff92356b7c897d495f0a6
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 21 12:01:37 2014 -0400
Ticket 47861 - Certain schema files are not replaced during upgrade
Bug Description: Several schema files have been updated, but the upgrade
process does not replace all the schema files.
Fix Description: Add the updated schema files to the schema upgrade script.
https://fedorahosted.org/389/ticket/47861
Reviewed by: lkrispenz(Thanks!)
(cherry picked from commit fe59338764daf081c1e84684943e3e6acae16483)
Conflicts:
ldap/admin/src/scripts/60upgradeschemafiles.pl
diff --git a/ldap/admin/src/scripts/60upgradeschemafiles.pl b/ldap/admin/src/scripts/60upgradeschemafiles.pl
index 1208f4d..5a2c019 100644
--- a/ldap/admin/src/scripts/60upgradeschemafiles.pl
+++ b/ldap/admin/src/scripts/60upgradeschemafiles.pl
@@ -11,7 +11,7 @@ sub runinst {
# these schema files are obsolete, or we want to replace
# them with newer versions
- my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 28pilot.ldif 30ns-common.ldif 50ns-directory.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif);
+ my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 10dna-plugin.ldif 28pilot.ldif 30ns-common.ldif 50ns-mail.ldif 50ns-directory.ldif 60qmail.ldif 60radius.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif 60rfc3712.ldif 60samba3.ldif 60posix-winsync-plugin.ldif 60sabayon.ldif 60nis.ldif 60sendmail.ldif);
# these hashes will be used to check for obsolete schema
# in 99user.ldif
9 years, 8 months
Branch '389-ds-base-1.3.2' - ldap/admin
by Mark Reynolds
ldap/admin/src/scripts/60upgradeschemafiles.pl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit daffe9711c0d8803b75cbcb6804bb598e4fb7e4a
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 21 12:01:37 2014 -0400
Ticket 47861 - Certain schema files are not replaced during upgrade
Bug Description: Several schema files have been updated, but the upgrade
process does not replace all the schema files.
Fix Description: Add the updated schema files to the schema upgrade script.
https://fedorahosted.org/389/ticket/47861
Reviewed by: lkrispenz(Thanks!)
(cherry picked from commit fe59338764daf081c1e84684943e3e6acae16483)
diff --git a/ldap/admin/src/scripts/60upgradeschemafiles.pl b/ldap/admin/src/scripts/60upgradeschemafiles.pl
index ba574a5..5a2c019 100644
--- a/ldap/admin/src/scripts/60upgradeschemafiles.pl
+++ b/ldap/admin/src/scripts/60upgradeschemafiles.pl
@@ -11,7 +11,7 @@ sub runinst {
# these schema files are obsolete, or we want to replace
# them with newer versions
- my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 28pilot.ldif 30ns-common.ldif 50ns-directory.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif 60rfc3712.ldif);
+ my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 10dna-plugin.ldif 28pilot.ldif 30ns-common.ldif 50ns-mail.ldif 50ns-directory.ldif 60qmail.ldif 60radius.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif 60rfc3712.ldif 60samba3.ldif 60posix-winsync-plugin.ldif 60sabayon.ldif 60nis.ldif 60sendmail.ldif);
# these hashes will be used to check for obsolete schema
# in 99user.ldif
9 years, 8 months
ldap/admin
by Mark Reynolds
ldap/admin/src/scripts/60upgradeschemafiles.pl | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
New commits:
commit fe59338764daf081c1e84684943e3e6acae16483
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Mon Jul 21 12:01:37 2014 -0400
Ticket 47861 - Certain schema files are not replaced during upgrade
Bug Description: Several schema files have been updated, but the upgrade
process does not replace all the schema files.
Fix Description: Add the updated schema files to the schema upgrade script.
https://fedorahosted.org/389/ticket/47861
Reviewed by: lkrispenz(Thanks!)
diff --git a/ldap/admin/src/scripts/60upgradeschemafiles.pl b/ldap/admin/src/scripts/60upgradeschemafiles.pl
index ba574a5..5a2c019 100644
--- a/ldap/admin/src/scripts/60upgradeschemafiles.pl
+++ b/ldap/admin/src/scripts/60upgradeschemafiles.pl
@@ -11,7 +11,7 @@ sub runinst {
# these schema files are obsolete, or we want to replace
# them with newer versions
- my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 28pilot.ldif 30ns-common.ldif 50ns-directory.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif 60rfc3712.ldif);
+ my @toremove = qw(00core.ldif 01core389.ldif 01common.ldif 02common.ldif 05rfc2247.ldif 05rfc4523.ldif 05rfc4524.ldif 06inetorgperson.ldif 10presence.ldif 10dna-plugin.ldif 28pilot.ldif 30ns-common.ldif 50ns-mail.ldif 50ns-directory.ldif 60qmail.ldif 60radius.ldif 60mozilla.ldif 60pam-plugin.ldif 60sudo.ldif 60rfc3712.ldif 60samba3.ldif 60posix-winsync-plugin.ldif 60sabayon.ldif 60nis.ldif 60sendmail.ldif);
# these hashes will be used to check for obsolete schema
# in 99user.ldif
9 years, 8 months
dirsrvtests/tickets
by Noriko Hosoi
dirsrvtests/tickets/ticket47714_test.py | 15 ---------------
1 file changed, 15 deletions(-)
New commits:
commit 65216aba18cc66c5b36d11ab2202b620d4df6b4c
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Fri Jul 18 15:35:41 2014 -0700
Ticket 47714 - CI test: add test case for ticket 47714
Description: clean up the unused local method.
diff --git a/dirsrvtests/tickets/ticket47714_test.py b/dirsrvtests/tickets/ticket47714_test.py
index 0e0f816..6f32224 100644
--- a/dirsrvtests/tickets/ticket47714_test.py
+++ b/dirsrvtests/tickets/ticket47714_test.py
@@ -135,19 +135,6 @@ def _header(topology, label):
topology.standalone.log.info("#######")
topology.standalone.log.info("###############################################")
-def _uniqueness_config_entry(topology, name=None):
- if not name:
- return None
-
- ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
- "(objectclass=nsSlapdPlugin)",
- ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
- 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type',
- 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
- 'nsslapd-pluginDescription'])
- ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN)
- return ent
-
def test_ticket47714_init(topology):
"""
1. Add account policy entry to the DB
@@ -169,8 +156,6 @@ def test_ticket47714_init(topology):
'userPassword': TEST_USER_PW,
'acctPolicySubentry': ACCT_POLICY_DN})))
- log.info("\n######################### Adding cos entry ######################\n")
-
def test_ticket47714_run_0(topology):
"""
Check this change has no inpact to the existing functionality.
9 years, 8 months
2 commits - dirsrvtests/tickets ldap/servers
by Noriko Hosoi
dirsrvtests/tickets/ticket47714_test.py | 327 ++++++++++++++++++++++++++
ldap/servers/plugins/acctpolicy/acct_config.c | 10
ldap/servers/plugins/acctpolicy/acct_plugin.c | 8
ldap/servers/plugins/acctpolicy/acctpolicy.h | 2
4 files changed, 343 insertions(+), 4 deletions(-)
New commits:
commit 39b6a18b4c4cf53fd3ac52b5e331e2e043cd9d1f
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Thu Jul 17 12:22:27 2014 -0700
Ticket 47714 - CI test: add test case for ticket 47713
Description: [RFE] Update lastLoginTime also in Account Policy
plugin if account lockout is based on passwordExpirationTime.
Test Description:
1) Check this change has no impact to the existing functionality.
1-1. Set account policy config without the new attr alwaysRecordLoginAttr
1-2. Bind as a test user
1-3. Bind as the test user again and check the lastLoginTime is updated
1-4. Wait longer than the accountInactivityLimit time and bind as the
test user, which should fail with CONSTRAINT_VIOLATION.
2) Verify a new config attr alwaysRecordLoginAttr
2-1. Set account policy config with the new attr alwaysRecordLoginAttr:
lastLoginTime
Note: bogus attr is set to stateattrname.
altstateattrname type value is used for checking whether the account
is idle or not.
2-2. Bind as a test user
2-3. Bind as the test user again and check the alwaysRecordLoginAttr:
lastLoginTime is updated
https://fedorahosted.org/389/ticket/47714
Reviewed by mreynolds(a)redhat.com (Thanks, Mark!!)
diff --git a/dirsrvtests/tickets/ticket47714_test.py b/dirsrvtests/tickets/ticket47714_test.py
new file mode 100644
index 0000000..0e0f816
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47714_test.py
@@ -0,0 +1,327 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import socket
+import pytest
+import shutil
+from lib389 import DirSrv, Entry, tools
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from constants import *
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+ACCT_POLICY_CONFIG_DN = 'cn=config,cn=%s,cn=plugins,cn=config' % PLUGIN_ACCT_POLICY
+ACCT_POLICY_DN = 'cn=Account Inactivation Pplicy,%s' % SUFFIX
+INACTIVITY_LIMIT = '9'
+SEARCHFILTER = '(objectclass=*)'
+
+TEST_USER = 'ticket47714user'
+TEST_USER_DN = 'uid=%s,%s' % (TEST_USER, SUFFIX)
+TEST_USER_PW = '%s' % TEST_USER
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ try:
+ standalone.start(timeout=10)
+ except ldap.SERVER_DOWN:
+ pass
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+def _header(topology, label):
+ topology.standalone.log.info("\n\n###############################################")
+ topology.standalone.log.info("#######")
+ topology.standalone.log.info("####### %s" % label)
+ topology.standalone.log.info("#######")
+ topology.standalone.log.info("###############################################")
+
+def _uniqueness_config_entry(topology, name=None):
+ if not name:
+ return None
+
+ ent = topology.standalone.getEntry("cn=%s,%s" % (PLUGIN_ATTR_UNIQUENESS, DN_PLUGIN), ldap.SCOPE_BASE,
+ "(objectclass=nsSlapdPlugin)",
+ ['objectClass', 'cn', 'nsslapd-pluginPath', 'nsslapd-pluginInitfunc',
+ 'nsslapd-pluginType', 'nsslapd-pluginEnabled', 'nsslapd-plugin-depends-on-type',
+ 'nsslapd-pluginId', 'nsslapd-pluginVersion', 'nsslapd-pluginVendor',
+ 'nsslapd-pluginDescription'])
+ ent.dn = "cn=%s uniqueness,%s" % (name, DN_PLUGIN)
+ return ent
+
+def test_ticket47714_init(topology):
+ """
+ 1. Add account policy entry to the DB
+ 2. Add a test user to the DB
+ """
+ _header(topology, 'Testing Ticket 47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN)
+ topology.standalone.add_s(Entry((ACCT_POLICY_DN, {'objectclass': "top ldapsubentry extensibleObject accountpolicy".split(),
+ 'accountInactivityLimit': INACTIVITY_LIMIT})))
+
+ log.info("\n######################### Adding Test User entry: %s ######################\n" % TEST_USER_DN)
+ topology.standalone.add_s(Entry((TEST_USER_DN, {'objectclass': "top person organizationalPerson inetOrgPerson".split(),
+ 'cn': TEST_USER,
+ 'sn': TEST_USER,
+ 'givenname': TEST_USER,
+ 'userPassword': TEST_USER_PW,
+ 'acctPolicySubentry': ACCT_POLICY_DN})))
+
+ log.info("\n######################### Adding cos entry ######################\n")
+
+def test_ticket47714_run_0(topology):
+ """
+ Check this change has no inpact to the existing functionality.
+ 1. Set account policy config without the new attr alwaysRecordLoginAttr
+ 2. Bind as a test user
+ 3. Bind as the test user again and check the lastLoginTime is updated
+ 4. Waint longer than the accountInactivityLimit time and bind as the test user,
+ which should fail with CONSTANT_VIOLATION.
+ """
+ _header(topology, 'Account Policy - No new attr alwaysRecordLoginAttr in config')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ # Modify Account Policy config entry
+ topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+ (ldap.MOD_REPLACE, 'stateattrname', 'lastLoginTime'),
+ (ldap.MOD_REPLACE, 'altstateattrname', 'createTimestamp'),
+ (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+ (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+
+ # Enable the plugins
+ topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+
+ topology.standalone.restart(timeout=120)
+
+ log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN)
+ try:
+ topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ except ldap.CONSTRAINT_VIOLATION, e:
+ log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+
+ lastLoginTime0 = entry[0].lastLoginTime
+
+ time.sleep(2)
+
+ log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN)
+ try:
+ topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ except ldap.CONSTRAINT_VIOLATION, e:
+ log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+
+ lastLoginTime1 = entry[0].lastLoginTime
+
+ log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1))
+ assert lastLoginTime0 < lastLoginTime1
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology.standalone.search_s(ACCT_POLICY_DN, ldap.SCOPE_BASE, SEARCHFILTER)
+ log.info("\n######################### %s ######################\n" % ACCT_POLICY_CONFIG_DN)
+ log.info("accountInactivityLimit: %s" % entry[0].accountInactivityLimit)
+ log.info("\n######################### %s DONE ######################\n" % ACCT_POLICY_CONFIG_DN)
+
+ time.sleep(10)
+
+ log.info("\n######################### Bind as %s again to fail ######################\n" % TEST_USER_DN)
+ try:
+ topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ except ldap.CONSTRAINT_VIOLATION, e:
+ log.info('CONSTRAINT VIOLATION ' + e.message['desc'])
+ log.info("%s was successfully inactivated." % TEST_USER_DN)
+ pass
+
+def test_ticket47714_run_1(topology):
+ """
+ Verify a new config attr alwaysRecordLoginAttr
+ 1. Set account policy config with the new attr alwaysRecordLoginAttr: lastLoginTime
+ Note: bogus attr is set to stateattrname.
+ altstateattrname type value is used for checking whether the account is idle or not.
+ 2. Bind as a test user
+ 3. Bind as the test user again and check the alwaysRecordLoginAttr: lastLoginTime is updated
+ """
+ _header(topology, 'Account Policy - With new attr alwaysRecordLoginAttr in config')
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.modify_s(TEST_USER_DN, [(ldap.MOD_DELETE, 'lastLoginTime', None)])
+
+ # Modify Account Policy config entry
+ topology.standalone.modify_s(ACCT_POLICY_CONFIG_DN, [(ldap.MOD_REPLACE, 'alwaysrecordlogin', 'yes'),
+ (ldap.MOD_REPLACE, 'stateattrname', 'bogus'),
+ (ldap.MOD_REPLACE, 'altstateattrname', 'modifyTimestamp'),
+ (ldap.MOD_REPLACE, 'alwaysRecordLoginAttr', 'lastLoginTime'),
+ (ldap.MOD_REPLACE, 'specattrname', 'acctPolicySubentry'),
+ (ldap.MOD_REPLACE, 'limitattrname', 'accountInactivityLimit')])
+
+ # Enable the plugins
+ topology.standalone.plugins.enable(name=PLUGIN_ACCT_POLICY)
+
+ topology.standalone.restart(timeout=120)
+
+ log.info("\n######################### Bind as %s ######################\n" % TEST_USER_DN)
+ try:
+ topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ except ldap.CONSTRAINT_VIOLATION, e:
+ log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+
+ lastLoginTime0 = entry[0].lastLoginTime
+
+ time.sleep(2)
+
+ log.info("\n######################### Bind as %s again ######################\n" % TEST_USER_DN)
+ try:
+ topology.standalone.simple_bind_s(TEST_USER_DN, TEST_USER_PW)
+ except ldap.CONSTRAINT_VIOLATION, e:
+ log.error('CONSTRAINT VIOLATION ' + e.message['desc'])
+
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ entry = topology.standalone.search_s(TEST_USER_DN, ldap.SCOPE_BASE, SEARCHFILTER, ['lastLoginTime'])
+
+ lastLoginTime1 = entry[0].lastLoginTime
+
+ log.info("First lastLoginTime: %s, Second lastLoginTime: %s" % (lastLoginTime0, lastLoginTime1))
+ assert lastLoginTime0 < lastLoginTime1
+
+ topology.standalone.log.info("ticket47714 was successfully verified.");
+
+def test_ticket47714_final(topology):
+ log.info("\n######################### Adding Account Policy entry: %s ######################\n" % ACCT_POLICY_DN)
+ # Enabled the plugins
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+ topology.standalone.plugins.disable(name=PLUGIN_ACCT_POLICY)
+ topology.standalone.stop(timeout=10)
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47714_init(topo)
+
+ test_ticket47714_run_0(topo)
+
+ test_ticket47714_run_1(topo)
+
+ test_ticket47714_final(topo)
+
+
+if __name__ == '__main__':
+ run_isolated()
commit 533f250275c745fd2b0decc74b6d560502209495
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Wed Jul 16 15:23:28 2014 -0700
Ticket #47714 - [RFE] Update lastLoginTime also in Account Policy plugin if account lockout is based on passwordExpirationTime.
Description: Introducing a new attribute alwaysRecordLoginAttr to
Account Policy config entry (cn=config,cn=Account Policy Plugin,
cn=plugins,cn=config) to distinguish an attribute for checking the
account's activity (e.g., passwordExpirationTime) and an attribute
to be updated at the successful login (e.g., lastLoginTime).
https://fedorahosted.org/389/ticket/47714
Reviewed by mreynolds(a)redhat.com (Thanks, Mark!!)
diff --git a/ldap/servers/plugins/acctpolicy/acct_config.c b/ldap/servers/plugins/acctpolicy/acct_config.c
index 5277103..25352b1 100644
--- a/ldap/servers/plugins/acctpolicy/acct_config.c
+++ b/ldap/servers/plugins/acctpolicy/acct_config.c
@@ -100,6 +100,15 @@ acct_policy_entry2config( Slapi_Entry *e, acctPluginCfg *newcfg ) {
slapi_ch_free_string( &newcfg->alt_state_attr_name ); /*none - NULL */
} /* else use configured value */
+ newcfg->always_record_login_attr = get_attr_string_val( e, CFG_RECORD_LOGIN_ATTR );
+ /* What user attribute will store the last login time
+ * of a user. If empty, should have the same value as
+ * stateattrname. default value: empty
+ */
+ if( newcfg->always_record_login_attr == NULL ) {
+ newcfg->always_record_login_attr = slapi_ch_strdup( newcfg->state_attr_name );
+ }
+
newcfg->spec_attr_name = get_attr_string_val( e, CFG_SPEC_ATTR );
if( newcfg->spec_attr_name == NULL ) {
newcfg->spec_attr_name = slapi_ch_strdup( DEFAULT_SPEC_ATTR );
@@ -159,5 +168,6 @@ free_config()
slapi_ch_free_string(&globalcfg.alt_state_attr_name);
slapi_ch_free_string(&globalcfg.spec_attr_name);
slapi_ch_free_string(&globalcfg.limit_attr_name);
+ slapi_ch_free_string(&globalcfg.always_record_login_attr);
}
diff --git a/ldap/servers/plugins/acctpolicy/acct_plugin.c b/ldap/servers/plugins/acctpolicy/acct_plugin.c
index b4db811..5719f27 100644
--- a/ldap/servers/plugins/acctpolicy/acct_plugin.c
+++ b/ldap/servers/plugins/acctpolicy/acct_plugin.c
@@ -111,7 +111,7 @@ acct_record_login( const char *dn )
/* if we are not allowed to modify the state attr we're done
* this could be intentional, so just return
*/
- if (! update_is_allowed_attr(cfg->state_attr_name) )
+ if (! update_is_allowed_attr(cfg->always_record_login_attr) )
return rc;
plugin_id = get_identity();
@@ -124,7 +124,7 @@ acct_record_login( const char *dn )
vals [1] = NULL;
mod.mod_op = LDAP_MOD_REPLACE | LDAP_MOD_BVALUES;
- mod.mod_type = cfg->state_attr_name;
+ mod.mod_type = cfg->always_record_login_attr;
mod.mod_bvalues = vals;
mods[0] = &mod;
@@ -142,13 +142,13 @@ acct_record_login( const char *dn )
if (ldrc != LDAP_SUCCESS) {
slapi_log_error( SLAPI_LOG_FATAL, POST_PLUGIN_NAME,
- "Recording %s=%s failed on \"%s\" err=%d\n", cfg->state_attr_name,
+ "Recording %s=%s failed on \"%s\" err=%d\n", cfg->always_record_login_attr,
timestr, dn, ldrc );
rc = -1;
goto done;
} else {
slapi_log_error( SLAPI_LOG_PLUGIN, POST_PLUGIN_NAME,
- "Recorded %s=%s on \"%s\"\n", cfg->state_attr_name, timestr, dn );
+ "Recorded %s=%s on \"%s\"\n", cfg->always_record_login_attr, timestr, dn );
}
done:
diff --git a/ldap/servers/plugins/acctpolicy/acctpolicy.h b/ldap/servers/plugins/acctpolicy/acctpolicy.h
index fcfd120..2185b95 100644
--- a/ldap/servers/plugins/acctpolicy/acctpolicy.h
+++ b/ldap/servers/plugins/acctpolicy/acctpolicy.h
@@ -28,6 +28,7 @@ Hewlett-Packard Development Company, L.P.
#define CFG_SPEC_ATTR "specAttrName"
#define CFG_INACT_LIMIT_ATTR "limitAttrName"
#define CFG_RECORD_LOGIN "alwaysRecordLogin"
+#define CFG_RECORD_LOGIN_ATTR "alwaysRecordLoginAttr"
#define DEFAULT_LASTLOGIN_STATE_ATTR "lastLoginTime"
#define DEFAULT_ALT_LASTLOGIN_STATE_ATTR "createTimestamp"
@@ -56,6 +57,7 @@ typedef struct acct_plugin_cfg {
char* spec_attr_name;
char* limit_attr_name;
int always_record_login;
+ char* always_record_login_attr;
unsigned long inactivitylimit;
} acctPluginCfg;
9 years, 8 months
ldap/admin
by Mark Reynolds
ldap/admin/src/logconv.pl | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
New commits:
commit ae3a53fe80dc32923a0307633467c840d82eb11d
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Wed Jul 16 14:13:20 2014 -0400
Ticket 47812 - logconv.pl missing -U option from usage
Original patch submitted by: Marko Myllynen <myllynen(a)redhat.com>
Description: The usage output was missing the "U" option
https://fedorahosted.org/389/ticket/47812
Reviewed by: mreynolds
diff --git a/ldap/admin/src/logconv.pl b/ldap/admin/src/logconv.pl
index 19be032..3ca3498 100755
--- a/ldap/admin/src/logconv.pl
+++ b/ldap/admin/src/logconv.pl
@@ -1551,7 +1551,8 @@ sub displayUsage {
print " j Recommendations\n";
print " u Unindexed Search Stats\n";
print " y Connection Latency Stats\n";
- print " p Open Connection ID Stats\n\n";
+ print " p Open Connection ID Stats\n";
+ print " U Unindexed Search Summary\n\n";
print " Examples:\n\n";
9 years, 8 months
2 commits - dirsrvtests/tickets ldap/servers
by Noriko Hosoi
dirsrvtests/tickets/ticket47664_test.py | 268 ++++++++++++++++++++++++++++++++
ldap/servers/slapd/opshared.c | 33 ++-
2 files changed, 290 insertions(+), 11 deletions(-)
New commits:
commit 2bbde88012376cf32c7d91a9ba5b5aa1a6c00922
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Tue Jul 15 16:41:02 2014 -0700
Ticket 47664 - CI test: add test case for ticket 47664
Bug Description: Passing Simple Paged Results control and Get Effective
Rights control at the same time, the search ignores paging.
Test Description: Import 20 entries.
Run Simple Paged Results + Get Effective Rights search with the page size 4.
If it returns 4 entries with Get Effective Rights attributes / page AND
the page count is 5, it passes the test.
https://fedorahosted.org/389/ticket/47664
Reviewed by mreynolds(a)redhat.com (Thank you, Mark!!)
diff --git a/dirsrvtests/tickets/ticket47664_test.py b/dirsrvtests/tickets/ticket47664_test.py
new file mode 100644
index 0000000..f374302
--- /dev/null
+++ b/dirsrvtests/tickets/ticket47664_test.py
@@ -0,0 +1,268 @@
+import os
+import sys
+import time
+import ldap
+import logging
+import socket
+import time
+import logging
+import pytest
+from lib389 import DirSrv, Entry, tools, tasks
+from lib389.tools import DirSrvTools
+from lib389._constants import *
+from lib389.properties import *
+from lib389.tasks import *
+from constants import *
+from ldap.controls import SimplePagedResultsControl
+from ldap.controls.simple import GetEffectiveRightsControl
+
+log = logging.getLogger(__name__)
+
+installation_prefix = None
+
+MYSUFFIX = 'o=ticket47664.org'
+MYSUFFIXBE = 'ticket47664'
+
+_MYLDIF = 'ticket47664.ldif'
+
+SEARCHFILTER = '(objectclass=*)'
+
+class TopologyStandalone(object):
+ def __init__(self, standalone):
+ standalone.open()
+ self.standalone = standalone
+
+
+(a)pytest.fixture(scope="module")
+def topology(request):
+ '''
+ This fixture is used to standalone topology for the 'module'.
+ At the beginning, It may exists a standalone instance.
+ It may also exists a backup for the standalone instance.
+
+ Principle:
+ If standalone instance exists:
+ restart it
+ If backup of standalone exists:
+ create/rebind to standalone
+
+ restore standalone instance from backup
+ else:
+ Cleanup everything
+ remove instance
+ remove backup
+ Create instance
+ Create backup
+ '''
+ global installation_prefix
+
+ if installation_prefix:
+ args_instance[SER_DEPLOYED_DIR] = installation_prefix
+
+ standalone = DirSrv(verbose=False)
+
+ # Args for the standalone instance
+ args_instance[SER_HOST] = HOST_STANDALONE
+ args_instance[SER_PORT] = PORT_STANDALONE
+ args_instance[SER_SERVERID_PROP] = SERVERID_STANDALONE
+ args_standalone = args_instance.copy()
+ standalone.allocate(args_standalone)
+
+ # Get the status of the backups
+ backup_standalone = standalone.checkBackupFS()
+
+ # Get the status of the instance and restart it if it exists
+ instance_standalone = standalone.exists()
+ if instance_standalone:
+ # assuming the instance is already stopped, just wait 5 sec max
+ standalone.stop(timeout=5)
+ standalone.start(timeout=10)
+
+ if backup_standalone:
+ # The backup exist, assuming it is correct
+ # we just re-init the instance with it
+ if not instance_standalone:
+ standalone.create()
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # restore standalone instance from backup
+ standalone.stop(timeout=10)
+ standalone.restoreFS(backup_standalone)
+ standalone.start(timeout=10)
+
+ else:
+ # We should be here only in two conditions
+ # - This is the first time a test involve standalone instance
+ # - Something weird happened (instance/backup destroyed)
+ # so we discard everything and recreate all
+
+ # Remove the backup. So even if we have a specific backup file
+ # (e.g backup_standalone) we clear backup that an instance may have created
+ if backup_standalone:
+ standalone.clearBackupFS()
+
+ # Remove the instance
+ if instance_standalone:
+ standalone.delete()
+
+ # Create the instance
+ standalone.create()
+
+ # Used to retrieve configuration information (dbdir, confdir...)
+ standalone.open()
+
+ # Time to create the backups
+ standalone.stop(timeout=10)
+ standalone.backupfile = standalone.backupFS()
+ standalone.start(timeout=10)
+
+ # clear the tmp directory
+ standalone.clearTmpDir(__file__)
+
+ #
+ # Here we have standalone instance up and running
+ # Either coming from a backup recovery
+ # or from a fresh (re)init
+ # Time to return the topology
+ return TopologyStandalone(standalone)
+
+
+def test_ticket47664_run(topology):
+ """
+ Import 20 entries
+ Search with Simple Paged Results Control (pagesize = 4) + Get Effective Rights Control (attrs list = ['cn'])
+ If Get Effective Rights attribute (attributeLevelRights for 'cn') is returned 4 attrs / page AND
+ the page count == 20/4, then the fix is verified.
+ """
+ log.info('Testing Ticket 47664 - paged results control is not working in some cases when we have a subsuffix')
+
+ # bind as directory manager
+ topology.standalone.log.info("Bind as %s" % DN_DM)
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ topology.standalone.log.info("\n\n######################### SETUP SUFFIX o=ticket47664.org ######################\n")
+
+ topology.standalone.backend.create(MYSUFFIX, {BACKEND_NAME: MYSUFFIXBE})
+ topology.standalone.mappingtree.create(MYSUFFIX, bename=MYSUFFIXBE)
+
+ topology.standalone.log.info("\n\n######################### Generate Test data ######################\n")
+
+ # get tmp dir
+ mytmp = topology.standalone.getDir(__file__, TMP_DIR)
+ if mytmp == None:
+ mytmp = "/tmp"
+
+ MYLDIF = '%s%s' % (mytmp, _MYLDIF)
+ os.system('ls %s' % MYLDIF)
+ os.system('rm -f %s' % MYLDIF)
+ os.system('dbgen.pl -s %s -o %s -n 14' % (MYSUFFIX, MYLDIF))
+
+ cmdline = 'egrep dn: %s | wc -l' % MYLDIF
+ p = os.popen(cmdline, "r")
+ dnnumstr = p.readline()
+ dnnum = int(dnnumstr)
+ topology.standalone.log.info("We have %d entries.\n", dnnum)
+
+ topology.standalone.log.info("\n\n######################### Import Test data ######################\n")
+
+ args = {TASK_WAIT: True}
+ importTask = Tasks(topology.standalone)
+ importTask.importLDIF(MYSUFFIX, MYSUFFIXBE, MYLDIF, args)
+
+ topology.standalone.log.info("\n\n######################### SEARCH ALL ######################\n")
+ topology.standalone.log.info("Bind as %s and add the READ/SEARCH SELFDN aci" % DN_DM)
+ topology.standalone.simple_bind_s(DN_DM, PASSWORD)
+
+ entries = topology.standalone.search_s(MYSUFFIX, ldap.SCOPE_SUBTREE, SEARCHFILTER)
+ topology.standalone.log.info("Returned %d entries.\n", len(entries))
+
+ #print entries
+
+ assert dnnum == len(entries)
+
+ topology.standalone.log.info('%d entries are successfully imported.' % dnnum)
+
+ topology.standalone.log.info("\n\n######################### SEARCH WITH SIMPLE PAGED RESULTS CONTROL ######################\n")
+
+ page_size = 4
+ spr_req_ctrl = SimplePagedResultsControl(True, size=page_size, cookie='')
+ ger_req_ctrl = GetEffectiveRightsControl(True, "dn: " + DN_DM)
+
+ known_ldap_resp_ctrls = {
+ SimplePagedResultsControl.controlType:SimplePagedResultsControl,
+ }
+
+ topology.standalone.log.info("Calling search_ext...")
+ msgid = topology.standalone.search_ext(MYSUFFIX,
+ ldap.SCOPE_SUBTREE,
+ SEARCHFILTER,
+ ['cn'],
+ serverctrls=[spr_req_ctrl, ger_req_ctrl])
+ attrlevelrightscnt = 0
+ pageddncnt = 0
+ pages = 0
+ while True:
+ pages += 1
+
+ topology.standalone.log.info("Getting page %d" % pages)
+ rtype, rdata, rmsgid, responcectrls = topology.standalone.result3(msgid, resp_ctrl_classes=known_ldap_resp_ctrls)
+ topology.standalone.log.info("%d results" % len(rdata))
+ pageddncnt += len(rdata)
+
+ topology.standalone.log.info("Results:")
+ for dn, attrs in rdata:
+ topology.standalone.log.info("dn: %s" % dn)
+ topology.standalone.log.info("attributeLevelRights: %s" % attrs['attributeLevelRights'][0])
+ if attrs['attributeLevelRights'][0] != "":
+ attrlevelrightscnt += 1
+
+ pctrls = [
+ c for c in responcectrls if c.controlType == SimplePagedResultsControl.controlType
+ ]
+ if not pctrls:
+ topology.standalone.log.info('Warning: Server ignores RFC 2696 control.')
+ break
+
+ if pctrls[0].cookie:
+ spr_req_ctrl.cookie = pctrls[0].cookie
+ topology.standalone.log.info("cookie: %s" % spr_req_ctrl.cookie)
+ msgid = topology.standalone.search_ext(MYSUFFIX,
+ ldap.SCOPE_SUBTREE,
+ SEARCHFILTER,
+ ['cn'],
+ serverctrls=[spr_req_ctrl, ger_req_ctrl])
+ else:
+ topology.standalone.log.info("No cookie")
+ break
+
+ topology.standalone.log.info("Paged result search returned %d entries in %d pages.\n", pageddncnt, pages)
+
+ assert dnnum == len(entries)
+ assert dnnum == attrlevelrightscnt
+ assert pages == (dnnum / page_size)
+ topology.standalone.log.info("ticket47664 was successfully verified.");
+
+def test_ticket47664_final(topology):
+ topology.standalone.stop(timeout=10)
+
+def run_isolated():
+ '''
+ run_isolated is used to run these test cases independently of a test scheduler (xunit, py.test..)
+ To run isolated without py.test, you need to
+ - edit this file and comment '@pytest.fixture' line before 'topology' function.
+ - set the installation prefix
+ - run this program
+ '''
+ global installation_prefix
+ installation_prefix = None
+
+ topo = topology(True)
+ test_ticket47664_run(topo)
+
+ test_ticket47664_final(topo)
+
+
+if __name__ == '__main__':
+ run_isolated()
+
commit f646bd326a4494cb1a0e7d2d3ea556677b5197eb
Author: Noriko Hosoi <nhosoi(a)redhat.com>
Date: Tue Jul 15 13:33:11 2014 -0700
Ticket #47664 - Page control does not work if effective rights control is specified
Bug Description: If an effective rights control and a simple paged
results control were specified in one search request, the simple
paged results control was ignored.
Fix Description: In the search iteration code, handling the simple
paged results was not fully implemented. This patch adds it.
https://fedorahosted.org/389/ticket/47664
Reviewed by mreynolds(a)redhat.com (Thank you, Mark!!)
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index e222b05..4e06652 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -1302,16 +1302,26 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result,
slapi_pblock_get(pb, SLAPI_SEARCH_RESULT_ENTRY, &e);
/* Check for possible get_effective_rights control */
- if ( operation->o_flags & OP_FLAG_GET_EFFECTIVE_RIGHTS )
- {
+ if (e) {
+ if (operation->o_flags & OP_FLAG_GET_EFFECTIVE_RIGHTS) {
char *errbuf = NULL;
char **gerattrs = NULL;
char **gerattrsdup = NULL;
char **gap = NULL;
char *gapnext = NULL;
- slapi_pblock_get( pb, SLAPI_SEARCH_GERATTRS, &gerattrs );
+ if (PAGEDRESULTS_PAGE_END == pr_stat)
+ {
+ /*
+ * read ahead -- there is at least more entry.
+ * undo it and return the PAGE_END
+ */
+ be->be_prev_search_results(pb);
+ done = 1;
+ continue;
+ }
+ slapi_pblock_get( pb, SLAPI_SEARCH_GERATTRS, &gerattrs );
gerattrsdup = cool_charray_dup(gerattrs);
gap = gerattrsdup;
do
@@ -1414,15 +1424,15 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result,
while (gap && ++gap && *gap);
slapi_pblock_set( pb, SLAPI_SEARCH_GERATTRS, gerattrs );
cool_charray_free(gerattrsdup);
- if (NULL == e)
- {
- /* no more entries */
- done = 1;
- pr_stat = PAGEDRESULTS_SEARCH_END;
+ if (pagesize == *pnentries)
+ {
+ /* PAGED RESULTS: reached the pagesize */
+ /* We don't set "done = 1" here.
+ * We read ahead next entry to check whether there is
+ * more entries to return or not. */
+ pr_stat = PAGEDRESULTS_PAGE_END;
}
- }
- else if (e)
- {
+ } else { /* not GET_EFFECTIVE_RIGHTS */
if (PAGEDRESULTS_PAGE_END == pr_stat)
{
/*
@@ -1469,6 +1479,7 @@ iterate(Slapi_PBlock *pb, Slapi_Backend *be, int send_result,
* more entries to return or not. */
pr_stat = PAGEDRESULTS_PAGE_END;
}
+ }
}
else
{
9 years, 8 months