Branch '389-ds-base-1.3.4' - ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/cl5_api.c | 162 ++++++++++------
ldap/servers/plugins/replication/cl5_api.h | 2
ldap/servers/plugins/replication/repl5_replica_config.c | 2
3 files changed, 106 insertions(+), 60 deletions(-)
New commits:
commit 792aa66d60469a2262869680f61bb607a0e1012e
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Aug 23 12:06:30 2016 -0400
Ticket 48964 - cleanAllRUV changelog purging incorrectly
processes all backends
Bug Description: When the changelog was being purged of "cleaned" rids it was checking
all the backend changelogs, and not the one from which the
cleanAllRUV task originated. This could corrupt a different
backend's changelog if both backends used the same RID.
Fix Description: Purge the changelog associated with the backend that is specified in
the cleanAllRUV task. Also moved the "purging" to its own function,
and fixed a few compiler warnings.
https://fedorahosted.org/389/ticket/48965
Reviewed by: nhosoi(Thanks!)
(cherry picked from commit fda00435a7536c1ded72bb78a975f3370d09a3be)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 3adaf86..6a09aea 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -317,7 +317,7 @@ static int _cl5CheckMissingCSN (const CSN *minCsn, const RUV *supplierRUV, CL5DB
static int _cl5TrimInit ();
static void _cl5TrimCleanup ();
static int _cl5TrimMain (void *param);
-static void _cl5DoTrimming (ReplicaId rid);
+static void _cl5DoTrimming ();
static void _cl5CompactDBs();
static void _cl5PurgeRID(Object *obj, ReplicaId cleaned_rid);
static int _cl5PurgeGetFirstEntry (Object *obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
@@ -3447,43 +3447,37 @@ static int _cl5TrimMain (void *param)
return 0;
}
-/* We remove an entry if it has been replayed to all consumers and
- and the number of entries in the changelog is larger than maxEntries
- or age of the entry is larger than maxAge.
- Also we can't purge entries which correspond to max csns in the
- supplier's ruv. Here is a example where we can get into trouble:
- The server is setup with time based trimming and no consumer's
- At some point all the entries are trimmed from the changelog.
- At a later point a consumer is added and initialized online
- Then a change is made on the supplier.
- To update the consumer, the supplier would attempt to locate
- the last change sent to the consumer in the changelog and will
- fail because the change was removed.
-
+/*
+ * We remove an entry if it has been replayed to all consumers and the number
+ * of entries in the changelog is larger than maxEntries or age of the entry
+ * is larger than maxAge. Also we can't purge entries which correspond to max
+ * csns in the supplier's ruv. Here is a example where we can get into trouble:
+ *
+ * The server is setup with time based trimming and no consumer's
+ * At some point all the entries are trimmed from the changelog.
+ * At a later point a consumer is added and initialized online.
+ * Then a change is made on the supplier.
+ * To update the consumer, the supplier would attempt to locate the last
+ * change sent to the consumer in the changelog and will fail because the
+ * change was removed.
*/
-
-static void _cl5DoTrimming (ReplicaId rid)
+static void _cl5DoTrimming ()
{
Object *obj;
long numToTrim;
PR_Lock (s_cl5Desc.dbTrim.lock);
- /* ONREPL We trim file by file which means that some files will be
- trimmed more often than other. We might have to fix that by, for
- example, randomizing starting point */
+ /*
+ * We are trimming all the changelogs. We trim file by file which
+ * means that some files will be trimmed more often than other. We
+ * might have to fix that by, for example, randomizing the starting
+ * point.
+ */
obj = objset_first_obj (s_cl5Desc.dbFiles);
- while (obj && (_cl5CanTrim ((time_t)0, &numToTrim) || rid))
+ while (obj && _cl5CanTrim ((time_t)0, &numToTrim))
{
- if (rid){
- /*
- * We are cleaning an invalid rid, and need to strip it
- * from the changelog.
- */
- _cl5PurgeRID (obj, rid);
- } else {
- _cl5TrimFile (obj, &numToTrim);
- }
+ _cl5TrimFile (obj, &numToTrim);
obj = objset_next_obj (s_cl5Desc.dbFiles, obj);
}
@@ -3495,6 +3489,43 @@ static void _cl5DoTrimming (ReplicaId rid)
return;
}
+/*
+ * We are purging a changelog after a cleanAllRUV task. Find the specific
+ * changelog for the backend that is being cleaned, and purge all the records
+ * with the cleaned rid.
+ */
+static void _cl5DoPurging (Replica *replica)
+{
+ ReplicaId rid = replica_get_rid(replica);
+ const Slapi_DN *sdn = replica_get_root(replica);
+ const char *replName = replica_get_name(replica);
+ char *replGen = replica_get_generation(replica);
+ char *fileName;
+ Object *obj;
+
+ PR_Lock (s_cl5Desc.dbTrim.lock);
+ fileName = _cl5MakeFileName (replName, replGen);
+ obj = objset_find(s_cl5Desc.dbFiles, _cl5CompareDBFile, fileName);
+ if (obj) {
+ /* We found our changelog, now purge it */
+ _cl5PurgeRID (obj, rid);
+ object_release (obj);
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
+ "Purged rid (%d) from suffix (%s)\n",
+ rid, slapi_sdn_get_dn(sdn));
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
+ "Purge rid (%d) failed to find changelog file (%s) for suffix (%s)\n",
+ rid, fileName, slapi_sdn_get_dn(sdn));
+ }
+ PR_Unlock (s_cl5Desc.dbTrim.lock);
+
+ slapi_ch_free_string(&replGen);
+ slapi_ch_free_string(&fileName);
+
+ return;
+}
+
/* clear free page files to reduce changelog */
static void
_cl5CompactDBs()
@@ -4072,23 +4103,25 @@ static PRBool _cl5CanTrim (time_t time, long *numToTrim)
{
*numToTrim = 0;
- if (s_cl5Desc.dbTrim.maxAge == 0 && s_cl5Desc.dbTrim.maxEntries == 0)
+ if (s_cl5Desc.dbTrim.maxAge == 0 && s_cl5Desc.dbTrim.maxEntries == 0) {
return PR_FALSE;
-
+ }
if (s_cl5Desc.dbTrim.maxAge == 0)
{
*numToTrim = cl5GetOperationCount (NULL) - s_cl5Desc.dbTrim.maxEntries;
return ( *numToTrim > 0 );
}
- if (s_cl5Desc.dbTrim.maxEntries > 0 &&
- (*numToTrim = cl5GetOperationCount (NULL) - s_cl5Desc.dbTrim.maxEntries) > 0)
- return PR_TRUE;
+ if (s_cl5Desc.dbTrim.maxEntries > 0 &&
+ (*numToTrim = cl5GetOperationCount (NULL) - s_cl5Desc.dbTrim.maxEntries) > 0) {
+ return PR_TRUE;
+ }
- if (time)
+ if (time) {
return (current_time () - time > s_cl5Desc.dbTrim.maxAge);
- else
- return PR_TRUE;
+ } else {
+ return PR_TRUE;
+ }
}
static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
@@ -4101,7 +4134,6 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
char *pos;
char *agmt_name;
-
PR_ASSERT (replGen && obj);
file = (CL5DBFile*)object_get_data (obj);
@@ -4109,13 +4141,12 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
agmt_name = get_thread_private_agmtname();
- if (purge) /* read purge vector entry */
- key.data = _cl5GetHelperEntryKey (PURGE_RUV_TIME, csnStr);
- else /* read upper bound vector */
- key.data = _cl5GetHelperEntryKey (MAX_RUV_TIME, csnStr);
-
+ if (purge) { /* read purge vector entry */
+ key.data = _cl5GetHelperEntryKey (PURGE_RUV_TIME, csnStr);
+ } else { /* read upper bound vector */
+ key.data = _cl5GetHelperEntryKey (MAX_RUV_TIME, csnStr);
+ }
key.size = CSN_STRSIZE;
-
data.flags = DB_DBT_MALLOC;
rc = file->db->get(file->db, NULL/*txn*/, &key, &data, 0);
@@ -4125,13 +4156,13 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
rc = _cl5ReadBervals (&vals, &pos, data.size);
slapi_ch_free (&(data.data));
if (rc != CL5_SUCCESS)
- goto done;
+ goto done;
- if (purge)
+ if (purge) {
rc = ruv_init_from_bervals(vals, &file->purgeRUV);
- else
+ } else {
rc = ruv_init_from_bervals(vals, &file->maxRUV);
-
+ }
if (rc != RUV_SUCCESS)
{
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
@@ -4139,7 +4170,7 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
"RUV error %d\n", agmt_name, purge? "purge" : "upper bound", rc);
rc = CL5_RUV_ERROR;
- goto done;
+ goto done;
}
/* delete the entry; it is re-added when file
@@ -4151,7 +4182,7 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
case DB_NOTFOUND: /* RUV is lost - need to construct */
rc = _cl5ConstructRUV (replGen, obj, purge);
- goto done;
+ goto done;
default: slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
"%s: _cl5ReadRUV: failed to get purge RUV; "
@@ -6946,12 +6977,14 @@ cl5CleanRUV(ReplicaId rid){
slapi_rwlock_unlock (s_cl5Desc.stLock);
}
-void trigger_cl_purging(ReplicaId rid){
+/*
+ * Create a thread to purge a changelog of cleaned RIDs
+ */
+void trigger_cl_purging(Replica *replica){
PRThread *trim_tid = NULL;
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "trigger_cl_purging: rid (%d)\n",(int)rid);
trim_tid = PR_CreateThread(PR_USER_THREAD, (VFP)(void*)trigger_cl_purging_thread,
- (void *)&rid, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ (void *)replica, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, DEFAULT_THREAD_STACKSIZE);
if (NULL == trim_tid){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
@@ -6963,19 +6996,32 @@ void trigger_cl_purging(ReplicaId rid){
}
}
+/*
+ * Purge a changelog of entries that originated from a particular replica(rid)
+ */
void
trigger_cl_purging_thread(void *arg){
- ReplicaId rid = *(ReplicaId *)arg;
+ Replica *replica = (Replica *)arg;
- /* make sure we have a change log, and we aren't closing it */
- if(s_cl5Desc.dbState == CL5_STATE_CLOSED || s_cl5Desc.dbState == CL5_STATE_CLOSING){
+ /* Make sure we have a change log, and we aren't closing it */
+ if (replica == NULL ||
+ s_cl5Desc.dbState == CL5_STATE_CLOSED ||
+ s_cl5Desc.dbState == CL5_STATE_CLOSING) {
return;
}
+
+ /* Bump the changelog thread count */
if (CL5_SUCCESS != _cl5AddThread()) {
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
- "trigger_cl_purging: failed to increment thread count "
+ "trigger_cl_purging: Abort - failed to increment thread count "
"NSPR error - %d\n", PR_GetError ());
+ return;
}
- _cl5DoTrimming(rid);
+
+ /* Purge the changelog */
+ _cl5DoPurging(replica);
_cl5RemoveThread();
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
+ "trigger_cl_purging: purged changelog for (%s) rid (%d)\n",
+ slapi_sdn_get_dn(replica_get_root(replica)), replica_get_rid(replica));
}
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
index 4c3b8e8..1a1c2f5 100644
--- a/ldap/servers/plugins/replication/cl5_api.h
+++ b/ldap/servers/plugins/replication/cl5_api.h
@@ -467,6 +467,6 @@ int cl5WriteRUV();
int cl5DeleteRUV();
void cl5CleanRUV(ReplicaId rid);
void cl5NotifyCleanup(int rid);
-void trigger_cl_purging(ReplicaId rid);
+void trigger_cl_purging(Replica *replica);
#endif
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 092f04e..096c829 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1467,7 +1467,7 @@ replica_execute_cleanruv_task (Object *r, ReplicaId rid, char *returntext /* not
/*
* Now purge the changelog
*/
- trigger_cl_purging(rid);
+ trigger_cl_purging(replica);
if (rc != RUV_SUCCESS){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanruv_task: task failed(%d)\n",rc);
7 years, 8 months
ldap/servers
by Mark Reynolds
ldap/servers/plugins/replication/cl5_api.c | 162 ++++++++++------
ldap/servers/plugins/replication/cl5_api.h | 2
ldap/servers/plugins/replication/repl5_replica_config.c | 2
3 files changed, 106 insertions(+), 60 deletions(-)
New commits:
commit fda00435a7536c1ded72bb78a975f3370d09a3be
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Tue Aug 23 12:06:30 2016 -0400
Ticket 48964 - cleanAllRUV changelog purging incorrectly
processes all backends
Bug Description: When the changelog was being purged of "cleaned" rids it was checking
all the backend changelogs, and not the one from which the
cleanAllRUV task originated. This could corrupt a different
backend's changelog if both backends used the same RID.
Fix Description: Purge the changelog associated with the backend that is specified in
the cleanAllRUV task. Also moved the "purging" to its own function,
and fixed a few compiler warnings.
https://fedorahosted.org/389/ticket/48965
Reviewed by: nhosoi(Thanks!)
diff --git a/ldap/servers/plugins/replication/cl5_api.c b/ldap/servers/plugins/replication/cl5_api.c
index 3adaf86..6a09aea 100644
--- a/ldap/servers/plugins/replication/cl5_api.c
+++ b/ldap/servers/plugins/replication/cl5_api.c
@@ -317,7 +317,7 @@ static int _cl5CheckMissingCSN (const CSN *minCsn, const RUV *supplierRUV, CL5DB
static int _cl5TrimInit ();
static void _cl5TrimCleanup ();
static int _cl5TrimMain (void *param);
-static void _cl5DoTrimming (ReplicaId rid);
+static void _cl5DoTrimming ();
static void _cl5CompactDBs();
static void _cl5PurgeRID(Object *obj, ReplicaId cleaned_rid);
static int _cl5PurgeGetFirstEntry (Object *obj, CL5Entry *entry, void **iterator, DB_TXN *txnid, int rid, DBT *key);
@@ -3447,43 +3447,37 @@ static int _cl5TrimMain (void *param)
return 0;
}
-/* We remove an entry if it has been replayed to all consumers and
- and the number of entries in the changelog is larger than maxEntries
- or age of the entry is larger than maxAge.
- Also we can't purge entries which correspond to max csns in the
- supplier's ruv. Here is a example where we can get into trouble:
- The server is setup with time based trimming and no consumer's
- At some point all the entries are trimmed from the changelog.
- At a later point a consumer is added and initialized online
- Then a change is made on the supplier.
- To update the consumer, the supplier would attempt to locate
- the last change sent to the consumer in the changelog and will
- fail because the change was removed.
-
+/*
+ * We remove an entry if it has been replayed to all consumers and the number
+ * of entries in the changelog is larger than maxEntries or age of the entry
+ * is larger than maxAge. Also we can't purge entries which correspond to max
+ * csns in the supplier's ruv. Here is a example where we can get into trouble:
+ *
+ * The server is setup with time based trimming and no consumer's
+ * At some point all the entries are trimmed from the changelog.
+ * At a later point a consumer is added and initialized online.
+ * Then a change is made on the supplier.
+ * To update the consumer, the supplier would attempt to locate the last
+ * change sent to the consumer in the changelog and will fail because the
+ * change was removed.
*/
-
-static void _cl5DoTrimming (ReplicaId rid)
+static void _cl5DoTrimming ()
{
Object *obj;
long numToTrim;
PR_Lock (s_cl5Desc.dbTrim.lock);
- /* ONREPL We trim file by file which means that some files will be
- trimmed more often than other. We might have to fix that by, for
- example, randomizing starting point */
+ /*
+ * We are trimming all the changelogs. We trim file by file which
+ * means that some files will be trimmed more often than other. We
+ * might have to fix that by, for example, randomizing the starting
+ * point.
+ */
obj = objset_first_obj (s_cl5Desc.dbFiles);
- while (obj && (_cl5CanTrim ((time_t)0, &numToTrim) || rid))
+ while (obj && _cl5CanTrim ((time_t)0, &numToTrim))
{
- if (rid){
- /*
- * We are cleaning an invalid rid, and need to strip it
- * from the changelog.
- */
- _cl5PurgeRID (obj, rid);
- } else {
- _cl5TrimFile (obj, &numToTrim);
- }
+ _cl5TrimFile (obj, &numToTrim);
obj = objset_next_obj (s_cl5Desc.dbFiles, obj);
}
@@ -3495,6 +3489,43 @@ static void _cl5DoTrimming (ReplicaId rid)
return;
}
+/*
+ * We are purging a changelog after a cleanAllRUV task. Find the specific
+ * changelog for the backend that is being cleaned, and purge all the records
+ * with the cleaned rid.
+ */
+static void _cl5DoPurging (Replica *replica)
+{
+ ReplicaId rid = replica_get_rid(replica);
+ const Slapi_DN *sdn = replica_get_root(replica);
+ const char *replName = replica_get_name(replica);
+ char *replGen = replica_get_generation(replica);
+ char *fileName;
+ Object *obj;
+
+ PR_Lock (s_cl5Desc.dbTrim.lock);
+ fileName = _cl5MakeFileName (replName, replGen);
+ obj = objset_find(s_cl5Desc.dbFiles, _cl5CompareDBFile, fileName);
+ if (obj) {
+ /* We found our changelog, now purge it */
+ _cl5PurgeRID (obj, rid);
+ object_release (obj);
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
+ "Purged rid (%d) from suffix (%s)\n",
+ rid, slapi_sdn_get_dn(sdn));
+ } else {
+ slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
+ "Purge rid (%d) failed to find changelog file (%s) for suffix (%s)\n",
+ rid, fileName, slapi_sdn_get_dn(sdn));
+ }
+ PR_Unlock (s_cl5Desc.dbTrim.lock);
+
+ slapi_ch_free_string(&replGen);
+ slapi_ch_free_string(&fileName);
+
+ return;
+}
+
/* clear free page files to reduce changelog */
static void
_cl5CompactDBs()
@@ -4072,23 +4103,25 @@ static PRBool _cl5CanTrim (time_t time, long *numToTrim)
{
*numToTrim = 0;
- if (s_cl5Desc.dbTrim.maxAge == 0 && s_cl5Desc.dbTrim.maxEntries == 0)
+ if (s_cl5Desc.dbTrim.maxAge == 0 && s_cl5Desc.dbTrim.maxEntries == 0) {
return PR_FALSE;
-
+ }
if (s_cl5Desc.dbTrim.maxAge == 0)
{
*numToTrim = cl5GetOperationCount (NULL) - s_cl5Desc.dbTrim.maxEntries;
return ( *numToTrim > 0 );
}
- if (s_cl5Desc.dbTrim.maxEntries > 0 &&
- (*numToTrim = cl5GetOperationCount (NULL) - s_cl5Desc.dbTrim.maxEntries) > 0)
- return PR_TRUE;
+ if (s_cl5Desc.dbTrim.maxEntries > 0 &&
+ (*numToTrim = cl5GetOperationCount (NULL) - s_cl5Desc.dbTrim.maxEntries) > 0) {
+ return PR_TRUE;
+ }
- if (time)
+ if (time) {
return (current_time () - time > s_cl5Desc.dbTrim.maxAge);
- else
- return PR_TRUE;
+ } else {
+ return PR_TRUE;
+ }
}
static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
@@ -4101,7 +4134,6 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
char *pos;
char *agmt_name;
-
PR_ASSERT (replGen && obj);
file = (CL5DBFile*)object_get_data (obj);
@@ -4109,13 +4141,12 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
agmt_name = get_thread_private_agmtname();
- if (purge) /* read purge vector entry */
- key.data = _cl5GetHelperEntryKey (PURGE_RUV_TIME, csnStr);
- else /* read upper bound vector */
- key.data = _cl5GetHelperEntryKey (MAX_RUV_TIME, csnStr);
-
+ if (purge) { /* read purge vector entry */
+ key.data = _cl5GetHelperEntryKey (PURGE_RUV_TIME, csnStr);
+ } else { /* read upper bound vector */
+ key.data = _cl5GetHelperEntryKey (MAX_RUV_TIME, csnStr);
+ }
key.size = CSN_STRSIZE;
-
data.flags = DB_DBT_MALLOC;
rc = file->db->get(file->db, NULL/*txn*/, &key, &data, 0);
@@ -4125,13 +4156,13 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
rc = _cl5ReadBervals (&vals, &pos, data.size);
slapi_ch_free (&(data.data));
if (rc != CL5_SUCCESS)
- goto done;
+ goto done;
- if (purge)
+ if (purge) {
rc = ruv_init_from_bervals(vals, &file->purgeRUV);
- else
+ } else {
rc = ruv_init_from_bervals(vals, &file->maxRUV);
-
+ }
if (rc != RUV_SUCCESS)
{
slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
@@ -4139,7 +4170,7 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
"RUV error %d\n", agmt_name, purge? "purge" : "upper bound", rc);
rc = CL5_RUV_ERROR;
- goto done;
+ goto done;
}
/* delete the entry; it is re-added when file
@@ -4151,7 +4182,7 @@ static int _cl5ReadRUV (const char *replGen, Object *obj, PRBool purge)
case DB_NOTFOUND: /* RUV is lost - need to construct */
rc = _cl5ConstructRUV (replGen, obj, purge);
- goto done;
+ goto done;
default: slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
"%s: _cl5ReadRUV: failed to get purge RUV; "
@@ -6946,12 +6977,14 @@ cl5CleanRUV(ReplicaId rid){
slapi_rwlock_unlock (s_cl5Desc.stLock);
}
-void trigger_cl_purging(ReplicaId rid){
+/*
+ * Create a thread to purge a changelog of cleaned RIDs
+ */
+void trigger_cl_purging(Replica *replica){
PRThread *trim_tid = NULL;
- slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl, "trigger_cl_purging: rid (%d)\n",(int)rid);
trim_tid = PR_CreateThread(PR_USER_THREAD, (VFP)(void*)trigger_cl_purging_thread,
- (void *)&rid, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ (void *)replica, PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
PR_UNJOINABLE_THREAD, DEFAULT_THREAD_STACKSIZE);
if (NULL == trim_tid){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
@@ -6963,19 +6996,32 @@ void trigger_cl_purging(ReplicaId rid){
}
}
+/*
+ * Purge a changelog of entries that originated from a particular replica(rid)
+ */
void
trigger_cl_purging_thread(void *arg){
- ReplicaId rid = *(ReplicaId *)arg;
+ Replica *replica = (Replica *)arg;
- /* make sure we have a change log, and we aren't closing it */
- if(s_cl5Desc.dbState == CL5_STATE_CLOSED || s_cl5Desc.dbState == CL5_STATE_CLOSING){
+ /* Make sure we have a change log, and we aren't closing it */
+ if (replica == NULL ||
+ s_cl5Desc.dbState == CL5_STATE_CLOSED ||
+ s_cl5Desc.dbState == CL5_STATE_CLOSING) {
return;
}
+
+ /* Bump the changelog thread count */
if (CL5_SUCCESS != _cl5AddThread()) {
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name_cl,
- "trigger_cl_purging: failed to increment thread count "
+ "trigger_cl_purging: Abort - failed to increment thread count "
"NSPR error - %d\n", PR_GetError ());
+ return;
}
- _cl5DoTrimming(rid);
+
+ /* Purge the changelog */
+ _cl5DoPurging(replica);
_cl5RemoveThread();
+ slapi_log_error(SLAPI_LOG_REPL, repl_plugin_name_cl,
+ "trigger_cl_purging: purged changelog for (%s) rid (%d)\n",
+ slapi_sdn_get_dn(replica_get_root(replica)), replica_get_rid(replica));
}
diff --git a/ldap/servers/plugins/replication/cl5_api.h b/ldap/servers/plugins/replication/cl5_api.h
index 4c3b8e8..1a1c2f5 100644
--- a/ldap/servers/plugins/replication/cl5_api.h
+++ b/ldap/servers/plugins/replication/cl5_api.h
@@ -467,6 +467,6 @@ int cl5WriteRUV();
int cl5DeleteRUV();
void cl5CleanRUV(ReplicaId rid);
void cl5NotifyCleanup(int rid);
-void trigger_cl_purging(ReplicaId rid);
+void trigger_cl_purging(Replica *replica);
#endif
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 59d3374..011e4ca 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -1467,7 +1467,7 @@ replica_execute_cleanruv_task (Object *r, ReplicaId rid, char *returntext /* not
/*
* Now purge the changelog
*/
- trigger_cl_purging(rid);
+ trigger_cl_purging(replica);
if (rc != RUV_SUCCESS){
slapi_log_error(SLAPI_LOG_FATAL, repl_plugin_name, "cleanruv_task: task failed(%d)\n",rc);
7 years, 8 months
rpm.mk
by Mark Reynolds
rpm.mk | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
New commits:
commit f2ecfccf4e73d4399b34224cfd1e266e7d683293
Author: Viktor Ashirov <vashirov(a)redhat.com>
Date: Tue Aug 23 16:44:20 2016 +0200
Ticket 48965 - Fix building rpms using rpm.mk
Description: building rpms using rpm.mk was broken due to changes in
spec file.
Fix description: fix rpm.mk by introducing different variables for rpm
and tar files.
https://fedorahosted.org/389/ticket/48965
Reviewed by: mreynolds(a)redhat.com (Thanks!)
Signed-off-by: Mark Reynolds <mreynolds(a)redhat.com>
diff --git a/rpm.mk b/rpm.mk
index 19a85d7..8171c61 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -1,10 +1,13 @@
+PWD ?= $(shell pwd)
RPMBUILD ?= $(PWD)/rpmbuild
RPM_VERSION ?= $(shell $(PWD)/rpm/rpmverrel.sh version)
RPM_RELEASE ?= $(shell $(PWD)/rpm/rpmverrel.sh release)
VERSION_PREREL ?= $(shell $(PWD)/rpm/rpmverrel.sh prerel)
+RPM_VERSION_PREREL ?= $(shell $(PWD)/rpm/rpmverrel.sh prerel | sed -e 's/\./-/')
PACKAGE = 389-ds-base
-RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)
-TARBALL = $(RPM_NAME_VERSION).tar.bz2
+RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(RPM_VERSION_PREREL)
+NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)$(VERSION_PREREL)
+TARBALL = $(NAME_VERSION).tar.bz2
NUNC_STANS_URL ?= $(shell rpmspec -P -D 'use_nunc_stans 1' $(RPMBUILD)/SPECS/389-ds-base.spec | awk '/^Source3:/ {print $$2}')
NUNC_STANS_TARBALL ?= $(shell basename "$(NUNC_STANS_URL)")
NUNC_STANS_ON = 1
@@ -15,13 +18,13 @@ clean:
rm -rf rpmbuild
local-archive:
- -mkdir -p dist/$(RPM_NAME_VERSION)
- rsync -a --exclude=dist --exclude=.git --exclude=rpmbuild . dist/$(RPM_NAME_VERSION)
+ -mkdir -p dist/$(NAME_VERSION)
+ rsync -a --exclude=dist --exclude=.git --exclude=rpmbuild . dist/$(NAME_VERSION)
tarballs: local-archive
-mkdir -p dist/sources
- cd dist; tar cfj sources/$(TARBALL) $(RPM_NAME_VERSION)
- rm -rf dist/$(RPM_NAME_VERSION)
+ cd dist; tar cfj sources/$(TARBALL) $(NAME_VERSION)
+ rm -rf dist/$(NAME_VERSION)
cd dist/sources ; \
if [ $(NUNC_STANS_ON) -eq 1 ]; then \
wget $(NUNC_STANS_URL) ; \
@@ -56,28 +59,28 @@ rpmbuildprep:
srpms: rpmroot srpmdistdir tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
- cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
+ cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
patch_srpms: rpmroot srpmdistdir tarballs rpmbuildprep
cp rpm/*.patch $(RPMBUILD)/SOURCES/
rpm/add_patches.sh rpm $(RPMBUILD)/SPECS/$(PACKAGE).spec
rpmbuild --define "_topdir $(RPMBUILD)" -bs $(RPMBUILD)/SPECS/$(PACKAGE).spec
- cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
+ cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
rpms: rpmroot srpmdistdir rpmdistdir tarballs rpmbuildprep
rpmbuild --define "_topdir $(RPMBUILD)" -ba $(RPMBUILD)/SPECS/$(PACKAGE).spec
- cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
+ cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)*.rpm dist/rpms/
+ cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)*.rpm dist/rpms/
+ cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
patch_rpms: rpmroot srpmdistdir rpmdistdir tarballs rpmbuildprep
cp rpm/*.patch $(RPMBUILD)/SOURCES/
rpm/add_patches.sh rpm $(RPMBUILD)/SPECS/$(PACKAGE).spec
rpmbuild --define "_topdir $(RPMBUILD)" -ba $(RPMBUILD)/SPECS/$(PACKAGE).spec
- cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)-*.rpm dist/rpms/
- cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)-*.src.rpm dist/srpms/
+ cp $(RPMBUILD)/RPMS/*/$(RPM_NAME_VERSION)*.rpm dist/rpms/
+ cp $(RPMBUILD)/RPMS/*/$(PACKAGE)-*-$(RPM_VERSION)*.rpm dist/rpms/
+ cp $(RPMBUILD)/SRPMS/$(RPM_NAME_VERSION)*.src.rpm dist/srpms/
rm -rf $(RPMBUILD)
7 years, 8 months
Branch '389-ds-base-1.2.11' - ldap/servers
by thierry bordaz
ldap/servers/slapd/back-ldbm/import-threads.c | 5 +++++
ldap/servers/slapd/back-ldbm/import.c | 15 ++++++++++++++-
2 files changed, 19 insertions(+), 1 deletion(-)
New commits:
commit 855c34e46373cdb9747a391acf1099e2e3df696f
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Aug 19 14:32:47 2016 +0200
Ticket 48960 Crash in import_wait_for_space_in_fifo().
Bug Description:
At online total import on a consumer, the total import startup
function allocates a fifo queue and monitors the overall import.
This queue contains the entries later received during import.
When monitoring ends (import complete or error) it frees
the queue.
Under error conditions, there is a possibility that monitoring
ends while entries are still received (bulk_import_queue).
So there is a risk that the received entries will be added into
the queue at the same time the monitoring thread frees the queue
Fix Description:
The thread storing the entries into the queue runs while
holding the job->wire_lock.
To prevent the monitoring thread from freeing the queue under
bulk_import_queue, make sure to acquire job->wire_lock
before calling import_free_job
https://fedorahosted.org/389/ticket/48960
Reviewed by: Mark Reynolds (thanks Mark !)
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 954abf2..df19424 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -2845,6 +2845,11 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
return -1;
}
+ /* The import is aborted, just ignore that entry */
+ if(job->flags & FLAG_ABORT) {
+ return -1;
+ }
+
PR_Lock(job->wire_lock);
/* Let's do this inside the lock !*/
id = job->lead_ID + 1;
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index e3966ff..81c3c15 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -386,8 +386,21 @@ void import_free_job(ImportJob *job)
ldbm_back_free_incl_excl(job->include_subtrees, job->exclude_subtrees);
charray_free(job->input_filenames);
- if (job->fifo.size)
+ if (job->fifo.size) {
+ /* bulk_import_queue is running, while holding the job lock.
+ * bulk_import_queue is using the fifo queue.
+ * To avoid freeing fifo queue under bulk_import_queue use
+ * job lock to synchronize
+ */
+ if (job->wire_lock)
+ PR_Lock(job->wire_lock);
+
import_fifo_destroy(job);
+
+ if (job->wire_lock)
+ PR_Unlock(job->wire_lock);
+ }
+
if (NULL != job->uuid_namespace)
slapi_ch_free((void **)&job->uuid_namespace);
if (job->wire_lock)
7 years, 8 months
Branch '389-ds-base-1.3.4' - ldap/servers
by thierry bordaz
ldap/servers/slapd/back-ldbm/import-threads.c | 5 +++++
ldap/servers/slapd/back-ldbm/import.c | 15 ++++++++++++++-
2 files changed, 19 insertions(+), 1 deletion(-)
New commits:
commit c7fb671f4927d597afdd7d10adc44b35bfa88393
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Aug 19 14:32:47 2016 +0200
Ticket 48960 Crash in import_wait_for_space_in_fifo().
Bug Description:
At online total import on a consumer, the total import startup
function allocates a fifo queue and monitors the overall import.
This queue contains the entries later received during import.
When monitoring ends (import complete or error) it frees
the queue.
Under error conditions, there is a possibility that monitoring
ends while entries are still received (bulk_import_queue).
So there is a risk that the received entries will be added into
the queue at the same time the monitoring thread frees the queue
Fix Description:
The thread storing the entries into the queue runs while
holding the job->wire_lock.
To prevent the monitoring thread from freeing the queue under
bulk_import_queue, make sure to acquire job->wire_lock
before calling import_free_job
https://fedorahosted.org/389/ticket/48960
Reviewed by: Mark Reynolds (thanks Mark !)
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index bae76c9..18a366a 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -3199,6 +3199,11 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
return -1;
}
+ /* The import is aborted, just ignore that entry */
+ if(job->flags & FLAG_ABORT) {
+ return -1;
+ }
+
PR_Lock(job->wire_lock);
/* Let's do this inside the lock !*/
id = job->lead_ID + 1;
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 08e31da..a59f980 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -369,8 +369,21 @@ void import_free_job(ImportJob *job)
ldbm_back_free_incl_excl(job->include_subtrees, job->exclude_subtrees);
charray_free(job->input_filenames);
- if (job->fifo.size)
+ if (job->fifo.size) {
+ /* bulk_import_queue is running, while holding the job lock.
+ * bulk_import_queue is using the fifo queue.
+ * To avoid freeing fifo queue under bulk_import_queue use
+ * job lock to synchronize
+ */
+ if (job->wire_lock)
+ PR_Lock(job->wire_lock);
+
import_fifo_destroy(job);
+
+ if (job->wire_lock)
+ PR_Unlock(job->wire_lock);
+ }
+
if (NULL != job->uuid_namespace)
slapi_ch_free((void **)&job->uuid_namespace);
if (job->wire_lock)
7 years, 8 months
configure configure.ac Makefile.am Makefile.in rpm/389-ds-base.spec.in rpm.mk rpm/rpmverrel.sh VERSION.sh
by Noriko Hosoi
Makefile.am | 2 +-
Makefile.in | 3 ++-
VERSION.sh | 14 ++++++--------
configure | 1 +
configure.ac | 1 +
rpm.mk | 2 ++
rpm/389-ds-base.spec.in | 2 +-
rpm/rpmverrel.sh | 2 ++
8 files changed, 16 insertions(+), 11 deletions(-)
New commits:
commit 9daee2890e2d01ed2d9c11f2bf9bab3474d55f55
Author: Viktor Ashirov <vashirov(a)redhat.com>
Date: Tue Aug 16 20:54:09 2016 +0200
Ticket 48965 - Fix generation of the pre-release version
Description: Previously, when building DS from git checkout, pre-release
version didn't contain git commit id, though supporting code existed.
Fix description: Generate pre-release version according to Fedora Naming
Guidelines for snapshot packages [1]:
- change date format to YYYYMMDD
- remove '-' since it's not allowed
https://fedorahosted.org/389/ticket/48965
Reviewed by: mreynolds(a)redhat.com (Thanks!)
[1] https://fedoraproject.org/wiki/Packaging:Naming?rd=Packaging:NamingGuidelines
diff --git a/Makefile.am b/Makefile.am
index 3e1bf47..a2f834e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1999,7 +1999,7 @@ rpmbrprep: dist-bzip2 rpmroot
cp $(distdir).tar.bz2 $(RPMBUILD)/SOURCES
cp $(srcdir)/rpm/389-ds-base-git.sh $(RPMBUILD)/SOURCES
cp $(srcdir)/rpm/389-ds-base-devel.README $(RPMBUILD)/SOURCES
- sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec
+ sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__VERSION_PREREL__/$(VERSION_PREREL)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec
# Requires rpmdevtools. Consider making this a dependancy of rpms.
rpmsources: rpmbrprep
diff --git a/Makefile.in b/Makefile.in
index 6788fe1..17b8d73 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1368,6 +1368,7 @@ SHELL = @SHELL@
STRIP = @STRIP@
THREADLIB = @THREADLIB@
VERSION = @VERSION@
+VERSION_PREREL = @VERSION_PREREL@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
@@ -10858,7 +10859,7 @@ rpmbrprep: dist-bzip2 rpmroot
cp $(distdir).tar.bz2 $(RPMBUILD)/SOURCES
cp $(srcdir)/rpm/389-ds-base-git.sh $(RPMBUILD)/SOURCES
cp $(srcdir)/rpm/389-ds-base-devel.README $(RPMBUILD)/SOURCES
- sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec
+ sed -e "s/__VERSION__/$(RPM_VERSION)/" -e "s/__RELEASE__/$(RPM_RELEASE)/" -e "s/__VERSION_PREREL__/$(VERSION_PREREL)/" -e "s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/" -e "s/__ASAN_ON__/$(ASAN_ON)/" < $(abs_builddir)/rpm/389-ds-base.spec > $(RPMBUILD)/SPECS/389-ds-base.spec
# Requires rpmdevtools. Consider making this a dependancy of rpms.
rpmsources: rpmbrprep
diff --git a/VERSION.sh b/VERSION.sh
index f831270..f83c1ec 100644
--- a/VERSION.sh
+++ b/VERSION.sh
@@ -13,8 +13,7 @@ VERSION_MINOR=3
VERSION_MAINT=5.13
# NOTE: VERSION_PREREL is automatically set for builds made out of a git tree
VERSION_PREREL=
-VERSION_DATE=`date -u +%Y%m%d%H%M%S`
-GIT_CHECKOUT=`git log -1 >/dev/null 2>&1`
+VERSION_DATE=$(date -u +%Y%m%d)
# Set the version and release numbers for local developer RPM builds. We
# set these here because we do not want the git commit hash in the RPM
@@ -23,17 +22,16 @@ GIT_CHECKOUT=`git log -1 >/dev/null 2>&1`
RPM_RELEASE=${VERSION_DATE}
RPM_VERSION=${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_MAINT}
-if test -n "$GIT_CHECKOUT"; then
-# if the source is from a git repo, put the last commit
-# in the version
+if $(git -C "$srcdir" rev-parse --is-inside-work-tree > /dev/null 2>&1); then
+# Check if the source is from a git repo
# if this is not a git repo, git log will say
# fatal: Not a git repository
# to stderr and stdout will be empty
# this tells git to print the short commit hash from the last commit
- COMMIT=`cd $srcdir ; git log -1 --pretty=format:%h 2> /dev/null`
+ COMMIT=$(git -C "$srcdir" log -1 --pretty=format:%h 2> /dev/null)
if test -n "$COMMIT" ; then
- VERSION_PREREL=.${VERSION_DATE}-git$COMMIT
- RPM_RELEASE=$RPM_RELEASE-git$COMMIT
+ VERSION_PREREL=.${VERSION_DATE}git$COMMIT
+ RPM_RELEASE=${RPM_RELEASE}git$COMMIT
fi
fi
diff --git a/configure b/configure
index 3f26631..c2a4af6 100755
--- a/configure
+++ b/configure
@@ -840,6 +840,7 @@ MAINT
MAINTAINER_MODE_FALSE
MAINTAINER_MODE_TRUE
CONSOLE_VERSION
+VERSION_PREREL
RPM_RELEASE
RPM_VERSION
AM_BACKSLASH
diff --git a/configure.ac b/configure.ac
index a7f0bbf..c18d2d9 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,6 +19,7 @@ AC_DEFINE_UNQUOTED([VERSION], "$VERSION", [package version])
AC_DEFINE_UNQUOTED([PACKAGE], "$PACKAGE", [package tar name])
AC_SUBST([RPM_VERSION])
AC_SUBST([RPM_RELEASE])
+AC_SUBST([VERSION_PREREL])
AC_SUBST([CONSOLE_VERSION])
AM_MAINTAINER_MODE
AC_CANONICAL_HOST
diff --git a/rpm.mk b/rpm.mk
index a880e62..19a85d7 100644
--- a/rpm.mk
+++ b/rpm.mk
@@ -1,6 +1,7 @@
RPMBUILD ?= $(PWD)/rpmbuild
RPM_VERSION ?= $(shell $(PWD)/rpm/rpmverrel.sh version)
RPM_RELEASE ?= $(shell $(PWD)/rpm/rpmverrel.sh release)
+VERSION_PREREL ?= $(shell $(PWD)/rpm/rpmverrel.sh prerel)
PACKAGE = 389-ds-base
RPM_NAME_VERSION = $(PACKAGE)-$(RPM_VERSION)
TARBALL = $(RPM_NAME_VERSION).tar.bz2
@@ -34,6 +35,7 @@ rpmroot:
mkdir -p $(RPMBUILD)/SPECS
mkdir -p $(RPMBUILD)/SRPMS
sed -e s/__VERSION__/$(RPM_VERSION)/ -e s/__RELEASE__/$(RPM_RELEASE)/ \
+ -e s/__VERSION_PREREL__/$(VERSION_PREREL)/ \
-e s/__NUNC_STANS_ON__/$(NUNC_STANS_ON)/ \
-e s/__ASAN_ON__/$(ASAN_ON)/ \
rpm/$(PACKAGE).spec.in > $(RPMBUILD)/SPECS/$(PACKAGE).spec
diff --git a/rpm/389-ds-base.spec.in b/rpm/389-ds-base.spec.in
index 33bf470..e5d824e 100644
--- a/rpm/389-ds-base.spec.in
+++ b/rpm/389-ds-base.spec.in
@@ -6,7 +6,7 @@
# for a pre-release, define the prerel field e.g. .a1 .rc2 - comment out for official release
# also remove the space between % and global - this space is needed because
# fedpkg verrel stupidly ignores comment lines
-#% global prerel .rc3
+%global prerel __VERSION_PREREL__%{nil}
# also need the relprefix field for a pre-release e.g. .0 - also comment out for official release
#% global relprefix 0.
diff --git a/rpm/rpmverrel.sh b/rpm/rpmverrel.sh
index 06e97c7..0b034d4 100755
--- a/rpm/rpmverrel.sh
+++ b/rpm/rpmverrel.sh
@@ -12,4 +12,6 @@ if [ "$1" = "version" ]; then
echo $RPM_VERSION
elif [ "$1" = "release" ]; then
echo $RPM_RELEASE
+elif [ "$1" = "prerel" ]; then
+ echo $VERSION_PREREL
fi
7 years, 8 months
man/man8
by Noriko Hosoi
man/man8/ns-accountstatus.pl.8 | 6 ------
1 file changed, 6 deletions(-)
New commits:
commit 370a70c431d5f235d4371e4cb080215ac4500b6c
Author: kamlesh <kchaudha(a)redhat.com>
Date: Mon Aug 22 14:20:27 2016 +0530
Bugzilla: 1368956 man page of ns-accountstatus.pl shows redundant entries for -p port option
Bug Description:
Description of problem:
man page of ns-accountstatus.pl contains redundant entries for -p option
-p port
Port number of the Directory Server.
-p port
Port number of the Directory Server.
-p port
Port number of the Directory Server.
Fix Description:
Delete the redundant entries
Platforms tested: RHEL7.3
Flag Day: no
Doc impact: yes
Signed-off-by: kamlesh <kchaudha(a)redhat.com>
diff --git a/man/man8/ns-accountstatus.pl.8 b/man/man8/ns-accountstatus.pl.8
index be3a8e9..9ffc4d3 100644
--- a/man/man8/ns-accountstatus.pl.8
+++ b/man/man8/ns-accountstatus.pl.8
@@ -57,12 +57,6 @@ Host name of the Directory Server.
.B \fB\-p\fR \fIport\fR
Port number of the Directory Server.
.TP
-.B \fB\-p\fR \fIport\fR
-Port number of the Directory Server.
-.TP
-.B \fB\-p\fR \fIport\fR
-Port number of the Directory Server.
-.TP
.B \fB\-b\fR \fIbasedn\fR
The suffix DN from which to search from.
.TP
7 years, 8 months
ldap/servers
by thierry bordaz
ldap/servers/slapd/back-ldbm/import-threads.c | 5 +++++
ldap/servers/slapd/back-ldbm/import.c | 15 ++++++++++++++-
2 files changed, 19 insertions(+), 1 deletion(-)
New commits:
commit 776d94214295cc95f9a906d4bb6268397a6bf091
Author: Thierry Bordaz <tbordaz(a)redhat.com>
Date: Fri Aug 19 14:32:47 2016 +0200
Ticket 48960 Crash in import_wait_for_space_in_fifo().
Bug Description:
At online total import on a consumer, the total import startup
function allocates a fifo queue and monitor the overall import.
This queue contains the entries later received during import.
When monitoring ends (import complete or error) it frees
the queue.
Under error condition, there is a possibility that monitoring
ends while entries are still received (bulk_import_queue).
So there is a risk that the received entries will be added into
the queue at the same time the monitoring thread frees the queue
Fix Description:
The thread storing the entries into the queue runs while
holding the job->wire_lock.
To prevent the monitoring thread from freeing the queue under
bulk_import_queue, make sure to acquire job->wire_lock
before calling import_free_job
https://fedorahosted.org/389/ticket/48960
Reviewed by: Mark Reynolds (thanks Mark !)
Platforms tested: F23
Flag Day: no
Doc impact: no
diff --git a/ldap/servers/slapd/back-ldbm/import-threads.c b/ldap/servers/slapd/back-ldbm/import-threads.c
index 1759478..c3fca2b 100644
--- a/ldap/servers/slapd/back-ldbm/import-threads.c
+++ b/ldap/servers/slapd/back-ldbm/import-threads.c
@@ -3201,6 +3201,11 @@ static int bulk_import_queue(ImportJob *job, Slapi_Entry *entry)
return -1;
}
+ /* The import is aborted, just ignore that entry */
+ if(job->flags & FLAG_ABORT) {
+ return -1;
+ }
+
PR_Lock(job->wire_lock);
/* Let's do this inside the lock !*/
id = job->lead_ID + 1;
diff --git a/ldap/servers/slapd/back-ldbm/import.c b/ldap/servers/slapd/back-ldbm/import.c
index 9b6ae0d..78aefbf 100644
--- a/ldap/servers/slapd/back-ldbm/import.c
+++ b/ldap/servers/slapd/back-ldbm/import.c
@@ -408,8 +408,21 @@ void import_free_job(ImportJob *job)
ldbm_back_free_incl_excl(job->include_subtrees, job->exclude_subtrees);
charray_free(job->input_filenames);
- if (job->fifo.size)
+ if (job->fifo.size) {
+ /* bulk_import_queue is running, while holding the job lock.
+ * bulk_import_queue is using the fifo queue.
+ * To avoid freeing fifo queue under bulk_import_queue use
+ * job lock to synchronize
+ */
+ if (job->wire_lock)
+ PR_Lock(job->wire_lock);
+
import_fifo_destroy(job);
+
+ if (job->wire_lock)
+ PR_Unlock(job->wire_lock);
+ }
+
if (NULL != job->uuid_namespace)
slapi_ch_free((void **)&job->uuid_namespace);
if (job->wire_lock)
7 years, 8 months
dirsrvtests/tests
by Mark Reynolds
dirsrvtests/tests/suites/replication/cleanallruv_test.py | 4 +
dirsrvtests/tests/tickets/ticket47536_test.py | 1
dirsrvtests/tests/tickets/ticket47819_test.py | 8 ++-
dirsrvtests/tests/tickets/ticket47838_test.py | 8 +--
dirsrvtests/tests/tickets/ticket48194_test.py | 38 +++++++++++----
dirsrvtests/tests/tickets/ticket48784_test.py | 4 +
6 files changed, 47 insertions(+), 16 deletions(-)
New commits:
commit bed1bd35b9c37ab2909c1fab8bd2d95b8291863c
Author: Mark Reynolds <mreynolds(a)redhat.com>
Date: Fri Aug 19 11:21:24 2016 -0400
Ticket 48832 - Fix more CI test failures
Description: More timing issues needed to be addressed by adding sleeps
to certain tests.
https://fedorahosted.org/389/ticket/48832
Reviewed by: nhosoi(Thanks!)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index e1518bd..afed323 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -584,6 +584,7 @@ def restore_master4(topology):
topology.master4.start(timeout=30)
topology.master1.agreement.init(SUFFIX, HOST_MASTER_4, PORT_MASTER_4)
topology.master1.waitForReplInit(topology.m1_m4_agmt)
+ time.sleep(5)
#
# Test Replication is working
@@ -594,6 +595,7 @@ def restore_master4(topology):
else:
log.fatal('restore_master4: Replication is not working from m1 -> m2.')
assert False
+ time.sleep(1)
# Check replication is working from master 1 to master 4...
if topology.master1.testReplication(DEFAULT_SUFFIX, topology.master4):
@@ -601,6 +603,7 @@ def restore_master4(topology):
else:
log.fatal('restore_master4: Replication is not working from m1 -> m4.')
assert False
+ time.sleep(1)
# Check replication is working from master 4 to master1...
if topology.master4.testReplication(DEFAULT_SUFFIX, topology.master1):
@@ -608,6 +611,7 @@ def restore_master4(topology):
else:
log.fatal('restore_master4: Replication is not working from m4 -> 1.')
assert False
+ time.sleep(5)
log.info('Master 4 has been successfully restored.')
diff --git a/dirsrvtests/tests/tickets/ticket47536_test.py b/dirsrvtests/tests/tickets/ticket47536_test.py
index fe47ab7..cf20746 100644
--- a/dirsrvtests/tests/tickets/ticket47536_test.py
+++ b/dirsrvtests/tests/tickets/ticket47536_test.py
@@ -269,6 +269,7 @@ def create_keys_certs(topology):
noisewdfd = open(noisefile, "w")
noisewdfd.write(noise.readline())
noisewdfd.close()
+ time.sleep(1)
cmdline = ['certutil', '-N', '-d', m1confdir, '-f', pwdfile]
log.info("##### Create key3.db and cert8.db database (master1): %s" % cmdline)
diff --git a/dirsrvtests/tests/tickets/ticket47819_test.py b/dirsrvtests/tests/tickets/ticket47819_test.py
index d127fc0..2168fe5 100644
--- a/dirsrvtests/tests/tickets/ticket47819_test.py
+++ b/dirsrvtests/tests/tickets/ticket47819_test.py
@@ -139,17 +139,20 @@ def test_ticket47819(topology):
exportTask.exportLDIF(DEFAULT_SUFFIX, None, ldif_file, args)
except ValueError:
assert False
+ time.sleep(1)
# open the ldif file, get the lines, then rewrite the file
ldif = open(ldif_file, "r")
lines = ldif.readlines()
ldif.close()
+ time.sleep(1)
ldif = open(ldif_file, "w")
for line in lines:
if not line.lower().startswith('nstombstonecsn'):
ldif.write(line)
ldif.close()
+ time.sleep(1)
# import the new ldif file
log.info('Import replication LDIF file...')
@@ -161,6 +164,7 @@ def test_ticket47819(topology):
except ValueError:
os.remove(ldif_file)
assert False
+ time.sleep(1)
# Search for the tombstone again
log.info('Search for tombstone entries...')
@@ -190,6 +194,7 @@ def test_ticket47819(topology):
fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
except:
assert False
+ time.sleep(1)
# Search for tombstones with nsTombstoneCSN - better not find any
log.info('Search for tombstone entries...')
@@ -211,7 +216,6 @@ def test_ticket47819(topology):
fixupTombTask.fixupTombstones(DEFAULT_BENAME, args)
except:
assert False
-
time.sleep(1)
# Search for tombstones with nsTombstoneCSN - better find some
@@ -244,7 +248,7 @@ def test_ticket47819(topology):
# Wait for the interval to pass
log.info('Wait for tombstone purge interval to pass...')
- time.sleep(6)
+ time.sleep(10)
# Add an entry to trigger replication
log.info('Perform an update to help trigger tombstone purging...')
diff --git a/dirsrvtests/tests/tickets/ticket47838_test.py b/dirsrvtests/tests/tickets/ticket47838_test.py
index 475bec4..a9d4307 100644
--- a/dirsrvtests/tests/tickets/ticket47838_test.py
+++ b/dirsrvtests/tests/tickets/ticket47838_test.py
@@ -127,6 +127,7 @@ def test_47838_init(topology):
noisewdfd = open(noisefile, "w")
noisewdfd.write(noise.readline())
noisewdfd.close()
+ time.sleep(1)
log.info("\n######################### Create key3.db and cert8.db database ######################\n")
os.system("ls %s" % pwdfile)
@@ -137,7 +138,8 @@ def test_47838_init(topology):
os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile))
log.info("\n######################### Creating self-signed CA certificate ######################\n")
- os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile))
+ os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
+ (conf_dir, noisefile, pwdfile))
log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n")
cafile = '%s/cacert.asc' % conf_dir
@@ -161,6 +163,7 @@ def test_47838_init(topology):
pinfd = open(pinfile, "w")
pinfd.write(pintxt)
pinfd.close()
+ time.sleep(1)
log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n")
topology.standalone.simple_bind_s(DN_DM, PASSWORD)
@@ -213,7 +216,6 @@ def test_47838_run_0(topology):
time.sleep(5)
log.info("\n######################### Restarting the server ######################\n")
topology.standalone.restart(timeout=120)
-
enabled = os.popen('egrep "SSL alert:" %s | egrep \": enabled\" | wc -l' % topology.standalone.errlog)
disabled = os.popen('egrep "SSL alert:" %s | egrep \": disabled\" | wc -l' % topology.standalone.errlog)
ecount = int(enabled.readline().rstrip())
@@ -250,7 +252,7 @@ def test_47838_run_1(topology):
topology.standalone.simple_bind_s(DN_DM, PASSWORD)
topology.standalone.modify_s(CONFIG_DN, [(ldap.MOD_REPLACE, 'nsslapd-errorlog-level', '64')])
- time.sleep(5)
+ time.sleep(1)
# Make sure allowWeakCipher is not set.
topology.standalone.modify_s(ENCRYPTION_DN, [(ldap.MOD_DELETE, 'allowWeakCipher', None)])
diff --git a/dirsrvtests/tests/tickets/ticket48194_test.py b/dirsrvtests/tests/tickets/ticket48194_test.py
index 8e04d8b..92f4371 100644
--- a/dirsrvtests/tests/tickets/ticket48194_test.py
+++ b/dirsrvtests/tests/tickets/ticket48194_test.py
@@ -33,6 +33,7 @@ plus_all_dcount = 0
plus_all_ecount_noweak = 0
plus_all_dcount_noweak = 0
+
class TopologyStandalone(object):
def __init__(self, standalone):
standalone.open()
@@ -109,6 +110,7 @@ def test_init(topology):
noisewdfd = open(noisefile, "w")
noisewdfd.write(noise.readline())
noisewdfd.close()
+ time.sleep(1)
log.info("\n######################### Create key3.db and cert8.db database ######################\n")
os.system("ls %s" % pwdfile)
@@ -119,7 +121,8 @@ def test_init(topology):
os.system('certutil -G -d %s -z %s -f %s' % (conf_dir, noisefile, pwdfile))
log.info("\n######################### Creating self-signed CA certificate ######################\n")
- os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' % (conf_dir, noisefile, pwdfile))
+ os.system('( echo y ; echo ; echo y ) | certutil -S -n "CA certificate" -s "cn=CAcert" -x -t "CT,," -m 1000 -v 120 -d %s -z %s -f %s -2' %
+ (conf_dir, noisefile, pwdfile))
log.info("\n######################### Exporting the CA certificate to cacert.asc ######################\n")
cafile = '%s/cacert.asc' % conf_dir
@@ -135,7 +138,8 @@ def test_init(topology):
log.info("\n######################### Generate the server certificate ######################\n")
ohostname = os.popen('hostname --fqdn', "r")
myhostname = ohostname.readline()
- os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' % (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
+ os.system('certutil -S -n "%s" -s "cn=%s,ou=389 Directory Server" -c "CA certificate" -t "u,u,u" -m 1001 -v 120 -d %s -z %s -f %s' %
+ (SERVERCERT, myhostname.rstrip(), conf_dir, noisefile, pwdfile))
log.info("\n######################### create the pin file ######################\n")
pinfile = '%s/pin.txt' % (conf_dir)
@@ -143,6 +147,7 @@ def test_init(topology):
pinfd = open(pinfile, "w")
pinfd.write(pintxt)
pinfd.close()
+ time.sleep(1)
log.info("\n######################### enable SSL in the directory server with all ciphers ######################\n")
topology.standalone.simple_bind_s(DN_DM, PASSWORD)
@@ -162,6 +167,7 @@ def test_init(topology):
'nsSSLToken': 'internal (software)',
'nsSSLActivation': 'on'})))
+
def connectWithOpenssl(topology, cipher, expect):
"""
Connect with the given cipher
@@ -204,6 +210,7 @@ def connectWithOpenssl(topology, cipher, expect):
proc.stdin.close()
assert False
+
def test_run_0(topology):
"""
Check nsSSL3Ciphers: +all
@@ -221,6 +228,7 @@ def test_run_0(topology):
connectWithOpenssl(topology, 'RC4-SHA', True)
connectWithOpenssl(topology, 'AES256-SHA256', True)
+
def test_run_1(topology):
"""
Check nsSSL3Ciphers: +all
@@ -238,12 +246,13 @@ def test_run_1(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_0' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', True)
+
def test_run_2(topology):
"""
Check nsSSL3Ciphers: +rsa_aes_128_sha,+rsa_aes_256_sha
@@ -259,7 +268,7 @@ def test_run_2(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_1' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
@@ -267,6 +276,7 @@ def test_run_2(topology):
connectWithOpenssl(topology, 'AES128-SHA', True)
connectWithOpenssl(topology, 'AES256-SHA', True)
+
def test_run_3(topology):
"""
Check nsSSL3Ciphers: -all
@@ -288,6 +298,7 @@ def test_run_3(topology):
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', False)
+
def test_run_4(topology):
"""
Check no nsSSL3Ciphers
@@ -303,12 +314,13 @@ def test_run_4(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_3' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', True)
+
def test_run_5(topology):
"""
Check nsSSL3Ciphers: default
@@ -324,12 +336,13 @@ def test_run_5(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_4' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', True)
+
def test_run_6(topology):
"""
Check nsSSL3Ciphers: +all,-TLS_RSA_WITH_AES_256_CBC_SHA256
@@ -345,13 +358,14 @@ def test_run_6(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_5' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', False)
connectWithOpenssl(topology, 'AES128-SHA', True)
+
def test_run_7(topology):
"""
Check nsSSL3Ciphers: -all,+rsa_rc4_128_md5
@@ -367,13 +381,14 @@ def test_run_7(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_6' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', False)
connectWithOpenssl(topology, 'RC4-MD5', True)
+
def test_run_8(topology):
"""
Check nsSSL3Ciphers: default + allowWeakCipher: off
@@ -389,12 +404,13 @@ def test_run_8(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_7' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', False)
connectWithOpenssl(topology, 'AES256-SHA256', True)
+
def test_run_9(topology):
"""
Check no nsSSL3Ciphers
@@ -413,12 +429,13 @@ def test_run_9(topology):
topology.standalone.stop(timeout=10)
os.system('mv %s %s.48194_8' % (topology.standalone.errlog, topology.standalone.errlog))
os.system('touch %s' % (topology.standalone.errlog))
- time.sleep(1)
+ time.sleep(2)
topology.standalone.start(timeout=120)
connectWithOpenssl(topology, 'RC4-SHA', True)
connectWithOpenssl(topology, 'AES256-SHA256', True)
+
def test_run_10(topology):
"""
Check nsSSL3Ciphers: -TLS_RSA_WITH_NULL_MD5,+TLS_RSA_WITH_RC4_128_MD5,
@@ -449,6 +466,7 @@ def test_run_10(topology):
connectWithOpenssl(topology, 'RC4-MD5', True)
connectWithOpenssl(topology, 'AES256-SHA256', False)
+
def test_run_11(topology):
"""
Check nsSSL3Ciphers: +fortezza
diff --git a/dirsrvtests/tests/tickets/ticket48784_test.py b/dirsrvtests/tests/tickets/ticket48784_test.py
index a94415a..2898807 100644
--- a/dirsrvtests/tests/tickets/ticket48784_test.py
+++ b/dirsrvtests/tests/tickets/ticket48784_test.py
@@ -181,6 +181,7 @@ def enable_ssl(server, ldapsport, mycert):
'nsSSLToken': 'internal (software)',
'nsSSLActivation': 'on'})))
+
def doAndPrintIt(cmdline, filename):
proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if filename is None:
@@ -206,6 +207,7 @@ def doAndPrintIt(cmdline, filename):
if filename is not None:
fd.close()
+ time.sleep(1)
def create_keys_certs(topology):
@@ -245,6 +247,7 @@ def create_keys_certs(topology):
noisewdfd = open(noisefile, "w")
noisewdfd.write(noise.readline())
noisewdfd.close()
+ time.sleep(1)
cmdline = ['certutil', '-N', '-d', m1confdir, '-f', pwdfile]
log.info("##### Create key3.db and cert8.db database (master1): %s" % cmdline)
@@ -333,7 +336,6 @@ def create_keys_certs(topology):
log.info("##### restart master1")
topology.master1.restart(timeout=10)
-
log.info("\n######################### Creating SSL Keys and Certs Done ######################\n")
7 years, 8 months
ldap/servers
by William Brown
ldap/servers/slapd/auditlog.c | 15 +++++---
ldap/servers/slapd/log.c | 71 +++++++++++++++++++++++-----------------
ldap/servers/slapd/proto-slap.h | 4 +-
3 files changed, 53 insertions(+), 37 deletions(-)
New commits:
commit 5fed8021a0487c092af6038d4a7dcce1ef3fab75
Author: William Brown <firstyear(a)redhat.com>
Date: Fri Aug 19 12:49:17 2016 +1000
Ticket 48958 - Audit fail log doesn't work if audit log disabled.
Bug Description: Due to a configuration interpretation issue, when audit was
not enabled, but auditfail was with no log defined, the fail log should write to
the audit log location on failed events, but audit events should not be written.
This did not work.
Fix Description: This was because when we wrote to the audit file in the
absence of the auditfail log, the audit enabled state was checked. This adds a
check to determine what the source event was from, and to check the correct log
enabled state during the event processing.
https://fedorahosted.org/389/ticket/48958
Author: wibrown
Review by: nhosoi (Thank you!)
diff --git a/ldap/servers/slapd/auditlog.c b/ldap/servers/slapd/auditlog.c
index 0f4cc94..ec7111b 100644
--- a/ldap/servers/slapd/auditlog.c
+++ b/ldap/servers/slapd/auditlog.c
@@ -33,7 +33,7 @@ static int audit_hide_unhashed_pw = 1;
static int auditfail_hide_unhashed_pw = 1;
/* Forward Declarations */
-static void write_audit_file(int logtype, int optype, const char *dn, void *change, int flag, time_t curtime, int rc );
+static void write_audit_file(int logtype, int optype, const char *dn, void *change, int flag, time_t curtime, int rc, int sourcelog );
static const char *modrdn_changes[4];
@@ -98,7 +98,7 @@ write_audit_log_entry( Slapi_PBlock *pb )
curtime = current_time();
/* log the raw, unnormalized DN */
dn = slapi_sdn_get_udn(sdn);
- write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, LDAP_SUCCESS);
+ write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, LDAP_SUCCESS, SLAPD_AUDIT_LOG);
}
void
@@ -169,10 +169,10 @@ write_auditfail_log_entry( Slapi_PBlock *pb )
auditfail_config = config_get_auditfaillog();
if (auditfail_config == NULL || strlen(auditfail_config) == 0) {
/* If no auditfail log write to audit log */
- write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc);
+ write_audit_file(SLAPD_AUDIT_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc, SLAPD_AUDITFAIL_LOG);
} else {
/* If we have our own auditfail log path */
- write_audit_file(SLAPD_AUDITFAIL_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc);
+ write_audit_file(SLAPD_AUDITFAIL_LOG, operation_get_type(op), dn, change, flag, curtime, pbrc, SLAPD_AUDITFAIL_LOG);
}
slapi_ch_free_string(&auditfail_config);
}
@@ -181,6 +181,7 @@ write_auditfail_log_entry( Slapi_PBlock *pb )
/*
* Function: write_audit_file
* Arguments:
+ * logtype - Destination where the message will go.
* optype - type of LDAP operation being logged
* dn - distinguished name of entry being changed
* change - pointer to the actual change operation
@@ -188,6 +189,7 @@ write_auditfail_log_entry( Slapi_PBlock *pb )
* flag - only used by modrdn operations - value of deleteoldrdn flag
* curtime - the current time
* rc - The ldap result code. Used in conjunction with auditfail
+ * sourcelog - The source of the message (audit or auditfail)
* Returns: nothing
*/
static void
@@ -198,7 +200,8 @@ write_audit_file(
void *change,
int flag,
time_t curtime,
- int rc
+ int rc,
+ int sourcelog
)
{
LDAPMod **mods;
@@ -359,7 +362,7 @@ write_audit_file(
switch (logtype)
{
case SLAPD_AUDIT_LOG:
- slapd_log_audit (l->ls_buf, l->ls_len);
+ slapd_log_audit (l->ls_buf, l->ls_len, sourcelog);
break;
case SLAPD_AUDITFAIL_LOG:
slapd_log_auditfail (l->ls_buf, l->ls_len);
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index a16c395..ae8b5f8 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -1962,14 +1962,26 @@ auditfail_log_openf( char *pathname, int locked)
int
slapd_log_audit (
- char *buffer,
- int buf_len)
+ char *buffer,
+ int buf_len,
+ int sourcelog)
{
/* We use this to route audit log entries to where they need to go */
int retval = LDAP_SUCCESS;
int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
+
+ int state = 0;
+ if (sourcelog == SLAPD_AUDIT_LOG) {
+ state = loginfo.log_audit_state;
+ } else if (sourcelog == SLAPD_AUDITFAIL_LOG ) {
+ state = loginfo.log_auditfail_state;
+ } else {
+ /* How did we even get here! */
+ return 1;
+ }
+
if (lbackend & LOGGING_BACKEND_INTERNAL) {
- retval = slapd_log_audit_internal(buffer, buf_len);
+ retval = slapd_log_audit_internal(buffer, buf_len, state);
}
if (retval != LDAP_SUCCESS) {
@@ -1989,33 +2001,34 @@ slapd_log_audit (
int
slapd_log_audit_internal (
- char *buffer,
- int buf_len)
+ char *buffer,
+ int buf_len,
+ int state)
{
- if ( (loginfo.log_audit_state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL) ){
- LOG_AUDIT_LOCK_WRITE( );
- if (log__needrotation(loginfo.log_audit_fdes,
- SLAPD_AUDIT_LOG) == LOG_ROTATE) {
- if (log__open_auditlogfile(LOGFILE_NEW, 1) != LOG_SUCCESS) {
- LDAPDebug(LDAP_DEBUG_ANY,
- "LOGINFO: Unable to open audit file:%s\n",
- loginfo.log_audit_file,0,0);
- LOG_AUDIT_UNLOCK_WRITE();
- return 0;
- }
- while (loginfo.log_audit_rotationsyncclock <= loginfo.log_audit_ctime) {
- loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs);
- }
- }
- if (loginfo.log_audit_state & LOGGING_NEED_TITLE) {
- log_write_title( loginfo.log_audit_fdes);
- loginfo.log_audit_state &= ~LOGGING_NEED_TITLE;
- }
- LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0);
- LOG_AUDIT_UNLOCK_WRITE();
- return 0;
- }
- return 0;
+ if ( (state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL) ){
+ LOG_AUDIT_LOCK_WRITE( );
+ if (log__needrotation(loginfo.log_audit_fdes,
+ SLAPD_AUDIT_LOG) == LOG_ROTATE) {
+ if (log__open_auditlogfile(LOGFILE_NEW, 1) != LOG_SUCCESS) {
+ LDAPDebug(LDAP_DEBUG_ANY,
+ "LOGINFO: Unable to open audit file:%s\n",
+ loginfo.log_audit_file,0,0);
+ LOG_AUDIT_UNLOCK_WRITE();
+ return 0;
+ }
+ while (loginfo.log_audit_rotationsyncclock <= loginfo.log_audit_ctime) {
+ loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs);
+ }
+ }
+ if (state & LOGGING_NEED_TITLE) {
+ log_write_title( loginfo.log_audit_fdes);
+ state &= ~LOGGING_NEED_TITLE;
+ }
+ LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0);
+ LOG_AUDIT_UNLOCK_WRITE();
+ return 0;
+ }
+ return 0;
}
/******************************************************************************
* write in the audit fail log
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 6bc1065..1f37010 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -766,8 +766,8 @@ int slapi_log_access( int level, char *fmt, ... )
#else
;
#endif
-int slapd_log_audit(char *buffer, int buf_len);
-int slapd_log_audit_internal(char *buffer, int buf_len);
+int slapd_log_audit(char *buffer, int buf_len, int sourcelog);
+int slapd_log_audit_internal(char *buffer, int buf_len, int state);
int slapd_log_auditfail(char *buffer, int buf_len);
int slapd_log_auditfail_internal(char *buffer, int buf_len);
void log_access_flush();
7 years, 8 months