[389-ds-base] branch 389-ds-base-1.4.0 updated: Issue 49232 - Truncate the message when buffer capacity is exceeded
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.4.0
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.4.0 by this push:
new 915901a Issue 49232 - Truncate the message when buffer capacity is exceeded
915901a is described below
commit 915901a9c29a5509028cc09405dd24228c083b57
Author: Simon Pichugin <spichugi(a)redhat.com>
AuthorDate: Fri May 31 14:50:12 2019 +0200
Issue 49232 - Truncate the message when buffer capacity is exceeded
Bug Description: When the access log buffer capacity is exceeded we log
an emergency error and the access log line is not logged at all.
Fix Description: Log the error message to errors log and log the access
log message but truncate its elements (for the search access log message).
Or just log what is in the buffer in other cases.
Add CI test to ds_logs test suite for the basic feature testing.
https://pagure.io/389-ds-base/issue/49232
Reviewed by: mreynolds, tbordaz, firstyear (Thanks!!)
---
dirsrvtests/tests/suites/ds_logs/ds_logs_test.py | 17 +++----
ldap/servers/slapd/log.c | 19 ++++---
ldap/servers/slapd/log.h | 5 ++
ldap/servers/slapd/opshared.c | 65 ++++++++++++------------
4 files changed, 57 insertions(+), 49 deletions(-)
diff --git a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
index cfea0d2..525d2a9 100644
--- a/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
+++ b/dirsrvtests/tests/suites/ds_logs/ds_logs_test.py
@@ -6,13 +6,12 @@
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
#
-from random import sample
-
+import os
+import logging
import pytest
from lib389.tasks import *
from lib389.utils import *
from lib389.topologies import topology_st
-
from lib389.idm.user import UserAccounts
logging.getLogger(__name__).setLevel(logging.DEBUG)
@@ -29,11 +28,11 @@ def add_users(topology_st, users_num):
uid = 1000 + i
users.create(properties={
'uid': 'testuser%d' % uid,
- 'cn' : 'testuser%d' % uid,
- 'sn' : 'user',
- 'uidNumber' : '%d' % uid,
- 'gidNumber' : '%d' % uid,
- 'homeDirectory' : '/home/testuser%d' % uid
+ 'cn': 'testuser%d' % uid,
+ 'sn': 'user',
+ 'uidNumber': '%d' % uid,
+ 'gidNumber': '%d' % uid,
+ 'homeDirectory': '/home/testuser%d' % uid
})
def search_users(topology_st):
@@ -64,7 +63,7 @@ def test_check_default(topology_st):
default = topology_st.standalone.config.get_attr_val_utf8(PLUGIN_TIMESTAMP)
# Now check it should be ON by default
- assert (default == "on")
+ assert default == "on"
log.debug(default)
@pytest.mark.bz1273549
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index f308a48..bfcf574 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -81,8 +81,6 @@ static int slapi_log_map[] = {
#define SLAPI_LOG_MIN SLAPI_LOG_FATAL /* from slapi-plugin.h */
#define SLAPI_LOG_MAX SLAPI_LOG_DEBUG /* from slapi-plugin.h */
-#define TBUFSIZE 50 /* size for time buffers */
-#define SLAPI_LOG_BUFSIZ 2048 /* size for data buffers */
/**************************************************************************
* PROTOTYPES
@@ -2553,8 +2551,9 @@ vslapd_log_access(char *fmt, va_list ap)
{
char buffer[SLAPI_LOG_BUFSIZ];
char vbuf[SLAPI_LOG_BUFSIZ];
- int blen = TBUFSIZE;
- int vlen;
+ int32_t blen = TBUFSIZE;
+ int32_t vlen;
+ int32_t rc = LDAP_SUCCESS;
time_t tnl;
/* We do this sooner, so that we can use the message in other calls */
@@ -2594,14 +2593,18 @@ vslapd_log_access(char *fmt, va_list ap)
if (SLAPI_LOG_BUFSIZ - blen < vlen) {
/* We won't be able to fit the message in! Uh-oh! */
- /* Should we actually just do the snprintf, and warn that message was truncated? */
- log__error_emergency("Insufficent buffer capacity to fit timestamp and message!", 1, 0);
- return -1;
+ /* If the issue is not resolved during the fmt string creation (see op_shared_search()),
+ * we truncate the line and still log the message allowing the admin to check if
+ * someone is trying to do something bad. */
+ vlen = strlen(vbuf); /* Truncated length */
+ memcpy(&vbuf[vlen-4], "...\n", 4); /* Replace last characters with three dots and a new line character */
+ slapi_log_err(SLAPI_LOG_ERR, "vslapd_log_access", "Insufficient buffer capacity to fit timestamp and message! The line in the access log was truncated\n");
+ rc = -1;
}
log_append_buffer2(tnl, loginfo.log_access_buffer, buffer, blen, vbuf, vlen);
- return (LDAP_SUCCESS);
+ return (rc);
}
int
diff --git a/ldap/servers/slapd/log.h b/ldap/servers/slapd/log.h
index a283b33..9fb4e74 100644
--- a/ldap/servers/slapd/log.h
+++ b/ldap/servers/slapd/log.h
@@ -233,3 +233,8 @@ struct logging_opts
#define LOG_AUDITFAIL_UNLOCK_READ() slapi_rwlock_unlock(loginfo.log_auditfail_rwlock)
#define LOG_AUDITFAIL_LOCK_WRITE() slapi_rwlock_wrlock(loginfo.log_auditfail_rwlock)
#define LOG_AUDITFAIL_UNLOCK_WRITE() slapi_rwlock_unlock(loginfo.log_auditfail_rwlock)
+
+/* For using with slapi_log_access */
+#define TBUFSIZE 50 /* size for time buffers */
+#define SLAPI_LOG_BUFSIZ 2048 /* size for data buffers */
+#define SLAPI_ACCESS_LOG_FMTBUF 128 /* size for access log formatting line buffer */
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index 5c5acb3..8eab3fa 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -13,6 +13,7 @@
/* opshared.c - functions shared between regular and internal operations */
+#include "log.h"
#include "slap.h"
#define PAGEDRESULTS_PAGE_END 1
@@ -314,47 +315,16 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
proxy_err = proxyauth_get_dn(pb, &proxydn, &errtext);
if (operation_is_flag_set(operation, OP_FLAG_ACTION_LOG_ACCESS)) {
- char *fmtstr;
+ char fmtstr[SLAPI_ACCESS_LOG_FMTBUF];
uint64_t connid;
int32_t op_id;
int32_t op_internal_id;
int32_t op_nested_count;
-#define SLAPD_SEARCH_FMTSTR_BASE "conn=%" PRIu64 " op=%d SRCH base=\"%s\" scope=%d "
-#define SLAPD_SEARCH_FMTSTR_BASE_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d) SRCH base=\"%s\" scope=%d "
-#define SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d) SRCH base=\"%s\" scope=%d "
-#define SLAPD_SEARCH_FMTSTR_REMAINDER " attrs=%s%s%s\n"
-
PR_ASSERT(fstr);
if (internal_op) {
get_internal_conn_op(&connid, &op_id, &op_internal_id, &op_nested_count);
}
- if (strlen(fstr) > 1024) {
- /*
- * slapi_log_access() throws away log lines that are longer than
- * 2048 characters, so we limit the filter string to 1024 (better
- * to log something rather than nothing)
- */
- if (!internal_op) {
- fmtstr = SLAPD_SEARCH_FMTSTR_BASE "filter=\"%.1024s...\"" SLAPD_SEARCH_FMTSTR_REMAINDER;
- } else {
- if (connid == 0) {
- fmtstr = SLAPD_SEARCH_FMTSTR_BASE_INT_INT "filter=\"%.1024s...\"" SLAPD_SEARCH_FMTSTR_REMAINDER;
- } else {
- fmtstr = SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "filter=\"%.1024s...\"" SLAPD_SEARCH_FMTSTR_REMAINDER;
- }
- }
- } else {
- if (!internal_op) {
- fmtstr = SLAPD_SEARCH_FMTSTR_BASE "filter=\"%s\"" SLAPD_SEARCH_FMTSTR_REMAINDER;
- } else {
- if (connid == 0) {
- fmtstr = SLAPD_SEARCH_FMTSTR_BASE_INT_INT "filter=\"%s\"" SLAPD_SEARCH_FMTSTR_REMAINDER;
- } else {
- fmtstr = SLAPD_SEARCH_FMTSTR_BASE_EXT_INT "filter=\"%s\"" SLAPD_SEARCH_FMTSTR_REMAINDER;
- }
- }
- }
if (NULL == attrs) {
attrliststr = "ALL";
@@ -368,6 +338,37 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
proxystr = slapi_ch_smprintf(" authzid=\"%s\"", proxydn);
}
+#define SLAPD_SEARCH_FMTSTR_CONN_OP "conn=%" PRIu64 " op=%d"
+#define SLAPD_SEARCH_FMTSTR_CONN_OP_INT_INT "conn=Internal(%" PRIu64 ") op=%d(%d)(%d)"
+#define SLAPD_SEARCH_FMTSTR_CONN_OP_EXT_INT "conn=%" PRIu64 " (Internal) op=%d(%d)(%d)"
+#define SLAPD_SEARCH_FMTSTR_REMAINDER "%s%s\n"
+
+#define SLAPD_SEARCH_BUFPART 512
+#define LOG_ACCESS_FORMAT_BUFSIZ(arg, logstr, bufsiz) ((strlen(arg)) < (bufsiz) ? (logstr "%s") : \
+ (logstr "%." STRINGIFYDEFINE(bufsiz) "s..."))
+/* Define a separate macro for attributes because when we strip it we should take care of the quotes */
+#define LOG_ACCESS_FORMAT_ATTR_BUFSIZ(arg, logstr, bufsiz) ((strlen(arg)) < (bufsiz) ? (logstr "%s") : \
+ (logstr "%." STRINGIFYDEFINE(bufsiz) "s...\""))
+
+ /*
+ * slapi_log_access() throws away log lines that are longer than
+ * 2048 characters, so we limit the filter, base and attrs strings to 512
+ * (better to log something rather than nothing)
+ */
+ if (!internal_op) {
+ strcpy(fmtstr, SLAPD_SEARCH_FMTSTR_CONN_OP);
+ } else {
+ if (connid == 0) {
+ strcpy(fmtstr, SLAPD_SEARCH_FMTSTR_CONN_OP_INT_INT);
+ } else {
+ strcpy(fmtstr, SLAPD_SEARCH_FMTSTR_CONN_OP_EXT_INT);
+ }
+ }
+ strcat(fmtstr, LOG_ACCESS_FORMAT_BUFSIZ(normbase, " SRCH base=\"", SLAPD_SEARCH_BUFPART));
+ strcat(fmtstr, LOG_ACCESS_FORMAT_BUFSIZ(fstr, "\" scope=%d filter=\"", SLAPD_SEARCH_BUFPART));
+ strcat(fmtstr, LOG_ACCESS_FORMAT_ATTR_BUFSIZ(attrliststr, "\" attrs=", SLAPD_SEARCH_BUFPART));
+ strcat(fmtstr, SLAPD_SEARCH_FMTSTR_REMAINDER);
+
if (!internal_op) {
slapi_log_access(LDAP_DEBUG_STATS, fmtstr,
pb_conn->c_connid,
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.3.9 updated: Ticket 50542 - Entry cache contention during base search
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
tbordaz pushed a commit to branch 389-ds-base-1.3.9
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.3.9 by this push:
new 35b42aa Ticket 50542 - Entry cache contention during base search
35b42aa is described below
commit 35b42aad913231497b7553cbc16ab58453f58f1a
Author: Thierry Bordaz <tbordaz(a)redhat.com>
AuthorDate: Thu Aug 8 12:05:00 2019 +0200
Ticket 50542 - Entry cache contention during base search
Bug Description:
During a base search the entry cache lock is acquired to retrieve the target entry.
Later when the candidate list is built, the entry cache lock is also acquired
to retrieve the candidate that is actually the target entry itself
So for a base search the entry cache lock is accessed 4 times (2 acquires + 2 releases)
It is very easy to create a huge contention (e.g. dereferencing large group) increasing
etime
Fix Description:
The idea is to acquire the entry from the entry cache (with refcnt++) when processing the base
search. Then, instead of returning the entry (refcnt--), the entry is kept in the operation until
the operation completes. If we later need the entry (to send it back to the client), it is
picked up from the operation rather than from an entry cache lookup
https://pagure.io/389-ds-base/issue/50542
Reviewed by: Ludwig Krispenz, William Brown
Platforms tested: F29
Flag Day: no
Doc impact: no
---
ldap/servers/slapd/back-ldbm/ldbm_search.c | 45 ++++++++++++++++++++++++++----
ldap/servers/slapd/operation.c | 32 +++++++++++++++++++++
ldap/servers/slapd/opshared.c | 36 ++++++++++++++++++++++--
ldap/servers/slapd/proto-slap.h | 4 +++
ldap/servers/slapd/slap.h | 9 ++++++
5 files changed, 118 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 8f31118..c8f5719 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -551,6 +551,13 @@ ldbm_back_search(Slapi_PBlock *pb)
LDBM_SRCH_DEFAULT_RESULT, NULL, 1, &vlv_request_control, NULL, candidates);
}
}
+ /* We have the base search entry and a callback to "cache_return" it.
+ * Keep it into the operation to avoid additional cache fetch/return
+ */
+ if (e && be->be_entry_release) {
+ operation_set_target_entry(operation, (void *) e);
+ operation_set_target_entry_id(operation, e->ep_id);
+ }
/*
* If this is a persistent search then the client is only
@@ -807,7 +814,6 @@ ldbm_back_search(Slapi_PBlock *pb)
}
}
- CACHE_RETURN(&inst->inst_cache, &e);
/*
* if the candidate list is an allids list, arrange for access log
@@ -1345,6 +1351,27 @@ ldbm_back_next_search_entry(Slapi_PBlock *pb)
return ldbm_back_next_search_entry_ext(pb, 0);
}
+/* The reference on the target_entry (base search) is stored in the operation.
+ * This is to prevent additional cache find/return calls that require the cache lock.
+ *
+ * The target entry is acquired during be->be_search (when building the candidate list)
+ * and is returned once the operation completes (or fails).
+ *
+ * The other entries sent back to the client have been acquired/returned during send_results_ext.
+ * If the target entry is sent back to the client it is not returned (refcnt--) during send_results_ext.
+ *
+ * This function returns (refcnt-- in the entry cache) the entry unless it is
+ * the target_entry (base search). The target_entry will be returned once the operation
+ * completes.
+ */
+static void
+non_target_cache_return(Slapi_Operation *op, struct cache *cache, struct backentry **e)
+{
+ if (e && (*e != operation_get_target_entry(op))) {
+ CACHE_RETURN(cache, e);
+ }
+}
+
int
ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
{
@@ -1447,7 +1474,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
/* If we are using the extension, the front end will tell
* us when to do this so we don't do it now */
if (sr->sr_entry && !use_extension) {
- CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
+ non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
sr->sr_entry = NULL;
}
@@ -1559,7 +1586,13 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
}
/* get the entry */
- e = id2entry(be, id, &txn, &err);
+ e = operation_get_target_entry(op);
+ if ((e == NULL) || (id != operation_get_target_entry_id(op))) {
+ /* if the entry is not the target_entry (base search)
+ * we need to fetch it from the entry cache (it was not
+ * referenced in the operation) */
+ e = id2entry(be, id, &txn, &err);
+ }
if (e == NULL) {
if (err != 0 && err != DB_NOTFOUND) {
slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_next_search_entry_ext", "next_search_entry db err %d\n",
@@ -1679,7 +1712,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
/* check size limit */
if (slimit >= 0) {
if (--slimit < 0) {
- CACHE_RETURN(&inst->inst_cache, &e);
+ non_target_cache_return(op, &inst->inst_cache, &e);
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET_SIZE_ESTIMATE, &estimate);
delete_search_result_set(pb, &sr);
slapi_send_ldap_result(pb, LDAP_SIZELIMIT_EXCEEDED, NULL, NULL, nentries, urls);
@@ -1717,12 +1750,12 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
rc = 0;
goto bail;
} else {
- CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
+ non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
sr->sr_entry = NULL;
}
} else {
/* Failed the filter test, and this isn't a VLV Search */
- CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
+ non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
sr->sr_entry = NULL;
if (LDAP_UNWILLING_TO_PERFORM == filter_test) {
/* Need to catch this error to detect the vattr loop */
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index 4a05e0a..8186fd3 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -354,6 +354,38 @@ operation_is_flag_set(Slapi_Operation *op, int flag)
return op->o_flags & flag;
}
+void *
+operation_get_target_entry(Slapi_Operation *op)
+{
+ PR_ASSERT(op);
+
+ return op->o_target_entry;
+}
+
+void
+operation_set_target_entry(Slapi_Operation *op, void *target_entry)
+{
+ PR_ASSERT(op);
+
+ op->o_target_entry = target_entry;
+}
+
+u_int32_t
+operation_get_target_entry_id(Slapi_Operation *op)
+{
+ PR_ASSERT(op);
+
+ return op->o_target_entry_id;
+}
+
+void
+operation_set_target_entry_id(Slapi_Operation *op, u_int32_t target_entry_id)
+{
+ PR_ASSERT(op);
+
+ op->o_target_entry_id = target_entry_id;
+}
+
Slapi_DN *
operation_get_target_spec(Slapi_Operation *op)
{
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index cf6cdff..b9fc835 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -193,6 +193,28 @@ slapi_attr_is_last_mod(char *attr)
return 0;
}
+/* The reference on the target_entry (base search) is stored in the operation.
+ * This is to prevent additional cache find/return calls that require the cache lock.
+ *
+ * The target entry is acquired during be->be_search (when building the candidate list)
+ * and is returned once the operation completes (or fails).
+ *
+ * The other entries sent back to the client have been acquired/returned during send_results_ext.
+ * If the target entry is sent back to the client it is not returned (refcnt--) during send_results_ext.
+ *
+ * This function only returns (refcnt-- in the entry cache) the target_entry (base search).
+ * It is called at the operation level (op_shared_search).
+ *
+ */
+static void
+cache_return_target_entry(Slapi_PBlock *pb, Slapi_Backend *be, Slapi_Operation *operation)
+{
+ if (operation_get_target_entry(operation) && be->be_entry_release) {
+ (*be->be_entry_release)(pb, operation_get_target_entry(operation));
+ operation_set_target_entry(operation, NULL);
+ operation_set_target_entry_id(operation, 0);
+ }
+}
/*
* Returns: 0 - if the operation is successful
* < 0 - if operation fails.
@@ -252,6 +274,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* get search parameters */
slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &base);
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
+ slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
if (NULL == sdn) {
sdn = slapi_sdn_new_dn_byval(base);
@@ -276,7 +299,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_SEARCH_SCOPE, &scope);
slapi_pblock_get(pb, SLAPI_SEARCH_STRFILTER, &fstr);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
- slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+
if (operation == NULL) {
op_shared_log_error_access(pb, "SRCH", base, "NULL operation");
send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "NULL operation", 0, NULL);
@@ -808,6 +831,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
* the error has already been sent
* stop the search here
*/
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
@@ -815,6 +839,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
case SLAPI_FAIL_DISKFULL:
operation_out_of_disk_space();
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
case 0: /* search was successful and we need to send the result */
@@ -840,6 +865,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* no more entries && no more backends */
curr_search_count = -1;
} else if (rc < 0) {
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
} else {
@@ -852,6 +878,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
(pagedresults_set_search_result_set_size_estimate(pb_conn, operation, estimate, pr_idx) < 0) ||
(pagedresults_set_with_sort(pb_conn, operation, with_sort, pr_idx) < 0)) {
pagedresults_unlock(pb_conn, pr_idx);
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
pagedresults_unlock(pb_conn, pr_idx);
@@ -867,6 +894,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx);
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
rc = LDAP_SUCCESS;
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
pagedresults_set_response_control(pb, 0, estimate, curr_search_count, pr_idx);
@@ -880,10 +908,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
* LDAP error should already have been sent to the client
* stop the search, free and return
*/
- if (rc != 0)
+ if (rc != 0) {
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
+ }
break;
}
+ /* cache return the target_entry */
+ cache_return_target_entry(pb, be, operation);
}
nentries += pnentries;
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index e37f702..d9fb8fd 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -873,6 +873,10 @@ void operation_free(Slapi_Operation **op, Connection *conn);
int slapi_op_abandoned(Slapi_PBlock *pb);
void operation_out_of_disk_space(void);
int get_operation_object_type(void);
+void *operation_get_target_entry(Slapi_Operation *op);
+void operation_set_target_entry(Slapi_Operation *op, void *target_void);
+u_int32_t operation_get_target_entry_id(Slapi_Operation *op);
+void operation_set_target_entry_id(Slapi_Operation *op, u_int32_t target_entry_id);
Slapi_DN *operation_get_target_spec(Slapi_Operation *op);
void operation_set_target_spec(Slapi_Operation *op, const Slapi_DN *target_spec);
void operation_set_target_spec_str(Slapi_Operation *op, const char *target_spec);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index bce7209..a8908d9 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1545,6 +1545,15 @@ typedef struct op
unsigned long o_flags; /* flags for this operation */
void *o_extension; /* plugins are able to extend the Operation object */
Slapi_DN *o_target_spec; /* used to decide which plugins should be called for the operation */
+ void *o_target_entry; /* Only used for SEARCH operation
+ * reference of search target entry (base search) in the entry cache
+ * When it is set the refcnt (of the entry in the entry cache) as been increased
+ */
+ u_int32_t o_target_entry_id; /* Only used for SEARCH operation
+ * contains the ID of the o_target_entry. In send_result we have ID of the candidates, this
+ * accelerates the tests as we have not to retrieve for each candidate the
+ * ep_id inside the o_target_entry.
+ */
unsigned long o_abandoned_op; /* operation abandoned by this operation - used to decide which plugins to invoke */
struct slapi_operation_parameters o_params;
struct slapi_operation_results o_results;
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.4.0 updated: Ticket 50542 - Entry cache contention during base search
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
tbordaz pushed a commit to branch 389-ds-base-1.4.0
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.4.0 by this push:
new 4f98f3b Ticket 50542 - Entry cache contention during base search
4f98f3b is described below
commit 4f98f3ba595534e25808397e6452d5aa69bdf4ea
Author: Thierry Bordaz <tbordaz(a)redhat.com>
AuthorDate: Thu Aug 8 12:05:00 2019 +0200
Ticket 50542 - Entry cache contention during base search
Bug Description:
During a base search the entry cache lock is acquired to retrieve the target entry.
Later when the candidate list is built, the entry cache lock is also acquired
to retrieve the candidate that is actually the target entry itself
So for a base search the entry cache lock is accessed 4 times (2 acquires + 2 releases)
It is very easy to create a huge contention (e.g. dereferencing large group) increasing
etime
Fix Description:
The idea is to acquire the entry from the entry cache (with refcnt++) when processing the base
search. Then, instead of returning the entry (refcnt--), the entry is kept in the operation until
the operation completes. If we later need the entry (to send it back to the client), it is
picked up from the operation rather than from an entry cache lookup
https://pagure.io/389-ds-base/issue/50542
Reviewed by: Ludwig Krispenz, William Brown
Platforms tested: F29
Flag Day: no
Doc impact: no
---
ldap/servers/slapd/back-ldbm/ldbm_search.c | 45 ++++++++++++++++++++++++++----
ldap/servers/slapd/operation.c | 32 +++++++++++++++++++++
ldap/servers/slapd/opshared.c | 36 ++++++++++++++++++++++--
ldap/servers/slapd/proto-slap.h | 4 +++
ldap/servers/slapd/slap.h | 9 ++++++
5 files changed, 118 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_search.c b/ldap/servers/slapd/back-ldbm/ldbm_search.c
index 7f3600e..7e1c52d 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_search.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_search.c
@@ -564,6 +564,13 @@ ldbm_back_search(Slapi_PBlock *pb)
LDBM_SRCH_DEFAULT_RESULT, NULL, 1, &vlv_request_control, NULL, candidates);
}
}
+ /* We have the base search entry and a callback to "cache_return" it.
+ * Keep it into the operation to avoid additional cache fetch/return
+ */
+ if (e && be->be_entry_release) {
+ operation_set_target_entry(operation, (void *) e);
+ operation_set_target_entry_id(operation, e->ep_id);
+ }
/*
* If this is a persistent search then the client is only
@@ -820,7 +827,6 @@ ldbm_back_search(Slapi_PBlock *pb)
}
}
- CACHE_RETURN(&inst->inst_cache, &e);
/*
* if the candidate list is an allids list, arrange for access log
@@ -1358,6 +1364,27 @@ ldbm_back_next_search_entry(Slapi_PBlock *pb)
return ldbm_back_next_search_entry_ext(pb, 0);
}
+/* The reference on the target_entry (base search) is stored in the operation.
+ * This is to prevent additional cache find/return calls that require the cache lock.
+ *
+ * The target entry is acquired during be->be_search (when building the candidate list)
+ * and is returned once the operation completes (or fails).
+ *
+ * The other entries sent back to the client have been acquired/returned during send_results_ext.
+ * If the target entry is sent back to the client it is not returned (refcnt--) during send_results_ext.
+ *
+ * This function returns (refcnt-- in the entry cache) the entry unless it is
+ * the target_entry (base search). The target_entry will be returned once the operation
+ * completes.
+ */
+static void
+non_target_cache_return(Slapi_Operation *op, struct cache *cache, struct backentry **e)
+{
+ if (e && (*e != operation_get_target_entry(op))) {
+ CACHE_RETURN(cache, e);
+ }
+}
+
int
ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
{
@@ -1460,7 +1487,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
/* If we are using the extension, the front end will tell
* us when to do this so we don't do it now */
if (sr->sr_entry && !use_extension) {
- CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
+ non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
sr->sr_entry = NULL;
}
@@ -1572,7 +1599,13 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
}
/* get the entry */
- e = id2entry(be, id, &txn, &err);
+ e = operation_get_target_entry(op);
+ if ((e == NULL) || (id != operation_get_target_entry_id(op))) {
+ /* if the entry is not the target_entry (base search)
+ * we need to fetch it from the entry cache (it was not
+ * referenced in the operation) */
+ e = id2entry(be, id, &txn, &err);
+ }
if (e == NULL) {
if (err != 0 && err != DB_NOTFOUND) {
slapi_log_err(SLAPI_LOG_ERR, "ldbm_back_next_search_entry_ext", "next_search_entry db err %d\n",
@@ -1692,7 +1725,7 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
/* check size limit */
if (slimit >= 0) {
if (--slimit < 0) {
- CACHE_RETURN(&inst->inst_cache, &e);
+ non_target_cache_return(op, &inst->inst_cache, &e);
slapi_pblock_set(pb, SLAPI_SEARCH_RESULT_SET_SIZE_ESTIMATE, &estimate);
delete_search_result_set(pb, &sr);
slapi_send_ldap_result(pb, LDAP_SIZELIMIT_EXCEEDED, NULL, NULL, nentries, urls);
@@ -1730,12 +1763,12 @@ ldbm_back_next_search_entry_ext(Slapi_PBlock *pb, int use_extension)
rc = 0;
goto bail;
} else {
- CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
+ non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
sr->sr_entry = NULL;
}
} else {
/* Failed the filter test, and this isn't a VLV Search */
- CACHE_RETURN(&inst->inst_cache, &(sr->sr_entry));
+ non_target_cache_return(op, &inst->inst_cache, &(sr->sr_entry));
sr->sr_entry = NULL;
if (LDAP_UNWILLING_TO_PERFORM == filter_test) {
/* Need to catch this error to detect the vattr loop */
diff --git a/ldap/servers/slapd/operation.c b/ldap/servers/slapd/operation.c
index 4a05e0a..8186fd3 100644
--- a/ldap/servers/slapd/operation.c
+++ b/ldap/servers/slapd/operation.c
@@ -354,6 +354,38 @@ operation_is_flag_set(Slapi_Operation *op, int flag)
return op->o_flags & flag;
}
+void *
+operation_get_target_entry(Slapi_Operation *op)
+{
+ PR_ASSERT(op);
+
+ return op->o_target_entry;
+}
+
+void
+operation_set_target_entry(Slapi_Operation *op, void *target_entry)
+{
+ PR_ASSERT(op);
+
+ op->o_target_entry = target_entry;
+}
+
+u_int32_t
+operation_get_target_entry_id(Slapi_Operation *op)
+{
+ PR_ASSERT(op);
+
+ return op->o_target_entry_id;
+}
+
+void
+operation_set_target_entry_id(Slapi_Operation *op, u_int32_t target_entry_id)
+{
+ PR_ASSERT(op);
+
+ op->o_target_entry_id = target_entry_id;
+}
+
Slapi_DN *
operation_get_target_spec(Slapi_Operation *op)
{
diff --git a/ldap/servers/slapd/opshared.c b/ldap/servers/slapd/opshared.c
index dd69173..5c5acb3 100644
--- a/ldap/servers/slapd/opshared.c
+++ b/ldap/servers/slapd/opshared.c
@@ -192,6 +192,28 @@ slapi_attr_is_last_mod(char *attr)
return 0;
}
+/* The reference on the target_entry (base search) is stored in the operation.
+ * This is to prevent additional cache find/return calls that require the cache lock.
+ *
+ * The target entry is acquired during be->be_search (when building the candidate list)
+ * and is returned once the operation completes (or fails).
+ *
+ * The other entries sent back to the client have been acquired/returned during send_results_ext.
+ * If the target entry is sent back to the client it is not returned (refcnt--) during send_results_ext.
+ *
+ * This function only returns (refcnt-- in the entry cache) the target_entry (base search).
+ * It is called at the operation level (op_shared_search).
+ *
+ */
+static void
+cache_return_target_entry(Slapi_PBlock *pb, Slapi_Backend *be, Slapi_Operation *operation)
+{
+ if (operation_get_target_entry(operation) && be->be_entry_release) {
+ (*be->be_entry_release)(pb, operation_get_target_entry(operation));
+ operation_set_target_entry(operation, NULL);
+ operation_set_target_entry_id(operation, 0);
+ }
+}
/*
* Returns: 0 - if the operation is successful
* < 0 - if operation fails.
@@ -251,6 +273,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* get search parameters */
slapi_pblock_get(pb, SLAPI_ORIGINAL_TARGET_DN, &base);
slapi_pblock_get(pb, SLAPI_SEARCH_TARGET_SDN, &sdn);
+ slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
if (NULL == sdn) {
sdn = slapi_sdn_new_dn_byval(base);
@@ -275,7 +298,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
slapi_pblock_get(pb, SLAPI_SEARCH_SCOPE, &scope);
slapi_pblock_get(pb, SLAPI_SEARCH_STRFILTER, &fstr);
slapi_pblock_get(pb, SLAPI_SEARCH_ATTRS, &attrs);
- slapi_pblock_get(pb, SLAPI_OPERATION, &operation);
+
if (operation == NULL) {
op_shared_log_error_access(pb, "SRCH", base, "NULL operation");
send_ldap_result(pb, LDAP_OPERATIONS_ERROR, NULL, "NULL operation", 0, NULL);
@@ -822,6 +845,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
* the error has already been sent
* stop the search here
*/
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
@@ -829,6 +853,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
case SLAPI_FAIL_DISKFULL:
operation_out_of_disk_space();
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
case 0: /* search was successful and we need to send the result */
@@ -854,6 +879,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
/* no more entries && no more backends */
curr_search_count = -1;
} else if (rc < 0) {
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
} else {
@@ -866,6 +892,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
(pagedresults_set_search_result_set_size_estimate(pb_conn, operation, estimate, pr_idx) < 0) ||
(pagedresults_set_with_sort(pb_conn, operation, with_sort, pr_idx) < 0)) {
pagedresults_unlock(pb_conn, pr_idx);
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
pagedresults_unlock(pb_conn, pr_idx);
@@ -881,6 +908,7 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
pagedresults_set_response_control(pb, 0, estimate, -1, pr_idx);
send_ldap_result(pb, 0, NULL, "Simple Paged Results Search abandoned", 0, NULL);
rc = LDAP_SUCCESS;
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
}
pagedresults_set_response_control(pb, 0, estimate, curr_search_count, pr_idx);
@@ -894,10 +922,14 @@ op_shared_search(Slapi_PBlock *pb, int send_result)
* LDAP error should already have been sent to the client
* stop the search, free and return
*/
- if (rc != 0)
+ if (rc != 0) {
+ cache_return_target_entry(pb, be, operation);
goto free_and_return;
+ }
break;
}
+ /* cache return the target_entry */
+ cache_return_target_entry(pb, be, operation);
}
nentries += pnentries;
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 932828d..4bf5fbf 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -883,6 +883,10 @@ void operation_free(Slapi_Operation **op, Connection *conn);
int slapi_op_abandoned(Slapi_PBlock *pb);
void operation_out_of_disk_space(void);
int get_operation_object_type(void);
+void *operation_get_target_entry(Slapi_Operation *op);
+void operation_set_target_entry(Slapi_Operation *op, void *target_void);
+u_int32_t operation_get_target_entry_id(Slapi_Operation *op);
+void operation_set_target_entry_id(Slapi_Operation *op, u_int32_t target_entry_id);
Slapi_DN *operation_get_target_spec(Slapi_Operation *op);
void operation_set_target_spec(Slapi_Operation *op, const Slapi_DN *target_spec);
void operation_set_target_spec_str(Slapi_Operation *op, const char *target_spec);
diff --git a/ldap/servers/slapd/slap.h b/ldap/servers/slapd/slap.h
index 2d2de11..2ae395c 100644
--- a/ldap/servers/slapd/slap.h
+++ b/ldap/servers/slapd/slap.h
@@ -1553,6 +1553,15 @@ typedef struct op
unsigned long o_flags; /* flags for this operation */
void *o_extension; /* plugins are able to extend the Operation object */
Slapi_DN *o_target_spec; /* used to decide which plugins should be called for the operation */
+ void *o_target_entry; /* Only used for SEARCH operation
+ * reference of search target entry (base search) in the entry cache
+ * When it is set the refcnt (of the entry in the entry cache) has been increased
+ */
+ u_int32_t o_target_entry_id; /* Only used for SEARCH operation
+ * contains the ID of the o_target_entry. In send_result we have the IDs of the candidates; this
+ * accelerates the tests because we do not have to retrieve for each candidate the
+ * ep_id inside the o_target_entry.
+ */
unsigned long o_abandoned_op; /* operation abandoned by this operation - used to decide which plugins to invoke */
struct slapi_operation_parameters o_params;
struct slapi_operation_results o_results;
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch master updated: Issue 50538 - Move CI test to individual file
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch master
in repository 389-ds-base.
The following commit(s) were added to refs/heads/master by this push:
new 21ba842 Issue 50538 - Move CI test to individual file
21ba842 is described below
commit 21ba8427fa66795e3ef586768a84e0fa80fe3554
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Fri Aug 9 09:31:23 2019 -0400
Issue 50538 - Move CI test to individual file
Description: The CI test needs to be a standalone file as it needs
a clean environment to run correctly
relates: https://pagure.io/389-ds-base/issue/50538
Reviewed by: lkrispenz(Thanks!)
---
.../replication/cleanallruv_max_tasks_test.py | 72 ++++++++++++++++++++++
.../tests/suites/replication/cleanallruv_test.py | 43 -------------
2 files changed, 72 insertions(+), 43 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py
new file mode 100644
index 0000000..e3bbc97
--- /dev/null
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_max_tasks_test.py
@@ -0,0 +1,72 @@
+# --- BEGIN COPYRIGHT BLOCK ---
+# Copyright (C) 2019 Red Hat, Inc.
+# All rights reserved.
+#
+# License: GPL (version 3 or any later version).
+# See LICENSE for details.
+# --- END COPYRIGHT BLOCK ---
+#
+import threading
+import pytest
+import random
+from lib389 import DirSrv
+from lib389.tasks import *
+from lib389.utils import *
+from lib389.topologies import topology_m4, topology_m2
+from lib389._constants import *
+
+pytestmark = pytest.mark.tier1
+
+
+def test_max_tasks(topology_m4):
+ """Test we can not create more than 64 cleaning tasks
+
+ This test needs to be a standalone test because there is no easy way to
+ "restore" the instance after running this test
+
+ :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f
+ :setup: Replication setup with four masters
+ :steps:
+ 1. Stop masters 3 & 4
+ 2. Create over 64 tasks between m1 and m2
+ 3. Check logs to see if (>64) tasks were rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ # Stop masters 3 & 4
+ m1 = topology_m4.ms["master1"]
+ m2 = topology_m4.ms["master2"]
+ m3 = topology_m4.ms["master3"]
+ m4 = topology_m4.ms["master4"]
+ m3.stop()
+ m4.stop()
+
+ # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit
+ for i in range(1, 64):
+ cruv_task = CleanAllRUVTask(m1)
+ cruv_task.create(properties={
+ 'replica-id': str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'no', # This forces these tasks to stick around
+ })
+ cruv_task = CleanAllRUVTask(m2)
+ cruv_task.create(properties={
+ 'replica-id': "10" + str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'yes', # This allows the tasks to propagate
+ })
+
+ # Check the errors log for our error message in master 1
+ assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
+
+
+if __name__ == '__main__':
+ # Run isolated
+ # -s for DEBUG mode
+ CURRENT_FILE = os.path.realpath(__file__)
+ pytest.main("-s %s" % CURRENT_FILE)
+
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 8beb1fd..1721699 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -820,49 +820,6 @@ def test_clean_shutdown_crash(topology_m2):
assert not m1.detectDisorderlyShutdown()
-def test_max_tasks(topology_m4):
- """Test we can not create more than 64 cleaning tasks
-
- :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f
- :setup: Replication setup with four masters
- :steps:
- 1. Stop masters 3 & 4
- 2. Create over 64 tasks between m1 and m2
- 3. Check logs to see if (>65) tasks were rejected
-
- :expectedresults:
- 1. Success
- 2. Success
- 3. Success
- """
-
- # Stop masters 3 & 4
- m1 = topology_m4.ms["master1"]
- m2 = topology_m4.ms["master2"]
- m3 = topology_m4.ms["master3"]
- m4 = topology_m4.ms["master4"]
- m3.stop()
- m4.stop()
-
- # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit
- for i in range(1, 64):
- cruv_task = CleanAllRUVTask(m1)
- cruv_task.create(properties={
- 'replica-id': str(i),
- 'replica-base-dn': DEFAULT_SUFFIX,
- 'replica-force-cleaning': 'no', # This forces these tasks to stick around
- })
- cruv_task = CleanAllRUVTask(m2)
- cruv_task.create(properties={
- 'replica-id': "10" + str(i),
- 'replica-base-dn': DEFAULT_SUFFIX,
- 'replica-force-cleaning': 'yes', # This allows the tasks to propagate
- })
-
- # Check the errors log for our error message in master 1
- assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
-
-
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.3.9 updated: Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.3.9
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.3.9 by this push:
new 5956610 Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
5956610 is described below
commit 5956610211d3d4360444917440c18f320fe2b39a
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Wed Aug 7 20:36:53 2019 -0400
Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
Bug Description:
There is a hard limit of 64 concurrent cleanAllRUV tasks, but this limit is
only enforced when creating "new" tasks. It was not enforced when a task was
received via an extended operation. There were also race conditions in the
existing logic that allowed the array of cleaned rids to get corrupted. This
allowed for a very large number of task threads to be created.
Fix Description:
Maintain a new counter to keep track of the number of clean and abort threads
to make sure it never over runs the rid array buffers.
relates: https://pagure.io/389-ds-base/issue/50538
Reviewed by: lkrispenz(Thanks!)
---
.../tests/suites/replication/cleanallruv_test.py | 47 +++-
ldap/servers/plugins/replication/repl5.h | 7 +-
.../plugins/replication/repl5_replica_config.c | 247 +++++++++++----------
ldap/servers/plugins/replication/repl_extop.c | 19 +-
4 files changed, 202 insertions(+), 118 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 620a53e..43801dd 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,7 +7,6 @@
# --- END COPYRIGHT BLOCK ---
#
import threading
-
import pytest
from lib389.tasks import *
from lib389.utils import *
@@ -859,6 +858,50 @@ def test_multiple_tasks_with_force(topology_m4):
restore_master4(topology_m4)
+def test_max_tasks(topology_m4):
+ """Test we can not create more than 64 cleaning tasks
+
+ :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f
+ :setup: Replication setup with four masters
+ :steps:
+ 1. Stop masters 3 & 4
+ 2. Create over 64 tasks between m1 and m2
+ 3. Check logs to see if (>64) tasks were rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ # Stop masters 3 & 4
+ m1 = topology_m4.ms["master1"]
+ m2 = topology_m4.ms["master2"]
+ m3 = topology_m4.ms["master3"]
+ m4 = topology_m4.ms["master4"]
+ m3.stop()
+ m4.stop()
+
+ # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit
+ for i in range(1, 64):
+ cruv_task = CleanAllRUVTask(m1)
+ cruv_task.create(properties={
+ 'replica-id': str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'no', # This forces these tasks to stick around
+ })
+ cruv_task = CleanAllRUVTask(m2)
+ cruv_task.create(properties={
+ 'replica-id': "10" + str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'yes', # This allows the tasks to propagate
+ })
+
+ # Check the errors log for our error message in master 1
+ assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
+>>>>>>> ab24aa4cb... Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 3c7f06f..b06c6fb 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -80,6 +80,8 @@
#define CLEANRUV_FINISHED "finished"
#define CLEANRUV_CLEANING "cleaning"
#define CLEANRUV_NO_MAXCSN "no maxcsn"
+#define CLEANALLRUV_ID "CleanAllRUV Task"
+#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
/* DS 5.0 replication protocol error codes */
#define NSDS50_REPL_REPLICA_READY 0x00 /* Replica ready, go ahead */
@@ -784,6 +786,7 @@ void multimaster_mtnode_construct_replicas(void);
void multimaster_be_state_change(void *handle, char *be_name, int old_be_state, int new_be_state);
#define CLEANRIDSIZ 64 /* maximum number for concurrent CLEANALLRUV tasks */
+#define CLEANRID_BUFSIZ 128
typedef struct _cleanruv_data
{
@@ -815,6 +818,8 @@ int get_replica_type(Replica *r);
int replica_execute_cleanruv_task_ext(Object *r, ReplicaId rid);
void add_cleaned_rid(cleanruv_data *data, char *maxcsn);
int is_cleaned_rid(ReplicaId rid);
+int32_t check_and_set_cleanruv_task_count(ReplicaId rid);
+int32_t check_and_set_abort_cleanruv_task_count(void);
int replica_cleanall_ruv_abort(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg);
void replica_cleanallruv_thread_ext(void *arg);
void stop_ruv_cleaning(void);
@@ -833,8 +838,6 @@ void set_cleaned_rid(ReplicaId rid);
void cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fmt, ...);
char *replica_cleanallruv_get_local_maxcsn(ReplicaId rid, char *base_dn);
-
-
/* replutil.c */
LDAPControl *create_managedsait_control(void);
LDAPControl *create_backend_control(Slapi_DN *sdn);
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 62bfcf6..80a0797 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -30,17 +30,18 @@
#define CLEANALLRUV "CLEANALLRUV"
#define CLEANALLRUVLEN 11
#define REPLICA_RDN "cn=replica"
-#define CLEANALLRUV_ID "CleanAllRUV Task"
-#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
int slapi_log_urp = SLAPI_LOG_REPL;
-static ReplicaId cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId pre_cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId aborted_rids[CLEANRIDSIZ + 1] = {0};
-static Slapi_RWLock *rid_lock = NULL;
-static Slapi_RWLock *abort_rid_lock = NULL;
+static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0};
+static PRLock *rid_lock = NULL;
+static PRLock *abort_rid_lock = NULL;
static PRLock *notify_lock = NULL;
static PRCondVar *notify_cvar = NULL;
+static PRLock *task_count_lock = NULL;
+static int32_t clean_task_count = 0;
+static int32_t abort_task_count = 0;
/* Forward Declartions */
static int replica_config_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
@@ -67,8 +68,6 @@ static int replica_cleanallruv_send_abort_extop(Repl_Agmt *ra, Slapi_Task *task,
static int replica_cleanallruv_check_maxcsn(Repl_Agmt *agmt, char *basedn, char *rid_text, char *maxcsn, Slapi_Task *task);
static int replica_cleanallruv_replica_alive(Repl_Agmt *agmt);
static int replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *ra, char *rid_text, Slapi_Task *task, char *force);
-static int get_cleanruv_task_count(void);
-static int get_abort_cleanruv_task_count(void);
static int replica_cleanup_task(Object *r, const char *task_name, char *returntext, int apply_mods);
static int replica_task_done(Replica *replica);
static void delete_cleaned_rid_config(cleanruv_data *data);
@@ -114,20 +113,27 @@ replica_config_init()
PR_GetError());
return -1;
}
- rid_lock = slapi_new_rwlock();
+ rid_lock = PR_NewLock();
if (rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
- abort_rid_lock = slapi_new_rwlock();
+ abort_rid_lock = PR_NewLock();
if (abort_rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create abort_rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
+ task_count_lock = PR_NewLock();
+ if (task_count_lock == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
+ "Failed to create task_count_lock; NSPR error - %d\n",
+ PR_GetError());
+ return -1;
+ }
if ((notify_lock = PR_NewLock()) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create notify lock; NSPR error - %d\n",
@@ -1483,12 +1489,6 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Initiating CleanAllRUV Task...");
- if (get_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
- "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
- return LDAP_UNWILLING_TO_PERFORM;
- }
/*
* Grab the replica
*/
@@ -1540,6 +1540,13 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
goto fail;
}
+ if (check_and_set_cleanruv_task_count(rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto fail;
+ }
+
/*
* Launch the cleanallruv thread. Once all the replicas are cleaned it will release the rid
*/
@@ -1547,6 +1554,9 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
if (data == NULL) {
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Failed to allocate cleanruv_data. Aborting task.");
rc = -1;
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
goto fail;
}
data->repl_obj = r;
@@ -1629,13 +1639,13 @@ replica_cleanallruv_thread(void *arg)
int aborted = 0;
int rc = 0;
- if (!data || slapi_is_shutting_down()) {
- return; /* no data */
- }
-
/* Increase active thread count to prevent a race condition at server shutdown */
g_incr_active_threadcnt();
+ if (!data || slapi_is_shutting_down()) {
+ goto done;
+ }
+
if (data->task) {
slapi_task_inc_refcount(data->task);
slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name,
@@ -1682,16 +1692,13 @@ replica_cleanallruv_thread(void *arg)
slapi_task_begin(data->task, 1);
}
/*
- * Presetting the rid prevents duplicate thread creation, but allows the db and changelog to still
- * process updates from the rid.
- * set_cleaned_rid() blocks updates, so we don't want to do that... yet unless we are in force mode.
- * If we are forcing a clean independent of state of other servers for this RID we can set_cleaned_rid()
+ * We have already preset this rid, but if we are forcing a clean independent of state
+ * of other servers for this RID we can set_cleaned_rid()
*/
if (data->force) {
set_cleaned_rid(data->rid);
- } else {
- preset_cleaned_rid(data->rid);
}
+
rid_text = slapi_ch_smprintf("%d", data->rid);
csn_as_string(data->maxcsn, PR_FALSE, csnstr);
/*
@@ -1861,6 +1868,9 @@ done:
/*
* If the replicas are cleaned, release the rid
*/
+ if (slapi_is_shutting_down()) {
+ stop_ruv_cleaning();
+ }
if (!aborted && !slapi_is_shutting_down()) {
/*
* Success - the rid has been cleaned!
@@ -1879,10 +1889,9 @@ done:
} else {
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Propagated task does not delete Keep alive entry (%d).", data->rid);
}
-
clean_agmts(data);
remove_cleaned_rid(data->rid);
- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d).", data->rid);
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d)", data->rid);
} else {
/*
* Shutdown or abort
@@ -1915,6 +1924,10 @@ done:
slapi_ch_free_string(&data->force);
slapi_ch_free_string(&rid_text);
slapi_ch_free((void **)&data);
+ /* decrement task count */
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -2414,16 +2427,14 @@ replica_send_cleanruv_task(Repl_Agmt *agmt, cleanruv_data *clean_data)
int
is_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2431,16 +2442,14 @@ is_cleaned_rid(ReplicaId rid)
int
is_pre_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == pre_cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2453,14 +2462,14 @@ is_task_aborted(ReplicaId rid)
if (rid == 0) {
return 0;
}
- slapi_rwlock_rdlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != 0; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && aborted_rids[i] != 0; i++) {
if (rid == aborted_rids[i]) {
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 0;
}
@@ -2469,15 +2478,14 @@ preset_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && pre_cleaned_rids[i] != rid; i++) {
if (pre_cleaned_rids[i] == 0) {
pre_cleaned_rids[i] = rid;
- pre_cleaned_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2490,14 +2498,13 @@ set_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && cleaned_rids[i] != rid; i++) {
if (cleaned_rids[i] == 0) {
cleaned_rids[i] = rid;
- cleaned_rids[i + 1] = 0;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2569,15 +2576,14 @@ add_aborted_rid(ReplicaId rid, Replica *r, char *repl_root)
int rc;
int i;
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ; i++) {
if (aborted_rids[i] == 0) {
aborted_rids[i] = rid;
- aborted_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
/*
* Write the rid to the config entry
*/
@@ -2620,21 +2626,24 @@ delete_aborted_rid(Replica *r, ReplicaId rid, char *repl_root, int skip)
char *data;
char *dn;
int rc;
- int i;
if (r == NULL)
return;
if (skip) {
/* skip the deleting of the config, and just remove the in memory rid */
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- aborted_rids[i] = aborted_rids[i + 1];
- }
- slapi_rwlock_unlock(abort_rid_lock);
+ ReplicaId new_abort_rids[CLEANRID_BUFSIZ] = {0};
+ int32_t idx = 0;
+
+ PR_Lock(abort_rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (aborted_rids[i] != rid) {
+ new_abort_rids[idx] = aborted_rids[i];
+ idx++;
+ }
+ }
+ memcpy(aborted_rids, new_abort_rids, sizeof(new_abort_rids));
+ PR_Unlock(abort_rid_lock);
} else {
/* only remove the config, leave the in-memory rid */
dn = replica_get_dn(r);
@@ -2792,27 +2801,31 @@ bail:
void
remove_cleaned_rid(ReplicaId rid)
{
- int i;
- /*
- * Remove this rid, and optimize the array
- */
- slapi_rwlock_wrlock(rid_lock);
+ ReplicaId new_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ ReplicaId new_pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ size_t idx = 0;
+
+ PR_Lock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- cleaned_rids[i] = cleaned_rids[i + 1];
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (cleaned_rids[i] != rid) {
+ new_cleaned_rids[idx] = cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(cleaned_rids, new_cleaned_rids, sizeof(new_cleaned_rids));
+
/* now do the preset cleaned rids */
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- pre_cleaned_rids[i] = pre_cleaned_rids[i + 1];
+ idx = 0;
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (pre_cleaned_rids[i] != rid) {
+ new_pre_cleaned_rids[idx] = pre_cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(pre_cleaned_rids, new_pre_cleaned_rids, sizeof(new_pre_cleaned_rids));
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2840,16 +2853,6 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
char *ridstr = NULL;
int rc = SLAPI_DSE_CALLBACK_OK;
- if (get_abort_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
- "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
- CLEANRIDSIZ);
- cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
- *returncode = LDAP_OPERATIONS_ERROR;
- return SLAPI_DSE_CALLBACK_ERROR;
- }
-
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
@@ -2934,6 +2937,16 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
*/
certify_all = "no";
}
+
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ /* we are already running the maximum number of tasks */
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
+ CLEANRIDSIZ);
+ cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Create payload
*/
@@ -3142,6 +3155,9 @@ done:
slapi_ch_free_string(&data->certify);
slapi_sdn_free(&data->sdn);
slapi_ch_free((void **)&data);
+ PR_Lock(task_count_lock);
+ abort_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -3493,36 +3509,43 @@ replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *agmt, char *rid_text,
return rc;
}
-static int
-get_cleanruv_task_count(void)
+/*
+ * Before starting a cleanAllRUV task make sure there are not
+ * too many task threads already running. If everything is okay
+ * also pre-set the RID now so rebounding extended ops do not
+ * try to clean it over and over.
+ */
+int32_t
+check_and_set_cleanruv_task_count(ReplicaId rid)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (pre_cleaned_rids[i] != 0) {
- count++;
- }
+ PR_Lock(task_count_lock);
+ if (clean_task_count >= CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ clean_task_count++;
+ preset_cleaned_rid(rid);
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
-static int
-get_abort_cleanruv_task_count(void)
+int32_t
+check_and_set_abort_cleanruv_task_count(void)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (aborted_rids[i] != 0) {
- count++;
+ PR_Lock(task_count_lock);
+ if (abort_task_count > CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ abort_task_count++;
}
- }
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
/*
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 68e2544..0c2abb6 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1393,6 +1393,12 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
rc = LDAP_OPERATIONS_ERROR;
goto out;
}
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active abort CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Prepare the abort data
*/
@@ -1499,6 +1505,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if (force == NULL) {
force = "no";
}
+
maxcsn = csn_new();
csn_init_by_string(maxcsn, csnstr);
/*
@@ -1535,13 +1542,21 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
goto free_and_return;
}
+ if (check_and_set_cleanruv_task_count((ReplicaId)rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto free_and_return;
+ }
+
if (replica_get_type(r) != REPLICA_TYPE_READONLY) {
/*
* Launch the cleanruv monitoring thread. Once all the replicas are cleaned it will release the rid
*
* This will also release mtnode_ext->replica
*/
- slapi_log_err(SLAPI_LOG_INFO, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Launching cleanAllRUV thread...\n");
+
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n");
data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data));
if (data == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate "
@@ -1635,7 +1650,7 @@ free_and_return:
ber_printf(resp_bere, "{s}", CLEANRUV_ACCEPTED);
ber_flatten(resp_bere, &resp_bval);
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
- slapi_send_ldap_result(pb, LDAP_SUCCESS, NULL, NULL, 0, NULL);
+ slapi_send_ldap_result(pb, rc, NULL, NULL, 0, NULL);
/* resp_bere */
if (NULL != resp_bere) {
ber_free(resp_bere, 1);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.3.10 updated: Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.3.10
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.3.10 by this push:
new a6f9e10 Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
a6f9e10 is described below
commit a6f9e109d53b1566218d82b3997c29b53af26126
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Wed Aug 7 20:36:53 2019 -0400
Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
Bug Description:
There is a hard limit of 64 concurrent cleanAllRUV tasks, but this limit is
only enforced when creating "new" tasks. It was not enforced when a task was
received via an extended operation. There were also race conditions in the
existing logic that allowed the array of cleaned rids to get corrupted. This
allowed for a very large number of task threads to be created.
Fix Description:
Maintain a new counter to keep track of the number of clean and abort threads
to make sure it never over runs the rid array buffers.
relates: https://pagure.io/389-ds-base/issue/50538
Reviewed by: lkrispenz(Thanks!)
---
.../tests/suites/replication/cleanallruv_test.py | 47 +++-
ldap/servers/plugins/replication/repl5.h | 7 +-
.../plugins/replication/repl5_replica_config.c | 247 +++++++++++----------
ldap/servers/plugins/replication/repl_extop.c | 19 +-
4 files changed, 202 insertions(+), 118 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 620a53e..43801dd 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,7 +7,6 @@
# --- END COPYRIGHT BLOCK ---
#
import threading
-
import pytest
from lib389.tasks import *
from lib389.utils import *
@@ -859,6 +858,50 @@ def test_multiple_tasks_with_force(topology_m4):
restore_master4(topology_m4)
+def test_max_tasks(topology_m4):
+ """Test we can not create more than 64 cleaning tasks
+
+ :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f
+ :setup: Replication setup with four masters
+ :steps:
+ 1. Stop masters 3 & 4
+ 2. Create over 64 tasks between m1 and m2
+ 3. Check logs to see if (>65) tasks were rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ # Stop masters 3 & 4
+ m1 = topology_m4.ms["master1"]
+ m2 = topology_m4.ms["master2"]
+ m3 = topology_m4.ms["master3"]
+ m4 = topology_m4.ms["master4"]
+ m3.stop()
+ m4.stop()
+
+ # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit
+ for i in range(1, 64):
+ cruv_task = CleanAllRUVTask(m1)
+ cruv_task.create(properties={
+ 'replica-id': str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'no', # This forces these tasks to stick around
+ })
+ cruv_task = CleanAllRUVTask(m2)
+ cruv_task.create(properties={
+ 'replica-id': "10" + str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'yes', # This allows the tasks to propagate
+ })
+
+ # Check the errors log for our error message in master 1
+ assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 3c7f06f..b06c6fb 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -80,6 +80,8 @@
#define CLEANRUV_FINISHED "finished"
#define CLEANRUV_CLEANING "cleaning"
#define CLEANRUV_NO_MAXCSN "no maxcsn"
+#define CLEANALLRUV_ID "CleanAllRUV Task"
+#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
/* DS 5.0 replication protocol error codes */
#define NSDS50_REPL_REPLICA_READY 0x00 /* Replica ready, go ahead */
@@ -784,6 +786,7 @@ void multimaster_mtnode_construct_replicas(void);
void multimaster_be_state_change(void *handle, char *be_name, int old_be_state, int new_be_state);
#define CLEANRIDSIZ 64 /* maximum number for concurrent CLEANALLRUV tasks */
+#define CLEANRID_BUFSIZ 128
typedef struct _cleanruv_data
{
@@ -815,6 +818,8 @@ int get_replica_type(Replica *r);
int replica_execute_cleanruv_task_ext(Object *r, ReplicaId rid);
void add_cleaned_rid(cleanruv_data *data, char *maxcsn);
int is_cleaned_rid(ReplicaId rid);
+int32_t check_and_set_cleanruv_task_count(ReplicaId rid);
+int32_t check_and_set_abort_cleanruv_task_count(void);
int replica_cleanall_ruv_abort(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg);
void replica_cleanallruv_thread_ext(void *arg);
void stop_ruv_cleaning(void);
@@ -833,8 +838,6 @@ void set_cleaned_rid(ReplicaId rid);
void cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fmt, ...);
char *replica_cleanallruv_get_local_maxcsn(ReplicaId rid, char *base_dn);
-
-
/* replutil.c */
LDAPControl *create_managedsait_control(void);
LDAPControl *create_backend_control(Slapi_DN *sdn);
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 62bfcf6..80a0797 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -30,17 +30,18 @@
#define CLEANALLRUV "CLEANALLRUV"
#define CLEANALLRUVLEN 11
#define REPLICA_RDN "cn=replica"
-#define CLEANALLRUV_ID "CleanAllRUV Task"
-#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
int slapi_log_urp = SLAPI_LOG_REPL;
-static ReplicaId cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId pre_cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId aborted_rids[CLEANRIDSIZ + 1] = {0};
-static Slapi_RWLock *rid_lock = NULL;
-static Slapi_RWLock *abort_rid_lock = NULL;
+static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0};
+static PRLock *rid_lock = NULL;
+static PRLock *abort_rid_lock = NULL;
static PRLock *notify_lock = NULL;
static PRCondVar *notify_cvar = NULL;
+static PRLock *task_count_lock = NULL;
+static int32_t clean_task_count = 0;
+static int32_t abort_task_count = 0;
/* Forward Declartions */
static int replica_config_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
@@ -67,8 +68,6 @@ static int replica_cleanallruv_send_abort_extop(Repl_Agmt *ra, Slapi_Task *task,
static int replica_cleanallruv_check_maxcsn(Repl_Agmt *agmt, char *basedn, char *rid_text, char *maxcsn, Slapi_Task *task);
static int replica_cleanallruv_replica_alive(Repl_Agmt *agmt);
static int replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *ra, char *rid_text, Slapi_Task *task, char *force);
-static int get_cleanruv_task_count(void);
-static int get_abort_cleanruv_task_count(void);
static int replica_cleanup_task(Object *r, const char *task_name, char *returntext, int apply_mods);
static int replica_task_done(Replica *replica);
static void delete_cleaned_rid_config(cleanruv_data *data);
@@ -114,20 +113,27 @@ replica_config_init()
PR_GetError());
return -1;
}
- rid_lock = slapi_new_rwlock();
+ rid_lock = PR_NewLock();
if (rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
- abort_rid_lock = slapi_new_rwlock();
+ abort_rid_lock = PR_NewLock();
if (abort_rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create abort_rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
+ task_count_lock = PR_NewLock();
+ if (task_count_lock == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
+ "Failed to create task_count_lock; NSPR error - %d\n",
+ PR_GetError());
+ return -1;
+ }
if ((notify_lock = PR_NewLock()) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create notify lock; NSPR error - %d\n",
@@ -1483,12 +1489,6 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Initiating CleanAllRUV Task...");
- if (get_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
- "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
- return LDAP_UNWILLING_TO_PERFORM;
- }
/*
* Grab the replica
*/
@@ -1540,6 +1540,13 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
goto fail;
}
+ if (check_and_set_cleanruv_task_count(rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto fail;
+ }
+
/*
* Launch the cleanallruv thread. Once all the replicas are cleaned it will release the rid
*/
@@ -1547,6 +1554,9 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
if (data == NULL) {
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Failed to allocate cleanruv_data. Aborting task.");
rc = -1;
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
goto fail;
}
data->repl_obj = r;
@@ -1629,13 +1639,13 @@ replica_cleanallruv_thread(void *arg)
int aborted = 0;
int rc = 0;
- if (!data || slapi_is_shutting_down()) {
- return; /* no data */
- }
-
/* Increase active thread count to prevent a race condition at server shutdown */
g_incr_active_threadcnt();
+ if (!data || slapi_is_shutting_down()) {
+ goto done;
+ }
+
if (data->task) {
slapi_task_inc_refcount(data->task);
slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name,
@@ -1682,16 +1692,13 @@ replica_cleanallruv_thread(void *arg)
slapi_task_begin(data->task, 1);
}
/*
- * Presetting the rid prevents duplicate thread creation, but allows the db and changelog to still
- * process updates from the rid.
- * set_cleaned_rid() blocks updates, so we don't want to do that... yet unless we are in force mode.
- * If we are forcing a clean independent of state of other servers for this RID we can set_cleaned_rid()
+ * We have already preset this rid, but if we are forcing a clean independent of state
+ * of other servers for this RID we can set_cleaned_rid()
*/
if (data->force) {
set_cleaned_rid(data->rid);
- } else {
- preset_cleaned_rid(data->rid);
}
+
rid_text = slapi_ch_smprintf("%d", data->rid);
csn_as_string(data->maxcsn, PR_FALSE, csnstr);
/*
@@ -1861,6 +1868,9 @@ done:
/*
* If the replicas are cleaned, release the rid
*/
+ if (slapi_is_shutting_down()) {
+ stop_ruv_cleaning();
+ }
if (!aborted && !slapi_is_shutting_down()) {
/*
* Success - the rid has been cleaned!
@@ -1879,10 +1889,9 @@ done:
} else {
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Propagated task does not delete Keep alive entry (%d).", data->rid);
}
-
clean_agmts(data);
remove_cleaned_rid(data->rid);
- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d).", data->rid);
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d)", data->rid);
} else {
/*
* Shutdown or abort
@@ -1915,6 +1924,10 @@ done:
slapi_ch_free_string(&data->force);
slapi_ch_free_string(&rid_text);
slapi_ch_free((void **)&data);
+ /* decrement task count */
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -2414,16 +2427,14 @@ replica_send_cleanruv_task(Repl_Agmt *agmt, cleanruv_data *clean_data)
int
is_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2431,16 +2442,14 @@ is_cleaned_rid(ReplicaId rid)
int
is_pre_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == pre_cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2453,14 +2462,14 @@ is_task_aborted(ReplicaId rid)
if (rid == 0) {
return 0;
}
- slapi_rwlock_rdlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != 0; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && aborted_rids[i] != 0; i++) {
if (rid == aborted_rids[i]) {
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 0;
}
@@ -2469,15 +2478,14 @@ preset_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && pre_cleaned_rids[i] != rid; i++) {
if (pre_cleaned_rids[i] == 0) {
pre_cleaned_rids[i] = rid;
- pre_cleaned_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2490,14 +2498,13 @@ set_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && cleaned_rids[i] != rid; i++) {
if (cleaned_rids[i] == 0) {
cleaned_rids[i] = rid;
- cleaned_rids[i + 1] = 0;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2569,15 +2576,14 @@ add_aborted_rid(ReplicaId rid, Replica *r, char *repl_root)
int rc;
int i;
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ; i++) {
if (aborted_rids[i] == 0) {
aborted_rids[i] = rid;
- aborted_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
/*
* Write the rid to the config entry
*/
@@ -2620,21 +2626,24 @@ delete_aborted_rid(Replica *r, ReplicaId rid, char *repl_root, int skip)
char *data;
char *dn;
int rc;
- int i;
if (r == NULL)
return;
if (skip) {
/* skip the deleting of the config, and just remove the in memory rid */
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- aborted_rids[i] = aborted_rids[i + 1];
- }
- slapi_rwlock_unlock(abort_rid_lock);
+ ReplicaId new_abort_rids[CLEANRID_BUFSIZ] = {0};
+ int32_t idx = 0;
+
+ PR_Lock(abort_rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (aborted_rids[i] != rid) {
+ new_abort_rids[idx] = aborted_rids[i];
+ idx++;
+ }
+ }
+ memcpy(aborted_rids, new_abort_rids, sizeof(new_abort_rids));
+ PR_Unlock(abort_rid_lock);
} else {
/* only remove the config, leave the in-memory rid */
dn = replica_get_dn(r);
@@ -2792,27 +2801,31 @@ bail:
void
remove_cleaned_rid(ReplicaId rid)
{
- int i;
- /*
- * Remove this rid, and optimize the array
- */
- slapi_rwlock_wrlock(rid_lock);
+ ReplicaId new_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ ReplicaId new_pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ size_t idx = 0;
+
+ PR_Lock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- cleaned_rids[i] = cleaned_rids[i + 1];
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (cleaned_rids[i] != rid) {
+ new_cleaned_rids[idx] = cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(cleaned_rids, new_cleaned_rids, sizeof(new_cleaned_rids));
+
/* now do the preset cleaned rids */
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- pre_cleaned_rids[i] = pre_cleaned_rids[i + 1];
+ idx = 0;
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (pre_cleaned_rids[i] != rid) {
+ new_pre_cleaned_rids[idx] = pre_cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(pre_cleaned_rids, new_pre_cleaned_rids, sizeof(new_pre_cleaned_rids));
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2840,16 +2853,6 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
char *ridstr = NULL;
int rc = SLAPI_DSE_CALLBACK_OK;
- if (get_abort_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
- "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
- CLEANRIDSIZ);
- cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
- *returncode = LDAP_OPERATIONS_ERROR;
- return SLAPI_DSE_CALLBACK_ERROR;
- }
-
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
@@ -2934,6 +2937,16 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
*/
certify_all = "no";
}
+
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ /* we are already running the maximum number of tasks */
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
+ CLEANRIDSIZ);
+ cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Create payload
*/
@@ -3142,6 +3155,9 @@ done:
slapi_ch_free_string(&data->certify);
slapi_sdn_free(&data->sdn);
slapi_ch_free((void **)&data);
+ PR_Lock(task_count_lock);
+ abort_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -3493,36 +3509,43 @@ replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *agmt, char *rid_text,
return rc;
}
-static int
-get_cleanruv_task_count(void)
+/*
+ * Before starting a cleanAllRUV task make sure there are not
+ * too many task threads already running. If everything is okay
+ * also pre-set the RID now so rebounding extended ops do not
+ * try to clean it over and over.
+ */
+int32_t
+check_and_set_cleanruv_task_count(ReplicaId rid)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (pre_cleaned_rids[i] != 0) {
- count++;
- }
+ PR_Lock(task_count_lock);
+ if (clean_task_count >= CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ clean_task_count++;
+ preset_cleaned_rid(rid);
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
-static int
-get_abort_cleanruv_task_count(void)
+int32_t
+check_and_set_abort_cleanruv_task_count(void)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (aborted_rids[i] != 0) {
- count++;
+ PR_Lock(task_count_lock);
+ if (abort_task_count > CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ abort_task_count++;
}
- }
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
/*
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index 68e2544..0c2abb6 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1393,6 +1393,12 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
rc = LDAP_OPERATIONS_ERROR;
goto out;
}
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active abort CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Prepare the abort data
*/
@@ -1499,6 +1505,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if (force == NULL) {
force = "no";
}
+
maxcsn = csn_new();
csn_init_by_string(maxcsn, csnstr);
/*
@@ -1535,13 +1542,21 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
goto free_and_return;
}
+ if (check_and_set_cleanruv_task_count((ReplicaId)rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto free_and_return;
+ }
+
if (replica_get_type(r) != REPLICA_TYPE_READONLY) {
/*
* Launch the cleanruv monitoring thread. Once all the replicas are cleaned it will release the rid
*
* This will also release mtnode_ext->replica
*/
- slapi_log_err(SLAPI_LOG_INFO, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Launching cleanAllRUV thread...\n");
+
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n");
data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data));
if (data == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate "
@@ -1635,7 +1650,7 @@ free_and_return:
ber_printf(resp_bere, "{s}", CLEANRUV_ACCEPTED);
ber_flatten(resp_bere, &resp_bval);
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
- slapi_send_ldap_result(pb, LDAP_SUCCESS, NULL, NULL, 0, NULL);
+ slapi_send_ldap_result(pb, rc, NULL, NULL, 0, NULL);
/* resp_bere */
if (NULL != resp_bere) {
ber_free(resp_bere, 1);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.4.0 updated: Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.4.0
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.4.0 by this push:
new ab24aa4 Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
ab24aa4 is described below
commit ab24aa4cb38dedbf12f7d1fcd9478722301cace6
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Wed Aug 7 20:36:53 2019 -0400
Issue 50538 - cleanAllRUV task limit is not enforced for replicated tasks
Bug Description:
There is a hard limit of 64 concurrent cleanAllRUV tasks, but this limit is
only enforced when creating "new" tasks. It was not enforced when a task was
received via an extended operation. There were also race conditions in the
existing logic that allowed the array of cleaned rids to get corrupted. This
allowed for a very large number of task threads to be created.
Fix Description:
Maintain a new counter to keep track of the number of clean and abort threads
to make sure it never overruns the rid array buffers.
relates: https://pagure.io/389-ds-base/issue/50538
Reviewed by: lkrispenz(Thanks!)
---
.../tests/suites/replication/cleanallruv_test.py | 47 +++-
ldap/servers/plugins/replication/repl5.h | 7 +-
.../plugins/replication/repl5_replica_config.c | 247 +++++++++++----------
ldap/servers/plugins/replication/repl_extop.c | 19 +-
4 files changed, 202 insertions(+), 118 deletions(-)
diff --git a/dirsrvtests/tests/suites/replication/cleanallruv_test.py b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
index 2b244da..d55c5bd 100644
--- a/dirsrvtests/tests/suites/replication/cleanallruv_test.py
+++ b/dirsrvtests/tests/suites/replication/cleanallruv_test.py
@@ -1,5 +1,5 @@
# --- BEGIN COPYRIGHT BLOCK ---
-# Copyright (C) 2016 Red Hat, Inc.
+# Copyright (C) 2019 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
@@ -7,7 +7,6 @@
# --- END COPYRIGHT BLOCK ---
#
import threading
-
import pytest
import random
from lib389 import DirSrv
@@ -719,6 +718,50 @@ def test_multiple_tasks_with_force(topology_m4, m4rid):
log.fatal('test_abort: CleanAllRUV task was not aborted')
assert False
+
+def test_max_tasks(topology_m4):
+ """Test we can not create more than 64 cleaning tasks
+
+ :id: c34d0b40-3c3e-4f53-8656-5e4c2a310a1f
+ :setup: Replication setup with four masters
+ :steps:
+ 1. Stop masters 3 & 4
+ 2. Create over 64 tasks between m1 and m2
+ 3. Check logs to see if (>65) tasks were rejected
+
+ :expectedresults:
+ 1. Success
+ 2. Success
+ 3. Success
+ """
+
+ # Stop masters 3 & 4
+ m1 = topology_m4.ms["master1"]
+ m2 = topology_m4.ms["master2"]
+ m3 = topology_m4.ms["master3"]
+ m4 = topology_m4.ms["master4"]
+ m3.stop()
+ m4.stop()
+
+ # Add over 64 tasks between master1 & 2 to try to exceed the 64 task limit
+ for i in range(1, 64):
+ cruv_task = CleanAllRUVTask(m1)
+ cruv_task.create(properties={
+ 'replica-id': str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'no', # This forces these tasks to stick around
+ })
+ cruv_task = CleanAllRUVTask(m2)
+ cruv_task.create(properties={
+ 'replica-id': "10" + str(i),
+ 'replica-base-dn': DEFAULT_SUFFIX,
+ 'replica-force-cleaning': 'yes', # This allows the tasks to propagate
+ })
+
+ # Check the errors log for our error message in master 1
+ assert m1.searchErrorsLog('Exceeded maximum number of active CLEANALLRUV tasks')
+
+
if __name__ == '__main__':
# Run isolated
# -s for DEBUG mode
diff --git a/ldap/servers/plugins/replication/repl5.h b/ldap/servers/plugins/replication/repl5.h
index 138578d..39a34a8 100644
--- a/ldap/servers/plugins/replication/repl5.h
+++ b/ldap/servers/plugins/replication/repl5.h
@@ -80,6 +80,8 @@
#define CLEANRUV_FINISHED "finished"
#define CLEANRUV_CLEANING "cleaning"
#define CLEANRUV_NO_MAXCSN "no maxcsn"
+#define CLEANALLRUV_ID "CleanAllRUV Task"
+#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
/* DS 5.0 replication protocol error codes */
#define NSDS50_REPL_REPLICA_READY 0x00 /* Replica ready, go ahead */
@@ -784,6 +786,7 @@ void multimaster_mtnode_construct_replicas(void);
void multimaster_be_state_change(void *handle, char *be_name, int old_be_state, int new_be_state);
#define CLEANRIDSIZ 64 /* maximum number for concurrent CLEANALLRUV tasks */
+#define CLEANRID_BUFSIZ 128
typedef struct _cleanruv_data
{
@@ -815,6 +818,8 @@ int get_replica_type(Replica *r);
int replica_execute_cleanruv_task_ext(Object *r, ReplicaId rid);
void add_cleaned_rid(cleanruv_data *data);
int is_cleaned_rid(ReplicaId rid);
+int32_t check_and_set_cleanruv_task_count(ReplicaId rid);
+int32_t check_and_set_abort_cleanruv_task_count(void);
int replica_cleanall_ruv_abort(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *eAfter, int *returncode, char *returntext, void *arg);
void replica_cleanallruv_thread_ext(void *arg);
void stop_ruv_cleaning(void);
@@ -833,8 +838,6 @@ void set_cleaned_rid(ReplicaId rid);
void cleanruv_log(Slapi_Task *task, int rid, char *task_type, int sev_level, char *fmt, ...);
char *replica_cleanallruv_get_local_maxcsn(ReplicaId rid, char *base_dn);
-
-
/* replutil.c */
LDAPControl *create_managedsait_control(void);
LDAPControl *create_backend_control(Slapi_DN *sdn);
diff --git a/ldap/servers/plugins/replication/repl5_replica_config.c b/ldap/servers/plugins/replication/repl5_replica_config.c
index 7649aa1..a5d85e7 100644
--- a/ldap/servers/plugins/replication/repl5_replica_config.c
+++ b/ldap/servers/plugins/replication/repl5_replica_config.c
@@ -30,17 +30,18 @@
#define CLEANALLRUV "CLEANALLRUV"
#define CLEANALLRUVLEN 11
#define REPLICA_RDN "cn=replica"
-#define CLEANALLRUV_ID "CleanAllRUV Task"
-#define ABORT_CLEANALLRUV_ID "Abort CleanAllRUV Task"
int slapi_log_urp = SLAPI_LOG_REPL;
-static ReplicaId cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId pre_cleaned_rids[CLEANRIDSIZ + 1] = {0};
-static ReplicaId aborted_rids[CLEANRIDSIZ + 1] = {0};
-static Slapi_RWLock *rid_lock = NULL;
-static Slapi_RWLock *abort_rid_lock = NULL;
+static ReplicaId cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+static ReplicaId aborted_rids[CLEANRID_BUFSIZ] = {0};
+static PRLock *rid_lock = NULL;
+static PRLock *abort_rid_lock = NULL;
static PRLock *notify_lock = NULL;
static PRCondVar *notify_cvar = NULL;
+static PRLock *task_count_lock = NULL;
+static int32_t clean_task_count = 0;
+static int32_t abort_task_count = 0;
/* Forward Declartions */
static int replica_config_add(Slapi_PBlock *pb, Slapi_Entry *e, Slapi_Entry *entryAfter, int *returncode, char *returntext, void *arg);
@@ -67,8 +68,6 @@ static int replica_cleanallruv_send_abort_extop(Repl_Agmt *ra, Slapi_Task *task,
static int replica_cleanallruv_check_maxcsn(Repl_Agmt *agmt, char *basedn, char *rid_text, char *maxcsn, Slapi_Task *task);
static int replica_cleanallruv_replica_alive(Repl_Agmt *agmt);
static int replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *ra, char *rid_text, Slapi_Task *task, char *force);
-static int get_cleanruv_task_count(void);
-static int get_abort_cleanruv_task_count(void);
static int replica_cleanup_task(Object *r, const char *task_name, char *returntext, int apply_mods);
static int replica_task_done(Replica *replica);
static void delete_cleaned_rid_config(cleanruv_data *data);
@@ -114,20 +113,27 @@ replica_config_init()
PR_GetError());
return -1;
}
- rid_lock = slapi_new_rwlock();
+ rid_lock = PR_NewLock();
if (rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
- abort_rid_lock = slapi_new_rwlock();
+ abort_rid_lock = PR_NewLock();
if (abort_rid_lock == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create abort_rid_lock; NSPR error - %d\n",
PR_GetError());
return -1;
}
+ task_count_lock = PR_NewLock();
+ if (task_count_lock == NULL) {
+ slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
+ "Failed to create task_count_lock; NSPR error - %d\n",
+ PR_GetError());
+ return -1;
+ }
if ((notify_lock = PR_NewLock()) == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "replica_config_init - "
"Failed to create notify lock; NSPR error - %d\n",
@@ -1534,12 +1540,6 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Initiating CleanAllRUV Task...");
- if (get_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
- "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
- return LDAP_UNWILLING_TO_PERFORM;
- }
/*
* Grab the replica
*/
@@ -1591,6 +1591,13 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
goto fail;
}
+ if (check_and_set_cleanruv_task_count(rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto fail;
+ }
+
/*
* Launch the cleanallruv thread. Once all the replicas are cleaned it will release the rid
*/
@@ -1598,6 +1605,9 @@ replica_execute_cleanall_ruv_task(Object *r, ReplicaId rid, Slapi_Task *task, co
if (data == NULL) {
cleanruv_log(pre_task, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Failed to allocate cleanruv_data. Aborting task.");
rc = -1;
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
goto fail;
}
data->repl_obj = r;
@@ -1680,13 +1690,13 @@ replica_cleanallruv_thread(void *arg)
int aborted = 0;
int rc = 0;
- if (!data || slapi_is_shutting_down()) {
- return; /* no data */
- }
-
/* Increase active thread count to prevent a race condition at server shutdown */
g_incr_active_threadcnt();
+ if (!data || slapi_is_shutting_down()) {
+ goto done;
+ }
+
if (data->task) {
slapi_task_inc_refcount(data->task);
slapi_log_err(SLAPI_LOG_PLUGIN, repl_plugin_name,
@@ -1733,16 +1743,13 @@ replica_cleanallruv_thread(void *arg)
slapi_task_begin(data->task, 1);
}
/*
- * Presetting the rid prevents duplicate thread creation, but allows the db and changelog to still
- * process updates from the rid.
- * set_cleaned_rid() blocks updates, so we don't want to do that... yet unless we are in force mode.
- * If we are forcing a clean independent of state of other servers for this RID we can set_cleaned_rid()
+ * We have already preset this rid, but if we are forcing a clean independent of state
+ * of other servers for this RID we can set_cleaned_rid()
*/
if (data->force) {
set_cleaned_rid(data->rid);
- } else {
- preset_cleaned_rid(data->rid);
}
+
rid_text = slapi_ch_smprintf("%d", data->rid);
csn_as_string(data->maxcsn, PR_FALSE, csnstr);
/*
@@ -1912,6 +1919,9 @@ done:
/*
* If the replicas are cleaned, release the rid
*/
+ if (slapi_is_shutting_down()) {
+ stop_ruv_cleaning();
+ }
if (!aborted && !slapi_is_shutting_down()) {
/*
* Success - the rid has been cleaned!
@@ -1930,10 +1940,9 @@ done:
} else {
cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Propagated task does not delete Keep alive entry (%d).", data->rid);
}
-
clean_agmts(data);
remove_cleaned_rid(data->rid);
- cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d).", data->rid);
+ cleanruv_log(data->task, data->rid, CLEANALLRUV_ID, SLAPI_LOG_INFO, "Successfully cleaned rid(%d)", data->rid);
} else {
/*
* Shutdown or abort
@@ -1966,6 +1975,10 @@ done:
slapi_ch_free_string(&data->force);
slapi_ch_free_string(&rid_text);
slapi_ch_free((void **)&data);
+ /* decrement task count */
+ PR_Lock(task_count_lock);
+ clean_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -2463,16 +2476,14 @@ replica_send_cleanruv_task(Repl_Agmt *agmt, cleanruv_data *clean_data)
int
is_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2480,16 +2491,14 @@ is_cleaned_rid(ReplicaId rid)
int
is_pre_cleaned_rid(ReplicaId rid)
{
- int i;
-
- slapi_rwlock_rdlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != 0; i++) {
+ PR_Lock(rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
if (rid == pre_cleaned_rids[i]) {
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
return 0;
}
@@ -2502,14 +2511,14 @@ is_task_aborted(ReplicaId rid)
if (rid == 0) {
return 0;
}
- slapi_rwlock_rdlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != 0; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && aborted_rids[i] != 0; i++) {
if (rid == aborted_rids[i]) {
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 1;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
return 0;
}
@@ -2518,15 +2527,14 @@ preset_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && pre_cleaned_rids[i] != rid; i++) {
if (pre_cleaned_rids[i] == 0) {
pre_cleaned_rids[i] = rid;
- pre_cleaned_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2539,14 +2547,13 @@ set_cleaned_rid(ReplicaId rid)
{
int i;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ && cleaned_rids[i] != rid; i++) {
if (cleaned_rids[i] == 0) {
cleaned_rids[i] = rid;
- cleaned_rids[i + 1] = 0;
}
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2622,15 +2629,14 @@ add_aborted_rid(ReplicaId rid, Replica *r, char *repl_root, char *certify_all, P
int rc;
int i;
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
+ PR_Lock(abort_rid_lock);
+ for (i = 0; i < CLEANRID_BUFSIZ; i++) {
if (aborted_rids[i] == 0) {
aborted_rids[i] = rid;
- aborted_rids[i + 1] = 0;
break;
}
}
- slapi_rwlock_unlock(abort_rid_lock);
+ PR_Unlock(abort_rid_lock);
/*
* Write the rid to the config entry
*/
@@ -2673,21 +2679,24 @@ delete_aborted_rid(Replica *r, ReplicaId rid, char *repl_root, char *certify_all
char *data;
char *dn;
int rc;
- int i;
if (r == NULL)
return;
if (skip) {
/* skip the deleting of the config, and just remove the in memory rid */
- slapi_rwlock_wrlock(abort_rid_lock);
- for (i = 0; i < CLEANRIDSIZ && aborted_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- aborted_rids[i] = aborted_rids[i + 1];
- }
- slapi_rwlock_unlock(abort_rid_lock);
+ ReplicaId new_abort_rids[CLEANRID_BUFSIZ] = {0};
+ int32_t idx = 0;
+
+ PR_Lock(abort_rid_lock);
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (aborted_rids[i] != rid) {
+ new_abort_rids[idx] = aborted_rids[i];
+ idx++;
+ }
+ }
+ memcpy(aborted_rids, new_abort_rids, sizeof(new_abort_rids));
+ PR_Unlock(abort_rid_lock);
} else {
/* only remove the config, leave the in-memory rid */
dn = replica_get_dn(r);
@@ -2833,27 +2842,31 @@ bail:
void
remove_cleaned_rid(ReplicaId rid)
{
- int i;
- /*
- * Remove this rid, and optimize the array
- */
- slapi_rwlock_wrlock(rid_lock);
+ ReplicaId new_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ ReplicaId new_pre_cleaned_rids[CLEANRID_BUFSIZ] = {0};
+ size_t idx = 0;
+
+ PR_Lock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ && cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- cleaned_rids[i] = cleaned_rids[i + 1];
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (cleaned_rids[i] != rid) {
+ new_cleaned_rids[idx] = cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(cleaned_rids, new_cleaned_rids, sizeof(new_cleaned_rids));
+
/* now do the preset cleaned rids */
- for (i = 0; i < CLEANRIDSIZ && pre_cleaned_rids[i] != rid; i++)
- ; /* found rid, stop */
- for (; i < CLEANRIDSIZ; i++) {
- /* rewrite entire array */
- pre_cleaned_rids[i] = pre_cleaned_rids[i + 1];
+ idx = 0;
+ for (size_t i = 0; i < CLEANRID_BUFSIZ; i++) {
+ if (pre_cleaned_rids[i] != rid) {
+ new_pre_cleaned_rids[idx] = pre_cleaned_rids[i];
+ idx++;
+ }
}
+ memcpy(pre_cleaned_rids, new_pre_cleaned_rids, sizeof(new_pre_cleaned_rids));
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(rid_lock);
}
/*
@@ -2883,16 +2896,6 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
char *ridstr = NULL;
int rc = SLAPI_DSE_CALLBACK_OK;
- if (get_abort_cleanruv_task_count() >= CLEANRIDSIZ) {
- /* we are already running the maximum number of tasks */
- PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
- "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
- CLEANRIDSIZ);
- cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
- *returncode = LDAP_OPERATIONS_ERROR;
- return SLAPI_DSE_CALLBACK_ERROR;
- }
-
/* allocate new task now */
task = slapi_new_task(slapi_entry_get_ndn(e));
@@ -2977,6 +2980,16 @@ replica_cleanall_ruv_abort(Slapi_PBlock *pb __attribute__((unused)),
*/
certify_all = "no";
}
+
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ /* we are already running the maximum number of tasks */
+ PR_snprintf(returntext, SLAPI_DSE_RETURNTEXT_SIZE,
+ "Exceeded maximum number of active ABORT CLEANALLRUV tasks(%d)",
+ CLEANRIDSIZ);
+ cleanruv_log(task, -1, ABORT_CLEANALLRUV_ID, SLAPI_LOG_ERR, "%s", returntext);
+ *returncode = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Create payload
*/
@@ -3191,6 +3204,9 @@ done:
slapi_ch_free_string(&data->certify);
slapi_sdn_free(&data->sdn);
slapi_ch_free((void **)&data);
+ PR_Lock(task_count_lock);
+ abort_task_count--;
+ PR_Unlock(task_count_lock);
g_decr_active_threadcnt();
}
@@ -3542,36 +3558,43 @@ replica_cleanallruv_check_ruv(char *repl_root, Repl_Agmt *agmt, char *rid_text,
return rc;
}
-static int
-get_cleanruv_task_count(void)
+/*
+ * Before starting a cleanAllRUV task make sure there are not
+ * too many task threads already running. If everything is okay
+ * also pre-set the RID now so rebounding extended ops do not
+ * try to clean it over and over.
+ */
+int32_t
+check_and_set_cleanruv_task_count(ReplicaId rid)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (pre_cleaned_rids[i] != 0) {
- count++;
- }
+ PR_Lock(task_count_lock);
+ if (clean_task_count >= CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ clean_task_count++;
+ preset_cleaned_rid(rid);
}
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
-static int
-get_abort_cleanruv_task_count(void)
+int32_t
+check_and_set_abort_cleanruv_task_count(void)
{
- int i, count = 0;
+ int32_t rc = 0;
- slapi_rwlock_wrlock(rid_lock);
- for (i = 0; i < CLEANRIDSIZ; i++) {
- if (aborted_rids[i] != 0) {
- count++;
+ PR_Lock(task_count_lock);
+ if (abort_task_count > CLEANRIDSIZ) {
+ rc = -1;
+ } else {
+ abort_task_count++;
}
- }
- slapi_rwlock_unlock(rid_lock);
+ PR_Unlock(task_count_lock);
- return count;
+ return rc;
}
/*
diff --git a/ldap/servers/plugins/replication/repl_extop.c b/ldap/servers/plugins/replication/repl_extop.c
index b49cb8c..5bed849 100644
--- a/ldap/servers/plugins/replication/repl_extop.c
+++ b/ldap/servers/plugins/replication/repl_extop.c
@@ -1393,6 +1393,12 @@ multimaster_extop_abort_cleanruv(Slapi_PBlock *pb)
rc = LDAP_OPERATIONS_ERROR;
goto out;
}
+ if (check_and_set_abort_cleanruv_task_count() != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active abort CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto out;
+ }
/*
* Prepare the abort data
*/
@@ -1499,6 +1505,7 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
if (force == NULL) {
force = "no";
}
+
maxcsn = csn_new();
csn_init_by_string(maxcsn, csnstr);
/*
@@ -1535,13 +1542,21 @@ multimaster_extop_cleanruv(Slapi_PBlock *pb)
goto free_and_return;
}
+ if (check_and_set_cleanruv_task_count((ReplicaId)rid) != LDAP_SUCCESS) {
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR,
+ "Exceeded maximum number of active CLEANALLRUV tasks(%d)", CLEANRIDSIZ);
+ rc = LDAP_UNWILLING_TO_PERFORM;
+ goto free_and_return;
+ }
+
if (replica_get_type(r) != REPLICA_TYPE_READONLY) {
/*
* Launch the cleanruv monitoring thread. Once all the replicas are cleaned it will release the rid
*
* This will also release mtnode_ext->replica
*/
- slapi_log_err(SLAPI_LOG_INFO, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Launching cleanAllRUV thread...\n");
+
+ cleanruv_log(NULL, rid, CLEANALLRUV_ID, SLAPI_LOG_ERR, "Launching cleanAllRUV thread...\n");
data = (cleanruv_data *)slapi_ch_calloc(1, sizeof(cleanruv_data));
if (data == NULL) {
slapi_log_err(SLAPI_LOG_ERR, repl_plugin_name, "multimaster_extop_cleanruv - CleanAllRUV Task - Failed to allocate "
@@ -1635,7 +1650,7 @@ free_and_return:
ber_printf(resp_bere, "{s}", CLEANRUV_ACCEPTED);
ber_flatten(resp_bere, &resp_bval);
slapi_pblock_set(pb, SLAPI_EXT_OP_RET_VALUE, resp_bval);
- slapi_send_ldap_result(pb, LDAP_SUCCESS, NULL, NULL, 0, NULL);
+ slapi_send_ldap_result(pb, rc, NULL, NULL, 0, NULL);
/* resp_bere */
if (NULL != resp_bere) {
ber_free(resp_bere, 1);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.3.8 updated: Issue 50536 - Audit log heading written to log after every update
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.3.8
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.3.8 by this push:
new f4133b7 Issue 50536 - Audit log heading written to log after every update
f4133b7 is described below
commit f4133b7da5d1892a6f961a346dfb74ffe8d90e85
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Wed Aug 7 16:57:17 2019 -0400
Issue 50536 - Audit log heading written to log after every update
Bug Description: Once the audit log is rotated the log "title" is incorrectly
written to the log after every single update. This happened
becuase when we udpated the state of the log it was applied
to a local variable, and not the log info structure itself.
Fix Description: After writing the "title", update the state of the log using
a pointer to the log info structure.
relates: https://pagure.io/389-ds-base/issue/50536
Reviewed by: lkrispenz(Thanks!)
---
ldap/servers/slapd/log.c | 14 +++++++-------
ldap/servers/slapd/proto-slap.h | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 7dd7154..d1ec37b 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2010,11 +2010,11 @@ slapd_log_audit(
int retval = LDAP_SUCCESS;
int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
- int state = 0;
+ int *state;
if (sourcelog == SLAPD_AUDIT_LOG) {
- state = loginfo.log_audit_state;
+ state = &loginfo.log_audit_state;
} else if (sourcelog == SLAPD_AUDITFAIL_LOG) {
- state = loginfo.log_auditfail_state;
+ state = &loginfo.log_auditfail_state;
} else {
/* How did we even get here! */
return 1;
@@ -2043,9 +2043,9 @@ int
slapd_log_audit_internal(
char *buffer,
int buf_len,
- int state)
+ int *state)
{
- if ((state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
+ if ((*state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
LOG_AUDIT_LOCK_WRITE();
if (log__needrotation(loginfo.log_audit_fdes,
SLAPD_AUDIT_LOG) == LOG_ROTATE) {
@@ -2059,9 +2059,9 @@ slapd_log_audit_internal(
loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs);
}
}
- if (state & LOGGING_NEED_TITLE) {
+ if (*state & LOGGING_NEED_TITLE) {
log_write_title(loginfo.log_audit_fdes);
- state &= ~LOGGING_NEED_TITLE;
+ *state &= ~LOGGING_NEED_TITLE;
}
LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0);
LOG_AUDIT_UNLOCK_WRITE();
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index 7a429b2..a261d23 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -777,7 +777,7 @@ int slapi_log_access(int level, char *fmt, ...)
;
#endif
int slapd_log_audit(char *buffer, int buf_len, int sourcelog);
-int slapd_log_audit_internal(char *buffer, int buf_len, int state);
+int slapd_log_audit_internal(char *buffer, int buf_len, int *state);
int slapd_log_auditfail(char *buffer, int buf_len);
int slapd_log_auditfail_internal(char *buffer, int buf_len);
void log_access_flush(void);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.3.9 updated: Issue 50536 - Audit log heading written to log after every update
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.3.9
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.3.9 by this push:
new 5eabd88 Issue 50536 - Audit log heading written to log after every update
5eabd88 is described below
commit 5eabd88de8f5ea6b26fb315ff89f717b18e03fca
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Wed Aug 7 16:57:17 2019 -0400
Issue 50536 - Audit log heading written to log after every update
Bug Description: Once the audit log is rotated the log "title" is incorrectly
written to the log after every single update. This happened
becuase when we udpated the state of the log it was applied
to a local variable, and not the log info structure itself.
Fix Description: After writing the "title", update the state of the log using
a pointer to the log info structure.
relates: https://pagure.io/389-ds-base/issue/50536
Reviewed by: lkrispenz(Thanks!)
---
ldap/servers/slapd/log.c | 14 +++++++-------
ldap/servers/slapd/proto-slap.h | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 2456abf..f308a48 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2073,11 +2073,11 @@ slapd_log_audit(
int retval = LDAP_SUCCESS;
int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
- int state = 0;
+ int *state;
if (sourcelog == SLAPD_AUDIT_LOG) {
- state = loginfo.log_audit_state;
+ state = &loginfo.log_audit_state;
} else if (sourcelog == SLAPD_AUDITFAIL_LOG) {
- state = loginfo.log_auditfail_state;
+ state = &loginfo.log_auditfail_state;
} else {
/* How did we even get here! */
return 1;
@@ -2106,9 +2106,9 @@ int
slapd_log_audit_internal(
char *buffer,
int buf_len,
- int state)
+ int *state)
{
- if ((state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
+ if ((*state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
LOG_AUDIT_LOCK_WRITE();
if (log__needrotation(loginfo.log_audit_fdes,
SLAPD_AUDIT_LOG) == LOG_ROTATE) {
@@ -2122,9 +2122,9 @@ slapd_log_audit_internal(
loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs);
}
}
- if (state & LOGGING_NEED_TITLE) {
+ if (*state & LOGGING_NEED_TITLE) {
log_write_title(loginfo.log_audit_fdes);
- state &= ~LOGGING_NEED_TITLE;
+ *state &= ~LOGGING_NEED_TITLE;
}
LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0);
LOG_AUDIT_UNLOCK_WRITE();
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index a0648ca..e37f702 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -777,7 +777,7 @@ int slapi_log_access(int level, char *fmt, ...)
;
#endif
int slapd_log_audit(char *buffer, int buf_len, int sourcelog);
-int slapd_log_audit_internal(char *buffer, int buf_len, int state);
+int slapd_log_audit_internal(char *buffer, int buf_len, int *state);
int slapd_log_auditfail(char *buffer, int buf_len);
int slapd_log_auditfail_internal(char *buffer, int buf_len);
void log_access_flush(void);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months
[389-ds-base] branch 389-ds-base-1.3.10 updated: Issue 50536 - Audit log heading written to log after every update
by pagure@pagure.io
This is an automated email from the git hooks/post-receive script.
mreynolds pushed a commit to branch 389-ds-base-1.3.10
in repository 389-ds-base.
The following commit(s) were added to refs/heads/389-ds-base-1.3.10 by this push:
new 51f3026 Issue 50536 - Audit log heading written to log after every update
51f3026 is described below
commit 51f3026fdb0e8e3050726def4781e3af97fe357f
Author: Mark Reynolds <mreynolds(a)redhat.com>
AuthorDate: Wed Aug 7 16:57:17 2019 -0400
Issue 50536 - Audit log heading written to log after every update
Bug Description: Once the audit log is rotated the log "title" is incorrectly
written to the log after every single update. This happened
becuase when we udpated the state of the log it was applied
to a local variable, and not the log info structure itself.
Fix Description: After writing the "title", update the state of the log using
a pointer to the log info structure.
relates: https://pagure.io/389-ds-base/issue/50536
Reviewed by: lkrispenz(Thanks!)
---
ldap/servers/slapd/log.c | 14 +++++++-------
ldap/servers/slapd/proto-slap.h | 2 +-
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/ldap/servers/slapd/log.c b/ldap/servers/slapd/log.c
index 2456abf..f308a48 100644
--- a/ldap/servers/slapd/log.c
+++ b/ldap/servers/slapd/log.c
@@ -2073,11 +2073,11 @@ slapd_log_audit(
int retval = LDAP_SUCCESS;
int lbackend = loginfo.log_backend; /* We copy this to make these next checks atomic */
- int state = 0;
+ int *state;
if (sourcelog == SLAPD_AUDIT_LOG) {
- state = loginfo.log_audit_state;
+ state = &loginfo.log_audit_state;
} else if (sourcelog == SLAPD_AUDITFAIL_LOG) {
- state = loginfo.log_auditfail_state;
+ state = &loginfo.log_auditfail_state;
} else {
/* How did we even get here! */
return 1;
@@ -2106,9 +2106,9 @@ int
slapd_log_audit_internal(
char *buffer,
int buf_len,
- int state)
+ int *state)
{
- if ((state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
+ if ((*state & LOGGING_ENABLED) && (loginfo.log_audit_file != NULL)) {
LOG_AUDIT_LOCK_WRITE();
if (log__needrotation(loginfo.log_audit_fdes,
SLAPD_AUDIT_LOG) == LOG_ROTATE) {
@@ -2122,9 +2122,9 @@ slapd_log_audit_internal(
loginfo.log_audit_rotationsyncclock += PR_ABS(loginfo.log_audit_rotationtime_secs);
}
}
- if (state & LOGGING_NEED_TITLE) {
+ if (*state & LOGGING_NEED_TITLE) {
log_write_title(loginfo.log_audit_fdes);
- state &= ~LOGGING_NEED_TITLE;
+ *state &= ~LOGGING_NEED_TITLE;
}
LOG_WRITE_NOW_NO_ERR(loginfo.log_audit_fdes, buffer, buf_len, 0);
LOG_AUDIT_UNLOCK_WRITE();
diff --git a/ldap/servers/slapd/proto-slap.h b/ldap/servers/slapd/proto-slap.h
index a0648ca..e37f702 100644
--- a/ldap/servers/slapd/proto-slap.h
+++ b/ldap/servers/slapd/proto-slap.h
@@ -777,7 +777,7 @@ int slapi_log_access(int level, char *fmt, ...)
;
#endif
int slapd_log_audit(char *buffer, int buf_len, int sourcelog);
-int slapd_log_audit_internal(char *buffer, int buf_len, int state);
+int slapd_log_audit_internal(char *buffer, int buf_len, int *state);
int slapd_log_auditfail(char *buffer, int buf_len);
int slapd_log_auditfail_internal(char *buffer, int buf_len);
void log_access_flush(void);
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.
4 years, 8 months