This is an automated email from the git hooks/post-receive script.
firstyear pushed a commit to branch master
in repository 389-ds-base.
commit 708938e37854bb6b42ab393e1b6de1b7606c61a0
Author: William Brown <firstyear(a)redhat.com>
Date: Tue Jul 25 16:11:32 2017 +1000
Ticket 49331 - change autoscaling defaults
Bug Description: In autotuning we default to a split of 40% to
dbcache and 60% to entry cache. This isn't always the best split, given
we cap the dbcache.
In some performance tests I noticed an improvement when increasing
the dbcache, so I want to propose we change the scaling to a 25% split
and a cap of 1.5GB.
Fix Description: Change the values to 25% and 1.5GB cap
https://pagure.io/389-ds-base/issue/49331
Author: wibrown
Review by: mreynolds (Thanks!)
---
ldap/servers/slapd/back-ldbm/ldbm_config.c | 2 +-
ldap/servers/slapd/back-ldbm/start.c | 9 +++++----
ldap/servers/slapd/dn.c | 20 ++++++++++++--------
3 files changed, 18 insertions(+), 13 deletions(-)
diff --git a/ldap/servers/slapd/back-ldbm/ldbm_config.c
b/ldap/servers/slapd/back-ldbm/ldbm_config.c
index b4fb734..2ef4652 100644
--- a/ldap/servers/slapd/back-ldbm/ldbm_config.c
+++ b/ldap/servers/slapd/back-ldbm/ldbm_config.c
@@ -1855,7 +1855,7 @@ static config_info ldbm_config[] = {
{CONFIG_DB_HOME_DIRECTORY, CONFIG_TYPE_STRING, "",
&ldbm_config_db_home_directory_get, &ldbm_config_db_home_directory_set, 0},
{CONFIG_IMPORT_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "-1",
&ldbm_config_import_cache_autosize_get, &ldbm_config_import_cache_autosize_set,
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_CACHE_AUTOSIZE, CONFIG_TYPE_INT, "10",
&ldbm_config_cache_autosize_get, &ldbm_config_cache_autosize_set,
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
- {CONFIG_CACHE_AUTOSIZE_SPLIT, CONFIG_TYPE_INT, "40",
&ldbm_config_cache_autosize_split_get, &ldbm_config_cache_autosize_split_set,
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
+ {CONFIG_CACHE_AUTOSIZE_SPLIT, CONFIG_TYPE_INT, "25",
&ldbm_config_cache_autosize_split_get, &ldbm_config_cache_autosize_split_set,
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_IMPORT_CACHESIZE, CONFIG_TYPE_SIZE_T, "16777216",
&ldbm_config_import_cachesize_get, &ldbm_config_import_cachesize_set,
CONFIG_FLAG_ALWAYS_SHOW | CONFIG_FLAG_ALLOW_RUNNING_CHANGE},
{CONFIG_IDL_SWITCH, CONFIG_TYPE_STRING, "new",
&ldbm_config_idl_get_idl_new, &ldbm_config_idl_set_tune,
CONFIG_FLAG_ALWAYS_SHOW},
{CONFIG_IDL_UPDATE, CONFIG_TYPE_ONOFF, "on",
&ldbm_config_idl_get_update, &ldbm_config_idl_set_update, 0},
diff --git a/ldap/servers/slapd/back-ldbm/start.c b/ldap/servers/slapd/back-ldbm/start.c
index 46cc212..fc70bf6 100644
--- a/ldap/servers/slapd/back-ldbm/start.c
+++ b/ldap/servers/slapd/back-ldbm/start.c
@@ -102,7 +102,7 @@ ldbm_back_start_autotune(struct ldbminfo *li)
* default from ldbm_config.c
*/
if (li->li_cache_autosize_split == 0) {
- autosize_db_percentage_split = 40;
+ autosize_db_percentage_split = 25;
} else {
autosize_db_percentage_split = li->li_cache_autosize_split;
}
@@ -134,12 +134,13 @@ ldbm_back_start_autotune(struct ldbminfo *li)
/* It's valid, lets divide it up and set according to user prefs */
db_size = (autosize_db_percentage_split * zone_size) / 100;
- /* Cap the DB size at 512MB, as this doesn't help perf much more (lkrispen's
advice) */
+ /* Cap the DB size at 1.5G, as this doesn't help perf much more (lkrispen's
advice) */
/* NOTE: Do we need a minimum DB size? */
- if (db_size > (512 * MEGABYTE)) {
- db_size = (512 * MEGABYTE);
+ if (db_size > (1536 * MEGABYTE)) {
+ db_size = (1536 * MEGABYTE);
}
+
/* NOTE: Because of how we workout entry_size, even if
* have autosize split to say ... 90% for dbcache, because
* we cap db_size, we use zone_size - db_size, meaning that entry
diff --git a/ldap/servers/slapd/dn.c b/ldap/servers/slapd/dn.c
index 05ca161..915b230 100644
--- a/ldap/servers/slapd/dn.c
+++ b/ldap/servers/slapd/dn.c
@@ -2841,19 +2841,23 @@ slapi_sdn_get_size(const Slapi_DN *sdn)
* | 0 | 1 | 2 | 3 |
* -------------------------
* -------------------------
- * | A | | C | D |
- * |n: C | |n: |n: A |
- * |p: D | |p: A |p: E |
+ * | A | | E | D |
+ * |n: C | |n: D |n: A |
+ * |p: D | |p: |p: E |
* -------------------------
- * | E |
- * |n: D |
- * |p: |
+ * | C |
+ * |n: |
+ * |p: A |
* -------
*
- * Now when we do a look up of "E" we'll collide on bucket 2, and then
descend down til
- * we exhaust, or find our element. If we were to remove C, we would just promote E to
+ * Now when we do a look up of "C" we'll collide on bucket 2, and then
descend down til
+ * we exhaust, or find our element. If we were to remove E, we would just promote C to
* be the head of that slot.
*
+ * It's slightly quicker to insert at the head of the slot, and means that given we
+ * *just* added the element, we are likely to use it again sooner, so we reduce the
+ * number of comparisons.
+ *
* Again, I did test both with and without this - with was much faster, and relies on
* how even our hash distribution is *and* that generally with small table sizes we
* have small capacity, so we evict some values and keep these chains short.
--
To stop receiving notification emails like this one, please contact
the administrator of this repository.