modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml | 69 -
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-env.sh | 240 ---
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra.yaml | 653 ----------
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/log4j-server.properties | 45
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/passwd.properties | 23
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh | 239 +++
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml | 653 ++++++++++
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.log4j-server.properties | 45
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.passwd.properties | 23
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/BundleProperty.java | 17
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java | 91 -
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java | 119 +
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentException.java | 48
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentOptions.java | 331 +++--
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentProperty.java | 20
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/UnmanagedDeployer.java | 135 --
modules/common/cassandra-ccm/cassandra-ccm-core/src/main/resources/cassandra.properties | 35
modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java | 137 ++
modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh | 239 +++
modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml | 653 ++++++++++
modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.log4j-server.properties | 45
modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java | 14
modules/common/cassandra-installer/src/main/resources/logging.properties | 3
modules/common/cassandra-installer/src/main/resources/module/main/module.xml | 5
modules/core/util/src/main/java/org/rhq/core/util/TokenReplacingReader.java | 3
25 files changed, 2567 insertions(+), 1318 deletions(-)
New commits:
commit 4309f03eb14b2310567d9ccabe517218eaef66a7
Author: John Sanda <jsanda@redhat.com>
Date: Mon Apr 1 16:00:28 2013 -0400
remove dependency on bundles for cassandra deployer code

UnmanagedDeployer has been renamed to Deployer. There is no longer a notion of
managed deployment since Cassandra will not be installed via agent provisioning.
The deployment code has been greatly simplified by the removal of the bundle
deployment code. It was only being used for variable substitution. That is now
performed by TokenReplacingReader.

This is also the initial commit for DeployerITest, which is an integration test
that verifies deployment is performed as expected.

Lastly, TokenReplacingReader has been updated to properly handle escaped
strings.
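
For context on the substitution the commit describes: TokenReplacingReader replaces ${token} references with values from a map, and a backslash escapes a reference so it passes through literally (which is why the new rhq.cassandra-env.sh below writes \${max_heap_size_in_mb}M for shell variables that must survive deployment). Here is a minimal string-based sketch of that behavior; the class and method names are illustrative assumptions, not the actual RHQ API, which streams from a java.io.Reader:

    import java.util.HashMap;
    import java.util.Map;

    // A minimal sketch (not the actual RHQ class) of ${token} substitution
    // with backslash escaping, in the spirit of TokenReplacingReader.
    public class TokenReplacementSketch {

        static String replaceTokens(String template, Map<String, String> tokens) {
            StringBuilder out = new StringBuilder();
            for (int i = 0; i < template.length(); i++) {
                char c = template.charAt(i);
                if (c == '\\' && i + 1 < template.length()) {
                    // Backslash escapes the next character, so "\${foo}"
                    // comes through literally as "${foo}".
                    out.append(template.charAt(++i));
                } else if (c == '$' && i + 1 < template.length()
                        && template.charAt(i + 1) == '{') {
                    int end = template.indexOf('}', i + 2);
                    if (end < 0) {
                        // Unterminated reference: emit the rest as-is.
                        out.append(template.substring(i));
                        break;
                    }
                    out.append(tokens.getOrDefault(template.substring(i + 2, end), ""));
                    i = end;
                } else {
                    out.append(c);
                }
            }
            return out.toString();
        }

        public static void main(String[] args) {
            Map<String, String> tokens = new HashMap<>();
            tokens.put("cluster.name", "rhq");
            // Prints: cluster_name: rhq, heap: ${max_heap_size_in_mb}M
            System.out.println(replaceTokens(
                "cluster_name: ${cluster.name}, heap: \\${max_heap_size_in_mb}M",
                tokens));
        }
    }
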
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
index 04bfde9..1cba7be 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/pom.xml
@@ -20,18 +20,6 @@
<dependencies>
<dependency>
<groupId>${project.groupId}</groupId>
- <artifactId>rhq-ant-bundle-common</artifactId>
- <version>${project.version}</version>
- <exclusions>
- <exclusion>
- <groupId>org.liquibase</groupId>
- <artifactId>liquibase-core</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>${project.groupId}</groupId>
<artifactId>rhq-core-plugin-api</artifactId>
<version>${project.version}</version>
</dependency>
@@ -59,6 +47,13 @@
<artifactId>platform</artifactId>
<version>${cassandra.jna.version}</version>
</dependency>
+
+ <dependency>
+ <groupId>com.googlecode.java-diff-utils</groupId>
+ <artifactId>diffutils</artifactId>
+ <version>1.2.1</version>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
@@ -86,9 +81,9 @@
<exclude>src/main/resources/cassandra/conf/cassandra-env.sh</exclude>
</excludes>
</resource>
- <resource>
- <directory>src/main/cassandra/cql</directory>
- </resource>
+ <!--<resource>-->
+ <!--<directory>src/main/cassandra/cql</directory>-->
+ <!--</resource>-->
</resources>
<delimiters>
<delimiter>${*}</delimiter>
@@ -154,19 +149,53 @@
<copy
file="${settings.localRepository}/net/java/dev/jna/jna/${cassandra.jna.version}/jna-${cassandra.jna.version}.jar"
todir="${cassandra.dir}/lib"/>
<move file="${project.build.outputDirectory}/cassandra/conf"
todir="${cassandra.dir}"/>
+ <delete dir="${cassandra.dir}/javadoc"/>
<zip basedir="${cassandra.dir}"
destfile="${cassandra.distro.zip}"/>
<delete dir="${cassandra.dir}"/>
- <zip basedir="${project.build.outputDirectory}"
- destfile="${project.build.outputDirectory}/cassandra-bundle.zip"
- includes="${cassandra.distro.filename},deploy.xml"/>
- <delete file="${project.build.outputDirectory}/deploy.xml"/>
- <delete file="${project.build.outputDirectory}/cassandra}"/>
- <delete file="${cassandra.distro.zip}"/>
+ <!--<delete dir="${project.build.outputDirectory}/cassandra"/>-->
</target>
</configuration>
</execution>
</executions>
</plugin>
+ <plugin>
+ <artifactId>maven-surefire-plugin</artifactId>
+ <configuration>
+ <excludes>
+ <exclude>**/*ITest.java</exclude>
+ </excludes>
+ </configuration>
+ </plugin>
+ <plugin>
+ <artifactId>maven-failsafe-plugin</artifactId>
+ <version>2.13</version>
+ <executions>
+ <execution>
+ <id>integration-test</id>
+ <goals>
+ <goal>integration-test</goal>
+ </goals>
+ <configuration>
+ <includes>
+ <include>**/*ITest.java</include>
+ </includes>
+ <systemPropertyVariables>
+ <rhq.storage.deploy-dir>${project.build.directory}/rhq-storage</rhq.storage.deploy-dir>
+ </systemPropertyVariables>
+ </configuration>
+ </execution>
+ <execution>
+ <id>verify</id>
+ <goals>
+ <goal>verify</goal>
+ </goals>
+ <configuration>
+ <testFailureIgnore>false</testFailureIgnore>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
</plugins>
</build>
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-env.sh b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-env.sh
deleted file mode 100644
index e523f97..0000000
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra-env.sh
+++ /dev/null
@@ -1,240 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-calculate_heap_sizes()
-{
- case "`uname`" in
- Linux)
- system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'`
- system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
- ;;
- FreeBSD)
- system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
- system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
- system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
- ;;
- SunOS)
- system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
- system_cpu_cores=`psrinfo | wc -l`
- ;;
- Darwin)
- system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
- system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
- system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
- ;;
- *)
- # assume reasonable defaults for e.g. a modern desktop or
- # cheap server
- system_memory_in_mb="2048"
- system_cpu_cores="2"
- ;;
- esac
-
- # some systems like the raspberry pi don't report cores, use at least 1
- if [ "$system_cpu_cores" -lt "1" ]
- then
- system_cpu_cores="1"
- fi
-
- # set max heap size based on the following
- # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
- # calculate 1/2 ram and cap to 1024MB
- # calculate 1/4 ram and cap to 8192MB
- # pick the max
- half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
- quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
- if [ "$half_system_memory_in_mb" -gt "1024" ]
- then
- half_system_memory_in_mb="1024"
- fi
- if [ "$quarter_system_memory_in_mb" -gt "8192" ]
- then
- quarter_system_memory_in_mb="8192"
- fi
- if [ "$half_system_memory_in_mb" -gt
"$quarter_system_memory_in_mb" ]
- then
- max_heap_size_in_mb="$half_system_memory_in_mb"
- else
- max_heap_size_in_mb="$quarter_system_memory_in_mb"
- fi
- MAX_HEAP_SIZE="${max_heap_size_in_mb}M"
-
- # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
- max_sensible_yg_per_core_in_mb="100"
- max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
-
- desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
-
- if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
- then
- HEAP_NEWSIZE="${max_sensible_yg_in_mb}M"
- else
- HEAP_NEWSIZE="${desired_yg_in_mb}M"
- fi
-}
-
-# Determine the sort of JVM we'll be running on.
-
-java_ver_output=`"${JAVA:-java}" -version 2>&1`
-
-jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'`
-JVM_VERSION=${jvmver%_*}
-JVM_PATCH_VERSION=${jvmver#*_}
-
-jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'`
-case "$jvm" in
- OpenJDK)
- JVM_VENDOR=OpenJDK
- # this will be "64-Bit" or "32-Bit"
- JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
- ;;
- "Java(TM)")
- JVM_VENDOR=Oracle
- # this will be "64-Bit" or "32-Bit"
- JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
- ;;
- *)
- # Help fill in other JVM values
- JVM_VENDOR=other
- JVM_ARCH=unknown
- ;;
-esac
-
-
-# Override these to set the amount of memory to allocate to the JVM at
-# start-up. For production use you may wish to adjust this for your
-# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
-# to the Java heap; HEAP_NEWSIZE refers to the size of the young
-# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
-# or not (if you set one, set the other).
-#
-# The main trade-off for the young generation is that the larger it
-# is, the longer GC pause times will be. The shorter it is, the more
-# expensive GC will be (usually).
-#
-# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
-# times. If in doubt, and if you do not particularly want to tweak, go with
-# 100 MB per physical CPU core.
-
-#MAX_HEAP_SIZE="4G"
-#HEAP_NEWSIZE="800M"
-
-if [ "x$MAX_HEAP_SIZE" = "x" ] && [
"x$HEAP_NEWSIZE" = "x" ]; then
- calculate_heap_sizes
-else
- if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" =
"x" ]; then
- echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see
cassandra-env.sh)"
- exit 1
- fi
-fi
-
-# Specifies the default port over which Cassandra will be available for
-# JMX connections.
-JMX_PORT="@@jmx.port@(a)"
-
-
-# Here we create the arguments that will get passed to the jvm when
-# starting cassandra.
-
-JVM_EXTRA_OPTS="@@cassandra.ring.delay.property@@@@cassandra.ring.delay@(a)"
-JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS
-Dpasswd.properties=@@rhq.cassandra.password.properties.file@(a)"
-JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS
-Daccess.properties=@@rhq.cassandra.access.properties.file@(a)"
-
-# enable assertions. disabling this in production will give a modest
-# performance benefit (around 5%).
-JVM_OPTS="$JVM_OPTS -ea"
-
-# add the jamm javaagent
-if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \>
"1.6.0" ] \
- || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION"
-ge 23 ]
-then
- JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
-fi
-
-# enable thread priorities, primarily so we can give periodic tasks
-# a lower priority to avoid interfering with client workload
-JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities"
-# allows lowering thread priority without being root. see
-# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround....
-JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42"
-
-# min and max heap sizes should be set to the same value to avoid
-# stop-the-world GC pauses during resize, and so that we can lock the
-# heap in memory on startup to prevent any of it from being swapped
-# out.
-JVM_OPTS="$JVM_OPTS -Xms@@rhq.cassandra.max.heap.size@(a)"
-JVM_OPTS="$JVM_OPTS -Xmx@@rhq.cassandra.max.heap.size@(a)"
-JVM_OPTS="$JVM_OPTS -Xmn@@rhq.cassandra.heap.new.size@(a)"
-JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError"
-
-# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
-if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
- JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date
+%s`-pid$$.hprof"
-fi
-
-
-startswith() { [ "${1#$2}" != "$1" ]; }
-
-if [ "`uname`" = "Linux" ] ; then
- # reduce the per-thread stack size to minimize the impact of Thrift
- # thread-per-client. (Best practice is for client connections to
- # be pooled anyway.) Only do so on Linux where it is known to be
- # supported.
- # u34 and greater need 180k
- JVM_OPTS="$JVM_OPTS -Xss180k"
-fi
-echo "xss = $JVM_OPTS"
-
-# GC tuning options
-JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC"
-JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC"
-JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled"
-JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8"
-JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1"
-JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75"
-JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
-
-# GC logging options -- uncomment to enable
-# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
-# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
-# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
-# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
-
-# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414
-# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent
-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414"
-
-# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See
-# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version:
-# comment out this entry to enable IPv6 support).
-JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true"
-
-# jmx: metrics and administration interface
-#
-# add this if you're having trouble connecting:
-# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
-#
-# see
-# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems...
-# for more on configuring JMX through firewalls, etc. (Short version:
-# get it working with no firewall first.)
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
-JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
-JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra.yaml b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra.yaml
deleted file mode 100644
index dab627f..0000000
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/cassandra.yaml
+++ /dev/null
@@ -1,653 +0,0 @@
-# Cassandra storage config YAML
-
-# NOTE:
-# See http://wiki.apache.org/cassandra/StorageConfiguration for
-# full explanations of configuration directives
-# /NOTE
-
-# The name of the cluster. This is mainly used to prevent machines in
-# one logical cluster from joining another.
-cluster_name: @@cluster.name@@
-
-# This defines the number of tokens randomly assigned to this node on the ring
-# The more tokens, relative to other nodes, the larger the proportion of data
-# that this node will store. You probably want all nodes to have the same number
-# of tokens assuming they have equal hardware capability.
-#
-# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
-# and will use the initial_token as described below.
-#
-# Specifying initial_token will override this setting.
-#
-# If you already have a cluster with 1 token per node, and wish to migrate to
-# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
-num_tokens: @@rhq.cassandra.num_tokens@@
-
-# If you haven't specified num_tokens, or have set it to the default of 1 then
-# you should always specify InitialToken when setting up a production
-# cluster for the first time, and often when adding capacity later.
-# The principle is that each node should be given an equal slice of
-# the token ring; see http://wiki.apache.org/cassandra/Operations
-# for more details.
-#
-# If blank, Cassandra will request a token bisecting the range of
-# the heaviest-loaded existing node. If there is no load information
-# available, such as is the case with a new cluster, it will pick
-# a random token, which will lead to hot spots.
-#initial_token:
-
-# See http://wiki.apache.org/cassandra/HintedHandoff
-hinted_handoff_enabled: true
-# this defines the maximum amount of time a dead host will have hints
-# generated. After it has been dead this long, new hints for it will not be
-# created until it has been seen alive and gone down again.
-max_hint_window_in_ms: 10800000 # 3 hours
-# throttle in KB's per second, per delivery thread
-hinted_handoff_throttle_in_kb: 1024
-# Number of threads with which to deliver hints;
-# Consider increasing this number when you have multi-dc deployments, since
-# cross-dc handoff tends to be slower
-max_hints_delivery_threads: 2
-
-# The following setting populates the page cache on memtable flush and compaction
-# WARNING: Enable this setting only when the whole node's data fits in memory.
-# Defaults to: false
-# populate_io_cache_on_flush: false
-
-# authentication backend, implementing IAuthenticator; used to identify users
-authenticator: @@rhq.cassandra.authenticator@@
-
-# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
-authorizer: @@rhq.cassandra.authorizer@@
-
-# The partitioner is responsible for distributing rows (by key) across
-# nodes in the cluster. Any IPartitioner may be used, including your
-# own as long as it is on the classpath. Out of the box, Cassandra
-# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
-# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
-#
-# - RandomPartitioner distributes rows across the cluster evenly by md5.
-# This is the default prior to 1.2 and is retained for compatibility.
-# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128
-# Hash Function instead of md5. When in doubt, this is the best option.
-# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
-# scanning rows in key order, but the ordering can generate hot spots
-# for sequential insertion workloads.
-# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
-# - keys in a less-efficient format and only works with keys that are
-# UTF8-encoded Strings.
-# - CollatingOPP colates according to EN,US rules rather than lexical byte
-# ordering. Use this as an example if you need custom collation.
-#
-# See http://wiki.apache.org/cassandra/Operations for more on
-# partitioners and token selection.
-partitioner: org.apache.cassandra.dht.Murmur3Partitioner
-
-# directories where Cassandra should store data on disk.
-data_file_directories:
- - @@data.dir@@
-
-# commit log
-commitlog_directory: @@commitlog.dir@@
-
-# policy for data disk failures:
-# stop: shut down gossip and Thrift, leaving the node effectively dead, but
-# still inspectable via JMX.
-# best_effort: stop using the failed disk and respond to requests based on
-# remaining available sstables. This means you WILL see obsolete
-# data at CL.ONE!
-# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
-disk_failure_policy: stop
-
-# Maximum size of the key cache in memory.
-#
-# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
-# minimum, sometimes more. The key cache is fairly tiny for the amount of
-# time it saves, so it's worthwhile to use it at large numbers.
-# The row cache saves even more time, but must store the whole values of
-# its rows, so it is extremely space-intensive. It's best to only use the
-# row cache if you have hot rows or static rows.
-#
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
-key_cache_size_in_mb:
-
-# Duration in seconds after which Cassandra should
-# safe the keys cache. Caches are saved to saved_caches_directory as
-# specified in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 14400 or 4 hours.
-key_cache_save_period: 14400
-
-# Number of keys from the key cache to save
-# Disabled by default, meaning all keys are going to be saved
-# key_cache_keys_to_save: 100
-
-# Maximum size of the row cache in memory.
-# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup.
-#
-# Default value is 0, to disable row caching.
-row_cache_size_in_mb: 0
-
-# Duration in seconds after which Cassandra should
-# safe the row cache. Caches are saved to saved_caches_directory as specified
-# in this configuration file.
-#
-# Saved caches greatly improve cold-start speeds, and is relatively cheap in
-# terms of I/O for the key cache. Row cache saving is much more expensive and
-# has limited use.
-#
-# Default is 0 to disable saving the row cache.
-row_cache_save_period: 0
-
-# Number of keys from the row cache to save
-# Disabled by default, meaning all keys are going to be saved
-# row_cache_keys_to_save: 100
-
-# The provider for the row cache to use.
-#
-# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
-#
-# SerializingCacheProvider serialises the contents of the row and stores
-# it in native memory, i.e., off the JVM Heap. Serialized rows take
-# significantly less memory than "live" rows in the JVM, so you can cache
-# more rows in a given memory footprint. And storing the cache off-heap
-# means you can use smaller heap sizes, reducing the impact of GC pauses.
-#
-# It is also valid to specify the fully-qualified class name to a class
-# that implements org.apache.cassandra.cache.IRowCacheProvider.
-#
-# Defaults to SerializingCacheProvider
-row_cache_provider: SerializingCacheProvider
-
-# saved caches
-saved_caches_directory: @@saved.caches.dir@@
-
-# commitlog_sync may be either "periodic" or "batch."
-# When in batch mode, Cassandra won't ack writes until the commit log
-# has been fsynced to disk. It will wait up to
-# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
-# performing the sync.
-#
-# commitlog_sync: batch
-# commitlog_sync_batch_window_in_ms: 50
-#
-# the other option is "periodic" where writes may be acked immediately
-# and the CommitLog is simply synced every commitlog_sync_period_in_ms
-# milliseconds.
-commitlog_sync: periodic
-commitlog_sync_period_in_ms: 10000
-
-# The size of the individual commitlog file segments. A commitlog
-# segment may be archived, deleted, or recycled once all the data
-# in it (potentally from each columnfamily in the system) has been
-# flushed to sstables.
-#
-# The default size is 32, which is almost always fine, but if you are
-# archiving commitlog segments (see commitlog_archiving.properties),
-# then you probably want a finer granularity of archiving; 8 or 16 MB
-# is reasonable.
-commitlog_segment_size_in_mb: 32
-
-# any class that implements the SeedProvider interface and has a
-# constructor that takes a Map<String, String> of parameters will do.
-seed_provider:
- # Addresses of hosts that are deemed contact points.
- # Cassandra nodes use this list of hosts to find each other and learn
- # the topology of the ring. You must change this if you are running
- # multiple nodes!
- - class_name: org.apache.cassandra.locator.SimpleSeedProvider
- parameters:
- # seeds is actually a comma-delimited list of addresses.
- # Ex: "<ip1>,<ip2>,<ip3>"
- - seeds: "@@seeds@@"
-
-# emergency pressure valve: each time heap usage after a full (CMS)
-# garbage collection is above this fraction of the max, Cassandra will
-# flush the largest memtables.
-#
-# Set to 1.0 to disable. Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-#
-# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
-# it is most effective under light to moderate load, or read-heavy
-# workloads; under truly massive write load, it will often be too
-# little, too late.
-flush_largest_memtables_at: 0.75
-
-# emergency pressure valve #2: the first time heap usage after a full
-# (CMS) garbage collection is above this fraction of the max,
-# Cassandra will reduce cache maximum _capacity_ to the given fraction
-# of the current _size_. Should usually be set substantially above
-# flush_largest_memtables_at, since that will have less long-term
-# impact on the system.
-#
-# Set to 1.0 to disable. Setting this lower than
-# CMSInitiatingOccupancyFraction is not likely to be useful.
-reduce_cache_sizes_at: 0.85
-reduce_cache_capacity_to: 0.6
-
-# For workloads with more data than can fit in memory, Cassandra's
-# bottleneck will be reads that need to fetch data from
-# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
-# order to allow the operations to enqueue low enough in the stack
-# that the OS and drives can reorder them.
-#
-# On the other hand, since writes are almost never IO bound, the ideal
-# number of "concurrent_writes" is dependent on the number of cores in
-# your system; (8 * number_of_cores) is a good rule of thumb.
-concurrent_reads: 32
-concurrent_writes: 32
-
-# Total memory to use for memtables. Cassandra will flush the largest
-# memtable when this much memory is used.
-# If omitted, Cassandra will set it to 1/3 of the heap.
-# memtable_total_space_in_mb: 2048
-
-# Total space to use for commitlogs. Since commitlog segments are
-# mmapped, and hence use up address space, the default size is 32
-# on 32-bit JVMs, and 1024 on 64-bit JVMs.
-#
-# If space gets above this value (it will round up to the next nearest
-# segment multiple), Cassandra will flush every dirty CF in the oldest
-# segment and remove it. So a small total commitlog space will tend
-# to cause more flush activity on less-active columnfamilies.
-# commitlog_total_space_in_mb: 4096
-
-# This sets the amount of memtable flush writer threads. These will
-# be blocked by disk io, and each one will hold a memtable in memory
-# while blocked. If you have a large heap and many data directories,
-# you can increase this value for better flush performance.
-# By default this will be set to the amount of data directories defined.
-#memtable_flush_writers: 1
-
-# the number of full memtables to allow pending flush, that is,
-# waiting for a writer thread. At a minimum, this should be set to
-# the maximum number of secondary indexes created on a single CF.
-memtable_flush_queue_size: 4
-
-# Whether to, when doing sequential writing, fsync() at intervals in
-# order to force the operating system to flush the dirty
-# buffers. Enable this to avoid sudden dirty buffer flushing from
-# impacting read latencies. Almost always a good idea on SSD:s; not
-# necessarily on platters.
-trickle_fsync: false
-trickle_fsync_interval_in_kb: 10240
-
-# TCP port, for commands and data
-storage_port: @@rhq.cassandra.storage.port@@
-
-# SSL port, for encrypted communication. Unused unless enabled in
-# encryption_options
-ssl_storage_port: @@rhq.cassandra.ssl.storage.port@@
-
-# Address to bind to and tell other Cassandra nodes to connect to. You
-# _must_ change this if you want multiple nodes to be able to
-# communicate!
-#
-# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
-# will always do the Right Thing *if* the node is properly configured
-# (hostname, name resolution, etc), and the Right Thing is to use the
-# address associated with the hostname (it might not be).
-#
-# Setting this to 0.0.0.0 is always wrong.
-listen_address: @@listen.address@@
-
-# Address to broadcast to other Cassandra nodes
-# Leaving this blank will set it to the same value as listen_address
-# broadcast_address: 1.2.3.4
-
-
-# Whether to start the native transport server.
-# Currently, only the thrift server is started by default because the native
-# transport is considered beta.
-# Please note that the address on which the native transport is bound is the
-# same as the rpc_address. The port however is different and specified below.
-start_native_transport: true
-# port for the CQL native transport to listen for clients on
-native_transport_port: @@rhq.cassandra.native_transport_port@@
-# The minimum and maximum threads for handling requests when the native
-# transport is used. The meaning is those is similar to the one of
-# rpc_min_threads and rpc_max_threads, though the default differ slightly and
-# are the ones below:
-# native_transport_min_threads: 16
-native_transport_max_threads: @@rhq.casandra.native_transport_max_threads@@
-
-
-# Whether to start the thrift rpc server.
-start_rpc: true
-# The address to bind the Thrift RPC service to -- clients connect
-# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
-# you want Thrift to listen on all interfaces.
-#
-# Leaving this blank has the same effect it does for ListenAddress,
-# (i.e. it will be based on the configured hostname of the node).
-rpc_address: @@rpc.address@@
-# port for Thrift to listen for clients on
-rpc_port: @@rhq.cassandra.rpc_port@@
-
-# enable or disable keepalive on rpc connections
-rpc_keepalive: true
-
-# Cassandra provides three out-of-the-box options for the RPC Server:
-#
-# sync -> One thread per thrift connection. For a very large number of clients, memory
-# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
-# per thread, and that will correspond to your use of virtual memory (but physical memory
-# may be limited depending on use of stack space).
-#
-# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
-# asynchronously using a small number of threads that does not vary with the amount
-# of thrift clients (and thus scales well to many clients). The rpc requests are still
-# synchronous (one thread per active request).
-#
-# The default is sync because on Windows hsha is about 30% slower. On Linux,
-# sync/hsha performance is about the same, with hsha of course using less memory.
-#
-# Alternatively, can provide your own RPC server by providing the fully-qualified class name
-# of an o.a.c.t.TServerFactory that can create an instance of it.
-rpc_server_type: sync
-
-# Uncomment rpc_min|max_thread to set request pool size limits.
-#
-# Regardless of your choice of RPC server (see above), the number of maximum requests in the
-# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
-# RPC server, it also dictates the number of clients that can be connected at all).
-#
-# The default is unlimited and thus provide no protection against clients overwhelming the server. You are
-# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
-# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
-#
-# rpc_min_threads: 16
-# rpc_max_threads: 2048
-
-# uncomment to set socket buffer sizes on rpc connections
-# rpc_send_buff_size_in_bytes:
-# rpc_recv_buff_size_in_bytes:
-
-# Frame size for thrift (maximum field length).
-thrift_framed_transport_size_in_mb: 15
-
-# The max length of a thrift message, including all fields and
-# internal thrift overhead.
-thrift_max_message_length_in_mb: 16
-
-# Set to true to have Cassandra create a hard link to each sstable
-# flushed or streamed locally in a backups/ subdirectory of the
-# Keyspace data. Removing these links is the operator's
-# responsibility.
-incremental_backups: false
-
-# Whether or not to take a snapshot before each compaction. Be
-# careful using this option, since Cassandra won't clean up the
-# snapshots for you. Mostly useful if you're paranoid when there
-# is a data format change.
-snapshot_before_compaction: false
-
-# Whether or not a snapshot is taken of the data before keyspace truncation
-# or dropping of column families. The STRONGLY advised default of true
-# should be used to provide data safety. If you set this flag to false, you will
-# lose data on truncation or drop.
-auto_snapshot: true
-
-# Add column indexes to a row after its contents reach this size.
-# Increase if your column values are large, or if you have a very large
-# number of columns. The competing causes are, Cassandra has to
-# deserialize this much of the row to read a single column, so you want
-# it to be small - at least if you do many partial-row reads - but all
-# the index data is read for each access, so you don't want to generate
-# that wastefully either.
-column_index_size_in_kb: 64
-
-# Size limit for rows being compacted in memory. Larger rows will spill
-# over to disk and use a slower two-pass compaction process. A message
-# will be logged specifying the row key.
-in_memory_compaction_limit_in_mb: 64
-
-# Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair. Simultaneous
-# compactions can help preserve read performance in a mixed read/write
-# workload, by mitigating the tendency of small sstables to accumulate
-# during a single long running compactions. The default is usually
-# fine and if you experience problems with compaction running too
-# slowly or too fast, you should look at
-# compaction_throughput_mb_per_sec first.
-#
-# concurrent_compactors defaults to the number of cores.
-# Uncomment to make compaction mono-threaded, the pre-0.8 default.
-#concurrent_compactors: 1
-
-# Multi-threaded compaction. When enabled, each compaction will use
-# up to one thread per core, plus one thread per sstable being merged.
-# This is usually only useful for SSD-based hardware: otherwise,
-# your concern is usually to get compaction to do LESS i/o (see:
-# compaction_throughput_mb_per_sec), not more.
-multithreaded_compaction: false
-
-# Throttles compaction to the given total throughput across the entire
-# system. The faster you insert data, the faster you need to compact in
-# order to keep the sstable count down, but in general, setting this to
-# 16 to 32 times the rate you are inserting data is more than sufficient.
-# Setting this to 0 disables throttling. Note that this account for all types
-# of compaction, including validation compaction.
-compaction_throughput_mb_per_sec: 16
-
-# Track cached row keys during compaction, and re-cache their new
-# positions in the compacted sstable. Disable if you use really large
-# key caches.
-compaction_preheat_key_cache: true
-
-# Throttles all outbound streaming file transfers on this node to the
-# given total throughput in Mbps. This is necessary because Cassandra does
-# mostly sequential IO when streaming data during bootstrap or repair, which
-# can lead to saturating the network connection and degrading rpc performance.
-# When unset, the default is 400 Mbps or 50 MB/s.
-# stream_throughput_outbound_megabits_per_sec: 400
-
-# How long the coordinator should wait for read operations to complete
-read_request_timeout_in_ms: 10000
-# How long the coordinator should wait for seq or index scans to complete
-range_request_timeout_in_ms: 10000
-# How long the coordinator should wait for writes to complete
-write_request_timeout_in_ms: 10000
-# How long the coordinator should wait for truncates to complete
-# (This can be much longer, because we need to flush all CFs
-# to make sure we can clear out anythink in the commitlog that could
-# cause truncated data to reappear.)
-truncate_request_timeout_in_ms: 60000
-# The default timeout for other, miscellaneous operations
-request_timeout_in_ms: 10000
-
-# Enable operation timeout information exchange between nodes to accurately
-# measure request timeouts, If disabled cassandra will assuming the request
-# was forwarded to the replica instantly by the coordinator
-#
-# Warning: before enabling this property make sure to ntp is installed
-# and the times are synchronized between the nodes.
-cross_node_timeout: false
-
-# Enable socket timeout for streaming operation.
-# When a timeout occurs during streaming, streaming is retried from the start
-# of the current file. This *can* involve re-streaming an important amount of
-# data, so you should avoid setting the value too low.
-# Default value is 0, which never timeout streams.
-# streaming_socket_timeout_in_ms: 0
-
-# phi value that must be reached for a host to be marked down.
-# most users should never need to adjust this.
-# phi_convict_threshold: 8
-
-# endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch. The snitch has two functions:
-# - it teaches Cassandra enough about your network topology to route
-# requests efficiently
-# - it allows Cassandra to spread replicas around your cluster to avoid
-# correlated failures. It does this by grouping machines into
-# "datacenters" and "racks." Cassandra will do its best not to
have
-# more than one replica on the same "rack" (which may not actually
-# be a physical location)
-#
-# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
-# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
-# ARE PLACED.
-#
-# Out of the box, Cassandra provides
-# - SimpleSnitch:
-# Treats Strategy order as proximity. This improves cache locality
-# when disabling read repair, which can further improve throughput.
-# Only appropriate for single-datacenter deployments.
-# - PropertyFileSnitch:
-# Proximity is determined by rack and data center, which are
-# explicitly configured in cassandra-topology.properties.
-# - GossipingPropertyFileSnitch
-# The rack and datacenter for the local node are defined in
-# cassandra-rackdc.properties and propagated to other nodes via gossip. If
-# cassandra-topology.properties exists, it is used as a fallback, allowing
-# migration from the PropertyFileSnitch.
-# - RackInferringSnitch:
-# Proximity is determined by rack and data center, which are
-# assumed to correspond to the 3rd and 2nd octet of each node's
-# IP address, respectively. Unless this happens to match your
-# deployment conventions (as it did Facebook's), this is best used
-# as an example of writing a custom Snitch class.
-# - Ec2Snitch:
-# Appropriate for EC2 deployments in a single Region. Loads Region
-# and Availability Zone information from the EC2 API. The Region is
-# treated as the Datacenter, and the Availability Zone as the rack.
-# Only private IPs are used, so this will not work across multiple
-# Regions.
-# - Ec2MultiRegionSnitch:
-# Uses public IPs as broadcast_address to allow cross-region
-# connectivity. (Thus, you should set seed addresses to the public
-# IP as well.) You will need to open the storage_port or
-# ssl_storage_port on the public IP firewall. (For intra-Region
-# traffic, Cassandra will switch to the private IP after
-# establishing a connection.)
-#
-# You can use a custom Snitch by setting this to the full class name
-# of the snitch, which will be assumed to be on your classpath.
-endpoint_snitch: SimpleSnitch
-
-# controls how often to perform the more expensive part of host score
-# calculation
-dynamic_snitch_update_interval_in_ms: 100
-# controls how often to reset all host scores, allowing a bad host to
-# possibly recover
-dynamic_snitch_reset_interval_in_ms: 600000
-# if set greater than zero and read_repair_chance is < 1.0, this will allow
-# 'pinning' of replicas to hosts in order to increase cache capacity.
-# The badness threshold will control how much worse the pinned host has to be
-# before the dynamic snitch will prefer other replicas over it. This is
-# expressed as a double which represents a percentage. Thus, a value of
-# 0.2 means Cassandra would continue to prefer the static snitch values
-# until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.1
-
-# request_scheduler -- Set this to a class that implements
-# RequestScheduler, which will schedule incoming client requests
-# according to the specific policy. This is useful for multi-tenancy
-# with a single Cassandra cluster.
-# NOTE: This is specifically for requests from the client and does
-# not affect inter node communication.
-# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
-# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
-# client requests to a node with a separate queue for each
-# request_scheduler_id. The scheduler is further customized by
-# request_scheduler_options as described below.
-request_scheduler: org.apache.cassandra.scheduler.NoScheduler
-
-# Scheduler Options vary based on the type of scheduler
-# NoScheduler - Has no options
-# RoundRobin
-# - throttle_limit -- The throttle_limit is the number of in-flight
-# requests per client. Requests beyond
-# that limit are queued up until
-# running requests can complete.
-# The value of 80 here is twice the number of
-# concurrent_reads + concurrent_writes.
-# - default_weight -- default_weight is optional and allows for
-# overriding the default which is 1.
-# - weights -- Weights are optional and will default to 1 or the
-# overridden default_weight. The weight translates into how
-# many requests are handled during each turn of the
-# RoundRobin, based on the scheduler id.
-#
-# request_scheduler_options:
-# throttle_limit: 80
-# default_weight: 5
-# weights:
-# Keyspace1: 1
-# Keyspace2: 5
-
-# request_scheduler_id -- An identifer based on which to perform
-# the request scheduling. Currently the only valid option is keyspace.
-# request_scheduler_id: keyspace
-
-# index_interval controls the sampling of entries from the primrary
-# row index in terms of space versus time. The larger the interval,
-# the smaller and less effective the sampling will be. In technicial
-# terms, the interval coresponds to the number of index entries that
-# are skipped between taking each sample. All the sampled entries
-# must fit in memory. Generally, a value between 128 and 512 here
-# coupled with a large key cache size on CFs results in the best trade
-# offs. This value is not often changed, however if you have many
-# very small rows (many to an OS page), then increasing this will
-# often lower memory usage without a impact on performance.
-index_interval: 128
-
-# Enable or disable inter-node encryption
-# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
-# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
-# suite for authentication, key exchange and encryption of the actual data transfers.
-# NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none, dc, rack
-#
-# If set to dc cassandra will encrypt the traffic between the DCs
-# If set to rack cassandra will encrypt the traffic between the racks
-#
-# The passwords used in these options must match the passwords used when generating
-# the keystore and truststore. For instructions on generating these files, see:
-# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/J...
-#
-server_encryption_options:
- internode_encryption: none
- keystore: conf/.keystore
- keystore_password: cassandra
- truststore: conf/.truststore
- truststore_password: cassandra
- # More advanced defaults below:
- # protocol: TLS
- # algorithm: SunX509
- # store_type: JKS
- # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
- # require_client_auth: false
-
-# enable or disable client/server encryption.
-client_encryption_options:
- enabled: false
- keystore: conf/.keystore
- keystore_password: cassandra
- # More advanced defaults below:
- # protocol: TLS
- # algorithm: SunX509
- # store_type: JKS
- # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
-
-
-# internode_compression controls whether traffic between nodes is
-# compressed.
-# can be: all - all traffic is compressed
-# dc - traffic between different datacenters is compressed
-# none - nothing is compressed.
-internode_compression: all
-
-# Enable or disable tcp_nodelay for inter-dc communication.
-# Disabling it will result in larger (but fewer) network packets being sent,
-# reducing overhead from the TCP protocol itself, at the cost of increasing
-# latency if you block for cross-datacenter responses.
-inter_dc_tcp_nodelay: true
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/log4j-server.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/log4j-server.properties
deleted file mode 100644
index 8f3fcf3..0000000
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/log4j-server.properties
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# for production, you should probably set pattern to %c instead of %l.
-# (%l is slower.)
-
-# output messages into a rolling log file as well as stdout
-log4j.rootLogger=@@logging.level@@,stdout,R
-
-# stdout
-log4j.appender.stdout=org.apache.log4j.ConsoleAppender
-log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
-log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
-
-# rolling log file
-log4j.appender.R=org.apache.log4j.RollingFileAppender
-log4j.appender.R.maxFileSize=20MB
-log4j.appender.R.maxBackupIndex=50
-log4j.appender.R.layout=org.apache.log4j.PatternLayout
-log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
-# Edit the next line to point to your logs directory
-log4j.appender.R.File=@@log.dir@@/@@rhq.cassandra.log.file.name@@
-log4j.appender.R.Threshold=@@logging.level@@
-
-# Application logging options
-#log4j.logger.org.apache.cassandra=DEBUG
-#log4j.logger.org.apache.cassandra.db=DEBUG
-#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
-
-# Adding this to avoid thrift logging disconnect errors.
-log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
-
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/passwd.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/passwd.properties
deleted file mode 100644
index e6c3d9b..0000000
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/passwd.properties
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This is a sample password file for SimpleAuthenticator. The format of
-# this file is username=password. If -Dpasswd.mode=MD5 then the password
-# is represented as an md5 digest, otherwise it is cleartext (keep this
-# in mind when setting file mode and ownership).
-
-cassandra=cassandra
-@@rhq.cassandra.username@@=@@rhq.cassandra.password@@
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh
new file mode 100644
index 0000000..bff254f
--- /dev/null
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra-env.sh
@@ -0,0 +1,239 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+calculate_heap_sizes()
+{
+ case "`uname`" in
+ Linux)
+ system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'`
+ system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
+ ;;
+ FreeBSD)
+ system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ SunOS)
+ system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
+ system_cpu_cores=`psrinfo | wc -l`
+ ;;
+ Darwin)
+ system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ *)
+ # assume reasonable defaults for e.g. a modern desktop or
+ # cheap server
+ system_memory_in_mb="2048"
+ system_cpu_cores="2"
+ ;;
+ esac
+
+ # some systems like the raspberry pi don't report cores, use at least 1
+ if [ "$system_cpu_cores" -lt "1" ]
+ then
+ system_cpu_cores="1"
+ fi
+
+ # set max heap size based on the following
+ # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
+ # calculate 1/2 ram and cap to 1024MB
+ # calculate 1/4 ram and cap to 8192MB
+ # pick the max
+ half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
+ quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
+ if [ "$half_system_memory_in_mb" -gt "1024" ]
+ then
+ half_system_memory_in_mb="1024"
+ fi
+ if [ "$quarter_system_memory_in_mb" -gt "8192" ]
+ then
+ quarter_system_memory_in_mb="8192"
+ fi
+ if [ "$half_system_memory_in_mb" -gt
"$quarter_system_memory_in_mb" ]
+ then
+ max_heap_size_in_mb="$half_system_memory_in_mb"
+ else
+ max_heap_size_in_mb="$quarter_system_memory_in_mb"
+ fi
+ MAX_HEAP_SIZE="\${max_heap_size_in_mb}M"
+
+ # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
+ max_sensible_yg_per_core_in_mb="100"
+ max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
+
+ desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
+
+ if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
+ then
+ HEAP_NEWSIZE="\${max_sensible_yg_in_mb}M"
+ else
+ HEAP_NEWSIZE="\${desired_yg_in_mb}M"
+ fi
+}
+
+# Determine the sort of JVM we'll be running on.
+
+java_ver_output=`"\${JAVA:-java}" -version 2>&1`
+
+jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'`
+JVM_VERSION=\${jvmver%_*}
+JVM_PATCH_VERSION=\${jvmver#*_}
+
+jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'`
+case "$jvm" in
+ OpenJDK)
+ JVM_VENDOR=OpenJDK
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
+ ;;
+ "Java(TM)")
+ JVM_VENDOR=Oracle
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
+ ;;
+ *)
+ # Help fill in other JVM values
+ JVM_VENDOR=other
+ JVM_ARCH=unknown
+ ;;
+esac
+
+
+# Override these to set the amount of memory to allocate to the JVM at
+# start-up. For production use you may wish to adjust this for your
+# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
+# to the Java heap; HEAP_NEWSIZE refers to the size of the young
+# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
+# or not (if you set one, set the other).
+#
+# The main trade-off for the young generation is that the larger it
+# is, the longer GC pause times will be. The shorter it is, the more
+# expensive GC will be (usually).
+#
+# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
+# times. If in doubt, and if you do not particularly want to tweak, go with
+# 100 MB per physical CPU core.
+
+#MAX_HEAP_SIZE="4G"
+#HEAP_NEWSIZE="800M"
+
+if [ "x$MAX_HEAP_SIZE" = "x" ] && [
"x$HEAP_NEWSIZE" = "x" ]; then
+ calculate_heap_sizes
+else
+ if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" =
"x" ]; then
+ echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see
cassandra-env.sh)"
+ exit 1
+ fi
+fi
+
+# Specifies the default port over which Cassandra will be available for
+# JMX connections.
+JMX_PORT="${rhq.cassandra.jmx.port}"
+
+
+# Here we create the arguments that will get passed to the jvm when
+# starting cassandra.
+
+JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS
-Dpasswd.properties=${rhq.cassandra.password.properties.file}"
+JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS
-Daccess.properties=${rhq.cassandra.access.properties.file}"
+
+# enable assertions. disabling this in production will give a modest
+# performance benefit (around 5%).
+JVM_OPTS="$JVM_OPTS -ea"
+
+# add the jamm javaagent
+if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \\>
"1.6.0" ] \\
+ || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION"
-ge 23 ]
+then
+ JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
+fi
+
+# enable thread priorities, primarily so we can give periodic tasks
+# a lower priority to avoid interfering with client workload
+JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities"
+# allows lowering thread priority without being root. see
+# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround....
+JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42"
+
+# min and max heap sizes should be set to the same value to avoid
+# stop-the-world GC pauses during resize, and so that we can lock the
+# heap in memory on startup to prevent any of it from being swapped
+# out.
+JVM_OPTS="$JVM_OPTS -Xms${rhq.cassandra.max.heap.size}"
+JVM_OPTS="$JVM_OPTS -Xmx${rhq.cassandra.max.heap.size}"
+JVM_OPTS="$JVM_OPTS -Xmn${rhq.cassandra.heap.new.size}"
+JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError"
+
+# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
+if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
+ JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date
+%s`-pid$$.hprof"
+fi
+
+
+startswith() { [ "\${1#$2}" != "$1" ]; }
+
+if [ "`uname`" = "Linux" ] ; then
+ # reduce the per-thread stack size to minimize the impact of Thrift
+ # thread-per-client. (Best practice is for client connections to
+ # be pooled anyway.) Only do so on Linux where it is known to be
+ # supported.
+ # u34 and greater need 180k
+ JVM_OPTS="$JVM_OPTS -Xss180k"
+fi
+echo "xss = $JVM_OPTS"
+
+# GC tuning options
+JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC"
+JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC"
+JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled"
+JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8"
+JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1"
+JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75"
+JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
+
+# GC logging options -- uncomment to enable
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
+# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
+# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
+
+# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414
+# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent
-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414"
+
+# Prefer binding to IPv4 network interfaces (when net.ipv6.bindv6only=1). See
+# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version:
+# comment out this entry to enable IPv6 support).
+JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true"
+
+# jmx: metrics and administration interface
+#
+# add this if you're having trouble connecting:
+# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
+#
+# see
+# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems...
+# for more on configuring JMX through firewalls, etc. (Short version:
+# get it working with no firewall first.)
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
+JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml
new file mode 100644
index 0000000..cf7be70
--- /dev/null
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.cassandra.yaml
@@ -0,0 +1,653 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: ${cluster.name}
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: ${rhq.cassandra.num_tokens}
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+#initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB's per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: ${rhq.cassandra.authenticator}
+
+# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+authorizer: ${rhq.cassandra.authorizer}
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner,
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+# This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartitioner but uses Murmur3_128
+# Hash Function instead of md5. When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+# scanning rows in key order, but the ordering can generate hot spots
+# for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP that stores
+# keys in a less-efficient format and only works with keys that are
+# UTF8-encoded Strings.
+# - CollatingOPP collates according to EN,US rules rather than lexical byte
+# ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - ${rhq.cassandra.data.dir}
+
+# commit log
+commitlog_directory: ${rhq.cassandra.commitlog.dir}
+
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the key cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: ${rhq.cassandra.saved.caches.dir}
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentially from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "${rhq.cassandra.seeds}"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSD:s; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: ${rhq.cassandra.storage.port}
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+ssl_storage_port: ${rhq.cassandra.ssl.storage.port}
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: ${rhq.cassandra.listen.address}
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+native_transport_port: ${rhq.cassandra.native_transport_port}
+# The minimum and maximum threads for handling requests when the native
+# transport is used. The meaning of these is similar to that of
+# rpc_min_threads and rpc_max_threads, though the defaults differ slightly and
+# are the ones below:
+# native_transport_min_threads: 16
+native_transport_max_threads: ${rhq.casandra.native_transport_max_threads}
+
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: ${rpc.address}
+# port for Thrift to listen for clients on
+rpc_port: ${rhq.cassandra.rpc_port}
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+# will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+# per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+# asynchronously using a small number of threads that does not vary with the amount
+# of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because we need to flush all CFs
+# to make sure we can clear out anything in the commitlog that could
+# cause truncated data to reappear.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, Cassandra will assume the request
+# was forwarded to the replica instantly by the coordinator
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never times out streams.
+# streaming_socket_timeout_in_ms: 0
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to
have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# Only appropriate for single-datacenter deployments.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - GossipingPropertyFileSnitch
+# The rack and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via gossip. If
+# cassandra-topology.properties exists, it is used as a fallback, allowing
+# migration from the PropertyFileSnitch.
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively. Unless this happens to match your
+# deployment conventions (as it did Facebook's), this is best used
+# as an example of writing a custom Snitch class.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the Datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here
+# coupled with a large key cache size on CFs results in the best trade
+# offs. This value is not often changed, however if you have many
+# very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/J...
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: true
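After filtering, each node gets a concrete cassandra.yaml. For an embedded two-node cluster the substituted values would look roughly like the following (illustrative values only; the real ones are supplied by DeploymentOptions and cassandra.properties):

    cluster_name: rhq
    num_tokens: 256
    listen_address: 127.0.0.1
    rpc_address: 127.0.0.1
    seed_provider:
        - class_name: org.apache.cassandra.locator.SimpleSeedProvider
          parameters:
              - seeds: "127.0.0.1,127.0.0.2"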
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.log4j-server.properties
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.log4j-server.properties
new file mode 100644
index 0000000..e1b3cd4
--- /dev/null
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.log4j-server.properties
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#
http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# for production, you should probably set pattern to %c instead of %l.
+# (%l is slower.)
+
+# output messages into a rolling log file as well as stdout
+log4j.rootLogger=${rhq.cassandra.logging.level},stdout,R
+
+# stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+# rolling log file
+log4j.appender.R=org.apache.log4j.RollingFileAppender
+log4j.appender.R.maxFileSize=20MB
+log4j.appender.R.maxBackupIndex=50
+log4j.appender.R.layout=org.apache.log4j.PatternLayout
+log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
+# Edit the next line to point to your logs directory
+log4j.appender.R.File=${rhq.cassandra.log.file}
+log4j.appender.R.Threshold=${rhq.cassandra.logging.level}
+
+# Application logging options
+#log4j.logger.org.apache.cassandra=DEBUG
+#log4j.logger.org.apache.cassandra.db=DEBUG
+#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
+
+# Adding this to avoid thrift logging disconnect errors.
+log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
+
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.passwd.properties
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.passwd.properties
new file mode 100644
index 0000000..6f1459c
--- /dev/null
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/bundle/cassandra/conf/rhq.passwd.properties
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#
http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This is a sample password file for SimpleAuthenticator. The format of
+# this file is username=password. If -Dpasswd.mode=MD5 then the password
+# is represented as an md5 digest, otherwise it is cleartext (keep this
+# in mind when setting file mode and ownership).
+
+cassandra=cassandra
+${rhq.cassandra.username}=${rhq.cassandra.password}
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/BundleProperty.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/BundleProperty.java
deleted file mode 100644
index dad9498..0000000
---
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/BundleProperty.java
+++ /dev/null
@@ -1,17 +0,0 @@
-package org.rhq.cassandra;
-
-import java.lang.annotation.ElementType;
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-import java.lang.annotation.Target;
-
-/**
- * @author John Sanda
- */
-@Retention(RetentionPolicy.RUNTIME)
-@Target({ElementType.TYPE, ElementType.METHOD })
-public @interface BundleProperty {
-
- String name();
-
-}
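BundleProperty's role is taken over by the DeploymentProperty annotation that DeploymentOptions.toMap() reads reflectively (see below). Judging from its usage, the new annotation presumably mirrors the one deleted here: a runtime-retained method annotation whose name() supplies the token key. A sketch of that shape:

    package org.rhq.cassandra;

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    /**
     * Marks a getter whose value feeds the variable substitution performed
     * during deployment; name() is the token key used in the rhq.* templates.
     */
    @Retention(RetentionPolicy.RUNTIME)
    @Target({ElementType.TYPE, ElementType.METHOD})
    public @interface DeploymentProperty {

        String name();

    }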
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
index 6713f36..1d7339e 100644
---
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/CassandraClusterManager.java
@@ -32,7 +32,6 @@ import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
-import java.io.InputStream;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.HashSet;
@@ -102,53 +101,45 @@ public class CassandraClusterManager {
         FileUtil.purge(clusterDir, false);
         List<CassandraNode> nodes = new ArrayList<CassandraNode>(deploymentOptions.getNumNodes());
-        UnmanagedDeployer deployer = new UnmanagedDeployer();
-        try {
-            try {
-                deployer.unpackBundle();
-            } catch (CassandraException e) {
-                log.error("Aborting cluster creation. Unable to unpack Cassandra bunlde.");
-                throw new RuntimeException("Aborting cluster creation. Unable to unpack Cassandra bunlde", e);
-            }
+        String seeds = collectionToString(calculateLocalIPAddresses(deploymentOptions.getNumNodes()));

-            String seeds = collectionToString(calculateLocalIPAddresses(deploymentOptions.getNumNodes()));
-
-            for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) {
-                File basedir = new File(deploymentOptions.getClusterDir(), "node" + i);
-                String address = getLocalIPAddress(i + 1);
-
-                DeploymentOptions nodeOptions = new DeploymentOptions();
-                nodeOptions.setSeeds(seeds);
-                nodeOptions.setJmxPort(deploymentOptions.getJmxPort() + i);
-                nodeOptions.setBasedir(basedir.getAbsolutePath());
-                nodeOptions.setListenAddress(address);
-                nodeOptions.setRpcAddress(address);
-                nodeOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath());
-                nodeOptions.setDataDir(new File(basedir, "data").getAbsolutePath());
-                nodeOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath());
-                nodeOptions.setLogDir(new File(basedir, "logs").getAbsolutePath());
-
-                nodeOptions.merge(deploymentOptions);
-                try {
-                    nodeOptions.load();
-                    deployer.deploy(nodeOptions, i);
-                    nodes.add(new CassandraNode(address, nodeOptions.getRpcPort(),
-                        nodeOptions.getNativeTransportPort()));
-                    installedNodeDirs.add(basedir);
-                } catch (Exception e) {
-                    log.error("Failed to install node at " + basedir);
-                    throw new RuntimeException("Failed to install node at " + basedir, e);
-                }
-            }
+        for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) {
+            File basedir = new File(deploymentOptions.getClusterDir(), "node" + i);
+            String address = getLocalIPAddress(i + 1);
+
+            DeploymentOptions nodeOptions = new DeploymentOptions();
+            nodeOptions.setSeeds(seeds);
+            nodeOptions.setJmxPort(deploymentOptions.getJmxPort() + i);
+            nodeOptions.setBasedir(basedir.getAbsolutePath());
+            nodeOptions.setListenAddress(address);
+            nodeOptions.setRpcAddress(address);
+            nodeOptions.setCommitLogDir(new File(basedir, "commit_log").getAbsolutePath());
+            nodeOptions.setDataDir(new File(basedir, "data").getAbsolutePath());
+            nodeOptions.setSavedCachesDir(new File(basedir, "saved_caches").getAbsolutePath());
+
+            nodeOptions.merge(deploymentOptions);
             try {
-                FileUtil.writeFile(new ByteArrayInputStream(new byte[] {0}), installedMarker);
-            } catch (IOException e) {
-                log.warn("Failed to write installed file marker to " + installedMarker, e);
+                nodeOptions.load();
+                Deployer deployer = new Deployer();
+                deployer.setDeploymentOptions(nodeOptions);
+                deployer.unzipDistro();
+                deployer.applyConfigChanges();
+                deployer.updateFilePerms();
+
+                nodes.add(new CassandraNode(address, nodeOptions.getRpcPort(),
+                    nodeOptions.getNativeTransportPort()));
+                installedNodeDirs.add(basedir);
+            } catch (Exception e) {
+                log.error("Failed to install node at " + basedir);
+                throw new RuntimeException("Failed to install node at " + basedir, e);
             }
-            return nodes;
-        } finally {
-            deployer.cleanUpBundle();
         }
+        try {
+            FileUtil.writeFile(new ByteArrayInputStream(new byte[]{0}), installedMarker);
+        } catch (IOException e) {
+            log.warn("Failed to write installed file marker to " + installedMarker, e);
+        }
+        return nodes;
     }
private Set<String> calculateLocalIPAddresses(int numNodes) {
@@ -268,16 +259,4 @@ public class CassandraClusterManager {
return Long.parseLong(writer.getBuffer().toString());
}
-    public List<String> getHostNames() {
-        List<String> hosts = new ArrayList<String>(deploymentOptions.getNumNodes());
-        for (int i = 0; i < deploymentOptions.getNumNodes(); ++i) {
-            hosts.add("127.0.0." + (i + 1));
-        }
-        return hosts;
-    }
-
- public InputStream loadBundle() {
- return null;
- }
-
}
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
new file mode 100644
index 0000000..cb58257
--- /dev/null
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/Deployer.java
@@ -0,0 +1,119 @@
+/*
+ *
+ * * RHQ Management Platform
+ * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * All rights reserved.
+ * *
+ * * This program is free software; you can redistribute it and/or modify
+ * * it under the terms of the GNU General Public License, version 2, as
+ * * published by the Free Software Foundation, and/or the GNU Lesser
+ * * General Public License, version 2.1, also as published by the Free
+ * * Software Foundation.
+ * *
+ * * This program is distributed in the hope that it will be useful,
+ * * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * * GNU General Public License and the GNU Lesser General Public License
+ * * for more details.
+ * *
+ * * You should have received a copy of the GNU General Public License
+ * * and the GNU Lesser General Public License along with this program;
+ * * if not, write to the Free Software Foundation, Inc.,
+ * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+package org.rhq.cassandra;
+
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.rhq.core.util.TokenReplacingReader;
+import org.rhq.core.util.ZipUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+/**
+ * Deployment consists of a few steps.
+ *
+ * <ol>
+ * <li>Unzip Cassandra to disk.</li>
+ * <li>Update configuration files like cassandra.yaml. This involves performing variable substitution.</li>
+ * <li>Update file permissions to make scripts in the bin directory executable.</li>
+ * </ol>
+ *
+ * The values used in the variable substitution are supplied by an instance of {@link DeploymentOptions}.
+ *
+ * @author John Sanda
+ */
+public class Deployer {
+
+ private final Log log = LogFactory.getLog(Deployer.class);
+
+ private DeploymentOptions deploymentOptions;
+
+ public void setDeploymentOptions(DeploymentOptions deploymentOptions) {
+ this.deploymentOptions = deploymentOptions;
+ }
+
+ public void unzipDistro() throws DeploymentException {
+        InputStream inputStream = getClass().getResourceAsStream("/cassandra.zip");
+ File deployDir = new File(deploymentOptions.getBasedir());
+ deployDir.mkdir();
+ try {
+ log.info("Unzipping storage node to " + deployDir);
+ ZipUtil.unzipFile(inputStream, deployDir);
+ } catch (IOException e) {
+ log.error("An error occurred while unzipping the storage zip file",
e);
+ throw new DeploymentException("An error occurred while unzipping the
storage zip file", e);
+ }
+ }
+
+ public void applyConfigChanges() throws DeploymentException {
+ File deployDir = new File(deploymentOptions.getBasedir());
+ File confDir = new File(deployDir, "conf");
+ Map<String, String> tokens = deploymentOptions.toMap();
+ tokens.put("cluster.name", "rhq");
+
+ applyConfigChanges(confDir, "cassandra.yaml", tokens);
+ applyConfigChanges(confDir, "log4j-server.properties", tokens);
+ applyConfigChanges(confDir, "cassandra-env.sh", tokens);
+ applyConfigChanges(confDir, "passwd.properties", tokens);
+ }
+
+    private void applyConfigChanges(File confDir, String fileName, Map<String, String> tokens) throws DeploymentException {
+ File filteredFile = new File(confDir, fileName);
+ try {
+ if (log.isInfoEnabled()) {
+ log.info("Applying configuration changes to " + filteredFile);
+ }
+ File rhqFile = new File(confDir, "rhq." + fileName);
+            TokenReplacingReader reader = new TokenReplacingReader(new FileReader(rhqFile), tokens);
+
+ StreamUtil.copy(reader, new FileWriter(filteredFile));
+ rhqFile.delete();
+ } catch (IOException e) {
+ log.error("An unexpected error occurred while apply configuration
changes to " + filteredFile, e);
+ throw new DeploymentException("An unexpected error occurred while apply
configuration changes to " +
+ filteredFile, e);
+ }
+ }
+
+ public void updateFilePerms() {
+ File deployDir = new File(deploymentOptions.getBasedir());
+ File binDir = new File(deployDir, "bin");
+
+ log.info("Updating file permissions in " + binDir);
+
+ for (File f : binDir.listFiles()) {
+ f.setExecutable(true);
+ }
+ }
+
+}
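Taken together, a caller drives a deployment with the same sequence CassandraClusterManager now uses: set options, unzip, filter, fix permissions. A minimal single-node sketch (the basedir path is illustrative):

    import org.rhq.cassandra.Deployer;
    import org.rhq.cassandra.DeploymentOptions;

    public class DeploySingleNode {
        public static void main(String[] args) throws Exception {
            DeploymentOptions options = new DeploymentOptions();
            options.setBasedir("/tmp/rhq-storage/node0"); // illustrative path
            options.load(); // fill remaining values from cassandra.properties defaults

            Deployer deployer = new Deployer();
            deployer.setDeploymentOptions(options);
            deployer.unzipDistro();        // unpack cassandra.zip into the basedir
            deployer.applyConfigChanges(); // token-filter the rhq.* config templates
            deployer.updateFilePerms();    // make the scripts in bin/ executable
        }
    }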
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentException.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentException.java
new file mode 100644
index 0000000..bda31b2
--- /dev/null
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentException.java
@@ -0,0 +1,48 @@
+/*
+ *
+ * * RHQ Management Platform
+ * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * All rights reserved.
+ * *
+ * * This program is free software; you can redistribute it and/or modify
+ * * it under the terms of the GNU General Public License, version 2, as
+ * * published by the Free Software Foundation, and/or the GNU Lesser
+ * * General Public License, version 2.1, also as published by the Free
+ * * Software Foundation.
+ * *
+ * * This program is distributed in the hope that it will be useful,
+ * * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * * GNU General Public License and the GNU Lesser General Public License
+ * * for more details.
+ * *
+ * * You should have received a copy of the GNU General Public License
+ * * and the GNU Lesser General Public License along with this program;
+ * * if not, write to the Free Software Foundation, Inc.,
+ * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+package org.rhq.cassandra;
+
+/**
+ * @author John Sanda
+ */
+public class DeploymentException extends Exception {
+
+ public DeploymentException() {
+ }
+
+ public DeploymentException(String message) {
+ super(message);
+ }
+
+ public DeploymentException(String message, Throwable cause) {
+ super(message, cause);
+ }
+
+ public DeploymentException(Throwable cause) {
+ super(cause);
+ }
+
+}
diff --git
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentOptions.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentOptions.java
index 129c1ce..317cf46 100644
---
a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentOptions.java
+++
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentOptions.java
@@ -25,13 +25,21 @@
package org.rhq.cassandra;
+import java.beans.BeanInfo;
+import java.beans.Introspector;
+import java.beans.PropertyDescriptor;
import java.io.IOException;
import java.io.InputStream;
+import java.lang.reflect.Method;
+import java.util.Map;
import java.util.Properties;
+import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.rhq.core.util.TokenReplacingProperties;
+
/**
* <p>
* A container for deployment options and Cassandra configuration settings. A
@@ -56,19 +64,13 @@ public class DeploymentOptions {
     // If you add a new field make sure that it is exposed as a "sticky" property. In
     // other words, once set the property's value does not change again. See
-    // setClusterDir below for an example. If the property corresponds to an input-property
-    // in deploy.xml, then annotate the property's getter method with @BundleProperty and
-    // set the name attribute to the name of the corresponding input-property in deploy.xml.
+    // setClusterDir below for an example.
- private String bundleFileName;
- private String bundleName;
- private String bundleVersion;
private String clusterDir;
private String basedir;
private Integer numNodes;
private Boolean embedded;
private String loggingLevel;
- private Long ringDelay;
private Integer numTokens;
private Integer nativeTransportPort;
private Integer rpcPort;
@@ -80,7 +82,6 @@ public class DeploymentOptions {
private String dataDir;
private String commitLogDir;
private String savedCachesDir;
- private String logDir;
private String listenAddress;
private String rpcAddress;
private String passwordPropertiesFile;
@@ -133,19 +134,11 @@ public class DeploymentOptions {
}
private void init(Properties properties) {
-        setBundleFileName(properties.getProperty("rhq.cassandra.bundle.filename"));
-        setBundleName(properties.getProperty("rhq.cassandra.bundle.name"));
-        setBundleVersion(properties.getProperty("rhq.cassandra.bundle.version"));
         setClusterDir(loadProperty("rhq.cassandra.cluster.dir", properties));
         setNumNodes(Integer.parseInt(loadProperty("rhq.cassandra.cluster.num-nodes", properties)));
         setEmbedded(Boolean.valueOf(loadProperty("rhq.cassandra.cluster.is-embedded", properties)));
         setLoggingLevel(loadProperty("rhq.cassandra.logging.level", properties));
-        String ringDelay = loadProperty("rhq.cassandra.ring.delay", properties);
-        if (ringDelay != null && !ringDelay.isEmpty()) {
-            setRingDelay(Long.valueOf(ringDelay));
-        }
-
         setNumTokens(Integer.valueOf(loadProperty("rhq.cassandra.num-tokens", properties)));
         setNativeTransportPort(Integer.valueOf(loadProperty("rhq.cassandra.native-transport-port", properties)));
         setRpcPort(Integer.valueOf(loadProperty("rhq.cassandra.rpc-port", properties)));
@@ -158,8 +151,7 @@ public class DeploymentOptions {
setDataDir(loadProperty("rhq.cassandra.data.dir", properties));
         setCommitLogDir(loadProperty("rhq.cassandra.commitlog.dir", properties));
         setSavedCachesDir(loadProperty("rhq.cassandra.saved.caches.dir", properties));
-        setLogDir(loadProperty("rhq.cassandra.log.dir", properties));
-        setLogFileName(loadProperty("rhq.cassandra.log.file.name", properties));
+        setLogFileName(loadProperty("rhq.cassandra.log.file", properties));
         setListenAddress(loadProperty("rhq.cassandra.listen.address", properties));
         setRpcAddress(loadProperty("rhq.cassandra.rpc.address", properties));
         setPasswordPropertiesFile(loadProperty("rhq.cassandra.password.properties.file", properties));
@@ -182,14 +174,10 @@ public class DeploymentOptions {
}
public void merge(DeploymentOptions other) {
- setBundleFileName(other.bundleFileName);
- setBundleName(other.bundleName);
- setBundleVersion(other.bundleVersion);
setClusterDir(other.clusterDir);
setNumNodes(other.numNodes);
setEmbedded(other.embedded);
setLoggingLevel(other.loggingLevel);
- setRingDelay(other.ringDelay);
setNumTokens(other.numTokens);
setNativeTransportPort(other.nativeTransportPort);
setNativeTransportMaxThreads(other.nativeTransportMaxThreads);
@@ -202,7 +190,6 @@ public class DeploymentOptions {
setDataDir(other.dataDir);
setCommitLogDir(other.commitLogDir);
setSavedCachesDir(other.savedCachesDir);
- setLogDir(other.logDir);
setLogFileName(other.logFileName);
setListenAddress(other.listenAddress);
setRpcAddress(other.rpcAddress);
@@ -216,122 +203,158 @@ public class DeploymentOptions {
setHeapNewSize(other.heapNewSize);
}
- public String getBundleFileName() {
- return bundleFileName;
- }
-
- public void setBundleFileName(String name) {
- if (bundleFileName == null) {
- bundleFileName = name;
- }
- }
-
- public String getBundleName() {
- return bundleName;
- }
-
- public void setBundleName(String name) {
- if (bundleName == null) {
- bundleName = name;
- }
- }
-
- public String getBundleVersion() {
- return bundleVersion;
- }
+ public TokenReplacingProperties toMap() {
+ try {
+ BeanInfo beanInfo = Introspector.getBeanInfo(DeploymentOptions.class);
+ Map<String, String> properties = new TreeMap<String, String>();
- public void setBundleVersion(String version) {
- if (bundleVersion == null) {
- bundleVersion = version;
+ for (PropertyDescriptor pd : beanInfo.getPropertyDescriptors()) {
+ if (pd.getReadMethod() == null) {
+ throw new RuntimeException("The [" + pd.getName() + "]
property must define a getter method");
+ }
+ Method method = pd.getReadMethod();
+                DeploymentProperty deploymentProperty = method.getAnnotation(DeploymentProperty.class);
+ if (deploymentProperty != null) {
+ Object value = method.invoke(this, null);
+ if (value != null) {
+ properties.put(deploymentProperty.name(), value.toString());
+ }
+ }
+ }
+ return new TokenReplacingProperties(properties);
+ } catch (Exception e) {
+ throw new RuntimeException("Failed to convert " +
DeploymentOptions.class.getName() + " to a map", e);
}
}
- @BundleProperty(name = "cluster.dir")
+ /**
+ * @return The directory in which nodes will be installed. This only applies to
+ * embedded clusters.
+ */
+ @DeploymentProperty(name = "cluster.dir")
public String getClusterDir() {
return clusterDir;
}
+ /**
+ * @param dir The directory in which nodes will be installed. This only applies to
+ * embedded clusters.
+ */
public void setClusterDir(String dir) {
if (clusterDir == null) {
clusterDir = dir;
}
}
- @BundleProperty(name = "rhq.deploy.dir")
+ /**
+ * @return The directory in which the node will be installed.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.basedir")
public String getBasedir() {
return basedir;
}
+ /**
+ * @param dir The directory in which the node will be installed.
+ */
public void setBasedir(String dir) {
if (basedir == null) {
basedir = dir;
}
}
+ /**
+     * @return The number of nodes in the cluster. This only applies to embedded clusters.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.cluster.num-nodes")
public int getNumNodes() {
return numNodes;
}
+ /**
+ * @param numNodes The number of nodes in the cluster. This only applies to embedded
+ * clusters.
+ */
public void setNumNodes(int numNodes) {
if (this.numNodes == null) {
this.numNodes = numNodes;
}
}
+ /**
+     * @return true if this is an embedded deployment, false otherwise. Note that an
+     * embedded cluster is one in which all nodes run on a single host and can only accept
+ * requests from that same host.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.cluster.is-embedded")
public boolean isEmbedded() {
return embedded;
}
+ /**
+     * @param embedded A flag that indicates whether or not this is an embedded deployment.
+     * Note that an embedded cluster is one in which all nodes run on a single host and can
+ * only accept requests from that same host.
+ */
public void setEmbedded(boolean embedded) {
if (this.embedded == null) {
this.embedded = embedded;
}
}
- @BundleProperty(name = "logging.level")
+ /**
+ * @return The log4j logging level that Cassandra uses
+ */
+ @DeploymentProperty(name = "rhq.cassandra.logging.level")
public String getLoggingLevel() {
return loggingLevel;
}
+ /**
+ * @param loggingLevel The log4j logging level that Cassandra uses
+ */
public void setLoggingLevel(String loggingLevel) {
if (this.loggingLevel == null) {
this.loggingLevel = loggingLevel;
}
}
- public Long getRingDelay() {
- return ringDelay;
- }
-
- public void setRingDelay(Long ringDelay) {
- if (this.ringDelay == null) {
- this.ringDelay = ringDelay;
- }
- }
-
- @BundleProperty(name = "rhq.cassandra.num_tokens")
+ /**
+     * @return The number of tokens assigned to this node on the ring. Defaults to 256.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.num_tokens")
public Integer getNumTokens() {
return numTokens;
}
+ /**
+     * @param numTokens The number of tokens assigned to this node on the ring. Defaults to
+     * 256.
+ */
public void setNumTokens(int numTokens) {
if (this.numTokens == null) {
this.numTokens = numTokens;
}
}
- @BundleProperty(name = "rhq.cassandra.native_transport_port")
+ /**
+ * @return The port on which Cassandra listens for client requests.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.native_transport_port")
public Integer getNativeTransportPort() {
return nativeTransportPort;
}
+ /**
+ * @param port The port on which Cassandra listens for client requests.
+ */
public void setNativeTransportPort(Integer port) {
if (nativeTransportPort == null) {
nativeTransportPort = port;
}
}
- @BundleProperty(name = "rhq.cassandra.rpc_port")
+ @DeploymentProperty(name = "rhq.cassandra.rpc_port")
public Integer getRpcPort() {
return rpcPort;
}
@@ -342,128 +365,187 @@ public class DeploymentOptions {
}
}
- @BundleProperty(name = "rhq.casandra.native_transport_max_threads")
+ /**
+ * @return The max number of threads to handle CQL requests
+ */
+ @DeploymentProperty(name = "rhq.casandra.native_transport_max_threads")
public Integer getNativeTransportMaxThreads() {
return nativeTransportMaxThreads;
}
- public void setNativeTransportMaxThreads(int numThreads) {
+ /**
+ * @param numThreads The max number of threads to handle CQL requests
+ */
+ public void setNativeTransportMaxThreads(Integer numThreads) {
if (nativeTransportMaxThreads == null) {
nativeTransportMaxThreads = numThreads;
}
}
- @BundleProperty(name = "rhq.cassandra.username")
+ /**
+     * @return The username RHQ will use to make client connections to Cassandra. This is
+     * <strong>not</strong> a Cassandra configuration property. This deployment property is
+     * written to rhq-server.properties at build time by the rhq-container.build.xml script.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.username")
public String getUsername() {
return username;
}
+ /**
+     * @param username The username RHQ will use to make client connections to Cassandra.
+     * This is <strong>not</strong> a Cassandra configuration property. This deployment
+ * property is written to rhq-server.properties at build time by the
+ * rhq-container.build.xml script.
+ */
public void setUsername(String username) {
if (this.username == null) {
this.username = username;
}
}
- @BundleProperty(name = "rhq.cassandra.password")
+ /**
+     * @return The password RHQ will use to make client connections to Cassandra. This is
+     * <strong>not</strong> a Cassandra configuration property. This deployment property is
+     * written to rhq-server.properties at build time by the rhq-container.build.xml script.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.password")
public String getPassword() {
return password;
}
+ /**
+     * @param password The password RHQ will use to make client connections to Cassandra.
+     * This is <strong>not</strong> a Cassandra configuration property. This deployment
+ * property is written to rhq-server.properties at build time by the
+ * rhq-container.build.xml script.
+ */
public void setPassword(String password) {
if (this.password == null) {
this.password = password;
}
}
- @BundleProperty(name = "rhq.cassandra.authenticator")
+ /**
+ * @return The FQCN of the class that handles Cassandra authentication
+ */
+ @DeploymentProperty(name = "rhq.cassandra.authenticator")
public String getAuthenticator() {
return authenticator;
}
+ /**
+ * @param authenticator The FQCN of the class that handles Cassandra authentication
+ */
public void setAuthenticator(String authenticator) {
if (this.authenticator == null) {
this.authenticator = authenticator;
}
}
- @BundleProperty(name = "rhq.cassandra.authorizer")
+ /**
+ * @return The FQCN of the class that handles Cassandra authorization
+ */
+ @DeploymentProperty(name = "rhq.cassandra.authorizer")
public String getAuthorizer() {
return authorizer;
}
+ /**
+ * @param authorizer The FQCN of the class that handles Cassandra authorization
+ */
public void setAuthorizer(String authorizer) {
if (this.authorizer == null) {
this.authorizer = authorizer;
}
}
- @BundleProperty(name = "data.dir")
+ /**
+ * @return The directory where Cassandra stores data on disk
+ */
+ @DeploymentProperty(name = "rhq.cassandra.data.dir")
public String getDataDir() {
return dataDir;
}
+ /**
+ * @param dir The directory where Cassandra stores data on disk
+ */
public void setDataDir(String dir) {
if (dataDir == null) {
dataDir = dir;
}
}
- @BundleProperty(name = "commitlog.dir")
+ /**
+ * @return The directory where Cassandra stores commit log files
+ */
+ @DeploymentProperty(name = "rhq.cassandra.commitlog.dir")
public String getCommitLogDir() {
return commitLogDir;
}
+ /**
+ * @param dir The directory where Cassandra stores commit log files
+ */
public void setCommitLogDir(String dir) {
if (commitLogDir == null) {
commitLogDir = dir;
}
}
- @BundleProperty(name = "saved.caches.dir")
+ /**
+ * @return The directory where Cassandra stores saved caches on disk
+ */
+ @DeploymentProperty(name = "rhq.cassandra.saved.caches.dir")
public String getSavedCachesDir() {
return savedCachesDir;
}
+ /**
+     * @param dir The directory where Cassandra stores saved caches on disk
+ */
public void setSavedCachesDir(String dir) {
if (savedCachesDir == null) {
savedCachesDir = dir;
}
}
- @BundleProperty(name = "log.dir")
- public String getLogDir() {
- return logDir;
- }
-
- public void setLogDir(String dir) {
- if (logDir == null) {
- logDir = dir;
- }
- }
-
- @BundleProperty(name = "rhq.cassandra.log.file.name")
+ /**
+ * @return The full path of the Log4J log file to which Cassandra writes.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.log.file")
public String getLogFileName() {
return logFileName;
}
+ /**
+ * @param name The full path of the Log4J log file to which Cassandra writes.
+ */
public void setLogFileName(String name) {
if (logFileName == null) {
logFileName = name;
}
}
- @BundleProperty(name = "listen.address")
+ /**
+     * @return The address to which Cassandra binds and tells other nodes to connect to
+ */
+ @DeploymentProperty(name = "rhq.cassandra.listen.address")
public String getListenAddress() {
return listenAddress;
}
+ /**
+     * @param address The address to which Cassandra binds and tells other nodes to connect to
+ */
public void setListenAddress(String address) {
if (listenAddress == null) {
listenAddress = address;
}
}
- @BundleProperty(name = "rpc.address")
+ @DeploymentProperty(name = "rpc.address")
public String getRpcAddress() {
return rpcAddress;
}
@@ -474,88 +556,145 @@ public class DeploymentOptions {
}
}
- @BundleProperty(name = "rhq.cassandra.password.properties.file")
+ /**
+ * @return The location of the password properties file used by SimpleAuthenticator
+ */
+ @DeploymentProperty(name = "rhq.cassandra.password.properties.file")
public String getPasswordPropertiesFile() {
return passwordPropertiesFile;
}
+ /**
+     * @param file The location of the password properties file used by SimpleAuthenticator
+ */
public void setPasswordPropertiesFile(String file) {
if (passwordPropertiesFile == null) {
passwordPropertiesFile = file;
}
}
- @BundleProperty(name = "rhq.cassandra.access.properties.file")
+ /**
+     * @return The location of the authorization properties file used by SimpleAuthorizer
+ */
+ @DeploymentProperty(name = "rhq.cassandra.access.properties.file")
public String getAccessPropertiesFile() {
return accessPropertiesFile;
}
+ /**
+     * @param file The location of the authorization properties file used by SimpleAuthorizer
+ */
public void setAccessPropertiesFile(String file) {
if (accessPropertiesFile == null) {
accessPropertiesFile = file;
}
}
- @BundleProperty(name = "jmx.port")
+ /**
+ * @return The port on which Cassandra listens for JMX connections
+ */
+ @DeploymentProperty(name = "rhq.cassandra.jmx.port")
public Integer getJmxPort() {
return jmxPort;
}
+ /**
+ * @param port The port on which Cassandra listens for JMX connections
+ */
public void setJmxPort(Integer port) {
if (jmxPort == null) {
jmxPort = port;
}
}
- @BundleProperty(name = "rhq.cassandra.storage.port")
+ /**
+ * @return The port on which Cassandra listens for gossip requests
+ */
+ @DeploymentProperty(name = "rhq.cassandra.storage.port")
public Integer getStoragePort() {
return storagePort;
}
+ /**
+ * @param port The port on which Cassandra listens for gossip requests
+ */
public void setStoragePort(Integer port) {
if (storagePort == null) {
storagePort = port;
}
}
- @BundleProperty(name = "rhq.cassandra.ssl.storage.port")
+ /**
+     * @return The port on which Cassandra listens for encrypted gossip requests. Note that
+ * this is only used if encryption is enabled.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.ssl.storage.port")
public Integer getSslStoragePort() {
return sslStoragePort;
}
+ /**
+     * @param port The port on which Cassandra listens for encrypted gossip requests. Note
+ * that this is only used if encryption is enabled.
+ */
public void setSslStoragePort(Integer port) {
if (sslStoragePort == null) {
sslStoragePort = port;
}
}
- @BundleProperty(name = "seeds")
+ /**
+ * @return A comma-delimited list of IP addresses/host names that are deemed contact
+ * points during node start up to learn about the ring topology.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.seeds")
public String getSeeds() {
return seeds;
}
+ /**
+ * @param seeds A comma-delimited list of IP addresses/host names that are deemed
+ * contact points during node start up to learn about the ring topology.
+ */
public void setSeeds(String seeds) {
if (this.seeds == null) {
this.seeds = seeds;
}
}
- @BundleProperty(name = "rhq.cassandra.max.heap.size")
+ /**
+ * @return The value to use for both the max and min heap sizes. Defaults to
+ * ${MAX_HEAP_SIZE} which allows the cassandra-env.sh script to determine the value.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.max.heap.size")
public String getHeapSize() {
return heapSize;
}
+ /**
+     * @param heapSize The value to use for both the max and min heap sizes. This needs to
+     * be a valid value recognized by the -Xmx and -Xms options such as 512M.
+ */
public void setHeapSize(String heapSize) {
if (this.heapSize == null) {
this.heapSize = heapSize;
}
}
- @BundleProperty(name = "rhq.cassandra.heap.new.size")
+ /**
+ * @return The value to use for the size of the new generation. Defaults to
+ * ${HEAP_NEWSIZE} which allows the cassandra-env.sh script to determine the value.
+ */
+ @DeploymentProperty(name = "rhq.cassandra.heap.new.size")
public String getHeapNewSize() {
return heapNewSize;
}
+ /**
+     * @param heapNewSize The value to use for the size of the new generation. This needs
+     * to be a valid value recognized by the -Xmn option, such as 256M, as it is passed
+     * directly to that option.
+ */
public void setHeapNewSize(String heapNewSize) {
if (this.heapNewSize == null) {
this.heapNewSize = heapNewSize;
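
For reference, toMap() above collects every @DeploymentProperty-annotated, non-null getter value into a TokenReplacingProperties map keyed by the annotation's name. A minimal, self-contained sketch of that pattern (the Options bean and its values are illustrative and not part of the patch; it assumes the DeploymentProperty annotation from this commit is in the same package):

package org.rhq.cassandra;

import java.beans.BeanInfo;
import java.beans.Introspector;
import java.beans.PropertyDescriptor;
import java.lang.reflect.Method;
import java.util.Map;
import java.util.TreeMap;

public class DeploymentPropertyDemo {

    // Illustrative bean; DeploymentOptions plays this role in the patch.
    public static class Options {
        @DeploymentProperty(name = "cluster.dir")
        public String getClusterDir() { return "/tmp/rhq/storage"; }

        @DeploymentProperty(name = "rhq.cassandra.jmx.port")
        public Integer getJmxPort() { return null; } // null values are skipped
    }

    public static void main(String[] args) throws Exception {
        Options options = new Options();
        Map<String, String> properties = new TreeMap<String, String>();
        BeanInfo beanInfo = Introspector.getBeanInfo(Options.class, Object.class);
        for (PropertyDescriptor pd : beanInfo.getPropertyDescriptors()) {
            Method getter = pd.getReadMethod();
            if (getter == null) {
                continue; // toMap() instead fails fast on missing getters
            }
            DeploymentProperty dp = getter.getAnnotation(DeploymentProperty.class);
            Object value = (dp == null) ? null : getter.invoke(options);
            if (value != null) {
                properties.put(dp.name(), value.toString());
            }
        }
        System.out.println(properties); // prints {cluster.dir=/tmp/rhq/storage}
    }
}
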
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentProperty.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentProperty.java
new file mode 100644
index 0000000..8a74a8b
--- /dev/null
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/DeploymentProperty.java
@@ -0,0 +1,20 @@
+package org.rhq.cassandra;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * Used to provide a mapping from properties defined in cassandra.properties to properties
+ * in {@link DeploymentOptions}.
+ *
+ * @author John Sanda
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE, ElementType.METHOD })
+public @interface DeploymentProperty {
+
+ String name();
+
+}
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/UnmanagedDeployer.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/UnmanagedDeployer.java
deleted file mode 100644
index 7eabb86..0000000
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/java/org/rhq/cassandra/UnmanagedDeployer.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- *
- * * RHQ Management Platform
- * * Copyright (C) 2005-2012 Red Hat, Inc.
- * * All rights reserved.
- * *
- * * This program is free software; you can redistribute it and/or modify
- * * it under the terms of the GNU General Public License, version 2, as
- * * published by the Free Software Foundation, and/or the GNU Lesser
- * * General Public License, version 2.1, also as published by the Free
- * * Software Foundation.
- * *
- * * This program is distributed in the hope that it will be useful,
- * * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * * GNU General Public License and the GNU Lesser General Public License
- * * for more details.
- * *
- * * You should have received a copy of the GNU General Public License
- * * and the GNU Lesser General Public License along with this program;
- * * if not, write to the Free Software Foundation, Inc.,
- * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- */
-
-package org.rhq.cassandra;
-
-import java.beans.BeanInfo;
-import java.beans.Introspector;
-import java.beans.PropertyDescriptor;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.lang.reflect.Method;
-import java.util.Properties;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.rhq.bundle.ant.AntLauncher;
-import org.rhq.core.util.ZipUtil;
-import org.rhq.core.util.file.FileUtil;
-import org.rhq.core.util.stream.StreamUtil;
-
-/**
- * Performs unmanaged deployments of Cassandra nodes. The deployment is unmanaged in that
- * it happens outside of the agent. Although it is unmanaged, the same Ant-based bundle API
- * is used to execute the deployment.
- *
- * @author John Sanda
- */
-public class UnmanagedDeployer {
-
- private final Log log = LogFactory.getLog(UnmanagedDeployer.class);
-
- private DeploymentOptions deploymentOptions;
-
- private File bundleDir;
-
- public void unpackBundle() throws CassandraException {
- try {
- File bundleZipFile = unpackBundleZipFile();
- bundleDir = unpackBundle(bundleZipFile);
- } catch (IOException e) {
- log.error("An error occurred while unpacking the bundle", e);
- throw new CassandraException("An error occurred while unpacking the
bundle", e);
- }
- }
-
- public void cleanUpBundle() {
- if (bundleDir != null && bundleDir.exists()) {
- FileUtil.purge(bundleDir, true);
- }
- }
-
-    public void deploy(DeploymentOptions options, int deploymentId) throws CassandraException {
- Properties bundleProperties = createBundleProperties(options, deploymentId);
- runAnt(bundleProperties, bundleDir);
- }
-
-    private Properties createBundleProperties(DeploymentOptions options, int deploymentId) throws CassandraException {
- try {
- Properties properties = new Properties();
- properties.put("cluster.name", "rhq");
- properties.put("rhq.deploy.id", deploymentId);
- properties.put("rhq.deploy.phase", "install");
-
- BeanInfo beanInfo = Introspector.getBeanInfo(DeploymentOptions.class);
-
- for (PropertyDescriptor pd : beanInfo.getPropertyDescriptors()) {
- if (pd.getReadMethod() == null) {
- continue;
- }
- Method method = pd.getReadMethod();
-                BundleProperty bundleProperty = method.getAnnotation(BundleProperty.class);
- if (bundleProperty != null) {
- properties.put(bundleProperty.name(), method.invoke(options, null));
- }
- }
- return properties;
- } catch (Exception e) {
- throw new CassandraException("Failed to create bundle deployment
properties", e);
- }
- }
-
-    private void runAnt(Properties deployProps, File bundleDir) throws CassandraException {
- AntLauncher launcher = new AntLauncher();
- try {
- File recipeFile = new File(bundleDir, "deploy.xml");
- launcher.executeBundleDeployFile(recipeFile, deployProps, null);
- } catch (Exception e) {
- String msg = "Failed to execute local rhq cassandra bundle
deployment";
- //logException(msg, e);
- throw new CassandraException(msg, e);
- }
- }
-
- private File unpackBundleZipFile() throws IOException {
-        InputStream bundleInputStream = getClass().getResourceAsStream("/cassandra-bundle.zip");
-        File bundleZipFile = File.createTempFile("cassandra-bundle.zip", null);
- StreamUtil.copy(bundleInputStream, new FileOutputStream(bundleZipFile));
-
- return bundleZipFile;
- }
-
- private File unpackBundle(File bundleZipFile) throws IOException {
-        File bundleDir = new File(System.getProperty("java.io.tmpdir"), "rhq-cassandra-bundle");
- bundleDir.mkdir();
- ZipUtil.unzipFile(bundleZipFile, bundleDir);
-
- return bundleDir;
- }
-
-}
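
With the Ant-based recipe gone, the ${...} tokens in the rhq.* template files are resolved by plain token replacement instead of the bundle machinery. A rough sketch of that style of substitution (this is not the actual TokenReplacingReader, just an illustration of the idea):

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class TokenReplaceSketch {

    private static final Pattern TOKEN = Pattern.compile("\\$\\{([^}]+)\\}");

    // Replace each ${name} with its value; unknown tokens pass through unchanged.
    static String replaceTokens(String template, Map<String, String> values) {
        Matcher m = TOKEN.matcher(template);
        StringBuffer out = new StringBuffer();
        while (m.find()) {
            String value = values.get(m.group(1));
            m.appendReplacement(out, Matcher.quoteReplacement(value != null ? value : m.group()));
        }
        m.appendTail(out);
        return out.toString();
    }

    public static void main(String[] args) {
        Map<String, String> values = new HashMap<String, String>();
        values.put("rhq.cassandra.basedir", "/opt/rhq/storage");
        System.out.println(replaceTokens(
            "saved_caches_directory: ${rhq.cassandra.basedir}/saved_caches", values));
        // prints: saved_caches_directory: /opt/rhq/storage/saved_caches
    }
}
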
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/resources/cassandra.properties
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/resources/cassandra.properties
index 2c8b0dd..9bdc696 100644
--- a/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/resources/cassandra.properties
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/main/resources/cassandra.properties
@@ -1,10 +1,6 @@
# These properties are used for the Cassandra bundle deployment and for embedded cluster
# deployments. Properties that affect embedded cluster deployments are used only in
# development and test environments, not production environments.
-cassandra.version=1.2.0-beta3
-rhq.cassandra.bundle.filename=/cassandra-bundle.zip
-rhq.cassandra.bundle.name=RHQ Cassandra Bundle
-rhq.cassandra.bundle.version=1.0
# The directory in which Cassandra is installed. This property must be
# specified.
@@ -16,11 +12,6 @@ rhq.cassandra.username=rhqadmin
# The password with which to authenticate requests to Cassandra.
rhq.cassandra.password=rhqadmin
-# When a node initializes it contacts a seed and then sleeps for RING_DELAY (milliseconds)
-# to learn about other nodes in the cluster. This defaults to 30 seconds. Cassandra gets
-# the value from the cassandra.ring_delay_ms system property
-# rhq.cassandra.ring.delay
-
# Defines the number of tokens randomly assigned to a node on the ring. The more tokens,
# relative to other nodes, the larger the proportion of data that this node will store. You
# probably want all nodes to have the same number of tokens assuming they have equal
@@ -38,7 +29,6 @@ rhq.cassandra.seeds=localhost
# A class that performs authentication. The value should be a fully qualified class name
# and implement IAuthenticator.
rhq.cassandra.authenticator=org.rhq.cassandra.auth.SimpleAuthenticator
-#rhq.cassandra.authenticator=org.apache.cassandra.auth.AllowAllAuthenticator
# A class that performs authorization. Used to limit/provide permissions. The value should
# be a fully qualified class name and implement IAuthorizer.
@@ -46,11 +36,11 @@ rhq.cassandra.authorizer=org.rhq.cassandra.auth.SimpleAuthorizer
# The location of the password properties file used by SimpleAuthenticator. If a relative
# path is specified, its location is resolved relative to Cassandra's bin directory.
-rhq.cassandra.password.properties.file=./../conf/passwd.properties
+rhq.cassandra.password.properties.file=${rhq.cassandra.basedir}/conf/passwd.properties
-# The location of the authorization properties file used by SimpleAuthority. If a relative
+# The location of the authorization properties file used by SimpleAuthorizer. If a relative
# path is specified, its location is resolved relative to Cassandra's bin directory.
-rhq.cassandra.access.properties.file=./../conf/access.properties
+rhq.cassandra.access.properties.file=${rhq.cassandra.basedir}/conf/access.properties
# The IP address or hostname that other Cassandra nodes will use to connect to
# this node.
@@ -90,12 +80,18 @@ rhq.cassandra.saved.caches.dir=${rhq.cassandra.basedir}/saved_caches
# The directory where Cassandra stores log4j log files.
rhq.cassandra.log.dir=${rhq.cassandra.basedir}/logs
-# The name of the log file to which Cassandra writes.
-rhq.cassandra.log.file.name=rhq-storage-node.log
+# The log file to which Cassandra writes.
+rhq.cassandra.log.file=${rhq.cassandra.basedir}/logs/rhq-storage-node.log
+# Used for both the min and max heap sizes for the Cassandra JVM.
rhq.cassandra.max.heap.size=\${MAX_HEAP_SIZE}
+
+# Heap new size refers to the size of the young generation.
rhq.cassandra.heap.new.size=\${HEAP_NEWSIZE}
+# The log4j logging level to use.
+rhq.cassandra.logging.level=DEBUG
+
# The remaining properties pertain to cluster configuration and are only used in
# development and testing environments when an embedded cluster is used. These properties
# are also loaded into the container build (when the dev profile is active) in the
@@ -110,13 +106,14 @@ rhq.cassandra.heap.new.size=\${HEAP_NEWSIZE}
# properties that are set will be ignored as they are only used with embedded clusters.
rhq.cassandra.cluster.is-embedded=true
+###################################################################
+# Properties below this point apply ONLY to embedded clusters. #
+###################################################################
+
# The directory in which cluster nodes will be installed.
rhq.cassandra.cluster.dir=${rhq.rootDir}/cassandra
# The number of nodes in the cluster. This specifies how many nodes to install and
# configure. The top level or base directory for each node will be nodeN where N is the
# node number.
-rhq.cassandra.cluster.num-nodes=2
-
-# The log4j logging level to use on each node.
-rhq.cassandra.logging.level=DEBUG
+rhq.cassandra.cluster.num-nodes=2
\ No newline at end of file
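
Note the escaped values above: \${MAX_HEAP_SIZE} and \${HEAP_NEWSIZE} keep the literal ${...} text in the generated cassandra-env.sh so the script can resolve the value itself at node start. One way escape handling like that can be implemented (an illustrative sketch, not the project's actual code):

import java.util.HashMap;
import java.util.Map;

public class EscapeAwareReplace {

    // A backslash before a token emits the token text literally; otherwise
    // ${name} is looked up in the map. Assumes well-formed ${...} tokens.
    static String replace(String s, Map<String, String> vals) {
        StringBuilder out = new StringBuilder();
        for (int i = 0; i < s.length(); i++) {
            char c = s.charAt(i);
            if (c == '\\' && i + 1 < s.length() && s.charAt(i + 1) == '$') {
                out.append('$'); // drop the escape, keep the token text
                i++;
            } else if (c == '$' && i + 1 < s.length() && s.charAt(i + 1) == '{') {
                int end = s.indexOf('}', i);
                String key = s.substring(i + 2, end);
                out.append(vals.containsKey(key) ? vals.get(key) : s.substring(i, end + 1));
                i = end;
            } else {
                out.append(c);
            }
        }
        return out.toString();
    }

    public static void main(String[] args) {
        Map<String, String> vals = new HashMap<String, String>();
        vals.put("rhq.cassandra.basedir", "/opt/rhq/storage");
        // the escaped token survives, so cassandra-env.sh resolves it later:
        System.out.println(replace("MAX_HEAP_SIZE=\\${MAX_HEAP_SIZE}", vals));
        // prints: MAX_HEAP_SIZE=${MAX_HEAP_SIZE}
        System.out.println(replace("data: ${rhq.cassandra.basedir}/data", vals));
        // prints: data: /opt/rhq/storage/data
    }
}
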
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java
new file mode 100644
index 0000000..ef367cb
--- /dev/null
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/java/org/rhq/cassandra/DeployerITest.java
@@ -0,0 +1,137 @@
+/*
+ *
+ * * RHQ Management Platform
+ * * Copyright (C) 2005-2012 Red Hat, Inc.
+ * * All rights reserved.
+ * *
+ * * This program is free software; you can redistribute it and/or modify
+ * * it under the terms of the GNU General Public License, version 2, as
+ * * published by the Free Software Foundation, and/or the GNU Lesser
+ * * General Public License, version 2.1, also as published by the Free
+ * * Software Foundation.
+ * *
+ * * This program is distributed in the hope that it will be useful,
+ * * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * * GNU General Public License and the GNU Lesser General Public License
+ * * for more details.
+ * *
+ * * You should have received a copy of the GNU General Public License
+ * * and the GNU Lesser General Public License along with this program;
+ * * if not, write to the Free Software Foundation, Inc.,
+ * * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+package org.rhq.cassandra;
+
+import static java.util.Arrays.asList;
+import static org.testng.Assert.assertFalse;
+import static org.testng.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FileReader;
+import java.util.List;
+
+import org.testng.annotations.BeforeClass;
+import org.testng.annotations.Test;
+
+import org.rhq.core.util.StringUtil;
+import org.rhq.core.util.stream.StreamUtil;
+
+import difflib.DiffUtils;
+import difflib.Patch;
+
+/**
+ * @author John Sanda
+ */
+public class DeployerITest {
+
+ private File deployDir;
+
+ private File confDir;
+
+ private Deployer deployer;
+
+ @BeforeClass
+ public void initDeployer() throws Exception {
+        deployDir = new File(System.getProperty("rhq.storage.deploy-dir", System.getProperty("java.io.tmpdir")));
+ confDir = new File(deployDir, "conf");
+
+ DeploymentOptions deploymentOptions = new DeploymentOptions();
+ deploymentOptions.setBasedir(deployDir.getAbsolutePath());
+ deploymentOptions.setCommitLogDir("/var/lib/rhq/storage/commit_log");
+ deploymentOptions.setDataDir("/var/lib/rhq/storage/data");
+        deploymentOptions.setSavedCachesDir("/var/lib/rhq/storage/saved_caches");
+        deploymentOptions.setLogFileName("/var/lib/rhq/storage/logs/rhq-storage.log");
+ deploymentOptions.setPasswordPropertiesFile("conf/passwd.properties");
+ deploymentOptions.setAccessPropertiesFile("conf/access.properties");
+ deploymentOptions.load();
+
+ deployer = new Deployer();
+ deployer.setDeploymentOptions(deploymentOptions);
+ }
+
+ @Test
+ public void deploy() throws Exception {
+ deployer.unzipDistro();
+
+ // Just do some minimal tests to verify that things are where we expect. This is
+ // not intended to verify every single directory/path.
+ File binDir = new File(deployDir, "bin");
+ assertTrue(binDir.exists(), binDir + " does not exist");
+
+ File confDir = new File(deployDir, "conf");
+ assertTrue(confDir.exists(), confDir + " does not exist");
+
+ File libDir = new File(deployDir, "lib");
+ assertTrue(libDir.exists(), libDir + " does not exist");
+ }
+
+ @Test(dependsOnMethods = "deploy")
+ public void applyConfigChanges() throws Exception {
+ deployer.applyConfigChanges();
+ }
+
+ @Test(dependsOnMethods = "applyConfigChanges")
+ public void verifyConfigChangesToCassandraYaml() throws Exception {
+ assertFileDeployedAndUpdated("cassandra.yaml");
+ }
+
+ @Test(dependsOnMethods = "applyConfigChanges")
+ public void verifyConfigChangesToLog4J() throws Exception {
+ assertFileDeployedAndUpdated("log4j-server.properties");
+ }
+
+ @Test(dependsOnMethods = "applyConfigChanges")
+ public void verifyConfigChangesToCassandraEnv() throws Exception {
+ assertFileDeployedAndUpdated("cassandra-env.sh");
+ }
+
+ private void assertFileDeployedAndUpdated(String fileName) throws Exception {
+ File rhqFile = new File(confDir, "rhq." + fileName);
+ File file = new File(confDir, fileName);
+ assertTrue(file.exists(), file + " does not exist");
+ assertFalse(rhqFile.exists(), "Failed to delete " + rhqFile);
+ assertFileUpdated(file);
+ }
+
+ private void assertFileUpdated(File actualFile) throws Exception {
+        File expectedFile = new File(getClass().getResource("/expected." + actualFile.getName()).toURI());
+        assertTrue(expectedFile.exists(), "Cannot verify that " + actualFile.getName() + " has been updated. There " +
+            "should be a file named expected." + actualFile.getName() + " in the root of the test classpath.");
+
+ String actualContents = StreamUtil.slurp(new FileReader(actualFile));
+ List<String> actualList = asList(actualContents.split("\\n"));
+
+ String expectedContents = StreamUtil.slurp(new FileReader(expectedFile));
+ List<String> expectedList =
asList(expectedContents.split("\\n"));
+
+ Patch patch = DiffUtils.diff(actualList, expectedList);
+ List<String> diffs = DiffUtils.generateUnifiedDiff(actualFile.getName(),
"expected.cassandra.yaml",
+ actualList, patch, 5);
+        assertTrue(patch.getDeltas().isEmpty(), actualFile.getName() + " was not configured correctly. The " +
+            "following differences were found:\n" + StringUtil.listToString(diffs, "\n"));
+ }
+
+}
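
The diff-based assertion in assertFileUpdated() comes from the java-diff-utils library (package difflib). The same pattern in isolation, with made-up file contents:

import static java.util.Arrays.asList;

import java.util.List;

import difflib.DiffUtils;
import difflib.Patch;

public class DiffDemo {

    public static void main(String[] args) {
        List<String> actual = asList("cluster_name: rhq", "num_tokens: 256");
        List<String> expected = asList("cluster_name: rhq", "num_tokens: 512");

        // An empty delta list means the two files match; otherwise render a
        // unified diff for the failure message, as the test above does.
        Patch patch = DiffUtils.diff(actual, expected);
        List<String> diff = DiffUtils.generateUnifiedDiff("actual", "expected", actual, patch, 5);
        for (String line : diff) {
            System.out.println(line);
        }
    }
}
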
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh
new file mode 100644
index 0000000..1516305
--- /dev/null
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra-env.sh
@@ -0,0 +1,239 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+calculate_heap_sizes()
+{
+ case "`uname`" in
+ Linux)
+ system_memory_in_mb=`free -m | awk '/Mem:/ {print $2}'`
+            system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo`
+ ;;
+ FreeBSD)
+ system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ SunOS)
+ system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'`
+ system_cpu_cores=`psrinfo | wc -l`
+ ;;
+ Darwin)
+ system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'`
+ system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024`
+ system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'`
+ ;;
+ *)
+ # assume reasonable defaults for e.g. a modern desktop or
+ # cheap server
+ system_memory_in_mb="2048"
+ system_cpu_cores="2"
+ ;;
+ esac
+
+ # some systems like the raspberry pi don't report cores, use at least 1
+ if [ "$system_cpu_cores" -lt "1" ]
+ then
+ system_cpu_cores="1"
+ fi
+
+ # set max heap size based on the following
+ # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB))
+ # calculate 1/2 ram and cap to 1024MB
+ # calculate 1/4 ram and cap to 8192MB
+ # pick the max
+ half_system_memory_in_mb=`expr $system_memory_in_mb / 2`
+ quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2`
+ if [ "$half_system_memory_in_mb" -gt "1024" ]
+ then
+ half_system_memory_in_mb="1024"
+ fi
+ if [ "$quarter_system_memory_in_mb" -gt "8192" ]
+ then
+ quarter_system_memory_in_mb="8192"
+ fi
+ if [ "$half_system_memory_in_mb" -gt
"$quarter_system_memory_in_mb" ]
+ then
+ max_heap_size_in_mb="$half_system_memory_in_mb"
+ else
+ max_heap_size_in_mb="$quarter_system_memory_in_mb"
+ fi
+ MAX_HEAP_SIZE="${max_heap_size_in_mb}M"
+
+ # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size)
+ max_sensible_yg_per_core_in_mb="100"
+    max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores`
+
+ desired_yg_in_mb=`expr $max_heap_size_in_mb / 4`
+
+ if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ]
+ then
+ HEAP_NEWSIZE="${max_sensible_yg_in_mb}M"
+ else
+ HEAP_NEWSIZE="${desired_yg_in_mb}M"
+ fi
+}
+
+# Determine the sort of JVM we'll be running on.
+
+java_ver_output=`"${JAVA:-java}" -version 2>&1`
+
+jvmver=`echo "$java_ver_output" | awk -F'"' 'NR==1 {print $2}'`
+JVM_VERSION=${jvmver%_*}
+JVM_PATCH_VERSION=${jvmver#*_}
+
+jvm=`echo "$java_ver_output" | awk 'NR==2 {print $1}'`
+case "$jvm" in
+ OpenJDK)
+ JVM_VENDOR=OpenJDK
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'`
+ ;;
+ "Java(TM)")
+ JVM_VENDOR=Oracle
+ # this will be "64-Bit" or "32-Bit"
+ JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'`
+ ;;
+ *)
+ # Help fill in other JVM values
+ JVM_VENDOR=other
+ JVM_ARCH=unknown
+ ;;
+esac
+
+
+# Override these to set the amount of memory to allocate to the JVM at
+# start-up. For production use you may wish to adjust this for your
+# environment. MAX_HEAP_SIZE is the total amount of memory dedicated
+# to the Java heap; HEAP_NEWSIZE refers to the size of the young
+# generation. Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set
+# or not (if you set one, set the other).
+#
+# The main trade-off for the young generation is that the larger it
+# is, the longer GC pause times will be. The shorter it is, the more
+# expensive GC will be (usually).
+#
+# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause
+# times. If in doubt, and if you do not particularly want to tweak, go with
+# 100 MB per physical CPU core.
+
+#MAX_HEAP_SIZE="4G"
+#HEAP_NEWSIZE="800M"
+
+if [ "x$MAX_HEAP_SIZE" = "x" ] && [
"x$HEAP_NEWSIZE" = "x" ]; then
+ calculate_heap_sizes
+else
+ if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" =
"x" ]; then
+ echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see
cassandra-env.sh)"
+ exit 1
+ fi
+fi
+
+# Specifies the default port over which Cassandra will be available for
+# JMX connections.
+JMX_PORT="7200"
+
+
+# Here we create the arguments that will get passed to the jvm when
+# starting cassandra.
+
+JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Dpasswd.properties=conf/passwd.properties"
+JVM_EXTRA_OPTS="$JVM_EXTRA_OPTS -Daccess.properties=conf/access.properties"
+
+# enable assertions. disabling this in production will give a modest
+# performance benefit (around 5%).
+JVM_OPTS="$JVM_OPTS -ea"
+
+# add the jamm javaagent
+if [ "$JVM_VENDOR" != "OpenJDK" -o "$JVM_VERSION" \>
"1.6.0" ] \
+ || [ "$JVM_VERSION" = "1.6.0" -a "$JVM_PATCH_VERSION"
-ge 23 ]
+then
+ JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.2.5.jar"
+fi
+
+# enable thread priorities, primarily so we can give periodic tasks
+# a lower priority to avoid interfering with client workload
+JVM_OPTS="$JVM_OPTS -XX:+UseThreadPriorities"
+# allows lowering thread priority without being root. see
+# http://tech.stolsvik.com/2010/01/linux-java-thread-priorities-workaround....
+JVM_OPTS="$JVM_OPTS -XX:ThreadPriorityPolicy=42"
+
+# min and max heap sizes should be set to the same value to avoid
+# stop-the-world GC pauses during resize, and so that we can lock the
+# heap in memory on startup to prevent any of it from being swapped
+# out.
+JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}"
+JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}"
+JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}"
+JVM_OPTS="$JVM_OPTS -XX:+HeapDumpOnOutOfMemoryError"
+
+# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR
+if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then
+ JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date
+%s`-pid$$.hprof"
+fi
+
+
+startswith() { [ "${1#$2}" != "$1" ]; }
+
+if [ "`uname`" = "Linux" ] ; then
+ # reduce the per-thread stack size to minimize the impact of Thrift
+ # thread-per-client. (Best practice is for client connections to
+ # be pooled anyway.) Only do so on Linux where it is known to be
+ # supported.
+ # u34 and greater need 180k
+ JVM_OPTS="$JVM_OPTS -Xss180k"
+fi
+echo "xss = $JVM_OPTS"
+
+# GC tuning options
+JVM_OPTS="$JVM_OPTS -XX:+UseParNewGC"
+JVM_OPTS="$JVM_OPTS -XX:+UseConcMarkSweepGC"
+JVM_OPTS="$JVM_OPTS -XX:+CMSParallelRemarkEnabled"
+JVM_OPTS="$JVM_OPTS -XX:SurvivorRatio=8"
+JVM_OPTS="$JVM_OPTS -XX:MaxTenuringThreshold=1"
+JVM_OPTS="$JVM_OPTS -XX:CMSInitiatingOccupancyFraction=75"
+JVM_OPTS="$JVM_OPTS -XX:+UseCMSInitiatingOccupancyOnly"
+
+# GC logging options -- uncomment to enable
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDetails"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCDateStamps"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintHeapAtGC"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintTenuringDistribution"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintGCApplicationStoppedTime"
+# JVM_OPTS="$JVM_OPTS -XX:+PrintPromotionFailure"
+# JVM_OPTS="$JVM_OPTS -XX:PrintFLSStatistics=1"
+# JVM_OPTS="$JVM_OPTS -Xloggc:/var/log/cassandra/gc-`date +%s`.log"
+
+# uncomment to have Cassandra JVM listen for remote debuggers/profilers on port 1414
+# JVM_OPTS="$JVM_OPTS -Xdebug -Xnoagent
-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=1414"
+
+# Prefer binding to IPv4 network intefaces (when net.ipv6.bindv6only=1). See
+# http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6342561 (short version:
+# comment out this entry to enable IPv6 support).
+JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=true"
+
+# jmx: metrics and administration interface
+#
+# add this if you're having trouble connecting:
+# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=<public name>"
+#
+# see
+# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems...
+# for more on configuring JMX through firewalls, etc. (Short version:
+# get it working with no firewall first.)
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT"
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false"
+JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false"
+JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS"
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml
b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml
new file mode 100644
index 0000000..e12a29b
--- /dev/null
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.cassandra.yaml
@@ -0,0 +1,653 @@
+# Cassandra storage config YAML
+
+# NOTE:
+# See http://wiki.apache.org/cassandra/StorageConfiguration for
+# full explanations of configuration directives
+# /NOTE
+
+# The name of the cluster. This is mainly used to prevent machines in
+# one logical cluster from joining another.
+cluster_name: rhq
+
+# This defines the number of tokens randomly assigned to this node on the ring
+# The more tokens, relative to other nodes, the larger the proportion of data
+# that this node will store. You probably want all nodes to have the same number
+# of tokens assuming they have equal hardware capability.
+#
+# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility,
+# and will use the initial_token as described below.
+#
+# Specifying initial_token will override this setting.
+#
+# If you already have a cluster with 1 token per node, and wish to migrate to
+# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations
+num_tokens: 256
+
+# If you haven't specified num_tokens, or have set it to the default of 1 then
+# you should always specify InitialToken when setting up a production
+# cluster for the first time, and often when adding capacity later.
+# The principle is that each node should be given an equal slice of
+# the token ring; see http://wiki.apache.org/cassandra/Operations
+# for more details.
+#
+# If blank, Cassandra will request a token bisecting the range of
+# the heaviest-loaded existing node. If there is no load information
+# available, such as is the case with a new cluster, it will pick
+# a random token, which will lead to hot spots.
+#initial_token:
+
+# See http://wiki.apache.org/cassandra/HintedHandoff
+hinted_handoff_enabled: true
+# this defines the maximum amount of time a dead host will have hints
+# generated. After it has been dead this long, new hints for it will not be
+# created until it has been seen alive and gone down again.
+max_hint_window_in_ms: 10800000 # 3 hours
+# throttle in KB's per second, per delivery thread
+hinted_handoff_throttle_in_kb: 1024
+# Number of threads with which to deliver hints;
+# Consider increasing this number when you have multi-dc deployments, since
+# cross-dc handoff tends to be slower
+max_hints_delivery_threads: 2
+
+# The following setting populates the page cache on memtable flush and compaction
+# WARNING: Enable this setting only when the whole node's data fits in memory.
+# Defaults to: false
+# populate_io_cache_on_flush: false
+
+# authentication backend, implementing IAuthenticator; used to identify users
+authenticator: org.rhq.cassandra.auth.SimpleAuthenticator
+
+# authorization backend, implementing IAuthorizer; used to limit access/provide permissions
+authorizer: org.rhq.cassandra.auth.SimpleAuthorizer
+
+# The partitioner is responsible for distributing rows (by key) across
+# nodes in the cluster. Any IPartitioner may be used, including your
+# own as long as it is on the classpath. Out of the box, Cassandra
+# provides org.apache.cassandra.dht.{Murmur3Partitioner, RandomPartitioner
+# ByteOrderedPartitioner, OrderPreservingPartitioner (deprecated)}.
+#
+# - RandomPartitioner distributes rows across the cluster evenly by md5.
+# This is the default prior to 1.2 and is retained for compatibility.
+# - Murmur3Partitioner is similar to RandomPartioner but uses Murmur3_128
+# Hash Function instead of md5. When in doubt, this is the best option.
+# - ByteOrderedPartitioner orders rows lexically by key bytes. BOP allows
+# scanning rows in key order, but the ordering can generate hot spots
+# for sequential insertion workloads.
+# - OrderPreservingPartitioner is an obsolete form of BOP, that stores
+# - keys in a less-efficient format and only works with keys that are
+# UTF8-encoded Strings.
+# - CollatingOPP colates according to EN,US rules rather than lexical byte
+# ordering. Use this as an example if you need custom collation.
+#
+# See http://wiki.apache.org/cassandra/Operations for more on
+# partitioners and token selection.
+partitioner: org.apache.cassandra.dht.Murmur3Partitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+ - /var/lib/rhq/storage/data
+
+# commit log
+commitlog_directory: /var/lib/rhq/storage/commit_log
+
+# policy for data disk failures:
+# stop: shut down gossip and Thrift, leaving the node effectively dead, but
+# still inspectable via JMX.
+# best_effort: stop using the failed disk and respond to requests based on
+# remaining available sstables. This means you WILL see obsolete
+# data at CL.ONE!
+# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra
+disk_failure_policy: stop
+
+# Maximum size of the key cache in memory.
+#
+# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the
+# minimum, sometimes more. The key cache is fairly tiny for the amount of
+# time it saves, so it's worthwhile to use it at large numbers.
+# The row cache saves even more time, but must store the whole values of
+# its rows, so it is extremely space-intensive. It's best to only use the
+# row cache if you have hot rows or static rows.
+#
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)).
Set to 0 to disable key cache.
+key_cache_size_in_mb:
+
+# Duration in seconds after which Cassandra should
+# save the keys cache. Caches are saved to saved_caches_directory as
+# specified in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 14400 or 4 hours.
+key_cache_save_period: 14400
+
+# Number of keys from the key cache to save
+# Disabled by default, meaning all keys are going to be saved
+# key_cache_keys_to_save: 100
+
+# Maximum size of the row cache in memory.
+# NOTE: if you reduce the size, you may not get your hottest keys loaded on startup.
+#
+# Default value is 0, to disable row caching.
+row_cache_size_in_mb: 0
+
+# Duration in seconds after which Cassandra should
+# save the row cache. Caches are saved to saved_caches_directory as specified
+# in this configuration file.
+#
+# Saved caches greatly improve cold-start speeds, and is relatively cheap in
+# terms of I/O for the key cache. Row cache saving is much more expensive and
+# has limited use.
+#
+# Default is 0 to disable saving the row cache.
+row_cache_save_period: 0
+
+# Number of keys from the row cache to save
+# Disabled by default, meaning all keys are going to be saved
+# row_cache_keys_to_save: 100
+
+# The provider for the row cache to use.
+#
+# Supported values are: ConcurrentLinkedHashCacheProvider, SerializingCacheProvider
+#
+# SerializingCacheProvider serialises the contents of the row and stores
+# it in native memory, i.e., off the JVM Heap. Serialized rows take
+# significantly less memory than "live" rows in the JVM, so you can cache
+# more rows in a given memory footprint. And storing the cache off-heap
+# means you can use smaller heap sizes, reducing the impact of GC pauses.
+#
+# It is also valid to specify the fully-qualified class name to a class
+# that implements org.apache.cassandra.cache.IRowCacheProvider.
+#
+# Defaults to SerializingCacheProvider
+row_cache_provider: SerializingCacheProvider
+
+# saved caches
+saved_caches_directory: /var/lib/rhq/storage/saved_caches
+
+# commitlog_sync may be either "periodic" or "batch."
+# When in batch mode, Cassandra won't ack writes until the commit log
+# has been fsynced to disk. It will wait up to
+# commitlog_sync_batch_window_in_ms milliseconds for other writes, before
+# performing the sync.
+#
+# commitlog_sync: batch
+# commitlog_sync_batch_window_in_ms: 50
+#
+# the other option is "periodic" where writes may be acked immediately
+# and the CommitLog is simply synced every commitlog_sync_period_in_ms
+# milliseconds.
+commitlog_sync: periodic
+commitlog_sync_period_in_ms: 10000
+
+# The size of the individual commitlog file segments. A commitlog
+# segment may be archived, deleted, or recycled once all the data
+# in it (potentally from each columnfamily in the system) has been
+# flushed to sstables.
+#
+# The default size is 32, which is almost always fine, but if you are
+# archiving commitlog segments (see commitlog_archiving.properties),
+# then you probably want a finer granularity of archiving; 8 or 16 MB
+# is reasonable.
+commitlog_segment_size_in_mb: 32
+
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
+seed_provider:
+ # Addresses of hosts that are deemed contact points.
+ # Cassandra nodes use this list of hosts to find each other and learn
+ # the topology of the ring. You must change this if you are running
+ # multiple nodes!
+ - class_name: org.apache.cassandra.locator.SimpleSeedProvider
+ parameters:
+ # seeds is actually a comma-delimited list of addresses.
+ # Ex: "<ip1>,<ip2>,<ip3>"
+ - seeds: "localhost"
+
+# emergency pressure valve: each time heap usage after a full (CMS)
+# garbage collection is above this fraction of the max, Cassandra will
+# flush the largest memtables.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+#
+# RELYING ON THIS AS YOUR PRIMARY TUNING MECHANISM WILL WORK POORLY:
+# it is most effective under light to moderate load, or read-heavy
+# workloads; under truly massive write load, it will often be too
+# little, too late.
+flush_largest_memtables_at: 0.75
+
+# emergency pressure valve #2: the first time heap usage after a full
+# (CMS) garbage collection is above this fraction of the max,
+# Cassandra will reduce cache maximum _capacity_ to the given fraction
+# of the current _size_. Should usually be set substantially above
+# flush_largest_memtables_at, since that will have less long-term
+# impact on the system.
+#
+# Set to 1.0 to disable. Setting this lower than
+# CMSInitiatingOccupancyFraction is not likely to be useful.
+reduce_cache_sizes_at: 0.85
+reduce_cache_capacity_to: 0.6
+
+# For workloads with more data than can fit in memory, Cassandra's
+# bottleneck will be reads that need to fetch data from
+# disk. "concurrent_reads" should be set to (16 * number_of_drives) in
+# order to allow the operations to enqueue low enough in the stack
+# that the OS and drives can reorder them.
+#
+# On the other hand, since writes are almost never IO bound, the ideal
+# number of "concurrent_writes" is dependent on the number of cores in
+# your system; (8 * number_of_cores) is a good rule of thumb.
+concurrent_reads: 32
+concurrent_writes: 32
+
+# Total memory to use for memtables. Cassandra will flush the largest
+# memtable when this much memory is used.
+# If omitted, Cassandra will set it to 1/3 of the heap.
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs. Since commitlog segments are
+# mmapped, and hence use up address space, the default size is 32
+# on 32-bit JVMs, and 1024 on 64-bit JVMs.
+#
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it. So a small total commitlog space will tend
+# to cause more flush activity on less-active columnfamilies.
+# commitlog_total_space_in_mb: 4096
+
+# This sets the amount of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked. If you have a large heap and many data directories,
+# you can increase this value for better flush performance.
+# By default this will be set to the amount of data directories defined.
+#memtable_flush_writers: 1
+
+# the number of full memtables to allow pending flush, that is,
+# waiting for a writer thread. At a minimum, this should be set to
+# the maximum number of secondary indexes created on a single CF.
+memtable_flush_queue_size: 4
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSD:s; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7001
+
+# Address to bind to and tell other Cassandra nodes to connect to. You
+# _must_ change this if you want multiple nodes to be able to
+# communicate!
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing *if* the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting this to 0.0.0.0 is always wrong.
+listen_address: localhost
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+
+# Whether to start the native transport server.
+# Currently, only the thrift server is started by default because the native
+# transport is considered beta.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+native_transport_port: 9042
+# The minimum and maximum threads for handling requests when the native
+# transport is used. The meaning of those is similar to the one of
+# rpc_min_threads and rpc_max_threads, though the default differ slightly and
+# are the ones below:
+# native_transport_min_threads: 16
+native_transport_max_threads: 64
+
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+# The address to bind the Thrift RPC service to -- clients connect
+# here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
+# you want Thrift to listen on all interfaces.
+#
+# Leaving this blank has the same effect it does for ListenAddress,
+# (i.e. it will be based on the configured hostname of the node).
+rpc_address: localhost
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# enable or disable keepalive on rpc connections
+rpc_keepalive: true
+
+# Cassandra provides three out-of-the-box options for the RPC Server:
+#
+# sync  -> One thread per thrift connection. For a very large number of clients, memory
+#          will be your limiting factor. On a 64 bit JVM, 128KB is the minimum stack size
+#          per thread, and that will correspond to your use of virtual memory (but physical memory
+# may be limited depending on use of stack space).
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#          asynchronously using a small number of threads that does not vary with the amount
+#          of thrift clients (and thus scales well to many clients). The rpc requests are still
+# synchronous (one thread per active request).
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provide no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum field length).
+thrift_framed_transport_size_in_mb: 15
+
+# The max length of a thrift message, including all fields and
+# internal thrift overhead.
+thrift_max_message_length_in_mb: 16
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# Keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# Add column indexes to a row after its contents reach this size.
+# Increase if your column values are large, or if you have a very large
+# number of columns. The competing causes are, Cassandra has to
+# deserialize this much of the row to read a single column, so you want
+# it to be small - at least if you do many partial-row reads - but all
+# the index data is read for each access, so you don't want to generate
+# that wastefully either.
+column_index_size_in_kb: 64
+
+# Size limit for rows being compacted in memory. Larger rows will spill
+# over to disk and use a slower two-pass compaction process. A message
+# will be logged specifying the row key.
+in_memory_compaction_limit_in_mb: 64
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
+#concurrent_compactors: 1
+
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this account for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Track cached row keys during compaction, and re-cache their new
+# positions in the compacted sstable. Disable if you use really large
+# key caches.
+compaction_preheat_key_cache: true
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 10000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 10000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because we need to flush all CFs
+# to make sure we can clear out anything in the commitlog that could
+# cause truncated data to reappear.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, cassandra will assume the request
+# was forwarded to the replica instantly by the coordinator.
+#
+# Warning: before enabling this property, make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming a significant amount
+# of data, so you should avoid setting the value too low.
+# Default value is 0, which means streams never time out.
+# streaming_socket_timeout_in_ms: 0
+
+# The phi value that must be reached for a host to be marked down.
+# Most users should never need to adjust this.
+# phi_convict_threshold: 8
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+# requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+# correlated failures. It does this by grouping machines into
+# "datacenters" and "racks." Cassandra will do its best not to
have
+# more than one replica on the same "rack" (which may not actually
+# be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+# - SimpleSnitch:
+# Treats Strategy order as proximity. This improves cache locality
+# when disabling read repair, which can further improve throughput.
+# Only appropriate for single-datacenter deployments.
+# - PropertyFileSnitch:
+# Proximity is determined by rack and data center, which are
+# explicitly configured in cassandra-topology.properties.
+# - GossipingPropertyFileSnitch
+# The rack and datacenter for the local node are defined in
+# cassandra-rackdc.properties and propagated to other nodes via gossip. If
+# cassandra-topology.properties exists, it is used as a fallback, allowing
+# migration from the PropertyFileSnitch.
+# - RackInferringSnitch:
+# Proximity is determined by rack and data center, which are
+# assumed to correspond to the 3rd and 2nd octet of each node's
+# IP address, respectively. Unless this happens to match your
+# deployment conventions (as it did Facebook's), this is best used
+# as an example of writing a custom Snitch class.
+# - Ec2Snitch:
+# Appropriate for EC2 deployments in a single Region. Loads Region
+# and Availability Zone information from the EC2 API. The Region is
+# treated as the Datacenter, and the Availability Zone as the rack.
+# Only private IPs are used, so this will not work across multiple
+# Regions.
+# - Ec2MultiRegionSnitch:
+# Uses public IPs as broadcast_address to allow cross-region
+# connectivity. (Thus, you should set seed addresses to the public
+# IP as well.) You will need to open the storage_port or
+# ssl_storage_port on the public IP firewall. (For intra-Region
+# traffic, Cassandra will switch to the private IP after
+# establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+# - throttle_limit -- The throttle_limit is the number of in-flight
+# requests per client. Requests beyond
+# that limit are queued up until
+# running requests can complete.
+# The value of 80 here is twice the number of
+# concurrent_reads + concurrent_writes.
+# - default_weight -- default_weight is optional and allows for
+# overriding the default which is 1.
+# - weights -- Weights are optional and will default to 1 or the
+# overridden default_weight. The weight translates into how
+# many requests are handled during each turn of the
+# RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+# throttle_limit: 80
+# default_weight: 5
+# weights:
+# Keyspace1: 1
+# Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# index_interval controls the sampling of entries from the primary
+# row index in terms of space versus time. The larger the interval,
+# the smaller and less effective the sampling will be. In technical
+# terms, the interval corresponds to the number of index entries that
+# are skipped between taking each sample. All the sampled entries
+# must fit in memory. Generally, a value between 128 and 512 here,
+# coupled with a large key cache size on CFs, results in the best
+# trade-offs. This value is not often changed; however, if you have
+# many very small rows (many to an OS page), then increasing this will
+# often lower memory usage without an impact on performance.
+index_interval: 128
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys), and TLS_RSA_WITH_AES_128_CBC_SHA as the
+# cipher suite for authentication, key exchange, and encryption of the
+# actual data transfers.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc, cassandra will encrypt the traffic between the DCs.
+# If set to rack, cassandra will encrypt the traffic between the racks.
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/J...
+#
+server_encryption_options:
+ internode_encryption: none
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ truststore: conf/.truststore
+ truststore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+ # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+ enabled: false
+ keystore: conf/.keystore
+ keystore_password: cassandra
+ # More advanced defaults below:
+ # protocol: TLS
+ # algorithm: SunX509
+ # store_type: JKS
+ # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]
+
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be: all - all traffic is compressed
+# dc - traffic between different datacenters is compressed
+# none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: true
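A rendered cassandra.yaml like the one above can be produced from a tokenized template with TokenReplacingReader, which this commit patches (see the final hunk of this diff). The sketch below illustrates that substitution step; it assumes a (Reader, Map) constructor, ${...} token syntax, and hypothetical token names and paths, none of which are confirmed by this diff.

import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Reader;
import java.io.Writer;
import java.util.HashMap;
import java.util.Map;

import org.rhq.core.util.TokenReplacingReader;

public class RenderConfigSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical token values; the real names live in the rhq.* templates.
        Map<String, String> tokens = new HashMap<String, String>();
        tokens.put("rhq.cassandra.cluster.name", "rhq");
        tokens.put("rhq.cassandra.rpc.port", "9160");

        // Substitution happens character by character as the reader is consumed.
        Reader source = new TokenReplacingReader(new FileReader("conf/rhq.cassandra.yaml"), tokens);
        Writer dest = new FileWriter("conf/cassandra.yaml");
        try {
            for (int c = source.read(); c != -1; c = source.read()) {
                dest.write(c);
            }
        } finally {
            source.close();
            dest.close();
        }
    }
}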
diff --git a/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.log4j-server.properties b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.log4j-server.properties
new file mode 100644
index 0000000..197a591
--- /dev/null
+++ b/modules/common/cassandra-ccm/cassandra-ccm-core/src/test/resources/expected.log4j-server.properties
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# for production, you should probably set pattern to %c instead of %l.
+# (%l is slower.)
+
+# output messages into a rolling log file as well as stdout
+log4j.rootLogger=DEBUG,stdout,R
+
+# stdout
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%5p %d{HH:mm:ss,SSS} %m%n
+
+# rolling log file
+log4j.appender.R=org.apache.log4j.RollingFileAppender
+log4j.appender.R.maxFileSize=20MB
+log4j.appender.R.maxBackupIndex=50
+log4j.appender.R.layout=org.apache.log4j.PatternLayout
+log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %F (line %L) %m%n
+# Edit the next line to point to your logs directory
+log4j.appender.R.File=/var/lib/rhq/storage/logs/rhq-storage.log
+log4j.appender.R.Threshold=DEBUG
+
+# Application logging options
+#log4j.logger.org.apache.cassandra=DEBUG
+#log4j.logger.org.apache.cassandra.db=DEBUG
+#log4j.logger.org.apache.cassandra.service.StorageProxy=DEBUG
+
+# Adding this to avoid thrift logging disconnect errors.
+log4j.logger.org.apache.thrift.server.TNonblockingServer=ERROR
+
diff --git a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
index 91b19e8..60ebc62 100644
--- a/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
+++ b/modules/common/cassandra-installer/src/main/java/org/rhq/storage/installer/StorageInstaller.java
@@ -51,8 +51,8 @@ import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.rhq.cassandra.Deployer;
import org.rhq.cassandra.DeploymentOptions;
-import org.rhq.cassandra.UnmanagedDeployer;
import org.rhq.cassandra.installer.RMIContextFactory;
import org.rhq.core.pluginapi.util.ProcessExecutionUtility;
import org.rhq.core.system.OperatingSystemType;
@@ -206,7 +206,6 @@ public class StorageInstaller {
deploymentOptions.setCommitLogDir(cmdLine.getOptionValue(commitLogDir));
deploymentOptions.setDataDir(dataDir);
deploymentOptions.setSavedCachesDir(savedCachesDir);
- deploymentOptions.setLogDir(logDir.getAbsolutePath());
deploymentOptions.setLoggingLevel("INFO");
deploymentOptions.setRpcPort(rpcPort);
deploymentOptions.setJmxPort(getPort(cmdLine, "jmx-port", jmxPort));
@@ -225,11 +224,12 @@ public class StorageInstaller {
"to which the storage node will need to store data.",
errors);
}
- UnmanagedDeployer deployer = new UnmanagedDeployer();
- deployer.unpackBundle();
- deployer.deploy(deploymentOptions, 1);
- log.info("Finished installing RHQ Storage Node. Performing post-install
clean up...");
- deployer.cleanUpBundle();
+ Deployer deployer = new Deployer();
+ deployer.setDeploymentOptions(deploymentOptions);
+ deployer.unzipDistro();
+ deployer.applyConfigChanges();
+ deployer.updateFilePerms();
+ log.info("Finished installing RHQ Storage Node.");
log.info("Updating rhq-server.properties...");
PropertiesFileUpdate serverPropertiesUpdater = getServerProperties();
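The new installer flow, then, is: build DeploymentOptions, hand them to a Deployer, and run the three steps shown above in order. A minimal standalone sketch, assuming a no-arg DeploymentOptions constructor and using hypothetical directories; the hunk does not show the methods' throws clauses, so the catch below is deliberately broad (a deployment-specific checked exception is the likely type).

import org.rhq.cassandra.Deployer;
import org.rhq.cassandra.DeploymentOptions;

public class InstallSketch {
    public static void main(String[] args) {
        // Hypothetical directories; StorageInstaller derives these from its command line.
        DeploymentOptions options = new DeploymentOptions();
        options.setDataDir("/var/lib/rhq/storage/data");
        options.setCommitLogDir("/var/lib/rhq/storage/commit_log");
        options.setSavedCachesDir("/var/lib/rhq/storage/saved_caches");
        options.setLoggingLevel("INFO");

        Deployer deployer = new Deployer();
        deployer.setDeploymentOptions(options);
        try {
            deployer.unzipDistro();        // unpack the bundled Cassandra distribution
            deployer.applyConfigChanges(); // render conf/ templates via token substitution
            deployer.updateFilePerms();    // e.g., make the bin/ scripts executable
        } catch (Exception e) {            // likely a deployment exception; not shown in this hunk
            throw new RuntimeException("RHQ Storage Node installation failed", e);
        }
    }
}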
diff --git a/modules/common/cassandra-installer/src/main/resources/logging.properties b/modules/common/cassandra-installer/src/main/resources/logging.properties
index 15a0e92..6b2a56e 100644
--- a/modules/common/cassandra-installer/src/main/resources/logging.properties
+++ b/modules/common/cassandra-installer/src/main/resources/logging.properties
@@ -1,5 +1,5 @@
# Additional logger names to configure (root logger is always configured)
-loggers=org.rhq, org.rhq.storage.installer.StorageInstaller
+loggers=org.rhq, org.rhq.storage.installer.StorageInstaller, org.rhq.cassandra.Deployer
# Root logger level
logger.level=${rhq.storage.installer.loglevel:INFO}
@@ -8,6 +8,7 @@ logger.handlers=FILE,CONSOLE
logger.org.rhq.level=WARN
logger.org.rhq.storage.installer.StorageInstaller.level=INFO
+logger.org.rhq.cassandra.Deployer.level=INFO
# Console handler configuration
handler.CONSOLE=org.jboss.logmanager.handlers.ConsoleHandler
diff --git a/modules/common/cassandra-installer/src/main/resources/module/main/module.xml b/modules/common/cassandra-installer/src/main/resources/module/main/module.xml
index f9d19ae..b872dfa 100644
--- a/modules/common/cassandra-installer/src/main/resources/module/main/module.xml
+++ b/modules/common/cassandra-installer/src/main/resources/module/main/module.xml
@@ -6,15 +6,10 @@
<resources>
<resource-root path="${project.build.finalName}.jar"/>
<resource-root
path="rhq-cassandra-ccm-core-${project.version}.jar"/>
- <resource-root path="rhq-ant-bundle-common-${project.version}.jar"/>
<resource-root path="rhq-core-util-${project.version}.jar"/>
<resource-root path="jdom-1.0.jar"/>
<resource-root path="i18nlog-1.0.10.jar"/>
<resource-root
path="rhq-core-native-system-${project.version}.jar"/>
- <resource-root path="ant-1.8.0.jar"/>
- <resource-root path="ant-launcher-1.8.0.jar"/>
- <resource-root path="ant-nodeps-1.8.0.jar"/>
- <resource-root path="ant-contrib-1.0b3.jar"/>
<resource-root path="rhq-core-plugin-api-${project.version}.jar"/>
<resource-root path="rhq-core-domain-${project.version}.jar"/>
<resource-root path="commons-lang-2.4.jar"/>
diff --git a/modules/core/util/src/main/java/org/rhq/core/util/TokenReplacingReader.java b/modules/core/util/src/main/java/org/rhq/core/util/TokenReplacingReader.java
index 3303b76..9ee592a 100644
--- a/modules/core/util/src/main/java/org/rhq/core/util/TokenReplacingReader.java
+++ b/modules/core/util/src/main/java/org/rhq/core/util/TokenReplacingReader.java
@@ -84,7 +84,8 @@ public class TokenReplacingReader extends Reader {
if (data == '\\') {
escaping = true;
- return data;
+ return pushbackReader.read();
}
if (data != '$')
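With this fix, a backslash is consumed rather than echoed to the output: read() returns the character that follows it directly, so an escaped token renders without the leading backslash instead of passing the backslash through. A minimal usage sketch of the intended behavior, assuming a (Reader, Map) constructor and ${...} token syntax (only the read() internals appear in this hunk, so both are assumptions):

import java.io.Reader;
import java.io.StringReader;
import java.util.HashMap;
import java.util.Map;

import org.rhq.core.util.TokenReplacingReader;

public class EscapeDemo {
    public static void main(String[] args) throws Exception {
        Map<String, String> tokens = new HashMap<String, String>();
        tokens.put("cluster.name", "rhq");

        // "\\${...}" in Java source puts a single backslash before the token in the stream.
        Reader reader = new TokenReplacingReader(
            new StringReader("name: ${cluster.name}, literal: \\${cluster.name}"), tokens);

        StringBuilder out = new StringBuilder();
        for (int c = reader.read(); c != -1; c = reader.read()) {
            out.append((char) c);
        }
        reader.close();
        System.out.println(out); // expected: name: rhq, literal: ${cluster.name}
    }
}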