 modules/common/cassandra-installer/pom.xml | 5
 modules/core/dbutils/pom.xml | 2
 modules/core/dbutils/src/main/scripts/dbsetup/sysconfig-data.xml | 6
 modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml | 14
 modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml | 3
 modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java | 2
 modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java | 69 ++++
 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java | 36 --
 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java | 3
 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java | 143 +++++---
 modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java | 4
 modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java | 5
 modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml | 4
 13 files changed, 209 insertions(+), 87 deletions(-)
New commits:
commit 9b3c7ffa8ce5232e96326ebee4f5c4ac93bc3d02
Author: John Sanda <jsanda(a)redhat.com>
Date: Sat Aug 31 12:29:25 2013 -0400
more fixes/improvements for storage node (un)deployment
There was a deadlock issue that could manifest itself in the
performAddNodeMaintenanceIfNecessary and performRemoveNodeMaintenanceIfNecessary
methods when an error occurred. Both methods had nested transactions in which the
outer and inner transactions tried to update the same storage node entity. The
transactions are no longer nested.
This commit also adds support for continuing the (un)deployment in situations
where the event notifications for the node joining/leaving the cluster are
missed.
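The shape of the fix is easiest to see in isolation. Below is a condensed,
illustrative sketch (not the full class) of how the entry point now runs
outside a transaction and calls back into the bean through its own EJB proxy,
so the mode change and the maintenance work commit in separate, non-nested
transactions. Class, method, and field names are taken from the diff that
follows; the storageNodeManager and subjectManager fields are the bean's other
injected session beans and their declarations are elided here.

    import java.net.InetAddress;

    import javax.ejb.EJB;
    import javax.ejb.Stateless;
    import javax.ejb.TransactionAttribute;
    import javax.ejb.TransactionAttributeType;

    import org.rhq.core.domain.cloud.StorageNode;

    @Stateless
    public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHandlerLocal {

        // Self-reference through the container proxy: calls made on this field get their
        // own transaction demarcation instead of nesting inside the caller's transaction.
        @EJB
        private StorageNodeOperationsHandlerLocal storageNodeOperationsHandler;

        // storageNodeManager and subjectManager are other injected beans (declarations elided).

        // The entry point itself runs with no transaction at all ...
        @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
        public void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) {
            StorageNode storageNode = storageNodeManager.findStorageNodeByAddress(storageNodeAddress);
            if (storageNode.getOperationMode() == StorageNode.OperationMode.BOOTSTRAP) {
                // ... first transaction: persist the mode change and commit ...
                storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.ADD_MAINTENANCE);
                // ... second, independent transaction: perform the add-node maintenance work.
                storageNodeOperationsHandler.performAddMaintenance(subjectManager.getOverlord(), storageNode);
            }
        }

        // remaining methods elided
    }
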
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java
index 52e85ab..44c0e9d 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerBean.java
@@ -1,6 +1,7 @@
package org.rhq.enterprise.server.storage;
import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
@@ -10,8 +11,11 @@ import javax.ejb.Stateless;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.persistence.EntityManager;
+import javax.persistence.EntityNotFoundException;
import javax.persistence.PersistenceContext;
+import com.datastax.driver.core.Host;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.joda.time.Hours;
@@ -186,20 +190,21 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
         }
     }
 
-    @Override
+    @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
     public void performAddNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) {
         try {
-            StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS,
-                StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult();
+            StorageNode storageNode = storageNodeManager.findStorageNodeByAddress(storageNodeAddress);
             if (storageNode.getOperationMode() == StorageNode.OperationMode.BOOTSTRAP) {
                 // TODO need to add support for HA deployments
                 // If multiple RHQ servers are running, they will all receive the event
                 // notification that the node is up and will all wind up calling this method.
-                storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.ADD_MAINTENANCE);
-                performAddNodeMaintenance(subjectManager.getOverlord(), storageNode);
-            } else {
-                log.info(storageNode + " has already been bootstrapped. Skipping add node maintenance.");
+                // We can probably handle this with optimistic locking, adding a version field
+                // to the StorageNode class. Then only one writer would succeed in updating
+                // the operation mode.
+                storageNode = storageNodeOperationsHandler.setMode(storageNode,
+                    StorageNode.OperationMode.ADD_MAINTENANCE);
+                storageNodeOperationsHandler.performAddMaintenance(subjectManager.getOverlord(), storageNode);
             }
         } catch (Exception e) {
             String msg = "Aborting storage node deployment due to unexpected error while performing add node " +
@@ -210,19 +215,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
     }
 
     @Override
+    @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
     public void performAddNodeMaintenance(Subject subject, StorageNode storageNode) {
         try {
-            List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE,
-                StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL)
-                .getResultList();
-            for (StorageNode node : clusterNodes) {
-                node.setMaintenancePending(true);
-            }
-            storageNode.setMaintenancePending(true);
-            clusterNodes.add(storageNode);
-            boolean runRepair = updateSchemaIfNecessary(clusterNodes.size() - 1, clusterNodes.size());
-            performAddNodeMaintenance(subject, storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST,
-                clusterNodes));
+            storageNodeOperationsHandler.performAddMaintenance(subject, storageNode);
         } catch (Exception e) {
             String msg = "Aborting storage node deployment due to unexpected error while performing add node " +
                 "maintenance.";
@@ -231,6 +227,21 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
         }
     }
 
+    @Override
+    public void performAddMaintenance(Subject subject, StorageNode storageNode) {
+        List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE,
+            StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL)
+            .getResultList();
+        for (StorageNode node : clusterNodes) {
+            node.setMaintenancePending(true);
+        }
+        storageNode.setMaintenancePending(true);
+        clusterNodes.add(storageNode);
+        boolean runRepair = updateSchemaIfNecessary(clusterNodes.size() - 1, clusterNodes.size());
+        performAddNodeMaintenance(subject, storageNode, runRepair, createPropertyListOfAddresses(SEEDS_LIST,
+            clusterNodes));
+    }
+
     private void performAddNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair,
         PropertyList seedsList) {
         if (log.isInfoEnabled()) {
@@ -246,18 +257,21 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
     }
 
     @Override
+    @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
     public void performRemoveNodeMaintenanceIfNecessary(InetAddress storageNodeAddress) {
         try {
-            StorageNode storageNode = entityManager.createNamedQuery(StorageNode.QUERY_FIND_BY_ADDRESS,
-                StorageNode.class).setParameter("address", storageNodeAddress.getHostAddress()).getSingleResult();
+            StorageNode storageNode = storageNodeManager.findStorageNodeByAddress(storageNodeAddress);
             if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) {
                 // TODO need to add support for HA deployments
                 // If multiple RHQ servers are running, they will all receive the event
                 // notification that the node is up and will all wind up calling this method.
+                // We can probably handle this with optimistic locking, adding a version field
+                // to the StorageNode class. Then only one writer would succeed in updating
+                // the operation mode.
                 storageNode = storageNodeOperationsHandler.setMode(storageNode, StorageNode.OperationMode.REMOVE_MAINTENANCE);
-                performRemoveNodeMaintenance(subjectManager.getOverlord(), storageNode);
+                storageNodeOperationsHandler.performRemoveMaintenance(subjectManager.getOverlord(), storageNode);
             } else {
                 log.info("Remove node maintenance has already been run for " + storageNode);
             }
@@ -270,17 +284,10 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
     }
 
     @Override
+    @TransactionAttribute(TransactionAttributeType.NOT_SUPPORTED)
     public void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode) {
         try {
-            List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE,
-                StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL)
-                .getResultList();
-            for (StorageNode node : clusterNodes) {
-                node.setMaintenancePending(true);
-            }
-            boolean runRepair = storageNode.isMaintenancePending();
-            performRemoveNodeMaintenance(subject, clusterNodes.get(0), runRepair,
-                createPropertyListOfAddresses(SEEDS_LIST, clusterNodes));
+            storageNodeOperationsHandler.performRemoveMaintenance(subject, storageNode);
         } catch (Exception e) {
             String msg = "Aborting undeployment due to unexpected error while performing remove node maintenance.";
             log.error(msg, e);
@@ -288,6 +295,19 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
         }
     }
 
+    @Override
+    public void performRemoveMaintenance(Subject subject, StorageNode storageNode) {
+        List<StorageNode> clusterNodes = entityManager.createNamedQuery(StorageNode.QUERY_FIND_ALL_BY_MODE,
+            StorageNode.class).setParameter("operationMode", StorageNode.OperationMode.NORMAL)
+            .getResultList();
+        for (StorageNode node : clusterNodes) {
+            node.setMaintenancePending(true);
+        }
+        boolean runRepair = storageNode.isMaintenancePending();
+        performRemoveNodeMaintenance(subject, clusterNodes.get(0), runRepair,
+            createPropertyListOfAddresses(SEEDS_LIST, clusterNodes));
+    }
+
     private void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode, boolean runRepair,
         PropertyList seedsList) {
         if (log.isInfoEnabled()) {
@@ -470,8 +490,31 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
                 deploymentOperationFailed(newStorageNode, resourceOperationHistory);
                 return;
             default:  // SUCCESS
-                // Nothing to do because we wait for the C* driver to notify us that the
-                // storage node has joined the cluster before we proceed with the work flow.
+                try {
+                    log.info("The prepare for bootstrap operation completed successfully for " + newStorageNode);
+                    // 30 seconds is more than a sufficient amount of time to wait for the
+                    // event notification from storage session especially considering that
+                    // nodes gossip every second.
+                    Thread.sleep(1000 * 30);
+                    entityManager.refresh(newStorageNode);
+                    if (newStorageNode.getOperationMode() == StorageNode.OperationMode.BOOTSTRAP) {
+                        InetAddress address = InetAddress.getByName(newStorageNode.getAddress());
+                        if (isPartOfCluster(address)) {
+                            log.warn("We have missed the event notification about " + newStorageNode + " joining the " +
+                                "cluster. The next phase of deployment will be started.");
+                            storageNodeOperationsHandler.performAddNodeMaintenanceIfNecessary(address);
+                        } else {
+                            newStorageNode.setErrorMessage("The prepare for bootstrap operation completed " +
+                                "successfully but it appears that " + newStorageNode + " is not yet part of the " +
+                                "ring.");
+                        }
+                    }
+                } catch (InterruptedException e) {
+                    throw new RuntimeException("Interrupted while waiting to verify that " + newStorageNode +
+                        " has joined the cluster", e);
+                } catch (UnknownHostException e) {
+                    throw new RuntimeException("Failed to parse address for " + newStorageNode, e);
+                }
         }
     }
@@ -630,6 +673,32 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
                 break;
             default:  // SUCCESS
                 log.info("Successfully decommissioned " + storageNode);
+                try {
+                    // 30 seconds is more than a sufficient amount of time to wait for the
+                    // event notification from storage session especially considering that
+                    // nodes gossip every second.
+                    Thread.sleep(1000 * 30);
+                    entityManager.refresh(storageNode);
+                    if (storageNode.getOperationMode() == StorageNode.OperationMode.DECOMMISSION) {
+                        InetAddress address = InetAddress.getByName(storageNode.getAddress());
+                        if (isPartOfCluster(address)) {
+                            storageNode.setErrorMessage("The decommission operation completed successfully but it " +
+                                "appears that " + storageNode + " is still part of ring.");
+                        } else {
+                            log.warn("We have missed the event notification about " + storageNode + " leaving the " +
+                                "cluster. The next phase of undeployment will be started.");
+                            storageNodeOperationsHandler.performRemoveNodeMaintenanceIfNecessary(address);
+                        }
+                    }
+                } catch (EntityNotFoundException e) {
+                    // We can hit this if the storage node entity has already been deleted
+                    // from the database. It can be ignored.
+                } catch (InterruptedException e) {
+                    throw new RuntimeException("Interrupted while waiting to verify that " + storageNode +
+                        " has been decommissioned and removed from the cluster", e);
+                } catch (UnknownHostException e) {
+                    throw new RuntimeException("Failed to parse address for " + storageNode, e);
+                }
         }
     }
@@ -900,4 +969,14 @@ public class StorageNodeOperationsHandlerBean implements StorageNodeOperationsHa
         return list;
     }
 
+    private boolean isPartOfCluster(InetAddress address) {
+        StorageSession session = storageClientManager.getSession();
+        for (Host host : session.getCluster().getMetadata().getAllHosts()) {
+            if (host.getAddress().equals(address)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
 }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java
index c04de18..b968709 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/storage/StorageNodeOperationsHandlerLocal.java
@@ -42,6 +42,8 @@ public interface StorageNodeOperationsHandlerLocal {
void performAddNodeMaintenance(Subject subject, StorageNode storageNode);
+ void performAddMaintenance(Subject subject, StorageNode storageNode);
+
void uninstall(Subject subject, StorageNode storageNode);
void detachFromResource(StorageNode storageNode);
@@ -52,6 +54,8 @@ public interface StorageNodeOperationsHandlerLocal {
void performRemoveNodeMaintenance(Subject subject, StorageNode storageNode);
+ void performRemoveMaintenance(Subject subject, StorageNode storageNode);
+
void runRepair(Subject subject, List<StorageNode> clusterNodes);
void handleRepair(ResourceOperationHistory operationHistory);
commit 4942a69aa71fbe2949361d2dc5b3a05a11583c16
Author: John Sanda <jsanda(a)redhat.com>
Date: Fri Aug 30 16:43:41 2013 -0400
initialize storage cluster settings at install time
The storage installer now also adds the gossip port to rhq-server.properties.
The server installer now initializes the cluster settings, namely the CQL and
gossip ports. This was previously done in server code when the first storage
node was imported into inventory. This simplifies things: we are able to remove
the conditional logic for initializing the cluster settings, and we no longer
need to store the CQL and gossip ports in the storage node's plugin
configuration.
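For reference, the server installer reads the two port properties from
rhq-server.properties and seeds the STORAGE_CQL_PORT and STORAGE_GOSSIP_PORT
rows of rhq_system_config from them (see persistStorageClusterSettingsIfNecessary
in the ServerInstallUtil diff below). A minimal illustration of the properties
involved; the port values shown here are only examples, the storage installer
writes whatever ports were actually configured:

    # written by the storage installer, read by the server installer
    rhq.storage.cql-port=9142
    rhq.storage.gossip-port=7100
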
diff --git a/modules/common/cassandra-installer/pom.xml b/modules/common/cassandra-installer/pom.xml
index 4c737fc..8f67575 100644
--- a/modules/common/cassandra-installer/pom.xml
+++ b/modules/common/cassandra-installer/pom.xml
@@ -101,6 +101,11 @@
<version>${project.version}</version>
</artifactItem>
<artifactItem>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>rhq-cassandra-util</artifactId>
+ <version>${project.version}</version>
+ </artifactItem>
+ <artifactItem>
<groupId>org.yaml</groupId>
<artifactId>snakeyaml</artifactId>
<version>${cassandra.snakeyaml.version}</version>
diff --git a/modules/core/dbutils/pom.xml b/modules/core/dbutils/pom.xml
index 44d73e8..7a33a06 100644
--- a/modules/core/dbutils/pom.xml
+++ b/modules/core/dbutils/pom.xml
@@ -17,7 +17,7 @@
   <description>Database schema setup, upgrade and other utilities</description>
<properties>
- <db.schema.version>2.139</db.schema.version>
+ <db.schema.version>2.140</db.schema.version>
<rhq.ds.type-mapping>${rhq.test.ds.type-mapping}</rhq.ds.type-mapping>
<rhq.ds.server-name>${rhq.test.ds.server-name}</rhq.ds.server-name>
<rhq.ds.db-name>${rhq.test.ds.db-name}</rhq.ds.db-name>
diff --git a/modules/core/dbutils/src/main/scripts/dbsetup/sysconfig-data.xml b/modules/core/dbutils/src/main/scripts/dbsetup/sysconfig-data.xml
index a225833..c6da7d2 100644
--- a/modules/core/dbutils/src/main/scripts/dbsetup/sysconfig-data.xml
+++ b/modules/core/dbutils/src/main/scripts/dbsetup/sysconfig-data.xml
@@ -142,6 +142,12 @@
<data ID="60" PROPERTY_KEY="STORAGE_AUTOMATIC_DEPLOYMENT"
PROPERTY_VALUE="true" DEFAULT_PROPERTY_VALUE="true"
FREAD_ONLY="TRUE"/>
+
+ <!-- STORAGE_CQL_PORT and STORAGE_GOSSIP_PORT will get updated by the installer
-->
+ <data ID="61" PROPERTY_KEY="STORAGE_CQL_PORT"
FREAD_ONLY="TRUE"/>
+
+ <data ID="62" PROPERTY_KEY="STORAGE_GOSSIP_PORT"
FREAD_ONLY="TRUE"/>
+
</table>
</dbsetup>
diff --git a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml
index 543f857..2a04ea4 100644
--- a/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml
+++ b/modules/core/dbutils/src/main/scripts/dbupgrade/db-upgrade.xml
@@ -2228,6 +2228,20 @@
</schema-directSQL>
</schemaSpec>
+    <schemaSpec version="2.140">
+      <schema-directSQL>
+        <statement desc="Adding default storage cql port">
+          INSERT INTO rhq_system_config (id, property_key, property_value, default_property_value, fread_only)
+          VALUES (61, 'STORAGE_CQL_PORT', null, null, true)
+        </statement>
+      </schema-directSQL>
+      <schema-directSQL>
+        <statement desc="Adding default storage gossip port">
+          INSERT INTO rhq_system_config (id, property_key, property_value, default_property_value, fread_only)
+          VALUES (62, 'STORAGE_GOSSIP_PORT', null, null, true)
+        </statement>
+      </schema-directSQL>
+    </schemaSpec>
</dbupgrade>
</target>
</project>
diff --git a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
index 2bc16a7..2bda549 100644
--- a/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
+++ b/modules/enterprise/server/appserver/src/main/scripts/rhq-container.build.xml
@@ -694,7 +694,8 @@ rhq.storage.nodes=
# Both properties are required.
#
rhq.storage.cql-port=${rhq.cassandra.native-transport-port}
-rhq.storage.gossip-port=${rhq.cassandra.storage-port}
+rhq.storage.gossip-port=${rhq.cassandra.storage.port}
+}
# If enabled data sent to and from storage nodes will be compressed using
# snappy (https://code.google.com/p/snappy) compression. Note that this is an
diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
index 03a34cb..caf8aa5 100644
--- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
+++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/InstallerServiceImpl.java
@@ -502,9 +502,9 @@ public class InstallerServiceImpl implements InstallerService {
         // ensure the server info is up to date and stored in the DB
         ServerInstallUtil.storeServerDetails(serverProperties, clearTextDbPassword, serverDetails);
-
         ServerInstallUtil.persistStorageNodesIfNecessary(serverProperties, clearTextDbPassword,
             parseNodeInformation(serverProperties));
+        ServerInstallUtil.persistStorageClusterSettingsIfNecessary(serverProperties, clearTextDbPassword);
     }
 
     @Override
diff --git a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java
index 8900d8a..49ecd60 100644
--- a/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java
+++ b/modules/enterprise/server/installer/src/main/java/org/rhq/enterprise/server/installer/ServerInstallUtil.java
@@ -918,7 +918,7 @@ public class ServerInstallUtil {
         Connection connection = null;
         Statement queryStatement = null;
         ResultSet resultSet = null;
-        PreparedStatement insertStatement = null;
+        PreparedStatement insertStorageNode = null;
 
        try {
            String dbUrl = serverProperties.get(ServerProperties.PROP_DATABASE_CONNECTION_URL);
@@ -941,21 +941,21 @@
             try {
                 LOG.info("Persisting to database new storage nodes for values specified in server configuration property [rhq.storage.nodes]");
-                insertStatement = connection.prepareStatement(
+                insertStorageNode = connection.prepareStatement(
                     "INSERT INTO rhq_storage_node (id, address, cql_port, operation_mode, ctime, mtime) " +
                     "VALUES (?, ?, ?, ?, ?, ?)"
                 );
                 int id = 1001;
                 for (StorageNode storageNode : storageNodes) {
-                    insertStatement.setInt(1, id);
-                    insertStatement.setString(2, storageNode.getAddress());
-                    insertStatement.setInt(3, storageNode.getCqlPort());
-                    insertStatement.setString(4, StorageNode.OperationMode.INSTALLED.toString());
-                    insertStatement.setLong(5, System.currentTimeMillis());
-                    insertStatement.setLong(6, System.currentTimeMillis());
-
-                    insertStatement.executeUpdate();
+                    insertStorageNode.setInt(1, id);
+                    insertStorageNode.setString(2, storageNode.getAddress());
+                    insertStorageNode.setInt(3, storageNode.getCqlPort());
+                    insertStorageNode.setString(4, StorageNode.OperationMode.INSTALLED.toString());
+                    insertStorageNode.setLong(5, System.currentTimeMillis());
+                    insertStorageNode.setLong(6, System.currentTimeMillis());
+
+                    insertStorageNode.executeUpdate();
                     id += 1;
                 }
@@ -974,7 +974,54 @@
             if (db != null) {
                 db.closeResultSet(resultSet);
                 db.closeStatement(queryStatement);
-                db.closeStatement(insertStatement);
+                db.closeStatement(insertStorageNode);
+                db.closeConnection(connection);
+            }
+        }
+    }
+
+    public static void persistStorageClusterSettingsIfNecessary(HashMap<String, String> serverProperties,
+        String password) throws Exception {
+        DatabaseType db = null;
+        Connection connection = null;
+        PreparedStatement updateClusterSetting = null;
+
+        try {
+            String dbUrl = serverProperties.get(ServerProperties.PROP_DATABASE_CONNECTION_URL);
+            String userName = serverProperties.get(ServerProperties.PROP_DATABASE_USERNAME);
+            connection = getDatabaseConnection(dbUrl, userName, password);
+            db = DatabaseTypeFactory.getDatabaseType(connection);
+
+            if (!(db instanceof PostgresqlDatabaseType || db instanceof OracleDatabaseType)) {
+                throw new IllegalArgumentException("Unknown database type, can't continue: " + db);
+            }
+
+            connection = getDatabaseConnection(dbUrl, userName, password);
+            connection.setAutoCommit(false);
+
+            updateClusterSetting = connection.prepareStatement(
+                "UPDATE rhq_system_config " +
+                "SET property_value = ?, default_property_value = ? " +
+                "WHERE property_key = ? AND property_value IS NULL AND default_property_value IS NULL");
+
+            updateClusterSetting.setString(1, serverProperties.get("rhq.storage.cql-port"));
+            updateClusterSetting.setString(2, serverProperties.get("rhq.storage.cql-port"));
+            updateClusterSetting.setString(3, "STORAGE_CQL_PORT");
+            updateClusterSetting.executeUpdate();
+
+            updateClusterSetting.setString(1, serverProperties.get("rhq.storage.gossip-port"));
+            updateClusterSetting.setString(2, serverProperties.get("rhq.storage.gossip-port"));
+            updateClusterSetting.setString(3, "STORAGE_GOSSIP_PORT");
+            updateClusterSetting.executeUpdate();
+
+            connection.commit();
+        } catch (SQLException e) {
+            LOG.error("Failed to initialize storage cluster settings. Transaction will be rolled back.", e);
+            connection.rollback();
+            throw e;
+        } finally {
+            if (db != null) {
+                db.closeStatement(updateClusterSetting);
                 db.closeConnection(connection);
             }
         }
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
index eda6ae1..4f84d44 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerBean.java
@@ -182,15 +182,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
             }
             storageNode.setResource(resource);
             storageNode.setOperationMode(OperationMode.NORMAL);
-            initClusterSettingsIfNecessary(pluginConfig);
         } else {
-            storageNode = createStorageNode(resource);
+            StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(
+                subjectManager.getOverlord());
+            storageNode = createStorageNode(resource, clusterSettings);
             if (log.isInfoEnabled()) {
                 log.info("Scheduling cluster maintenance to deploy " + storageNode + " into the storage cluster...");
             }
-            StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(
-                subjectManager.getOverlord());
             if (clusterSettings.getAutomaticDeployment()) {
                 log.info("Deploying " + storageNode);
                 deployStorageNode(subjectManager.getOverlord(), storageNode);
@@ -205,39 +204,14 @@ public class StorageNodeManagerBean implements StorageNodeManagerLocal, StorageN
         }
     }
 
-    private void initClusterSettingsIfNecessary(Configuration pluginConfig) {
-        // TODO Need to handle non-repeatable reads here (probably a post 4.9 task)
-        //
-        // If a user deploys two storage nodes prior to installing the RHQ server, then we
-        // could end up in this method concurrently for both storage nodes. The settings
-        // would be committed for each node with the second commit winning. The problem is
-        // that is the cluster settings differ for the two nodes, it will be silently
-        // ignored. This scenario will happen infrequently so it should be sufficient to
-        // resolve it with optimistic locking. The second writer should fail with an
-        // OptimisticLockException.
-
-        log.info("Initializing storage cluster settings");
-
-        StorageClusterSettings clusterSettings = storageClusterSettingsManager.getClusterSettings(subjectManager
-            .getOverlord());
-        if (clusterSettings != null) {
-            log.info("Cluster settings have already been set. Skipping initialization.");
-            return;
-        }
-        clusterSettings = new StorageClusterSettings();
-        clusterSettings.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY)));
-        clusterSettings.setGossipPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_GOSSIP_PORT_PROPERTY)));
-        storageClusterSettingsManager.setClusterSettings(subjectManager.getOverlord(), clusterSettings);
-    }
-
     @Override
     @TransactionAttribute(TransactionAttributeType.REQUIRES_NEW)
-    public StorageNode createStorageNode(Resource resource) {
+    public StorageNode createStorageNode(Resource resource, StorageClusterSettings clusterSettings) {
         Configuration pluginConfig = resource.getPluginConfiguration();
         StorageNode storageNode = new StorageNode();
         storageNode.setAddress(pluginConfig.getSimpleValue(RHQ_STORAGE_ADDRESS_PROPERTY));
-        storageNode.setCqlPort(Integer.parseInt(pluginConfig.getSimpleValue(RHQ_STORAGE_CQL_PORT_PROPERTY)));
+        storageNode.setCqlPort(clusterSettings.getCqlPort());
         storageNode.setResource(resource);
         storageNode.setOperationMode(OperationMode.INSTALLED);
diff --git a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
index f740a27..60091ee 100644
--- a/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
+++ b/modules/enterprise/server/jar/src/main/java/org/rhq/enterprise/server/cloud/StorageNodeManagerLocal.java
@@ -26,6 +26,7 @@ import javax.ejb.Local;
import org.rhq.core.domain.alert.Alert;
import org.rhq.core.domain.auth.Subject;
+import org.rhq.core.domain.cloud.StorageClusterSettings;
import org.rhq.core.domain.cloud.StorageNode;
import org.rhq.core.domain.cloud.StorageNodeConfigurationComposite;
import org.rhq.core.domain.cloud.StorageNodeLoadComposite;
@@ -179,7 +180,7 @@ public interface StorageNodeManagerLocal {
     Map<String, List<MeasurementDataNumericHighLowComposite>> findStorageNodeLoadDataForLast(Subject subject,
         StorageNode node, long beginTime, long endTime, int numPoints);
 
-    StorageNode createStorageNode(Resource resource);
+    StorageNode createStorageNode(Resource resource, StorageClusterSettings clusterSettings);
void deployStorageNode(Subject subject, StorageNode storageNode);
diff --git a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
index f6bd888..c823a98 100644
--- a/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
+++ b/modules/plugins/cassandra/src/main/java/org/rhq/plugins/cassandra/CassandraNodeDiscoveryComponent.java
@@ -51,8 +51,6 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
     protected static final String HOST_PROPERTY = "host";
     protected static final String CLUSTER_NAME_PROPERTY = "clusterName";
-    protected static final String NATIVE_TRANSPORT_PORT_PROPERTY = "nativeTransportPort";
-    protected static final String STORAGE_PORT_PROPERTY = "storagePort";
     protected static final String JMX_PORT_PROPERTY = "jmxPort";
     protected static final String AUTHENTICATOR_PROPERTY = "authenticator";
     protected static final String YAML_PROPERTY = "yamlConfiguration";
@@ -156,9 +154,6 @@ public class CassandraNodeDiscoveryComponent extends JMXDiscoveryComponent {
             pluginConfig.put(new PropertySimple(YAML_PROPERTY, yamlConfigurationFile.getAbsolutePath()));
             pluginConfig.put(new PropertySimple(CLUSTER_NAME_PROPERTY, yamlEditor.getClusterName()));
             pluginConfig.put(new PropertySimple(HOST_PROPERTY, yamlEditor.getListenAddress()));
-            pluginConfig.put(new PropertySimple(NATIVE_TRANSPORT_PORT_PROPERTY,
-                yamlEditor.getNativeTransportPort()));
-            pluginConfig.put(new PropertySimple(STORAGE_PORT_PROPERTY, yamlEditor.getStoragePort()));
             pluginConfig.put(new PropertySimple(AUTHENTICATOR_PROPERTY, yamlEditor.getAuthenticator()));
         }
     }
diff --git a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
index 13931d1..1ae564a 100644
--- a/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
+++ b/modules/plugins/rhq-storage/src/main/resources/META-INF/rhq-plugin.xml
@@ -29,10 +29,6 @@
                            description="The type used to establish the EMS connection to the RHQ Storage Node."/>
         <c:simple-property name="baseDir" displayName="Base Directory" description="The base directory from which the Cassandra Daemon was launched." required="false"/>
         <c:simple-property name="yamlConfiguration" displayName="YAML Configuration File" description="YAML Configuration File"/>
-        <c:simple-property name="nativeTransportPort" default="9142" type="integer" readOnly="true"
-                           description="The port on which the Storage Node listens for CQL client connections."/>
-        <c:simple-property name="storagePort" default="7100" type="integer" readOnly="true"
-                           description="The port on which the Storage Node listens for internode communication."/>
         <c:simple-property name="jmxPort" description="The JMX port for the RHQ Storage Node" default="7299" type="integer"
                            readOnly="true"/>
         <c:simple-property name="host" description="The host on which the RHQ Storage Node listens to CQL client connections"
                            default="localhost"/>
         <c:simple-property name="clusterName" description="Cluster name" default="localhost"/>